--- a/make/autoconf/spec.gmk.in Fri Mar 23 11:14:43 2018 -0700
+++ b/make/autoconf/spec.gmk.in Tue Mar 20 04:36:44 2018 +0100
@@ -512,6 +512,8 @@
NM:=@NM@
GNM:=@GNM@
STRIP:=@STRIP@
+OBJDUMP:=@OBJDUMP@
+CXXFILT:=@CXXFILT@
LIPO:=@LIPO@
INSTALL_NAME_TOOL:=@INSTALL_NAME_TOOL@
--- a/make/autoconf/toolchain.m4 Fri Mar 23 11:14:43 2018 -0700
+++ b/make/autoconf/toolchain.m4 Tue Mar 20 04:36:44 2018 +0100
@@ -863,6 +863,14 @@
# bails if argument is missing.
BASIC_FIXUP_EXECUTABLE(OBJDUMP)
fi
+
+ case $TOOLCHAIN_TYPE in
+ gcc|clang|solstudio)
+ BASIC_CHECK_TOOLS(CXXFILT, [c++filt])
+ BASIC_CHECK_NONEMPTY(CXXFILT)
+ BASIC_FIXUP_EXECUTABLE(CXXFILT)
+ ;;
+ esac
])
# Setup the build tools (i.e., the compiler and linker used to build programs
--- a/make/conf/jib-profiles.js Fri Mar 23 11:14:43 2018 -0700
+++ b/make/conf/jib-profiles.js Tue Mar 20 04:36:44 2018 +0100
@@ -816,7 +816,7 @@
var getJibProfilesDependencies = function (input, common) {
var devkit_platform_revisions = {
- linux_x64: "gcc4.9.2-OEL6.4+1.2",
+ linux_x64: "gcc4.9.2-OEL6.4+1.3",
macosx_x64: "Xcode6.3-MacOSX10.9+1.0",
solaris_x64: "SS12u4-Solaris11u1+1.0",
solaris_sparcv9: "SS12u4-Solaris11u1+1.1",
--- a/make/devkit/Tools.gmk Fri Mar 23 11:14:43 2018 -0700
+++ b/make/devkit/Tools.gmk Tue Mar 20 04:36:44 2018 +0100
@@ -575,8 +575,8 @@
ln -s $(TARGET)-$* $@
missing-links := $(addprefix $(PREFIX)/bin/, \
- addr2line ar as c++ c++filt elfedit g++ gcc gprof ld nm objcopy ranlib readelf \
- size strings strip ld.bfd ld.gold dtrace)
+ addr2line ar as c++ c++filt dwp elfedit g++ gcc gcc-$(GCC_VER) gprof ld ld.bfd \
+ ld.gold nm objcopy objdump ranlib readelf size strings strip)
endif
##########################################################################################
--- a/make/hotspot/lib/CompileJvm.gmk Fri Mar 23 11:14:43 2018 -0700
+++ b/make/hotspot/lib/CompileJvm.gmk Tue Mar 20 04:36:44 2018 +0100
@@ -269,3 +269,62 @@
include lib/JvmMapfile.gmk
TARGETS += $(BUILD_LIBJVM)
+
+################################################################################
+# Hotspot disallows the use of global operators 'new' and 'delete'. This
+# build-time check helps enforce this requirement. If you trigger this check
+# and the reference is not obvious from the source, GNU objdump can be used to
+# help find the reference if compiled with GCC:
+#
+# objdump -lrdSC <path/to/file.o>
+#
+# -C demangle
+# -d disassemble
+# -r print relocation entries, interspersed with the disassembly
+# -S print source code, intermixed with disassembly
+# -l include filenames and line numbers
+#
+# Search the output for the operator(s) of interest, to see where they are
+# referenced.
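+#
+# As a rough illustration (hypothetical object path), the same nm/c++filt
+# pipeline that the check below runs can be used by hand:
+#
+#   nm path/to/file.o | c++filt | egrep '[^:]operator (new|delete)'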
+
+ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang solstudio), )
+
+ DEMANGLED_REGEXP := [^:]operator (new|delete)
+
+ # Running c++filt to find offending symbols in all files is too expensive,
+ # especially on Solaris, so use mangled names when looking for symbols.
+ # Save the demangling for when something is actually found.
+ ifeq ($(TOOLCHAIN_TYPE), solstudio)
+ MANGLED_SYMS := \
+ __1c2n6FL_pv_ \
+ __1c2N6FL_pv_ \
+ __1c2k6Fpv_v_ \
+ __1c2K6Fpv_v_ \
+ #
+ UNDEF_PATTERN := UNDEF
+ else
+ MANGLED_SYMS := \
+ _ZdaPv \
+ _ZdlPv \
+ _Znam \
+ _Znwm \
+ #
+ UNDEF_PATTERN := ' U '
+ endif
+
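+  # For reference, the gcc/clang (Itanium ABI) symbols above demangle to:
+  #   _Znwm  -> operator new(unsigned long)
+  #   _Znam  -> operator new[](unsigned long)
+  #   _ZdlPv -> operator delete(void*)
+  #   _ZdaPv -> operator delete[](void*)
+  # Solaris Studio uses its own mangling for the same four operators.
+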
+ define SetupOperatorNewDeleteCheck
+ $1.op_check: $1
+ if [ -n "`$(NM) $$< | $(GREP) $(addprefix -e , $(MANGLED_SYMS)) \
+ | $(GREP) $(UNDEF_PATTERN)`" ]; then \
+ $(ECHO) "$$<: Error: Use of global operators new and delete is not allowed in Hotspot:"; \
+ $(NM) $$< | $(CXXFILT) | $(EGREP) '$(DEMANGLED_REGEXP)' | $(GREP) $(UNDEF_PATTERN); \
+ $(ECHO) "See: $(TOPDIR)/make/hotspot/lib/CompileJvm.gmk"; \
+ exit 1; \
+ fi
+ $(TOUCH) $$@
+
+ TARGETS += $1.op_check
+ endef
+
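+  # For one (hypothetical) object file, the template above expands to roughly:
+  #
+  #   foo.o.op_check: foo.o
+  #       <nm | grep for MANGLED_SYMS; print demangled hits and fail if found>
+  #       touch foo.o.op_check
+  #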
+ $(foreach o, $(BUILD_LIBJVM_ALL_OBJS), $(eval $(call SetupOperatorNewDeleteCheck,$o)))
+endif
--- a/make/hotspot/lib/JvmOverrideFiles.gmk Fri Mar 23 11:14:43 2018 -0700
+++ b/make/hotspot/lib/JvmOverrideFiles.gmk Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
ifeq ($(TOOLCHAIN_TYPE), gcc)
BUILD_LIBJVM_vmStructs.cpp_CXXFLAGS := -fno-var-tracking-assignments -O0
BUILD_LIBJVM_jvmciCompilerToVM.cpp_CXXFLAGS := -fno-var-tracking-assignments
+ BUILD_LIBJVM_jvmciCompilerToVMInit.cpp_CXXFLAGS := -fno-var-tracking-assignments
BUILD_LIBJVM_assembler_x86.cpp_CXXFLAGS := -Wno-maybe-uninitialized
BUILD_LIBJVM_interp_masm_x86.cpp_CXXFLAGS := -Wno-uninitialized
endif
@@ -111,6 +112,9 @@
endif
+  # Workaround for jvmciCompilerToVMInit.cpp long compilation time
+ BUILD_LIBJVM_jvmciCompilerToVMInit.cpp_OPTIMIZATION := NONE
+
else ifeq ($(OPENJDK_TARGET_OS), macosx)
# The copied fdlibm routines in these files must not be optimized
BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := $(CXX_O_FLAG_NONE)
@@ -175,5 +179,10 @@
os_windows.cpp \
os_windows_x86.cpp \
osThread_windows.cpp \
+ jvmciCompilerToVMInit.cpp \
#
+
+  # Workaround for jvmciCompilerToVMInit.cpp long compilation time
+ BUILD_LIBJVM_jvmciCompilerToVMInit.cpp_OPTIMIZATION := NONE
+
endif
--- a/make/test/JtregNativeHotspot.gmk Fri Mar 23 11:14:43 2018 -0700
+++ b/make/test/JtregNativeHotspot.gmk Tue Mar 20 04:36:44 2018 +0100
@@ -57,11 +57,12 @@
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rw := -z noexecstack
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rwx := -z execstack
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeinvoke := -ljvm -lpthread
+ BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exestack-gap := -ljvm -lpthread
BUILD_TEST_exeinvoke_exeinvoke.c_OPTIMIZATION := NONE
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeFPRegs := -ldl
else
BUILD_HOTSPOT_JTREG_EXCLUDE += libtest-rw.c libtest-rwx.c libTestJNI.c \
- exeinvoke.c
+ exeinvoke.c exestack-gap.c
endif
ifeq ($(OPENJDK_TARGET_OS), windows)
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -37,7 +37,7 @@
// secondary complication -- not all code employing C call convention
// executes as x86 code though -- we generate some of it
-class Argument VALUE_OBJ_CLASS_SPEC {
+class Argument {
public:
enum {
n_int_register_parameters_c = 8, // r0, r1, ... r7 (c_rarg0, c_rarg1, ...)
@@ -338,7 +338,7 @@
static inline unsigned long uabs(int n) { return uabs((unsigned int)n); }
// Addressing modes
-class Address VALUE_OBJ_CLASS_SPEC {
+class Address {
public:
enum mode { no_mode, base_plus_offset, pre, post, pcrel,
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -33,7 +33,7 @@
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -44,8 +44,8 @@
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -1107,7 +1107,7 @@
// arg0 : previous value of memory
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
__ mov(r0, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
__ should_not_reach_here();
@@ -1162,6 +1162,14 @@
{
StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
+ __ mov(r0, (int)id);
+ __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
+ __ should_not_reach_here();
+ break;
+ }
+
// arg0: store_address
Address store_addr(rfp, 2*BytesPerWord);
--- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -213,10 +213,6 @@
return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}
-inline int frame::pd_oop_map_offset_adjustment() const {
- return 0;
-}
-
inline int frame::interpreter_frame_monitor_size() {
return BasicObjectLock::size();
}
@@ -243,24 +239,6 @@
// Compiled frames
-inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
- return (nof_args - local_index + (local_index < nof_args ? 1: -1));
-}
-
-inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
- return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors);
-}
-
-inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
- return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1);
-}
-
-inline bool frame::volatile_across_calls(Register reg) {
- return true;
-}
-
-
-
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(r0->as_VMReg());
guarantee(result_adr != NULL, "bad register save location");
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,7 +27,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -48,9 +48,9 @@
#include "runtime/thread.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -49,7 +49,7 @@
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
-class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+class NativeInstruction {
friend class Relocation;
friend bool is_NativeCallTrampolineStub_at(address);
public:
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -633,7 +633,7 @@
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
__ push(saved_regs, sp);
@@ -680,7 +680,7 @@
assert_different_registers(start, end, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
__ push(saved_regs, sp);
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -293,7 +293,8 @@
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_AbstractMethodError));
+ InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
+ rmethod);
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,7 +29,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
@@ -148,7 +148,7 @@
assert(val == noreg || val == r0, "parameter is just for looks");
switch (barrier) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
// flatten object address if needed
if (obj.index() == noreg && obj.offset() == 0) {
@@ -3440,6 +3440,8 @@
Label no_such_interface, no_such_method;
+ // Preserve method for throw_AbstractMethodErrorVerbose.
+ __ mov(r16, rmethod);
// Receiver subtype check against REFC.
// Superklass in r0. Subklass in r3. Blows rscratch2, r13
__ lookup_interface_method(// inputs: rec. class, interface, itable index
@@ -3460,8 +3462,10 @@
__ subw(rmethod, rmethod, Method::itable_index_max);
__ negw(rmethod, rmethod);
+ // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
+ __ mov(rlocals, r3);
__ lookup_interface_method(// inputs: rec. class, interface, itable index
- r3, r0, rmethod,
+ rlocals, r0, rmethod,
// outputs: method, scan temp. reg
rmethod, r13,
no_such_interface);
@@ -3490,7 +3494,8 @@
// throw exception
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ // Pass arguments for generating a verbose error message.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose), r3, r16);
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
@@ -3498,8 +3503,9 @@
// throw exception
__ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
+ // Pass arguments for generating a verbose error message.
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_IncompatibleClassChangeError));
+ InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose), r3, r0);
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return;
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -201,7 +201,12 @@
__ br(rscratch1);
__ bind(L_no_such_interface);
- __ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+  // Handle IncompatibleClassChangeError in itable stubs with a more
+  // detailed error message: we force re-resolution of the call site by
+  // jumping to the "handle wrong method" stub, and so let the interpreter
+  // runtime do all the dirty work.
+ __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ flush();
--- a/src/hotspot/cpu/arm/assembler_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/assembler_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,8 @@
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/arm/assembler_arm.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/assembler_arm.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@
// ARM Addressing Modes 2 and 3 - Load and store
-class Address VALUE_OBJ_CLASS_SPEC {
+class Address {
private:
Register _base;
Register _index;
@@ -334,7 +334,7 @@
};
#ifdef COMPILER2
-class VFP VALUE_OBJ_CLASS_SPEC {
+class VFP {
// Helper classes to detect whether a floating point constant can be
// encoded in a fconstd or fconsts instruction
// The conversion from the imm8, 8 bit constant, to the floating
--- a/src/hotspot/cpu/arm/assembler_arm_32.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/assembler_arm_32.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,8 @@
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/arm/assembler_arm_32.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/assembler_arm_32.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define CPU_ARM_VM_ASSEMBLER_ARM_32_HPP
// ARM Addressing Mode 1 - Data processing operands
-class AsmOperand VALUE_OBJ_CLASS_SPEC {
+class AsmOperand {
private:
int _encoding;
@@ -99,7 +99,7 @@
// ARM Addressing Mode 4 - Load and store multiple
-class RegisterSet VALUE_OBJ_CLASS_SPEC {
+class RegisterSet {
private:
int _encoding;
@@ -155,7 +155,7 @@
#endif
// ARM Addressing Mode 5 - Load and store multiple VFP registers
-class FloatRegisterSet VALUE_OBJ_CLASS_SPEC {
+class FloatRegisterSet {
private:
int _encoding;
--- a/src/hotspot/cpu/arm/assembler_arm_64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/assembler_arm_64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,8 +43,8 @@
#include "utilities/hashtable.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/arm/assembler_arm_64.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/assembler_arm_64.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
};
// Shifted register operand for data processing instructions.
-class AsmOperand VALUE_OBJ_CLASS_SPEC {
+class AsmOperand {
private:
Register _reg;
AsmShift _shift;
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,8 +43,8 @@
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Note: Rtemp usage in this file should not impact C2 and should be
@@ -540,6 +540,14 @@
__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
+ __ mov(R0, (int)id);
+ __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
+ __ should_not_reach_here();
+ break;
+ }
+
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
__ raw_push(R0, R1);
@@ -612,6 +620,14 @@
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
+ __ mov(R0, (int)id);
+ __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
+ __ should_not_reach_here();
+ break;
+ }
+
Label done;
Label recheck;
Label runtime;
--- a/src/hotspot/cpu/arm/frame_arm.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/frame_arm.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -230,10 +230,6 @@
// Compiled frames
-inline bool frame::volatile_across_calls(Register reg) {
- return true;
-}
-
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop*) map->location(R0->as_VMReg());
guarantee(result_adr != NULL, "bad register save location");
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,8 +43,8 @@
#include "runtime/sharedRuntime.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -44,9 +44,9 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
// Introduced AddressLiteral and its subclasses to ease portability from
// x86 and avoid relocation issues
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+class AddressLiteral {
RelocationHolder _rspec;
// Typically we use AddressLiterals we want to use their rval
// However in some situations we want the lval (effect address) of the item.
@@ -1394,7 +1394,7 @@
// The purpose of this class is to build several code fragments of the same size
// in order to allow fast table branch.
-class FixedSizeCodeBlock VALUE_OBJ_CLASS_SPEC {
+class FixedSizeCodeBlock {
public:
FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
~FixedSizeCodeBlock();
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
// back-end extensions or the actual instructions size.
class NativeInstruction;
-class RawNativeInstruction VALUE_OBJ_CLASS_SPEC {
+class RawNativeInstruction {
public:
enum ARM_specific {
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
// back-end extensions or the actual instructions size.
class NativeInstruction;
-class RawNativeInstruction VALUE_OBJ_CLASS_SPEC {
+class RawNativeInstruction {
public:
enum ARM_specific {
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -2870,7 +2870,7 @@
void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
assert( addr->encoding() < callee_saved_regs, "addr must be saved");
assert(count->encoding() < callee_saved_regs, "count must be saved");
@@ -2932,7 +2932,7 @@
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
BLOCK_COMMENT("G1PostBarrier");
if (addr != R0) {
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -28,7 +28,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/cpCache.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
@@ -193,7 +193,7 @@
assert_different_registers(obj.base(), new_val, tmp1, tmp2, tmp3, noreg);
switch (barrier) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
// flatten object address if needed
assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -158,8 +158,13 @@
__ bind(L_no_such_interface);
- assert(StubRoutines::throw_IncompatibleClassChangeError_entry() != NULL, "check initialization order");
- __ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp);
+  // Handle IncompatibleClassChangeError in itable stubs with a more
+  // detailed error message: we force re-resolution of the call site by
+  // jumping to the "handle wrong method" stub, and so let the interpreter
+  // runtime do all the dirty work.
+ assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
+ __ jump(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type, Rtemp);
masm->flush();
--- a/src/hotspot/cpu/ppc/assembler_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -38,8 +38,8 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,7 +31,7 @@
// Address is an abstraction used to represent a memory location
// as used in assembler instructions.
// PPC instructions grok either baseReg + indexReg or baseReg + disp.
-class Address VALUE_OBJ_CLASS_SPEC {
+class Address {
private:
Register _base; // Base register.
Register _index; // Index register.
@@ -64,7 +64,7 @@
bool is_const() const { return _base == noreg && _index == noreg; }
};
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+class AddressLiteral {
private:
address _address;
RelocationHolder _rspec;
@@ -117,7 +117,7 @@
// with the PPC Application Binary Interface, or ABI. This is
// often referred to as the native or C calling convention.
-class Argument VALUE_OBJ_CLASS_SPEC {
+class Argument {
private:
int _number; // The number of the argument.
public:
@@ -153,7 +153,7 @@
#if !defined(ABI_ELFv2)
// A ppc64 function descriptor.
-struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
+struct FunctionDescriptor {
private:
address _entry;
address _toc;
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -34,7 +34,7 @@
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -289,14 +289,6 @@
}
-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
- LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
- LIR_Opr tmp = FrameMap::R0_opr;
- __ load(new LIR_Address(base, disp, type), tmp, info);
- __ cmp(condition, reg, tmp);
-}
-
-
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
assert(left != result, "should be different registers");
if (is_power_of_2(c + 1)) {
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -83,13 +83,6 @@
}
-void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
- Unimplemented(); // Currently unused.
- //if (C1Breakpoint) illtrap();
- //inline_cache_check(receiver, ic_klass);
-}
-
-
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint) illtrap();
// build frame
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,8 +43,8 @@
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@@ -711,7 +711,7 @@
case g1_pre_barrier_slow_id:
{
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
goto unimplemented_entry;
}
@@ -788,7 +788,7 @@
case g1_post_barrier_slow_id:
{
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
goto unimplemented_entry;
}
--- a/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -28,7 +28,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,9 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
@@ -3171,8 +3171,8 @@
Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
- G1SATBCardTableLoggingModRefBS* bs =
- barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+ G1BarrierSet* bs =
+ barrier_set_cast<G1BarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = bs->card_table();
// Does store cross heap regions?
@@ -4451,561 +4451,304 @@
* @param table register pointing to CRC table
* @param constants register pointing to CRC table for 128-bit aligned memory
* @param barretConstants register pointing to table for barrett reduction
- * @param t0 volatile register
- * @param t1 volatile register
- * @param t2 volatile register
- * @param t3 volatile register
+ * @param t0-t4 temp registers
*/
-void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
- Register constants, Register barretConstants,
- Register t0, Register t1, Register t2, Register t3, Register t4,
- bool invertCRC) {
+void MacroAssembler::kernel_crc32_1word_vpmsum(Register crc, Register buf, Register len, Register table,
+ Register constants, Register barretConstants,
+ Register t0, Register t1, Register t2, Register t3, Register t4,
+ bool invertCRC) {
assert_different_registers(crc, buf, len, table);
- Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
-
- Register prealign = t0;
- Register postalign = t0;
-
- BLOCK_COMMENT("kernel_crc32_1word_vpmsumb {");
-
- // 1. use kernel_crc32_1word for shorter than 384bit
+ Label L_alignedHead, L_tail;
+
+ BLOCK_COMMENT("kernel_crc32_1word_vpmsum {");
+
+ // 1. ~c
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
+
+ // 2. use kernel_crc32_1word for short len
clrldi(len, len, 32);
- cmpdi(CCR0, len, 384);
- bge(CCR0, L_start);
-
- Register tc0 = t4;
- Register tc1 = constants;
- Register tc2 = barretConstants;
- kernel_crc32_1word(crc, buf, len, table,t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
- b(L_end);
-
- BIND(L_start);
-
- // 2. ~c
- if (invertCRC) {
- nand(crc, crc, crc); // 1s complement of crc
- }
-
- // 3. calculate from 0 to first 128bit-aligned address
- clrldi_(prealign, buf, 57);
- beq(CCR0, L_alignedHead);
-
- subfic(prealign, prealign, 128);
-
- subf(len, prealign, len);
- update_byteLoop_crc32(crc, buf, prealign, table, t2, false);
-
- // 4. calculate from first 128bit-aligned address to last 128bit-aligned address
- BIND(L_alignedHead);
-
- clrldi(postalign, len, 57);
- subf(len, postalign, len);
-
- // len must be more than 256bit
- kernel_crc32_1word_aligned(crc, buf, len, constants, barretConstants, t1, t2, t3);
-
- // 5. calculate remaining
- cmpdi(CCR0, postalign, 0);
- beq(CCR0, L_tail);
-
- update_byteLoop_crc32(crc, buf, postalign, table, t2, false);
-
- BIND(L_tail);
-
- // 6. ~c
- if (invertCRC) {
- nand(crc, crc, crc); // 1s complement of crc
- }
-
- BIND(L_end);
-
- BLOCK_COMMENT("} kernel_crc32_1word_vpmsumb");
+ cmpdi(CCR0, len, 512);
+ blt(CCR0, L_tail);
+
+ // 3. calculate from 0 to first aligned address
+ const int alignment = 16;
+ Register prealign = t0;
+
+ andi_(prealign, buf, alignment - 1);
+ beq(CCR0, L_alignedHead);
+ subfic(prealign, prealign, alignment);
+
+ subf(len, prealign, len);
+ update_byteLoop_crc32(crc, buf, prealign, table, t2, false);
+
+ // 4. calculate from first aligned address as far as possible
+ BIND(L_alignedHead);
+ kernel_crc32_1word_aligned(crc, buf, len, constants, barretConstants, t0, t1, t2, t3, t4);
+
+ // 5. remaining bytes
+ BIND(L_tail);
+ Register tc0 = t4;
+ Register tc1 = constants;
+ Register tc2 = barretConstants;
+ kernel_crc32_1word(crc, buf, len, table, t0, t1, t2, t3, tc0, tc1, tc2, table, false);
+
+ // 6. ~c
+ if (invertCRC) {
+ nand(crc, crc, crc); // 1s complement of crc
+ }
+
+ BLOCK_COMMENT("} kernel_crc32_1word_vpmsum");
}
/**
* @param crc register containing existing CRC (32-bit)
* @param buf register pointing to input byte buffer (byte*)
- * @param len register containing number of bytes
+ * @param len register containing number of bytes (will get updated to remaining bytes)
* @param constants register pointing to CRC table for 128-bit aligned memory
* @param barretConstants register pointing to table for barrett reduction
- * @param t0 volatile register
- * @param t1 volatile register
- * @param t2 volatile register
+ * @param t0-t4 temp registers
+ * Precondition: len should be >= 512. Otherwise, nothing will be done.
*/
void MacroAssembler::kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
- Register constants, Register barretConstants, Register t0, Register t1, Register t2) {
- Label L_mainLoop, L_tail, L_alignTail, L_barrett_reduction, L_end, L_first_warm_up_done, L_first_cool_down, L_second_cool_down, L_XOR, L_test;
- Label L_lv0, L_lv1, L_lv2, L_lv3, L_lv4, L_lv5, L_lv6, L_lv7, L_lv8, L_lv9, L_lv10, L_lv11, L_lv12, L_lv13, L_lv14, L_lv15;
- Label L_1, L_2, L_3, L_4;
-
- Register rLoaded = t0;
- Register rTmp1 = t1;
- Register rTmp2 = t2;
- Register off16 = R22;
- Register off32 = R23;
- Register off48 = R24;
- Register off64 = R25;
- Register off80 = R26;
- Register off96 = R27;
- Register off112 = R28;
- Register rIdx = R29;
- Register rMax = R30;
- Register constantsPos = R31;
-
- VectorRegister mask_32bit = VR24;
- VectorRegister mask_64bit = VR25;
- VectorRegister zeroes = VR26;
- VectorRegister const1 = VR27;
- VectorRegister const2 = VR28;
+ Register constants, Register barretConstants,
+ Register t0, Register t1, Register t2, Register t3, Register t4) {
// Save non-volatile vector registers (frameless).
- Register offset = t1; int offsetInt = 0;
- offsetInt -= 16; li(offset, -16); stvx(VR20, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR21, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR22, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR23, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR24, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR25, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR26, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR27, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); stvx(VR28, offset, R1_SP);
- offsetInt -= 8; std(R22, offsetInt, R1_SP);
- offsetInt -= 8; std(R23, offsetInt, R1_SP);
- offsetInt -= 8; std(R24, offsetInt, R1_SP);
- offsetInt -= 8; std(R25, offsetInt, R1_SP);
- offsetInt -= 8; std(R26, offsetInt, R1_SP);
- offsetInt -= 8; std(R27, offsetInt, R1_SP);
- offsetInt -= 8; std(R28, offsetInt, R1_SP);
- offsetInt -= 8; std(R29, offsetInt, R1_SP);
- offsetInt -= 8; std(R30, offsetInt, R1_SP);
- offsetInt -= 8; std(R31, offsetInt, R1_SP);
-
- // Set constants
- li(off16, 16);
- li(off32, 32);
- li(off48, 48);
- li(off64, 64);
- li(off80, 80);
- li(off96, 96);
- li(off112, 112);
-
- clrldi(crc, crc, 32);
-
- vxor(zeroes, zeroes, zeroes);
- vspltisw(VR0, -1);
-
- vsldoi(mask_32bit, zeroes, VR0, 4);
- vsldoi(mask_64bit, zeroes, VR0, 8);
-
- // Get the initial value into v8
- vxor(VR8, VR8, VR8);
- mtvrd(VR8, crc);
- vsldoi(VR8, zeroes, VR8, 8); // shift into bottom 32 bits
-
- li (rLoaded, 0);
-
- rldicr(rIdx, len, 0, 56);
-
- {
- BIND(L_1);
- // Checksum in blocks of MAX_SIZE (32768)
- lis(rMax, 0);
- ori(rMax, rMax, 32768);
- mr(rTmp2, rMax);
- cmpd(CCR0, rIdx, rMax);
- bgt(CCR0, L_2);
- mr(rMax, rIdx);
-
- BIND(L_2);
- subf(rIdx, rMax, rIdx);
-
- // our main loop does 128 bytes at a time
- srdi(rMax, rMax, 7);
-
- /*
- * Work out the offset into the constants table to start at. Each
- * constant is 16 bytes, and it is used against 128 bytes of input
- * data - 128 / 16 = 8
- */
- sldi(rTmp1, rMax, 4);
- srdi(rTmp2, rTmp2, 3);
- subf(rTmp1, rTmp1, rTmp2);
-
- // We reduce our final 128 bytes in a separate step
- addi(rMax, rMax, -1);
- mtctr(rMax);
-
- // Find the start of our constants
- add(constantsPos, constants, rTmp1);
-
- // zero VR0-v7 which will contain our checksums
- vxor(VR0, VR0, VR0);
- vxor(VR1, VR1, VR1);
- vxor(VR2, VR2, VR2);
- vxor(VR3, VR3, VR3);
- vxor(VR4, VR4, VR4);
- vxor(VR5, VR5, VR5);
- vxor(VR6, VR6, VR6);
- vxor(VR7, VR7, VR7);
-
- lvx(const1, constantsPos);
-
- /*
- * If we are looping back to consume more data we use the values
- * already in VR16-v23.
- */
- cmpdi(CCR0, rLoaded, 1);
- beq(CCR0, L_3);
- {
-
- // First warm up pass
- lvx(VR16, buf);
- lvx(VR17, off16, buf);
- lvx(VR18, off32, buf);
- lvx(VR19, off48, buf);
- lvx(VR20, off64, buf);
- lvx(VR21, off80, buf);
- lvx(VR22, off96, buf);
- lvx(VR23, off112, buf);
- addi(buf, buf, 8*16);
-
- // xor in initial value
- vxor(VR16, VR16, VR8);
- }
-
- BIND(L_3);
- bdz(L_first_warm_up_done);
-
- addi(constantsPos, constantsPos, 16);
- lvx(const2, constantsPos);
-
- // Second warm up pass
- vpmsumd(VR8, VR16, const1);
- lvx(VR16, buf);
-
- vpmsumd(VR9, VR17, const1);
- lvx(VR17, off16, buf);
-
- vpmsumd(VR10, VR18, const1);
- lvx(VR18, off32, buf);
-
- vpmsumd(VR11, VR19, const1);
- lvx(VR19, off48, buf);
-
- vpmsumd(VR12, VR20, const1);
- lvx(VR20, off64, buf);
-
- vpmsumd(VR13, VR21, const1);
- lvx(VR21, off80, buf);
-
- vpmsumd(VR14, VR22, const1);
- lvx(VR22, off96, buf);
-
- vpmsumd(VR15, VR23, const1);
- lvx(VR23, off112, buf);
-
- addi(buf, buf, 8 * 16);
-
- bdz(L_first_cool_down);
-
- /*
- * main loop. We modulo schedule it such that it takes three iterations
- * to complete - first iteration load, second iteration vpmsum, third
- * iteration xor.
- */
- {
- BIND(L_4);
- lvx(const1, constantsPos); addi(constantsPos, constantsPos, 16);
-
- vxor(VR0, VR0, VR8);
- vpmsumd(VR8, VR16, const2);
- lvx(VR16, buf);
-
- vxor(VR1, VR1, VR9);
- vpmsumd(VR9, VR17, const2);
- lvx(VR17, off16, buf);
-
- vxor(VR2, VR2, VR10);
- vpmsumd(VR10, VR18, const2);
- lvx(VR18, off32, buf);
-
- vxor(VR3, VR3, VR11);
- vpmsumd(VR11, VR19, const2);
- lvx(VR19, off48, buf);
- lvx(const2, constantsPos);
-
- vxor(VR4, VR4, VR12);
- vpmsumd(VR12, VR20, const1);
- lvx(VR20, off64, buf);
-
- vxor(VR5, VR5, VR13);
- vpmsumd(VR13, VR21, const1);
- lvx(VR21, off80, buf);
-
- vxor(VR6, VR6, VR14);
- vpmsumd(VR14, VR22, const1);
- lvx(VR22, off96, buf);
-
- vxor(VR7, VR7, VR15);
- vpmsumd(VR15, VR23, const1);
- lvx(VR23, off112, buf);
-
- addi(buf, buf, 8 * 16);
-
- bdnz(L_4);
+ Register offset = t1;
+ int offsetInt = 0;
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR20, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR21, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR22, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR23, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR24, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR25, offset, R1_SP);
+#ifndef VM_LITTLE_ENDIAN
+ offsetInt -= 16; li(offset, offsetInt); stvx(VR26, offset, R1_SP);
+#endif
+ offsetInt -= 8; std(R14, offsetInt, R1_SP);
+ offsetInt -= 8; std(R15, offsetInt, R1_SP);
+ offsetInt -= 8; std(R16, offsetInt, R1_SP);
+ offsetInt -= 8; std(R17, offsetInt, R1_SP);
+
+  // The implementation uses an inner loop which processes between 256 and
+  // 16 * unroll_factor bytes per iteration. The basic scheme is:
+ // lvx: load vector (Big Endian needs reversal)
+ // vpmsumw: carry-less 32 bit multiplications with constant representing a large CRC shift
+ // vxor: xor partial results together to get unroll_factor2 vectors
+
+ // Outer loop performs the CRC shifts needed to combine the unroll_factor2 vectors.
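+
+  // Informal sketch of the underlying math (a standard CRC folding scheme,
+  // not the exact constant layout used here): split the input into 16-byte
+  // chunks c_i; since CRC is linear over GF(2),
+  //   crc(c_0 || ... || c_n) = XOR_i (c_i * x^shift_i mod P),
+  // where shift_i counts the bits following chunk i. The constants tables
+  // hold precomputed (x^shift_i mod P) values, so each chunk can be folded
+  // independently with a carry-less multiply (vpmsumw) and combined by vxor.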
+
+  // Using 16 * unroll_factor / unroll_factor2 bytes for constants.
+ const int unroll_factor = 2048;
+ const int unroll_factor2 = 8;
+
+ // Support registers.
+ Register offs[] = { noreg, t0, t1, t2, t3, t4, crc /* will live in VCRC */, R14 };
+ Register num_bytes = R15,
+ loop_count = R16,
+ cur_const = R17;
+ // Constant array for outer loop: unroll_factor2 - 1 registers,
+ // Constant array for inner loop: unroll_factor / unroll_factor2 registers.
+ VectorRegister consts0[] = { VR16, VR17, VR18, VR19, VR20, VR21, VR22 },
+ consts1[] = { VR23, VR24 };
+ // Data register arrays: 2 arrays with unroll_factor2 registers.
+ VectorRegister data0[] = { VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7 },
+ data1[] = { VR8, VR9, VR10, VR11, VR12, VR13, VR14, VR15 };
+
+ VectorRegister VCRC = data0[0];
+ VectorRegister Vc = VR25;
+ VectorRegister swap_bytes = VR26; // Only for Big Endian.
+
+ // We have at least 1 iteration (ensured by caller).
+ Label L_outer_loop, L_inner_loop, L_last;
+
+ // If supported set DSCR pre-fetch to deepest.
+ if (VM_Version::has_mfdscr()) {
+ load_const_optimized(t0, VM_Version::_dscr_val | 7);
+ mtdscr(t0);
+ }
+
+  mtvrwz(VCRC, crc); // crc lives in VCRC now
+
+ for (int i = 1; i < unroll_factor2; ++i) {
+ li(offs[i], 16 * i);
+ }
+
+ // Load consts for outer loop
+ lvx(consts0[0], constants);
+ for (int i = 1; i < unroll_factor2 - 1; ++i) {
+ lvx(consts0[i], offs[i], constants);
+ }
+ addi(constants, constants, (unroll_factor2 - 1) * 16);
+
+ load_const_optimized(num_bytes, 16 * unroll_factor);
+ load_const_optimized(loop_count, unroll_factor / (2 * unroll_factor2) - 1); // One double-iteration peeled off.
+
+ // Reuse data registers outside of the loop.
+ VectorRegister Vtmp = data1[0];
+ VectorRegister Vtmp2 = data1[1];
+ VectorRegister zeroes = data1[2];
+
+ vspltisb(Vtmp, 0);
+ vsldoi(VCRC, Vtmp, VCRC, 8); // 96 bit zeroes, 32 bit CRC.
+
+ // Load vector for vpermxor (to xor both 64 bit parts together)
+ lvsl(Vtmp, buf); // 000102030405060708090a0b0c0d0e0f
+ vspltisb(Vc, 4);
+ vsl(Vc, Vtmp, Vc); // 00102030405060708090a0b0c0d0e0f0
+ xxspltd(Vc->to_vsr(), Vc->to_vsr(), 0);
+ vor(Vc, Vtmp, Vc); // 001122334455667708192a3b4c5d6e7f
+
+#ifdef VM_LITTLE_ENDIAN
+#define BE_swap_bytes(x)
+#else
+ vspltisb(Vtmp2, 0xf);
+ vxor(swap_bytes, Vtmp, Vtmp2);
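+  // Assuming buf is 16-byte aligned here (the caller aligns it first), Vtmp
+  // holds 000102...0f, so swap_bytes is 0f0e0d...00: a full 16-byte reversal
+  // permutation for vperm.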
+#define BE_swap_bytes(x) vperm(x, x, x, swap_bytes)
+#endif
+
+ cmpd(CCR0, len, num_bytes);
+ blt(CCR0, L_last);
+
+ // ********** Main loop start **********
+ align(32);
+ bind(L_outer_loop);
+
+ // Begin of unrolled first iteration (no xor).
+ lvx(data1[0], buf);
+ mr(cur_const, constants);
+ for (int i = 1; i < unroll_factor2 / 2; ++i) {
+ lvx(data1[i], offs[i], buf);
+ }
+ vpermxor(VCRC, VCRC, VCRC, Vc); // xor both halves to 64 bit result.
+ lvx(consts1[0], cur_const);
+ mtctr(loop_count);
+ for (int i = 0; i < unroll_factor2 / 2; ++i) {
+ BE_swap_bytes(data1[i]);
+ if (i == 0) { vxor(data1[0], data1[0], VCRC); } // xor in previous CRC.
+ lvx(data1[i + unroll_factor2 / 2], offs[i + unroll_factor2 / 2], buf);
+ vpmsumw(data0[i], data1[i], consts1[0]);
+ }
+ addi(buf, buf, 16 * unroll_factor2);
+ subf(len, num_bytes, len);
+ lvx(consts1[1], offs[1], cur_const);
+ addi(cur_const, cur_const, 32);
+ // Begin of unrolled second iteration (head).
+ for (int i = 0; i < unroll_factor2 / 2; ++i) {
+ BE_swap_bytes(data1[i + unroll_factor2 / 2]);
+ if (i == 0) { lvx(data1[0], buf); } else { lvx(data1[i], offs[i], buf); }
+ vpmsumw(data0[i + unroll_factor2 / 2], data1[i + unroll_factor2 / 2], consts1[0]);
+ }
+ for (int i = 0; i < unroll_factor2 / 2; ++i) {
+ BE_swap_bytes(data1[i]);
+ lvx(data1[i + unroll_factor2 / 2], offs[i + unroll_factor2 / 2], buf);
+ vpmsumw(data1[i], data1[i], consts1[1]);
+ }
+ addi(buf, buf, 16 * unroll_factor2);
+
+  // Generate the most performance-relevant code. The loads and half of the
+  // vpmsumw instructions have already been generated above.
+  // Double-iteration allows using the 2 constant registers alternately.
+ align(32);
+ bind(L_inner_loop);
+ for (int j = 1; j < 3; ++j) { // j < unroll_factor / unroll_factor2 - 1 for complete unrolling.
+ if (j & 1) {
+ lvx(consts1[0], cur_const);
+ } else {
+ lvx(consts1[1], offs[1], cur_const);
+ addi(cur_const, cur_const, 32);
}
-
- BIND(L_first_cool_down);
-
- // First cool down pass
- lvx(const1, constantsPos);
- addi(constantsPos, constantsPos, 16);
-
- vxor(VR0, VR0, VR8);
- vpmsumd(VR8, VR16, const1);
-
- vxor(VR1, VR1, VR9);
- vpmsumd(VR9, VR17, const1);
-
- vxor(VR2, VR2, VR10);
- vpmsumd(VR10, VR18, const1);
-
- vxor(VR3, VR3, VR11);
- vpmsumd(VR11, VR19, const1);
-
- vxor(VR4, VR4, VR12);
- vpmsumd(VR12, VR20, const1);
-
- vxor(VR5, VR5, VR13);
- vpmsumd(VR13, VR21, const1);
-
- vxor(VR6, VR6, VR14);
- vpmsumd(VR14, VR22, const1);
-
- vxor(VR7, VR7, VR15);
- vpmsumd(VR15, VR23, const1);
-
- BIND(L_second_cool_down);
- // Second cool down pass
- vxor(VR0, VR0, VR8);
- vxor(VR1, VR1, VR9);
- vxor(VR2, VR2, VR10);
- vxor(VR3, VR3, VR11);
- vxor(VR4, VR4, VR12);
- vxor(VR5, VR5, VR13);
- vxor(VR6, VR6, VR14);
- vxor(VR7, VR7, VR15);
-
- /*
- * vpmsumd produces a 96 bit result in the least significant bits
- * of the register. Since we are bit reflected we have to shift it
- * left 32 bits so it occupies the least significant bits in the
- * bit reflected domain.
- */
- vsldoi(VR0, VR0, zeroes, 4);
- vsldoi(VR1, VR1, zeroes, 4);
- vsldoi(VR2, VR2, zeroes, 4);
- vsldoi(VR3, VR3, zeroes, 4);
- vsldoi(VR4, VR4, zeroes, 4);
- vsldoi(VR5, VR5, zeroes, 4);
- vsldoi(VR6, VR6, zeroes, 4);
- vsldoi(VR7, VR7, zeroes, 4);
-
- // xor with last 1024 bits
- lvx(VR8, buf);
- lvx(VR9, off16, buf);
- lvx(VR10, off32, buf);
- lvx(VR11, off48, buf);
- lvx(VR12, off64, buf);
- lvx(VR13, off80, buf);
- lvx(VR14, off96, buf);
- lvx(VR15, off112, buf);
- addi(buf, buf, 8 * 16);
-
- vxor(VR16, VR0, VR8);
- vxor(VR17, VR1, VR9);
- vxor(VR18, VR2, VR10);
- vxor(VR19, VR3, VR11);
- vxor(VR20, VR4, VR12);
- vxor(VR21, VR5, VR13);
- vxor(VR22, VR6, VR14);
- vxor(VR23, VR7, VR15);
-
- li(rLoaded, 1);
- cmpdi(CCR0, rIdx, 0);
- addi(rIdx, rIdx, 128);
- bne(CCR0, L_1);
- }
-
- // Work out how many bytes we have left
- andi_(len, len, 127);
-
- // Calculate where in the constant table we need to start
- subfic(rTmp1, len, 128);
- add(constantsPos, constantsPos, rTmp1);
-
- // How many 16 byte chunks are in the tail
- srdi(rIdx, len, 4);
- mtctr(rIdx);
-
- /*
- * Reduce the previously calculated 1024 bits to 64 bits, shifting
- * 32 bits to include the trailing 32 bits of zeros
- */
- lvx(VR0, constantsPos);
- lvx(VR1, off16, constantsPos);
- lvx(VR2, off32, constantsPos);
- lvx(VR3, off48, constantsPos);
- lvx(VR4, off64, constantsPos);
- lvx(VR5, off80, constantsPos);
- lvx(VR6, off96, constantsPos);
- lvx(VR7, off112, constantsPos);
- addi(constantsPos, constantsPos, 8 * 16);
-
- vpmsumw(VR0, VR16, VR0);
- vpmsumw(VR1, VR17, VR1);
- vpmsumw(VR2, VR18, VR2);
- vpmsumw(VR3, VR19, VR3);
- vpmsumw(VR4, VR20, VR4);
- vpmsumw(VR5, VR21, VR5);
- vpmsumw(VR6, VR22, VR6);
- vpmsumw(VR7, VR23, VR7);
-
- // Now reduce the tail (0 - 112 bytes)
- cmpdi(CCR0, rIdx, 0);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, off16, constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, off32, constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, off48,constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, off64, constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, off80, constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
- beq(CCR0, L_XOR);
-
- lvx(VR16, buf); addi(buf, buf, 16);
- lvx(VR17, off96, constantsPos);
- vpmsumw(VR16, VR16, VR17);
- vxor(VR0, VR0, VR16);
-
- // Now xor all the parallel chunks together
- BIND(L_XOR);
- vxor(VR0, VR0, VR1);
- vxor(VR2, VR2, VR3);
- vxor(VR4, VR4, VR5);
- vxor(VR6, VR6, VR7);
-
- vxor(VR0, VR0, VR2);
- vxor(VR4, VR4, VR6);
-
- vxor(VR0, VR0, VR4);
-
- b(L_barrett_reduction);
-
- BIND(L_first_warm_up_done);
- lvx(const1, constantsPos);
- addi(constantsPos, constantsPos, 16);
- vpmsumd(VR8, VR16, const1);
- vpmsumd(VR9, VR17, const1);
- vpmsumd(VR10, VR18, const1);
- vpmsumd(VR11, VR19, const1);
- vpmsumd(VR12, VR20, const1);
- vpmsumd(VR13, VR21, const1);
- vpmsumd(VR14, VR22, const1);
- vpmsumd(VR15, VR23, const1);
- b(L_second_cool_down);
-
- BIND(L_barrett_reduction);
-
- lvx(const1, barretConstants);
- addi(barretConstants, barretConstants, 16);
- lvx(const2, barretConstants);
-
- vsldoi(VR1, VR0, VR0, 8);
- vxor(VR0, VR0, VR1); // xor two 64 bit results together
-
- // shift left one bit
- vspltisb(VR1, 1);
- vsl(VR0, VR0, VR1);
-
- vand(VR0, VR0, mask_64bit);
-
- /*
- * The reflected version of Barrett reduction. Instead of bit
- * reflecting our data (which is expensive to do), we bit reflect our
- * constants and our algorithm, which means the intermediate data in
- * our vector registers goes from 0-63 instead of 63-0. We can reflect
- * the algorithm because we don't carry in mod 2 arithmetic.
- */
- vand(VR1, VR0, mask_32bit); // bottom 32 bits of a
- vpmsumd(VR1, VR1, const1); // ma
- vand(VR1, VR1, mask_32bit); // bottom 32bits of ma
- vpmsumd(VR1, VR1, const2); // qn */
- vxor(VR0, VR0, VR1); // a - qn, subtraction is xor in GF(2)
-
- /*
- * Since we are bit reflected, the result (ie the low 32 bits) is in
- * the high 32 bits. We just need to shift it left 4 bytes
- * V0 [ 0 1 X 3 ]
- * V0 [ 0 X 2 3 ]
- */
- vsldoi(VR0, VR0, zeroes, 4); // shift result into top 64 bits of
-
- // Get it into r3
- mfvrd(crc, VR0);
-
- BIND(L_end);
-
+ for (int i = 0; i < unroll_factor2; ++i) {
+ int idx = i + unroll_factor2 / 2, inc = 0; // For modulo-scheduled input.
+ if (idx >= unroll_factor2) { idx -= unroll_factor2; inc = 1; }
+ BE_swap_bytes(data1[idx]);
+ vxor(data0[i], data0[i], data1[i]);
+ if (i == 0) lvx(data1[0], buf); else lvx(data1[i], offs[i], buf);
+ vpmsumw(data1[idx], data1[idx], consts1[(j + inc) & 1]);
+ }
+ addi(buf, buf, 16 * unroll_factor2);
+ }
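+ // Sketch of what one pass of the j loop computes (illustrative only,
+ // ignoring the modulo scheduling), for every lane i:
+ //   data0[i] ^= data1[i];                   // accumulate (vxor)
+ //   data1[i]  = clmul(load(buf + 16*i), c); // fold (vpmsumw)
+ // where c alternates between the 2 preloaded constant registers. The
+ // idx/inc shuffle only reorders issue so that each vpmsumw starts half
+ // an iteration after its input vector was loaded, hiding its latency.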
+ bdnz(L_inner_loop);
+
+ // Tail of last iteration (no loads).
+ for (int i = 0; i < unroll_factor2 / 2; ++i) {
+ BE_swap_bytes(data1[i + unroll_factor2 / 2]);
+ vxor(data0[i], data0[i], data1[i]);
+ vpmsumw(data1[i + unroll_factor2 / 2], data1[i + unroll_factor2 / 2], consts1[1]);
+ }
+ for (int i = 0; i < unroll_factor2 / 2; ++i) {
+ vpmsumw(data0[i], data0[i], consts0[unroll_factor2 - 2 - i]); // First half of fixup shifts.
+ vxor(data0[i + unroll_factor2 / 2], data0[i + unroll_factor2 / 2], data1[i + unroll_factor2 / 2]);
+ }
+
+ // The last data register is ok; the other ones need a fixup shift.
+ for (int i = unroll_factor2 / 2; i < unroll_factor2 - 1; ++i) {
+ vpmsumw(data0[i], data0[i], consts0[unroll_factor2 - 2 - i]);
+ }
+
+ // Combine to 128 bit result vector VCRC = data0[0].
+ for (int i = 1; i < unroll_factor2; i<<=1) {
+ for (int j = 0; j <= unroll_factor2 - 2*i; j+=2*i) {
+ vxor(data0[j], data0[j], data0[j+i]);
+ }
+ }
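+ // The nested loop above emits a log2(unroll_factor2)-deep xor tree.
+ // Equivalent scalar sketch (n = unroll_factor2, assumed a power of 2):
+ //   for (int i = 1; i < n; i <<= 1)
+ //     for (int j = 0; j + 2*i <= n; j += 2*i)
+ //       data0[j] ^= data0[j + i]; // result ends up in data0[0]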
+ cmpd(CCR0, len, num_bytes);
+ bge(CCR0, L_outer_loop);
+
+ // Last chance: one more pass with a smaller num_bytes.
+ bind(L_last);
+ srdi(loop_count, len, exact_log2(16 * 2 * unroll_factor2)); // Use double-iterations.
+ add_const_optimized(constants, constants, 16 * (unroll_factor / unroll_factor2)); // Point past the last one.
+ sldi(R0, loop_count, exact_log2(16 * 2)); // Bytes of constants to be used.
+ clrrdi(num_bytes, len, exact_log2(16 * 2 * unroll_factor2));
+ subf(constants, R0, constants); // Point to constant to be used first.
+
+ addic_(loop_count, loop_count, -1); // One double-iteration peeled off.
+ bgt(CCR0, L_outer_loop);
+ // ********** Main loop end **********
+#undef BE_swap_bytes
+
+ // Restore DSCR pre-fetch value.
+ if (VM_Version::has_mfdscr()) {
+ load_const_optimized(t0, VM_Version::_dscr_val);
+ mtdscr(t0);
+ }
+
+ vspltisb(zeroes, 0);
+
+ // Combine to 64 bit result.
+ vpermxor(VCRC, VCRC, VCRC, Vc); // xor both halves to 64 bit result.
+
+ // Reduce to 32 bit CRC: compute the remainder by multiply-high (Barrett).
+ lvx(Vtmp, barretConstants);
+ vsldoi(Vtmp2, zeroes, VCRC, 12); // Extract high 32 bit.
+ vpmsumd(Vtmp2, Vtmp2, Vtmp); // Multiply by inverse long poly.
+ vsldoi(Vtmp2, zeroes, Vtmp2, 12); // Extract high 32 bit.
+ vsldoi(Vtmp, zeroes, Vtmp, 8);
+ vpmsumd(Vtmp2, Vtmp2, Vtmp); // Multiply quotient by long poly.
+ vxor(VCRC, VCRC, Vtmp2); // Remainder fits into 32 bit.
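+ // Sketch of the reflected Barrett reduction above (GF(2) arithmetic,
+ // illustrative only):
+ //   q = high_half(a_high * inverse_long_poly) // quotient of a / p(x)
+ //   r = a ^ q * long_poly                     // remainder fits in 32 bit
+ // Subtraction is xor in GF(2). Both constants are produced by
+ // generate_crc_barret_constants() in stubRoutines_ppc_64.cpp.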
+
+ // Move result. len is already updated.
+ vsldoi(VCRC, VCRC, zeroes, 8);
+ mfvrd(crc, VCRC);
+
+ // Restore non-volatile Vector registers (frameless).
offsetInt = 0;
- // Restore non-volatile Vector registers (frameless).
- offsetInt -= 16; li(offset, -16); lvx(VR20, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR21, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR22, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR23, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR24, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR25, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR26, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR27, offset, R1_SP);
- offsetInt -= 16; addi(offset, offset, -16); lvx(VR28, offset, R1_SP);
- offsetInt -= 8; ld(R22, offsetInt, R1_SP);
- offsetInt -= 8; ld(R23, offsetInt, R1_SP);
- offsetInt -= 8; ld(R24, offsetInt, R1_SP);
- offsetInt -= 8; ld(R25, offsetInt, R1_SP);
- offsetInt -= 8; ld(R26, offsetInt, R1_SP);
- offsetInt -= 8; ld(R27, offsetInt, R1_SP);
- offsetInt -= 8; ld(R28, offsetInt, R1_SP);
- offsetInt -= 8; ld(R29, offsetInt, R1_SP);
- offsetInt -= 8; ld(R30, offsetInt, R1_SP);
- offsetInt -= 8; ld(R31, offsetInt, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR20, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR21, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR22, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR23, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR24, offset, R1_SP);
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR25, offset, R1_SP);
+#ifndef VM_LITTLE_ENDIAN
+ offsetInt -= 16; li(offset, offsetInt); lvx(VR26, offset, R1_SP);
+#endif
+ offsetInt -= 8; ld(R14, offsetInt, R1_SP);
+ offsetInt -= 8; ld(R15, offsetInt, R1_SP);
+ offsetInt -= 8; ld(R16, offsetInt, R1_SP);
+ offsetInt -= 8; ld(R17, offsetInt, R1_SP);
}
void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp, bool invertCRC) {
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -856,13 +856,13 @@
void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
Register t0, Register t1, Register t2, Register t3,
bool invertCRC);
- void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
+ void kernel_crc32_1word_vpmsum(Register crc, Register buf, Register len, Register table,
Register constants, Register barretConstants,
Register t0, Register t1, Register t2, Register t3, Register t4,
bool invertCRC);
void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
Register constants, Register barretConstants,
- Register t0, Register t1, Register t2);
+ Register t0, Register t1, Register t2, Register t3, Register t4);
void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
bool invertCRC);
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -45,7 +45,7 @@
// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to manipulate code relative to this.
-class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+class NativeInstruction {
friend class Relocation;
public:
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -627,7 +627,7 @@
Register preserve1 = noreg, Register preserve2 = noreg) {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
int spill_slots = 3;
@@ -689,7 +689,7 @@
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
int spill_slots = (preserve != noreg) ? 1 : 0;
const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
@@ -3627,7 +3627,6 @@
const Register table = R6; // crc table address
-#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
@@ -3650,16 +3649,14 @@
StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
- __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
+ __ kernel_crc32_1word_vpmsum(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
- } else
-#endif
- {
+ } else {
StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, true);
}
@@ -3690,8 +3687,6 @@
const Register table = R6; // crc table address
-#if 0 // no vector support yet for CRC32C
-#ifdef VM_LITTLE_ENDIAN
// arguments to kernel_crc32:
const Register crc = R3_ARG1; // Current checksum, preset by caller or result from previous call.
const Register data = R4_ARG2; // source byte array
@@ -3714,17 +3709,14 @@
StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
- __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
+ __ kernel_crc32_1word_vpmsum(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
BLOCK_COMMENT("return");
__ mr_if_needed(R3_RET, crc); // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
__ blr();
BLOCK_COMMENT("} Stub body");
- } else
-#endif
-#endif
- {
+ } else {
StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
generate_CRC_updateBytes(name, table, false);
}
--- a/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/stubRoutines_ppc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,20 +56,21 @@
// CRC32 Intrinsics.
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
- static juint* _constants;
- static juint* _barret_constants;
+ static juint *_crc_constants, *_crc_barret_constants;
+ static juint *_crc32c_constants, *_crc32c_barret_constants;
public:
// CRC32 Intrinsics.
static void generate_load_table_addr(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
- static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_constants_addr(MacroAssembler* masm, Register table);
static void generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table);
- static juint* generate_crc_constants();
- static juint* generate_crc_barret_constants();
-
+ static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
+ static void generate_load_crc32c_constants_addr(MacroAssembler* masm, Register table);
+ static void generate_load_crc32c_barret_constants_addr(MacroAssembler* masm, Register table);
+ static juint* generate_crc_constants(juint reverse_poly);
+ static juint* generate_crc_barret_constants(juint reverse_poly);
};
#endif // CPU_PPC_VM_STUBROUTINES_PPC_HPP
--- a/src/hotspot/cpu/ppc/stubRoutines_ppc_64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/stubRoutines_ppc_64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,316 +34,149 @@
#define __ masm->
// CRC32(C) Intrinsics.
-void StubRoutines::ppc64::generate_load_crc32c_table_addr(MacroAssembler* masm, Register table) {
- __ load_const_optimized(table, StubRoutines::_crc32c_table_addr, R0);
-}
-
void StubRoutines::ppc64::generate_load_crc_table_addr(MacroAssembler* masm, Register table) {
__ load_const_optimized(table, StubRoutines::_crc_table_adr, R0);
}
void StubRoutines::ppc64::generate_load_crc_constants_addr(MacroAssembler* masm, Register table) {
- __ load_const_optimized(table, (address)StubRoutines::ppc64::_constants, R0);
+ __ load_const_optimized(table, (address)StubRoutines::ppc64::_crc_constants, R0);
}
void StubRoutines::ppc64::generate_load_crc_barret_constants_addr(MacroAssembler* masm, Register table) {
- __ load_const_optimized(table, (address)StubRoutines::ppc64::_barret_constants, R0);
+ __ load_const_optimized(table, (address)StubRoutines::ppc64::_crc_barret_constants, R0);
+}
+
+void StubRoutines::ppc64::generate_load_crc32c_table_addr(MacroAssembler* masm, Register table) {
+ __ load_const_optimized(table, StubRoutines::_crc32c_table_addr, R0);
+}
+
+void StubRoutines::ppc64::generate_load_crc32c_constants_addr(MacroAssembler* masm, Register table) {
+ __ load_const_optimized(table, (address)StubRoutines::ppc64::_crc32c_constants, R0);
+}
+
+void StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(MacroAssembler* masm, Register table) {
+ __ load_const_optimized(table, (address)StubRoutines::ppc64::_crc32c_barret_constants, R0);
+}
+
+// CRC constants and compute functions
+#define REVERSE_CRC32_POLY 0xEDB88320
+#define REVERSE_CRC32C_POLY 0x82F63B78
+#define INVERSE_REVERSE_CRC32_POLY 0x1aab14226ull
+#define INVERSE_REVERSE_CRC32C_POLY 0x105fd79bdull
+#define UNROLL_FACTOR 2048
+#define UNROLL_FACTOR2 8
+
+static juint fold_word(juint w, juint reverse_poly) {
+ for (int i = 0; i < 32; i++) {
+ int poly_if_odd = (-(w & 1)) & reverse_poly;
+ w = (w >> 1) ^ poly_if_odd;
+ }
+ return w;
+}
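+// fold_word() performs 32 steps of the bit-reflected modular reduction,
+// i.e. it returns w * x^32 mod p(x) in the reflected domain. Iterating it
+// yields the "x^n mod p(x)" folding constant for any word distance n,
+// which is how the generators below build their tables.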
+
+static julong numberOfLeadingZeros(julong p) {
+ julong l = 1ull << 63;
+ for (int i = 0; i < 64; ++i) {
+ if (p & l) return i;
+ l >>= 1;
+ }
+ return 64;
+}
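+// Linear-scan leading-zero count. This runs once at stub generation time,
+// so performance is irrelevant. Returns 64 for p == 0, though the only
+// caller passes a nonzero polynomial.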
+
+static julong compute_inverse_poly(julong long_poly) {
+ // 2^64 / p
+ julong mod = 0, div = 0;
+ int d = numberOfLeadingZeros(long_poly);
+ int s = d + 1;
+ do {
+ mod ^= (long_poly << s);
+ div |= (1L << s);
+ s = d - numberOfLeadingZeros(mod);
+ } while (s >= 0);
+ return div;
}
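+// compute_inverse_poly() is GF(2) long division of x^64 by p(x): each
+// round cancels the leading term of the remainder (mod) and records the
+// corresponding quotient bit in div. For CRC-32 the result must match
+// INVERSE_REVERSE_CRC32_POLY, which the ASSERT block in
+// generate_crc_barret_constants() below cross-checks.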
-juint* StubRoutines::ppc64::generate_crc_constants() {
- juint constants[CRC32_CONSTANTS_SIZE] = {
- // Reduce 262144 kbits to 1024 bits
- 0x99ea94a8UL, 0x00000000UL, 0x651797d2UL, 0x00000001UL, // x^261120 mod p(x)` << 1, x^261184 mod p(x)` << 1
- 0x945a8420UL, 0x00000000UL, 0x21e0d56cUL, 0x00000000UL, // x^260096 mod p(x)` << 1, x^260160 mod p(x)` << 1
- 0x30762706UL, 0x00000000UL, 0x0f95ecaaUL, 0x00000000UL, // x^259072 mod p(x)` << 1, x^259136 mod p(x)` << 1
- 0xa52fc582UL, 0x00000001UL, 0xebd224acUL, 0x00000001UL, // x^258048 mod p(x)` << 1, x^258112 mod p(x)` << 1
- 0xa4a7167aUL, 0x00000001UL, 0x0ccb97caUL, 0x00000000UL, // x^257024 mod p(x)` << 1, x^257088 mod p(x)` << 1
- 0x0c18249aUL, 0x00000000UL, 0x006ec8a8UL, 0x00000001UL, // x^256000 mod p(x)` << 1, x^256064 mod p(x)` << 1
- 0xa924ae7cUL, 0x00000000UL, 0x4f58f196UL, 0x00000001UL, // x^254976 mod p(x)` << 1, x^255040 mod p(x)` << 1
- 0xe12ccc12UL, 0x00000001UL, 0xa7192ca6UL, 0x00000001UL, // x^253952 mod p(x)` << 1, x^254016 mod p(x)` << 1
- 0xa0b9d4acUL, 0x00000000UL, 0x9a64bab2UL, 0x00000001UL, // x^252928 mod p(x)` << 1, x^252992 mod p(x)` << 1
- 0x95e8ddfeUL, 0x00000000UL, 0x14f4ed2eUL, 0x00000000UL, // x^251904 mod p(x)` << 1, x^251968 mod p(x)` << 1
- 0x233fddc4UL, 0x00000000UL, 0x1092b6a2UL, 0x00000001UL, // x^250880 mod p(x)` << 1, x^250944 mod p(x)` << 1
- 0xb4529b62UL, 0x00000001UL, 0xc8a1629cUL, 0x00000000UL, // x^249856 mod p(x)` << 1, x^249920 mod p(x)` << 1
- 0xa7fa0e64UL, 0x00000001UL, 0x7bf32e8eUL, 0x00000001UL, // x^248832 mod p(x)` << 1, x^248896 mod p(x)` << 1
- 0xb5334592UL, 0x00000001UL, 0xf8cc6582UL, 0x00000001UL, // x^247808 mod p(x)` << 1, x^247872 mod p(x)` << 1
- 0x1f8ee1b4UL, 0x00000001UL, 0x8631ddf0UL, 0x00000000UL, // x^246784 mod p(x)` << 1, x^246848 mod p(x)` << 1
- 0x6252e632UL, 0x00000000UL, 0x7e5a76d0UL, 0x00000000UL, // x^245760 mod p(x)` << 1, x^245824 mod p(x)` << 1
- 0xab973e84UL, 0x00000000UL, 0x2b09b31cUL, 0x00000000UL, // x^244736 mod p(x)` << 1, x^244800 mod p(x)` << 1
- 0x7734f5ecUL, 0x00000000UL, 0xb2df1f84UL, 0x00000001UL, // x^243712 mod p(x)` << 1, x^243776 mod p(x)` << 1
- 0x7c547798UL, 0x00000000UL, 0xd6f56afcUL, 0x00000001UL, // x^242688 mod p(x)` << 1, x^242752 mod p(x)` << 1
- 0x7ec40210UL, 0x00000000UL, 0xb9b5e70cUL, 0x00000001UL, // x^241664 mod p(x)` << 1, x^241728 mod p(x)` << 1
- 0xab1695a8UL, 0x00000001UL, 0x34b626d2UL, 0x00000000UL, // x^240640 mod p(x)` << 1, x^240704 mod p(x)` << 1
- 0x90494bbaUL, 0x00000000UL, 0x4c53479aUL, 0x00000001UL, // x^239616 mod p(x)` << 1, x^239680 mod p(x)` << 1
- 0x123fb816UL, 0x00000001UL, 0xa6d179a4UL, 0x00000001UL, // x^238592 mod p(x)` << 1, x^238656 mod p(x)` << 1
- 0xe188c74cUL, 0x00000001UL, 0x5abd16b4UL, 0x00000001UL, // x^237568 mod p(x)` << 1, x^237632 mod p(x)` << 1
- 0xc2d3451cUL, 0x00000001UL, 0x018f9852UL, 0x00000000UL, // x^236544 mod p(x)` << 1, x^236608 mod p(x)` << 1
- 0xf55cf1caUL, 0x00000000UL, 0x1fb3084aUL, 0x00000000UL, // x^235520 mod p(x)` << 1, x^235584 mod p(x)` << 1
- 0xa0531540UL, 0x00000001UL, 0xc53dfb04UL, 0x00000000UL, // x^234496 mod p(x)` << 1, x^234560 mod p(x)` << 1
- 0x32cd7ebcUL, 0x00000001UL, 0xe10c9ad6UL, 0x00000000UL, // x^233472 mod p(x)` << 1, x^233536 mod p(x)` << 1
- 0x73ab7f36UL, 0x00000000UL, 0x25aa994aUL, 0x00000000UL, // x^232448 mod p(x)` << 1, x^232512 mod p(x)` << 1
- 0x41aed1c2UL, 0x00000000UL, 0xfa3a74c4UL, 0x00000000UL, // x^231424 mod p(x)` << 1, x^231488 mod p(x)` << 1
- 0x36c53800UL, 0x00000001UL, 0x33eb3f40UL, 0x00000000UL, // x^230400 mod p(x)` << 1, x^230464 mod p(x)` << 1
- 0x26835a30UL, 0x00000001UL, 0x7193f296UL, 0x00000001UL, // x^229376 mod p(x)` << 1, x^229440 mod p(x)` << 1
- 0x6241b502UL, 0x00000000UL, 0x43f6c86aUL, 0x00000000UL, // x^228352 mod p(x)` << 1, x^228416 mod p(x)` << 1
- 0xd5196ad4UL, 0x00000000UL, 0x6b513ec6UL, 0x00000001UL, // x^227328 mod p(x)` << 1, x^227392 mod p(x)` << 1
- 0x9cfa769aUL, 0x00000000UL, 0xc8f25b4eUL, 0x00000000UL, // x^226304 mod p(x)` << 1, x^226368 mod p(x)` << 1
- 0x920e5df4UL, 0x00000000UL, 0xa45048ecUL, 0x00000001UL, // x^225280 mod p(x)` << 1, x^225344 mod p(x)` << 1
- 0x69dc310eUL, 0x00000001UL, 0x0c441004UL, 0x00000000UL, // x^224256 mod p(x)` << 1, x^224320 mod p(x)` << 1
- 0x09fc331cUL, 0x00000000UL, 0x0e17cad6UL, 0x00000000UL, // x^223232 mod p(x)` << 1, x^223296 mod p(x)` << 1
- 0x0d94a81eUL, 0x00000001UL, 0x253ae964UL, 0x00000001UL, // x^222208 mod p(x)` << 1, x^222272 mod p(x)` << 1
- 0x27a20ab2UL, 0x00000000UL, 0xd7c88ebcUL, 0x00000001UL, // x^221184 mod p(x)` << 1, x^221248 mod p(x)` << 1
- 0x14f87504UL, 0x00000001UL, 0xe7ca913aUL, 0x00000001UL, // x^220160 mod p(x)` << 1, x^220224 mod p(x)` << 1
- 0x4b076d96UL, 0x00000000UL, 0x33ed078aUL, 0x00000000UL, // x^219136 mod p(x)` << 1, x^219200 mod p(x)` << 1
- 0xda4d1e74UL, 0x00000000UL, 0xe1839c78UL, 0x00000000UL, // x^218112 mod p(x)` << 1, x^218176 mod p(x)` << 1
- 0x1b81f672UL, 0x00000000UL, 0x322b267eUL, 0x00000001UL, // x^217088 mod p(x)` << 1, x^217152 mod p(x)` << 1
- 0x9367c988UL, 0x00000000UL, 0x638231b6UL, 0x00000000UL, // x^216064 mod p(x)` << 1, x^216128 mod p(x)` << 1
- 0x717214caUL, 0x00000001UL, 0xee7f16f4UL, 0x00000001UL, // x^215040 mod p(x)` << 1, x^215104 mod p(x)` << 1
- 0x9f47d820UL, 0x00000000UL, 0x17d9924aUL, 0x00000001UL, // x^214016 mod p(x)` << 1, x^214080 mod p(x)` << 1
- 0x0d9a47d2UL, 0x00000001UL, 0xe1a9e0c4UL, 0x00000000UL, // x^212992 mod p(x)` << 1, x^213056 mod p(x)` << 1
- 0xa696c58cUL, 0x00000000UL, 0x403731dcUL, 0x00000001UL, // x^211968 mod p(x)` << 1, x^212032 mod p(x)` << 1
- 0x2aa28ec6UL, 0x00000000UL, 0xa5ea9682UL, 0x00000001UL, // x^210944 mod p(x)` << 1, x^211008 mod p(x)` << 1
- 0xfe18fd9aUL, 0x00000001UL, 0x01c5c578UL, 0x00000001UL, // x^209920 mod p(x)` << 1, x^209984 mod p(x)` << 1
- 0x9d4fc1aeUL, 0x00000001UL, 0xdddf6494UL, 0x00000000UL, // x^208896 mod p(x)` << 1, x^208960 mod p(x)` << 1
- 0xba0e3deaUL, 0x00000001UL, 0xf1c3db28UL, 0x00000000UL, // x^207872 mod p(x)` << 1, x^207936 mod p(x)` << 1
- 0x74b59a5eUL, 0x00000000UL, 0x3112fb9cUL, 0x00000001UL, // x^206848 mod p(x)` << 1, x^206912 mod p(x)` << 1
- 0xf2b5ea98UL, 0x00000000UL, 0xb680b906UL, 0x00000000UL, // x^205824 mod p(x)` << 1, x^205888 mod p(x)` << 1
- 0x87132676UL, 0x00000001UL, 0x1a282932UL, 0x00000000UL, // x^204800 mod p(x)` << 1, x^204864 mod p(x)` << 1
- 0x0a8c6ad4UL, 0x00000001UL, 0x89406e7eUL, 0x00000000UL, // x^203776 mod p(x)` << 1, x^203840 mod p(x)` << 1
- 0xe21dfe70UL, 0x00000001UL, 0xdef6be8cUL, 0x00000001UL, // x^202752 mod p(x)` << 1, x^202816 mod p(x)` << 1
- 0xda0050e4UL, 0x00000001UL, 0x75258728UL, 0x00000000UL, // x^201728 mod p(x)` << 1, x^201792 mod p(x)` << 1
- 0x772172aeUL, 0x00000000UL, 0x9536090aUL, 0x00000001UL, // x^200704 mod p(x)` << 1, x^200768 mod p(x)` << 1
- 0xe47724aaUL, 0x00000000UL, 0xf2455bfcUL, 0x00000000UL, // x^199680 mod p(x)` << 1, x^199744 mod p(x)` << 1
- 0x3cd63ac4UL, 0x00000000UL, 0x8c40baf4UL, 0x00000001UL, // x^198656 mod p(x)` << 1, x^198720 mod p(x)` << 1
- 0xbf47d352UL, 0x00000001UL, 0x4cd390d4UL, 0x00000000UL, // x^197632 mod p(x)` << 1, x^197696 mod p(x)` << 1
- 0x8dc1d708UL, 0x00000001UL, 0xe4ece95aUL, 0x00000001UL, // x^196608 mod p(x)` << 1, x^196672 mod p(x)` << 1
- 0x2d4620a4UL, 0x00000000UL, 0x1a3ee918UL, 0x00000000UL, // x^195584 mod p(x)` << 1, x^195648 mod p(x)` << 1
- 0x58fd1740UL, 0x00000000UL, 0x7c652fb8UL, 0x00000000UL, // x^194560 mod p(x)` << 1, x^194624 mod p(x)` << 1
- 0xdadd9bfcUL, 0x00000000UL, 0x1c67842cUL, 0x00000001UL, // x^193536 mod p(x)` << 1, x^193600 mod p(x)` << 1
- 0xea2140beUL, 0x00000001UL, 0x254f759cUL, 0x00000000UL, // x^192512 mod p(x)` << 1, x^192576 mod p(x)` << 1
- 0x9de128baUL, 0x00000000UL, 0x7ece94caUL, 0x00000000UL, // x^191488 mod p(x)` << 1, x^191552 mod p(x)` << 1
- 0x3ac3aa8eUL, 0x00000001UL, 0x38f258c2UL, 0x00000000UL, // x^190464 mod p(x)` << 1, x^190528 mod p(x)` << 1
- 0x99980562UL, 0x00000000UL, 0xcdf17b00UL, 0x00000001UL, // x^189440 mod p(x)` << 1, x^189504 mod p(x)` << 1
- 0xc1579c86UL, 0x00000001UL, 0x1f882c16UL, 0x00000001UL, // x^188416 mod p(x)` << 1, x^188480 mod p(x)` << 1
- 0x68dbbf94UL, 0x00000000UL, 0x00093fc8UL, 0x00000001UL, // x^187392 mod p(x)` << 1, x^187456 mod p(x)` << 1
- 0x4509fb04UL, 0x00000000UL, 0xcd684f16UL, 0x00000001UL, // x^186368 mod p(x)` << 1, x^186432 mod p(x)` << 1
- 0x202f6398UL, 0x00000001UL, 0x4bc6a70aUL, 0x00000000UL, // x^185344 mod p(x)` << 1, x^185408 mod p(x)` << 1
- 0x3aea243eUL, 0x00000001UL, 0x4fc7e8e4UL, 0x00000000UL, // x^184320 mod p(x)` << 1, x^184384 mod p(x)` << 1
- 0xb4052ae6UL, 0x00000001UL, 0x30103f1cUL, 0x00000001UL, // x^183296 mod p(x)` << 1, x^183360 mod p(x)` << 1
- 0xcd2a0ae8UL, 0x00000001UL, 0x11b0024cUL, 0x00000001UL, // x^182272 mod p(x)` << 1, x^182336 mod p(x)` << 1
- 0xfe4aa8b4UL, 0x00000001UL, 0x0b3079daUL, 0x00000001UL, // x^181248 mod p(x)` << 1, x^181312 mod p(x)` << 1
- 0xd1559a42UL, 0x00000001UL, 0x0192bcc2UL, 0x00000001UL, // x^180224 mod p(x)` << 1, x^180288 mod p(x)` << 1
- 0xf3e05eccUL, 0x00000001UL, 0x74838d50UL, 0x00000000UL, // x^179200 mod p(x)` << 1, x^179264 mod p(x)` << 1
- 0x04ddd2ccUL, 0x00000001UL, 0x1b20f520UL, 0x00000000UL, // x^178176 mod p(x)` << 1, x^178240 mod p(x)` << 1
- 0x5393153cUL, 0x00000001UL, 0x50c3590aUL, 0x00000000UL, // x^177152 mod p(x)` << 1, x^177216 mod p(x)` << 1
- 0x57e942c6UL, 0x00000000UL, 0xb41cac8eUL, 0x00000000UL, // x^176128 mod p(x)` << 1, x^176192 mod p(x)` << 1
- 0x2c633850UL, 0x00000001UL, 0x0c72cc78UL, 0x00000000UL, // x^175104 mod p(x)` << 1, x^175168 mod p(x)` << 1
- 0xebcaae4cUL, 0x00000000UL, 0x30cdb032UL, 0x00000000UL, // x^174080 mod p(x)` << 1, x^174144 mod p(x)` << 1
- 0x3ee532a6UL, 0x00000001UL, 0x3e09fc32UL, 0x00000001UL, // x^173056 mod p(x)` << 1, x^173120 mod p(x)` << 1
- 0xbf0cbc7eUL, 0x00000001UL, 0x1ed624d2UL, 0x00000000UL, // x^172032 mod p(x)` << 1, x^172096 mod p(x)` << 1
- 0xd50b7a5aUL, 0x00000000UL, 0x781aee1aUL, 0x00000000UL, // x^171008 mod p(x)` << 1, x^171072 mod p(x)` << 1
- 0x02fca6e8UL, 0x00000000UL, 0xc4d8348cUL, 0x00000001UL, // x^169984 mod p(x)` << 1, x^170048 mod p(x)` << 1
- 0x7af40044UL, 0x00000000UL, 0x57a40336UL, 0x00000000UL, // x^168960 mod p(x)` << 1, x^169024 mod p(x)` << 1
- 0x16178744UL, 0x00000000UL, 0x85544940UL, 0x00000000UL, // x^167936 mod p(x)` << 1, x^168000 mod p(x)` << 1
- 0x4c177458UL, 0x00000001UL, 0x9cd21e80UL, 0x00000001UL, // x^166912 mod p(x)` << 1, x^166976 mod p(x)` << 1
- 0x1b6ddf04UL, 0x00000001UL, 0x3eb95bc0UL, 0x00000001UL, // x^165888 mod p(x)` << 1, x^165952 mod p(x)` << 1
- 0xf3e29cccUL, 0x00000001UL, 0xdfc9fdfcUL, 0x00000001UL, // x^164864 mod p(x)` << 1, x^164928 mod p(x)` << 1
- 0x35ae7562UL, 0x00000001UL, 0xcd028bc2UL, 0x00000000UL, // x^163840 mod p(x)` << 1, x^163904 mod p(x)` << 1
- 0x90ef812cUL, 0x00000001UL, 0x90db8c44UL, 0x00000000UL, // x^162816 mod p(x)` << 1, x^162880 mod p(x)` << 1
- 0x67a2c786UL, 0x00000000UL, 0x0010a4ceUL, 0x00000001UL, // x^161792 mod p(x)` << 1, x^161856 mod p(x)` << 1
- 0x48b9496cUL, 0x00000000UL, 0xc8f4c72cUL, 0x00000001UL, // x^160768 mod p(x)` << 1, x^160832 mod p(x)` << 1
- 0x5a422de6UL, 0x00000001UL, 0x1c26170cUL, 0x00000000UL, // x^159744 mod p(x)` << 1, x^159808 mod p(x)` << 1
- 0xef0e3640UL, 0x00000001UL, 0xe3fccf68UL, 0x00000000UL, // x^158720 mod p(x)` << 1, x^158784 mod p(x)` << 1
- 0x006d2d26UL, 0x00000001UL, 0xd513ed24UL, 0x00000000UL, // x^157696 mod p(x)` << 1, x^157760 mod p(x)` << 1
- 0x170d56d6UL, 0x00000001UL, 0x141beadaUL, 0x00000000UL, // x^156672 mod p(x)` << 1, x^156736 mod p(x)` << 1
- 0xa5fb613cUL, 0x00000000UL, 0x1071aea0UL, 0x00000001UL, // x^155648 mod p(x)` << 1, x^155712 mod p(x)` << 1
- 0x40bbf7fcUL, 0x00000000UL, 0x2e19080aUL, 0x00000001UL, // x^154624 mod p(x)` << 1, x^154688 mod p(x)` << 1
- 0x6ac3a5b2UL, 0x00000001UL, 0x00ecf826UL, 0x00000001UL, // x^153600 mod p(x)` << 1, x^153664 mod p(x)` << 1
- 0xabf16230UL, 0x00000000UL, 0x69b09412UL, 0x00000000UL, // x^152576 mod p(x)` << 1, x^152640 mod p(x)` << 1
- 0xebe23facUL, 0x00000001UL, 0x22297bacUL, 0x00000001UL, // x^151552 mod p(x)` << 1, x^151616 mod p(x)` << 1
- 0x8b6a0894UL, 0x00000000UL, 0xe9e4b068UL, 0x00000000UL, // x^150528 mod p(x)` << 1, x^150592 mod p(x)` << 1
- 0x288ea478UL, 0x00000001UL, 0x4b38651aUL, 0x00000000UL, // x^149504 mod p(x)` << 1, x^149568 mod p(x)` << 1
- 0x6619c442UL, 0x00000001UL, 0x468360e2UL, 0x00000001UL, // x^148480 mod p(x)` << 1, x^148544 mod p(x)` << 1
- 0x86230038UL, 0x00000000UL, 0x121c2408UL, 0x00000000UL, // x^147456 mod p(x)` << 1, x^147520 mod p(x)` << 1
- 0x7746a756UL, 0x00000001UL, 0xda7e7d08UL, 0x00000000UL, // x^146432 mod p(x)` << 1, x^146496 mod p(x)` << 1
- 0x91b8f8f8UL, 0x00000001UL, 0x058d7652UL, 0x00000001UL, // x^145408 mod p(x)` << 1, x^145472 mod p(x)` << 1
- 0x8e167708UL, 0x00000000UL, 0x4a098a90UL, 0x00000001UL, // x^144384 mod p(x)` << 1, x^144448 mod p(x)` << 1
- 0x48b22d54UL, 0x00000001UL, 0x20dbe72eUL, 0x00000000UL, // x^143360 mod p(x)` << 1, x^143424 mod p(x)` << 1
- 0x44ba2c3cUL, 0x00000000UL, 0x1e7323e8UL, 0x00000001UL, // x^142336 mod p(x)` << 1, x^142400 mod p(x)` << 1
- 0xb54d2b52UL, 0x00000000UL, 0xd5d4bf94UL, 0x00000000UL, // x^141312 mod p(x)` << 1, x^141376 mod p(x)` << 1
- 0x05a4fd8aUL, 0x00000000UL, 0x99d8746cUL, 0x00000001UL, // x^140288 mod p(x)` << 1, x^140352 mod p(x)` << 1
- 0x39f9fc46UL, 0x00000001UL, 0xce9ca8a0UL, 0x00000000UL, // x^139264 mod p(x)` << 1, x^139328 mod p(x)` << 1
- 0x5a1fa824UL, 0x00000001UL, 0x136edeceUL, 0x00000000UL, // x^138240 mod p(x)` << 1, x^138304 mod p(x)` << 1
- 0x0a61ae4cUL, 0x00000000UL, 0x9b92a068UL, 0x00000001UL, // x^137216 mod p(x)` << 1, x^137280 mod p(x)` << 1
- 0x45e9113eUL, 0x00000001UL, 0x71d62206UL, 0x00000000UL, // x^136192 mod p(x)` << 1, x^136256 mod p(x)` << 1
- 0x6a348448UL, 0x00000000UL, 0xdfc50158UL, 0x00000000UL, // x^135168 mod p(x)` << 1, x^135232 mod p(x)` << 1
- 0x4d80a08cUL, 0x00000000UL, 0x517626bcUL, 0x00000001UL, // x^134144 mod p(x)` << 1, x^134208 mod p(x)` << 1
- 0x4b6837a0UL, 0x00000001UL, 0x48d1e4faUL, 0x00000001UL, // x^133120 mod p(x)` << 1, x^133184 mod p(x)` << 1
- 0x6896a7fcUL, 0x00000001UL, 0x94d8266eUL, 0x00000000UL, // x^132096 mod p(x)` << 1, x^132160 mod p(x)` << 1
- 0x4f187140UL, 0x00000001UL, 0x606c5e34UL, 0x00000000UL, // x^131072 mod p(x)` << 1, x^131136 mod p(x)` << 1
- 0x9581b9daUL, 0x00000001UL, 0x9766beaaUL, 0x00000001UL, // x^130048 mod p(x)` << 1, x^130112 mod p(x)` << 1
- 0x091bc984UL, 0x00000001UL, 0xd80c506cUL, 0x00000001UL, // x^129024 mod p(x)` << 1, x^129088 mod p(x)` << 1
- 0x1067223cUL, 0x00000000UL, 0x1e73837cUL, 0x00000000UL, // x^128000 mod p(x)` << 1, x^128064 mod p(x)` << 1
- 0xab16ea02UL, 0x00000001UL, 0x64d587deUL, 0x00000000UL, // x^126976 mod p(x)` << 1, x^127040 mod p(x)` << 1
- 0x3c4598a8UL, 0x00000001UL, 0xf4a507b0UL, 0x00000000UL, // x^125952 mod p(x)` << 1, x^126016 mod p(x)` << 1
- 0xb3735430UL, 0x00000000UL, 0x40e342fcUL, 0x00000000UL, // x^124928 mod p(x)` << 1, x^124992 mod p(x)` << 1
- 0xbb3fc0c0UL, 0x00000001UL, 0xd5ad9c3aUL, 0x00000001UL, // x^123904 mod p(x)` << 1, x^123968 mod p(x)` << 1
- 0x570ae19cUL, 0x00000001UL, 0x94a691a4UL, 0x00000000UL, // x^122880 mod p(x)` << 1, x^122944 mod p(x)` << 1
- 0xea910712UL, 0x00000001UL, 0x271ecdfaUL, 0x00000001UL, // x^121856 mod p(x)` << 1, x^121920 mod p(x)` << 1
- 0x67127128UL, 0x00000001UL, 0x9e54475aUL, 0x00000000UL, // x^120832 mod p(x)` << 1, x^120896 mod p(x)` << 1
- 0x19e790a2UL, 0x00000000UL, 0xc9c099eeUL, 0x00000000UL, // x^119808 mod p(x)` << 1, x^119872 mod p(x)` << 1
- 0x3788f710UL, 0x00000000UL, 0x9a2f736cUL, 0x00000000UL, // x^118784 mod p(x)` << 1, x^118848 mod p(x)` << 1
- 0x682a160eUL, 0x00000001UL, 0xbb9f4996UL, 0x00000000UL, // x^117760 mod p(x)` << 1, x^117824 mod p(x)` << 1
- 0x7f0ebd2eUL, 0x00000000UL, 0xdb688050UL, 0x00000001UL, // x^116736 mod p(x)` << 1, x^116800 mod p(x)` << 1
- 0x2b032080UL, 0x00000000UL, 0xe9b10af4UL, 0x00000000UL, // x^115712 mod p(x)` << 1, x^115776 mod p(x)` << 1
- 0xcfd1664aUL, 0x00000000UL, 0x2d4545e4UL, 0x00000001UL, // x^114688 mod p(x)` << 1, x^114752 mod p(x)` << 1
- 0xaa1181c2UL, 0x00000000UL, 0x0361139cUL, 0x00000000UL, // x^113664 mod p(x)` << 1, x^113728 mod p(x)` << 1
- 0xddd08002UL, 0x00000000UL, 0xa5a1a3a8UL, 0x00000001UL, // x^112640 mod p(x)` << 1, x^112704 mod p(x)` << 1
- 0xe8dd0446UL, 0x00000000UL, 0x6844e0b0UL, 0x00000000UL, // x^111616 mod p(x)` << 1, x^111680 mod p(x)` << 1
- 0xbbd94a00UL, 0x00000001UL, 0xc3762f28UL, 0x00000000UL, // x^110592 mod p(x)` << 1, x^110656 mod p(x)` << 1
- 0xab6cd180UL, 0x00000000UL, 0xd26287a2UL, 0x00000001UL, // x^109568 mod p(x)` << 1, x^109632 mod p(x)` << 1
- 0x31803ce2UL, 0x00000000UL, 0xf6f0bba8UL, 0x00000001UL, // x^108544 mod p(x)` << 1, x^108608 mod p(x)` << 1
- 0x24f40b0cUL, 0x00000000UL, 0x2ffabd62UL, 0x00000000UL, // x^107520 mod p(x)` << 1, x^107584 mod p(x)` << 1
- 0xba1d9834UL, 0x00000001UL, 0xfb4516b8UL, 0x00000000UL, // x^106496 mod p(x)` << 1, x^106560 mod p(x)` << 1
- 0x04de61aaUL, 0x00000001UL, 0x8cfa961cUL, 0x00000001UL, // x^105472 mod p(x)` << 1, x^105536 mod p(x)` << 1
- 0x13e40d46UL, 0x00000001UL, 0x9e588d52UL, 0x00000001UL, // x^104448 mod p(x)` << 1, x^104512 mod p(x)` << 1
- 0x415598a0UL, 0x00000001UL, 0x180f0bbcUL, 0x00000001UL, // x^103424 mod p(x)` << 1, x^103488 mod p(x)` << 1
- 0xbf6c8c90UL, 0x00000000UL, 0xe1d9177aUL, 0x00000000UL, // x^102400 mod p(x)` << 1, x^102464 mod p(x)` << 1
- 0x788b0504UL, 0x00000001UL, 0x05abc27cUL, 0x00000001UL, // x^101376 mod p(x)` << 1, x^101440 mod p(x)` << 1
- 0x38385d02UL, 0x00000000UL, 0x972e4a58UL, 0x00000000UL, // x^100352 mod p(x)` << 1, x^100416 mod p(x)` << 1
- 0xb6c83844UL, 0x00000001UL, 0x83499a5eUL, 0x00000001UL, // x^99328 mod p(x)` << 1, x^99392 mod p(x)` << 1
- 0x51061a8aUL, 0x00000000UL, 0xc96a8ccaUL, 0x00000001UL, // x^98304 mod p(x)` << 1, x^98368 mod p(x)` << 1
- 0x7351388aUL, 0x00000001UL, 0xa1a5b60cUL, 0x00000001UL, // x^97280 mod p(x)` << 1, x^97344 mod p(x)` << 1
- 0x32928f92UL, 0x00000001UL, 0xe4b6ac9cUL, 0x00000000UL, // x^96256 mod p(x)` << 1, x^96320 mod p(x)` << 1
- 0xe6b4f48aUL, 0x00000000UL, 0x807e7f5aUL, 0x00000001UL, // x^95232 mod p(x)` << 1, x^95296 mod p(x)` << 1
- 0x39d15e90UL, 0x00000000UL, 0x7a7e3bc8UL, 0x00000001UL, // x^94208 mod p(x)` << 1, x^94272 mod p(x)` << 1
- 0x312d6074UL, 0x00000000UL, 0xd73975daUL, 0x00000000UL, // x^93184 mod p(x)` << 1, x^93248 mod p(x)` << 1
- 0x7bbb2cc4UL, 0x00000001UL, 0x7375d038UL, 0x00000001UL, // x^92160 mod p(x)` << 1, x^92224 mod p(x)` << 1
- 0x6ded3e18UL, 0x00000001UL, 0x193680bcUL, 0x00000000UL, // x^91136 mod p(x)` << 1, x^91200 mod p(x)` << 1
- 0xf1638b16UL, 0x00000000UL, 0x999b06f6UL, 0x00000000UL, // x^90112 mod p(x)` << 1, x^90176 mod p(x)` << 1
- 0xd38b9eccUL, 0x00000001UL, 0xf685d2b8UL, 0x00000001UL, // x^89088 mod p(x)` << 1, x^89152 mod p(x)` << 1
- 0x8b8d09dcUL, 0x00000001UL, 0xf4ecbed2UL, 0x00000001UL, // x^88064 mod p(x)` << 1, x^88128 mod p(x)` << 1
- 0xe7bc27d2UL, 0x00000000UL, 0xba16f1a0UL, 0x00000000UL, // x^87040 mod p(x)` << 1, x^87104 mod p(x)` << 1
- 0x275e1e96UL, 0x00000000UL, 0x15aceac4UL, 0x00000001UL, // x^86016 mod p(x)` << 1, x^86080 mod p(x)` << 1
- 0xe2e3031eUL, 0x00000000UL, 0xaeff6292UL, 0x00000001UL, // x^84992 mod p(x)` << 1, x^85056 mod p(x)` << 1
- 0x041c84d8UL, 0x00000001UL, 0x9640124cUL, 0x00000000UL, // x^83968 mod p(x)` << 1, x^84032 mod p(x)` << 1
- 0x706ce672UL, 0x00000000UL, 0x14f41f02UL, 0x00000001UL, // x^82944 mod p(x)` << 1, x^83008 mod p(x)` << 1
- 0x5d5070daUL, 0x00000001UL, 0x9c5f3586UL, 0x00000000UL, // x^81920 mod p(x)` << 1, x^81984 mod p(x)` << 1
- 0x38f9493aUL, 0x00000000UL, 0x878275faUL, 0x00000001UL, // x^80896 mod p(x)` << 1, x^80960 mod p(x)` << 1
- 0xa3348a76UL, 0x00000000UL, 0xddc42ce8UL, 0x00000000UL, // x^79872 mod p(x)` << 1, x^79936 mod p(x)` << 1
- 0xad0aab92UL, 0x00000001UL, 0x81d2c73aUL, 0x00000001UL, // x^78848 mod p(x)` << 1, x^78912 mod p(x)` << 1
- 0x9e85f712UL, 0x00000001UL, 0x41c9320aUL, 0x00000001UL, // x^77824 mod p(x)` << 1, x^77888 mod p(x)` << 1
- 0x5a871e76UL, 0x00000000UL, 0x5235719aUL, 0x00000001UL, // x^76800 mod p(x)` << 1, x^76864 mod p(x)` << 1
- 0x7249c662UL, 0x00000001UL, 0xbe27d804UL, 0x00000000UL, // x^75776 mod p(x)` << 1, x^75840 mod p(x)` << 1
- 0x3a084712UL, 0x00000000UL, 0x6242d45aUL, 0x00000000UL, // x^74752 mod p(x)` << 1, x^74816 mod p(x)` << 1
- 0xed438478UL, 0x00000000UL, 0x9a53638eUL, 0x00000000UL, // x^73728 mod p(x)` << 1, x^73792 mod p(x)` << 1
- 0xabac34ccUL, 0x00000000UL, 0x001ecfb6UL, 0x00000001UL, // x^72704 mod p(x)` << 1, x^72768 mod p(x)` << 1
- 0x5f35ef3eUL, 0x00000000UL, 0x6d7c2d64UL, 0x00000001UL, // x^71680 mod p(x)` << 1, x^71744 mod p(x)` << 1
- 0x47d6608cUL, 0x00000000UL, 0xd0ce46c0UL, 0x00000001UL, // x^70656 mod p(x)` << 1, x^70720 mod p(x)` << 1
- 0x2d01470eUL, 0x00000000UL, 0x24c907b4UL, 0x00000001UL, // x^69632 mod p(x)` << 1, x^69696 mod p(x)` << 1
- 0x58bbc7b0UL, 0x00000001UL, 0x18a555caUL, 0x00000000UL, // x^68608 mod p(x)` << 1, x^68672 mod p(x)` << 1
- 0xc0a23e8eUL, 0x00000000UL, 0x6b0980bcUL, 0x00000000UL, // x^67584 mod p(x)` << 1, x^67648 mod p(x)` << 1
- 0xebd85c88UL, 0x00000001UL, 0x8bbba964UL, 0x00000000UL, // x^66560 mod p(x)` << 1, x^66624 mod p(x)` << 1
- 0x9ee20bb2UL, 0x00000001UL, 0x070a5a1eUL, 0x00000001UL, // x^65536 mod p(x)` << 1, x^65600 mod p(x)` << 1
- 0xacabf2d6UL, 0x00000001UL, 0x2204322aUL, 0x00000000UL, // x^64512 mod p(x)` << 1, x^64576 mod p(x)` << 1
- 0xb7963d56UL, 0x00000001UL, 0xa27524d0UL, 0x00000000UL, // x^63488 mod p(x)` << 1, x^63552 mod p(x)` << 1
- 0x7bffa1feUL, 0x00000001UL, 0x20b1e4baUL, 0x00000000UL, // x^62464 mod p(x)` << 1, x^62528 mod p(x)` << 1
- 0x1f15333eUL, 0x00000000UL, 0x32cc27fcUL, 0x00000000UL, // x^61440 mod p(x)` << 1, x^61504 mod p(x)` << 1
- 0x8593129eUL, 0x00000001UL, 0x44dd22b8UL, 0x00000000UL, // x^60416 mod p(x)` << 1, x^60480 mod p(x)` << 1
- 0x9cb32602UL, 0x00000001UL, 0xdffc9e0aUL, 0x00000000UL, // x^59392 mod p(x)` << 1, x^59456 mod p(x)` << 1
- 0x42b05cc8UL, 0x00000001UL, 0xb7a0ed14UL, 0x00000001UL, // x^58368 mod p(x)` << 1, x^58432 mod p(x)` << 1
- 0xbe49e7a4UL, 0x00000001UL, 0xc7842488UL, 0x00000000UL, // x^57344 mod p(x)` << 1, x^57408 mod p(x)` << 1
- 0x08f69d6cUL, 0x00000001UL, 0xc02a4feeUL, 0x00000001UL, // x^56320 mod p(x)` << 1, x^56384 mod p(x)` << 1
- 0x6c0971f0UL, 0x00000000UL, 0x3c273778UL, 0x00000000UL, // x^55296 mod p(x)` << 1, x^55360 mod p(x)` << 1
- 0x5b16467aUL, 0x00000000UL, 0xd63f8894UL, 0x00000001UL, // x^54272 mod p(x)` << 1, x^54336 mod p(x)` << 1
- 0x551a628eUL, 0x00000001UL, 0x6be557d6UL, 0x00000000UL, // x^53248 mod p(x)` << 1, x^53312 mod p(x)` << 1
- 0x9e42ea92UL, 0x00000001UL, 0x6a7806eaUL, 0x00000000UL, // x^52224 mod p(x)` << 1, x^52288 mod p(x)` << 1
- 0x2fa83ff2UL, 0x00000001UL, 0x6155aa0cUL, 0x00000001UL, // x^51200 mod p(x)` << 1, x^51264 mod p(x)` << 1
- 0x1ca9cde0UL, 0x00000001UL, 0x908650acUL, 0x00000000UL, // x^50176 mod p(x)` << 1, x^50240 mod p(x)` << 1
- 0xc8e5cd74UL, 0x00000000UL, 0xaa5a8084UL, 0x00000000UL, // x^49152 mod p(x)` << 1, x^49216 mod p(x)` << 1
- 0x96c27f0cUL, 0x00000000UL, 0x91bb500aUL, 0x00000001UL, // x^48128 mod p(x)` << 1, x^48192 mod p(x)` << 1
- 0x2baed926UL, 0x00000000UL, 0x64e9bed0UL, 0x00000000UL, // x^47104 mod p(x)` << 1, x^47168 mod p(x)` << 1
- 0x7c8de8d2UL, 0x00000001UL, 0x9444f302UL, 0x00000000UL, // x^46080 mod p(x)` << 1, x^46144 mod p(x)` << 1
- 0xd43d6068UL, 0x00000000UL, 0x9db07d3cUL, 0x00000001UL, // x^45056 mod p(x)` << 1, x^45120 mod p(x)` << 1
- 0xcb2c4b26UL, 0x00000000UL, 0x359e3e6eUL, 0x00000001UL, // x^44032 mod p(x)` << 1, x^44096 mod p(x)` << 1
- 0x45b8da26UL, 0x00000001UL, 0xe4f10dd2UL, 0x00000001UL, // x^43008 mod p(x)` << 1, x^43072 mod p(x)` << 1
- 0x8fff4b08UL, 0x00000001UL, 0x24f5735eUL, 0x00000001UL, // x^41984 mod p(x)` << 1, x^42048 mod p(x)` << 1
- 0x50b58ed0UL, 0x00000001UL, 0x24760a4cUL, 0x00000001UL, // x^40960 mod p(x)` << 1, x^41024 mod p(x)` << 1
- 0x549f39bcUL, 0x00000001UL, 0x0f1fc186UL, 0x00000000UL, // x^39936 mod p(x)` << 1, x^40000 mod p(x)` << 1
- 0xef4d2f42UL, 0x00000000UL, 0x150e4cc4UL, 0x00000000UL, // x^38912 mod p(x)` << 1, x^38976 mod p(x)` << 1
- 0xb1468572UL, 0x00000001UL, 0x2a6204e8UL, 0x00000000UL, // x^37888 mod p(x)` << 1, x^37952 mod p(x)` << 1
- 0x3d7403b2UL, 0x00000001UL, 0xbeb1d432UL, 0x00000000UL, // x^36864 mod p(x)` << 1, x^36928 mod p(x)` << 1
- 0xa4681842UL, 0x00000001UL, 0x35f3f1f0UL, 0x00000001UL, // x^35840 mod p(x)` << 1, x^35904 mod p(x)` << 1
- 0x67714492UL, 0x00000001UL, 0x74fe2232UL, 0x00000000UL, // x^34816 mod p(x)` << 1, x^34880 mod p(x)` << 1
- 0xe599099aUL, 0x00000001UL, 0x1ac6e2baUL, 0x00000000UL, // x^33792 mod p(x)` << 1, x^33856 mod p(x)` << 1
- 0xfe128194UL, 0x00000000UL, 0x13fca91eUL, 0x00000000UL, // x^32768 mod p(x)` << 1, x^32832 mod p(x)` << 1
- 0x77e8b990UL, 0x00000000UL, 0x83f4931eUL, 0x00000001UL, // x^31744 mod p(x)` << 1, x^31808 mod p(x)` << 1
- 0xa267f63aUL, 0x00000001UL, 0xb6d9b4e4UL, 0x00000000UL, // x^30720 mod p(x)` << 1, x^30784 mod p(x)` << 1
- 0x945c245aUL, 0x00000001UL, 0xb5188656UL, 0x00000000UL, // x^29696 mod p(x)` << 1, x^29760 mod p(x)` << 1
- 0x49002e76UL, 0x00000001UL, 0x27a81a84UL, 0x00000000UL, // x^28672 mod p(x)` << 1, x^28736 mod p(x)` << 1
- 0xbb8310a4UL, 0x00000001UL, 0x25699258UL, 0x00000001UL, // x^27648 mod p(x)` << 1, x^27712 mod p(x)` << 1
- 0x9ec60bccUL, 0x00000001UL, 0xb23de796UL, 0x00000001UL, // x^26624 mod p(x)` << 1, x^26688 mod p(x)` << 1
- 0x2d8590aeUL, 0x00000001UL, 0xfe4365dcUL, 0x00000000UL, // x^25600 mod p(x)` << 1, x^25664 mod p(x)` << 1
- 0x65b00684UL, 0x00000000UL, 0xc68f497aUL, 0x00000000UL, // x^24576 mod p(x)` << 1, x^24640 mod p(x)` << 1
- 0x5e5aeadcUL, 0x00000001UL, 0xfbf521eeUL, 0x00000000UL, // x^23552 mod p(x)` << 1, x^23616 mod p(x)` << 1
- 0xb77ff2b0UL, 0x00000000UL, 0x5eac3378UL, 0x00000001UL, // x^22528 mod p(x)` << 1, x^22592 mod p(x)` << 1
- 0x88da2ff6UL, 0x00000001UL, 0x34914b90UL, 0x00000001UL, // x^21504 mod p(x)` << 1, x^21568 mod p(x)` << 1
- 0x63da929aUL, 0x00000000UL, 0x16335cfeUL, 0x00000000UL, // x^20480 mod p(x)` << 1, x^20544 mod p(x)` << 1
- 0x389caa80UL, 0x00000001UL, 0x0372d10cUL, 0x00000001UL, // x^19456 mod p(x)` << 1, x^19520 mod p(x)` << 1
- 0x3db599d2UL, 0x00000001UL, 0x5097b908UL, 0x00000001UL, // x^18432 mod p(x)` << 1, x^18496 mod p(x)` << 1
- 0x22505a86UL, 0x00000001UL, 0x227a7572UL, 0x00000001UL, // x^17408 mod p(x)` << 1, x^17472 mod p(x)` << 1
- 0x6bd72746UL, 0x00000001UL, 0x9a8f75c0UL, 0x00000000UL, // x^16384 mod p(x)` << 1, x^16448 mod p(x)` << 1
- 0xc3faf1d4UL, 0x00000001UL, 0x682c77a2UL, 0x00000000UL, // x^15360 mod p(x)` << 1, x^15424 mod p(x)` << 1
- 0x111c826cUL, 0x00000001UL, 0x231f091cUL, 0x00000000UL, // x^14336 mod p(x)` << 1, x^14400 mod p(x)` << 1
- 0x153e9fb2UL, 0x00000000UL, 0x7d4439f2UL, 0x00000000UL, // x^13312 mod p(x)` << 1, x^13376 mod p(x)` << 1
- 0x2b1f7b60UL, 0x00000000UL, 0x7e221efcUL, 0x00000001UL, // x^12288 mod p(x)` << 1, x^12352 mod p(x)` << 1
- 0xb1dba570UL, 0x00000000UL, 0x67457c38UL, 0x00000001UL, // x^11264 mod p(x)` << 1, x^11328 mod p(x)` << 1
- 0xf6397b76UL, 0x00000001UL, 0xbdf081c4UL, 0x00000000UL, // x^10240 mod p(x)` << 1, x^10304 mod p(x)` << 1
- 0x56335214UL, 0x00000001UL, 0x6286d6b0UL, 0x00000001UL, // x^9216 mod p(x)` << 1, x^9280 mod p(x)` << 1
- 0xd70e3986UL, 0x00000001UL, 0xc84f001cUL, 0x00000000UL, // x^8192 mod p(x)` << 1, x^8256 mod p(x)` << 1
- 0x3701a774UL, 0x00000000UL, 0x64efe7c0UL, 0x00000000UL, // x^7168 mod p(x)` << 1, x^7232 mod p(x)` << 1
- 0xac81ef72UL, 0x00000000UL, 0x0ac2d904UL, 0x00000000UL, // x^6144 mod p(x)` << 1, x^6208 mod p(x)` << 1
- 0x33212464UL, 0x00000001UL, 0xfd226d14UL, 0x00000000UL, // x^5120 mod p(x)` << 1, x^5184 mod p(x)` << 1
- 0xe4e45610UL, 0x00000000UL, 0x1cfd42e0UL, 0x00000001UL, // x^4096 mod p(x)` << 1, x^4160 mod p(x)` << 1
- 0x0c1bd370UL, 0x00000000UL, 0x6e5a5678UL, 0x00000001UL, // x^3072 mod p(x)` << 1, x^3136 mod p(x)` << 1
- 0xa7b9e7a6UL, 0x00000001UL, 0xd888fe22UL, 0x00000001UL, // x^2048 mod p(x)` << 1, x^2112 mod p(x)` << 1
- 0x7d657a10UL, 0x00000000UL, 0xaf77fcd4UL, 0x00000001UL, // x^1024 mod p(x)` << 1, x^1088 mod p(x)` << 1
-
- // Reduce final 1024-2048 bits to 64 bits, shifting 32 bits to include the trailing 32 bits of zeros
- 0xec447f11UL, 0x99168a18UL, 0x13e8221eUL, 0xed837b26UL, // x^2048 mod p(x)`, x^2016 mod p(x)`, x^1984 mod p(x)`, x^1952 mod p(x)`
- 0x8fd2cd3cUL, 0xe23e954eUL, 0x47b9ce5aUL, 0xc8acdd81UL, // x^1920 mod p(x)`, x^1888 mod p(x)`, x^1856 mod p(x)`, x^1824 mod p(x)`
- 0x6b1d2b53UL, 0x92f8befeUL, 0xd4277e25UL, 0xd9ad6d87UL, // x^1792 mod p(x)`, x^1760 mod p(x)`, x^1728 mod p(x)`, x^1696 mod p(x)`
- 0x291ea462UL, 0xf38a3556UL, 0x33fbca3bUL, 0xc10ec5e0UL, // x^1664 mod p(x)`, x^1632 mod p(x)`, x^1600 mod p(x)`, x^1568 mod p(x)`
- 0x62b6ca4bUL, 0x974ac562UL, 0x82e02e2fUL, 0xc0b55b0eUL, // x^1536 mod p(x)`, x^1504 mod p(x)`, x^1472 mod p(x)`, x^1440 mod p(x)`
- 0x784d2a56UL, 0x855712b3UL, 0xe172334dUL, 0x71aa1df0UL, // x^1408 mod p(x)`, x^1376 mod p(x)`, x^1344 mod p(x)`, x^1312 mod p(x)`
- 0x0eaee722UL, 0xa5abe9f8UL, 0x3969324dUL, 0xfee3053eUL, // x^1280 mod p(x)`, x^1248 mod p(x)`, x^1216 mod p(x)`, x^1184 mod p(x)`
- 0xdb54814cUL, 0x1fa0943dUL, 0x3eb2bd08UL, 0xf44779b9UL, // x^1152 mod p(x)`, x^1120 mod p(x)`, x^1088 mod p(x)`, x^1056 mod p(x)`
- 0xd7bbfe6aUL, 0xa53ff440UL, 0x00cc3374UL, 0xf5449b3fUL, // x^1024 mod p(x)`, x^992 mod p(x)`, x^960 mod p(x)`, x^928 mod p(x)`
- 0x6325605cUL, 0xebe7e356UL, 0xd777606eUL, 0x6f8346e1UL, // x^896 mod p(x)`, x^864 mod p(x)`, x^832 mod p(x)`, x^800 mod p(x)`
- 0xe5b592b8UL, 0xc65a272cUL, 0xc0b95347UL, 0xe3ab4f2aUL, // x^768 mod p(x)`, x^736 mod p(x)`, x^704 mod p(x)`, x^672 mod p(x)`
- 0x4721589fUL, 0x5705a9caUL, 0x329ecc11UL, 0xaa2215eaUL, // x^640 mod p(x)`, x^608 mod p(x)`, x^576 mod p(x)`, x^544 mod p(x)`
- 0x88d14467UL, 0xe3720acbUL, 0xd95efd26UL, 0x1ed8f66eUL, // x^512 mod p(x)`, x^480 mod p(x)`, x^448 mod p(x)`, x^416 mod p(x)`
- 0x15141c31UL, 0xba1aca03UL, 0xa700e96aUL, 0x78ed02d5UL, // x^384 mod p(x)`, x^352 mod p(x)`, x^320 mod p(x)`, x^288 mod p(x)`
- 0xed627daeUL, 0xad2a31b3UL, 0x32b39da3UL, 0xba8ccbe8UL, // x^256 mod p(x)`, x^224 mod p(x)`, x^192 mod p(x)`, x^160 mod p(x)`
- 0xa06a2517UL, 0x6655004fUL, 0xb1e6b092UL, 0xedb88320UL // x^128 mod p(x)`, x^96 mod p(x)`, x^64 mod p(x)`, x^32 mod p(x)`
- };
-
- juint* ptr = (juint*) malloc(sizeof(juint) * CRC32_CONSTANTS_SIZE);
+// Constants to fold n words as needed by macroAssembler.
+juint* StubRoutines::ppc64::generate_crc_constants(juint reverse_poly) {
+ juint* ptr = (juint*) malloc(sizeof(juint) * 4 * (UNROLL_FACTOR2 - 1 + UNROLL_FACTOR / UNROLL_FACTOR2));
guarantee(((intptr_t)ptr & 0xF) == 0, "16-byte alignment needed");
guarantee(ptr != NULL, "allocation error of a crc table");
- memcpy((void*)ptr, constants, sizeof(juint) * CRC32_CONSTANTS_SIZE);
+
+ // Generate constants for outer loop
+ juint v0, v1, v2, v3 = 1;
+ for (int i = 0; i < UNROLL_FACTOR2 - 1; ++i) {
+ v0 = fold_word(v3, reverse_poly);
+ v1 = fold_word(v0, reverse_poly);
+ v2 = fold_word(v1, reverse_poly);
+ v3 = fold_word(v2, reverse_poly);
+#ifdef VM_LITTLE_ENDIAN
+ ptr[4*i ] = v3;
+ ptr[4*i+1] = v2;
+ ptr[4*i+2] = v3;
+ ptr[4*i+3] = v2;
+#else
+ ptr[4*i ] = v2;
+ ptr[4*i+1] = v3;
+ ptr[4*i+2] = v2;
+ ptr[4*i+3] = v3;
+#endif
+ }
+
+ // Generate constants for inner loop
+ juint* ptr2 = ptr + 4 * (UNROLL_FACTOR2 - 1);
+ v3 = 1; // Restart from scratch.
+ for (int i = 0; i < UNROLL_FACTOR; ++i) {
+ v0 = fold_word(v3, reverse_poly);
+ v1 = fold_word(v0, reverse_poly);
+ v2 = fold_word(v1, reverse_poly);
+ v3 = fold_word(v2, reverse_poly);
+ if (i % UNROLL_FACTOR2 == 0) {
+ int idx = UNROLL_FACTOR / UNROLL_FACTOR2 - 1 - i / UNROLL_FACTOR2;
+ for (int j = 0; j < 4; ++j) {
+#ifdef VM_LITTLE_ENDIAN
+ ptr2[4*idx ] = v3;
+ ptr2[4*idx+1] = v2;
+ ptr2[4*idx+2] = v1;
+ ptr2[4*idx+3] = v0;
+#else
+ ptr2[4*idx ] = v0;
+ ptr2[4*idx+1] = v1;
+ ptr2[4*idx+2] = v2;
+ ptr2[4*idx+3] = v3;
+#endif
+ }
+ }
+ }
+
return ptr;
}
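+// Layout sketch (illustrative): the first UNROLL_FACTOR2 - 1 quadwords
+// hold the outer-loop constants, each repeating one pair of fold words
+// across both vector halves; the remaining UNROLL_FACTOR / UNROLL_FACTOR2
+// quadwords hold the inner-loop constants, largest fold distance first,
+// so kernel_crc32_1word_vpmsum can start partway through the table when
+// less data remains (see L_last there).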
-juint* StubRoutines::ppc64::generate_crc_barret_constants() {
- juint barret_constants[CRC32_BARRET_CONSTANTS] = {
- 0xf7011641UL, 0x00000001UL, 0x00000000UL, 0x00000000UL,
- 0xdb710641UL, 0x00000001UL, 0x00000000UL, 0x00000000UL
- };
- juint* ptr = (juint*) malloc(sizeof(juint) * CRC32_CONSTANTS_SIZE);
+// Constants to reduce a 64 bit value to the 32 bit CRC, as needed by macroAssembler.
+juint* StubRoutines::ppc64::generate_crc_barret_constants(juint reverse_poly) {
+ juint* ptr = (juint*) malloc(sizeof(juint) * CRC32_BARRET_CONSTANTS);
guarantee(((intptr_t)ptr & 0xF) == 0, "16-byte alignment needed");
guarantee(ptr != NULL, "allocation error of a crc table");
- memcpy((void*) ptr, barret_constants, sizeof(juint) * CRC32_BARRET_CONSTANTS);
+
+ julong* c = (julong*)ptr;
+ julong long_poly = (((julong)reverse_poly) << 1) | 1;
+ julong inverse_long_poly = compute_inverse_poly(long_poly);
+#ifdef VM_LITTLE_ENDIAN
+ c[0] = inverse_long_poly;
+ c[1] = long_poly;
+#else
+ c[0] = long_poly;
+ c[1] = inverse_long_poly;
+#endif
+
+#ifdef ASSERT
+ if (reverse_poly == REVERSE_CRC32_POLY) {
+ assert(INVERSE_REVERSE_CRC32_POLY == inverse_long_poly, "sanity");
+ } else if (reverse_poly == REVERSE_CRC32C_POLY) {
+ assert(INVERSE_REVERSE_CRC32C_POLY == inverse_long_poly, "sanity");
+ }
+#endif
+
return ptr;
}
@@ -939,6 +772,8 @@
#endif
};
-juint* StubRoutines::ppc64::_constants = StubRoutines::ppc64::generate_crc_constants();
+juint* StubRoutines::ppc64::_crc_constants = StubRoutines::ppc64::generate_crc_constants(REVERSE_CRC32_POLY);
+juint* StubRoutines::ppc64::_crc32c_constants = StubRoutines::ppc64::generate_crc_constants(REVERSE_CRC32C_POLY);
-juint* StubRoutines::ppc64::_barret_constants = StubRoutines::ppc64::generate_crc_barret_constants();
+juint* StubRoutines::ppc64::_crc_barret_constants = StubRoutines::ppc64::generate_crc_barret_constants(REVERSE_CRC32_POLY);
+juint* StubRoutines::ppc64::_crc32c_barret_constants = StubRoutines::ppc64::generate_crc_barret_constants(REVERSE_CRC32C_POLY);
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -452,8 +452,8 @@
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError),
- R16_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
+ R16_thread, R19_method);
// Pop the C frame and restore LR.
__ pop_frame();
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,7 +30,7 @@
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -64,7 +64,7 @@
switch (barrier) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
// Load and record the previous value.
__ g1_write_barrier_pre(Rbase, offset,
@@ -3688,11 +3688,15 @@
// Vtable entry was NULL => Throw abstract method error.
__ bind(Lthrow_ame);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ // Pass arguments for generating a verbose error message.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
+ Rrecv_klass, Rmethod);
// Interface was not found => Throw incompatible class change error.
__ bind(L_no_such_interface);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+ // Pass arguments for generating a verbose error message.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
+ Rrecv_klass, Rinterface_klass);
DEBUG_ONLY( __ should_not_reach_here(); )
// Special case of invokeinterface called for virtual method of
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -186,8 +186,7 @@
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
- // If defined(VM_LITTLE_ENDIAN) and running on Power8 or newer hardware,
- // the implementation uses the vector instructions available with Power8.
+ // If running on Power8 or newer hardware, the implementation uses the available vector instructions.
// In all other cases, the implementation uses only generally available instructions.
if (!UseCRC32Intrinsics) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
--- a/src/hotspot/cpu/s390/assembler_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/assembler_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,8 +39,8 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif
--- a/src/hotspot/cpu/s390/assembler_s390.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,7 +31,7 @@
// Immediate is an abstraction to represent the various immediate
// operands which exist on z/Architecture. Neither this class nor
// instances hereof have an own state. It consists of methods only.
-class Immediate VALUE_OBJ_CLASS_SPEC {
+class Immediate {
public:
static bool is_simm(int64_t x, unsigned int nbits) {
@@ -82,7 +82,7 @@
// displacements which exist with addresses on z/ArchiTecture.
// Neither this class nor instances hereof have an own state. It
// consists of methods only.
-class Displacement VALUE_OBJ_CLASS_SPEC {
+class Displacement {
public: // These tests are used outside the (Macro)Assembler world, e.g. in ad-file.
@@ -101,7 +101,7 @@
// form they are used on z/Architecture for instructions which access
// their operand with pc-relative addresses. Neither this class nor
// instances hereof have an own state. It consists of methods only.
-class RelAddr VALUE_OBJ_CLASS_SPEC {
+class RelAddr {
private: // No public use at all. Solely for (Macro)Assembler.
@@ -177,7 +177,7 @@
//
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
-class Address VALUE_OBJ_CLASS_SPEC {
+class Address {
private:
Register _base; // Base register.
Register _index; // Index register
@@ -275,7 +275,7 @@
friend class Assembler;
};
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+class AddressLiteral {
private:
address _address;
RelocationHolder _rspec;
@@ -398,7 +398,7 @@
// memory or in a register, in a manner consistent with the
// z/Architecture Application Binary Interface, or ABI. This is often
// referred to as the native or C calling convention.
-class Argument VALUE_OBJ_CLASS_SPEC {
+class Argument {
private:
int _number;
bool _is_in;
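
The VALUE_OBJ_CLASS_SPEC annotations dropped throughout these hunks used to tag classes that must never be heap-allocated on their own. A minimal sketch of the idiom, assuming a hypothetical ValueObj base with deleted allocation operators (a modern stand-in for illustration, not HotSpot's exact _ValueObj definition):

    #include <cstddef>

    // Hypothetical stand-in: deriving from ValueObj bans 'new'/'delete' on the
    // class, so instances can only live on the stack or embedded in objects.
    class ValueObj {
     public:
      void* operator new(size_t) = delete;
      void* operator new[](size_t) = delete;
      void operator delete(void*) = delete;
      void operator delete[](void*) = delete;
    };

    class Disp : public ValueObj {
     public:
      static bool is_short(int64_t x) { return 0 <= x && x < 4096; }
    };

    int main() {
      Disp d;                   // OK: automatic storage
      // Disp* p = new Disp();  // would not compile: operator new is deleted
      return d.is_short(42) ? 0 : 1;
    }
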
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -35,7 +35,7 @@
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -226,10 +226,6 @@
__ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
}
-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
- __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
-}
-
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
if (tmp->is_valid()) {
if (is_power_of_2(c + 1)) {
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -73,10 +73,6 @@
push_frame(frame_size_in_bytes);
}
-void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
- ShouldNotCallThis(); // unused
-}
-
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint) z_illtrap(0xC1);
}
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,8 +43,8 @@
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@@ -768,7 +768,7 @@
{ // Z_R1_scratch: previous value of memory
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
__ should_not_reach_here(FILE_AND_LINE);
break;
}
@@ -837,7 +837,7 @@
case g1_post_barrier_slow_id:
{ // Z_R1_scratch: oop address, address of updated memory slot
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
__ should_not_reach_here(FILE_AND_LINE);
break;
}
--- a/src/hotspot/cpu/s390/interpreterRT_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/interpreterRT_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -28,7 +28,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -51,9 +51,9 @@
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif
@@ -3709,9 +3709,9 @@
assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
- G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
+ G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = bs->card_table();
- assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
+ assert(bs->kind() == BarrierSet::G1BarrierSet, "wrong barrier");
BLOCK_COMMENT("g1_write_barrier_post {");
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -72,7 +72,7 @@
// N a t i v e I n s t r u c t i o n
//-------------------------------------
-class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+class NativeInstruction {
friend class Relocation;
public:
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -2105,7 +2105,7 @@
// blocking or unlocking.
// An OOP result (handle) is done specially in the slow-path code.
//--------------------------------------------------------------------
- switch (ret_type) { //GLGLGL
+ switch (ret_type) {
case T_VOID: break; // Nothing to do!
case T_FLOAT: break; // Got it where we want it (unless slow-path)
case T_DOUBLE: break; // Got it where we want it (unless slow-path)
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -697,7 +697,7 @@
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
// Is marking active?
@@ -742,7 +742,7 @@
void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) {
BarrierSet* const bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
if (branchToEnd) {
assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
--- a/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/templateInterpreterGenerator_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -458,7 +458,8 @@
__ save_return_pc(); // Save Z_R14.
__ push_frame_abi160(0); // Without new frame the RT call could overwrite the saved Z_R14.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError), Z_thread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod),
+ Z_thread, Z_method);
__ pop_frame();
__ restore_return_pc(); // Restore Z_R14.
@@ -686,7 +687,7 @@
return entry;
}
-address TemplateInterpreterGenerator::generate_deopt_entry_for (TosState state,
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
int step,
address continuation) {
address entry = __ pc();
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,7 +29,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -206,7 +206,7 @@
__ verify_oop(val);
switch (barrier) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
#ifdef ASSERT
if (val_is_null) { // Check if the flag setting reflects reality.
@@ -3742,8 +3742,12 @@
// Throw exception.
__ restore_bcp(); // Bcp must be correct for exception handler (was destroyed).
__ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
+ // Pass arguments for generating a verbose error message.
+ __ z_lgr(Z_tmp_1, method); // Prevent register clash.
__ call_VM(noreg,
- CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::throw_AbstractMethodErrorVerbose),
+ klass, Z_tmp_1);
// The call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
@@ -3752,8 +3756,11 @@
// Throw exception.
__ restore_bcp(); // Bcp must be correct for exception handler (was destroyed).
__ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
+ // Pass arguments for generating a verbose error message.
__ call_VM(noreg,
- CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+ CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
+ klass, interface);
// The call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
--- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
--- a/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -285,13 +285,6 @@
}
-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
- LIR_Opr o7opr = FrameMap::O7_opr;
- __ load(new LIR_Address(base, disp, type), o7opr, info);
- __ cmp(condition, reg, o7opr);
-}
-
-
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
assert(left != result, "should be different registers");
if (is_power_of_2(c + 1)) {
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,12 +64,6 @@
}
-void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
- if (C1Breakpoint) breakpoint_trap();
- inline_cache_check(receiver, ic_klass);
-}
-
-
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint) breakpoint_trap();
// build frame
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -41,8 +41,8 @@
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler
@@ -762,7 +762,7 @@
case g1_pre_barrier_slow_id:
{ // G4: previous value of memory
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
__ save_frame(0);
__ set((int)id, O1);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
@@ -833,7 +833,7 @@
case g1_post_barrier_slow_id:
{
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
__ save_frame(0);
__ set((int)id, O1);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
--- a/src/hotspot/cpu/sparc/frame_sparc.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/frame_sparc.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,12 +86,6 @@
inline intptr_t* frame::real_fp() const { return fp(); }
-// Used only in frame::oopmapreg_to_location
-// This return a value in VMRegImpl::slot_size
-inline int frame::pd_oop_map_offset_adjustment() const {
- return _sp_adjustment_by_callee * VMRegImpl::slots_per_word;
-}
-
inline intptr_t** frame::interpreter_frame_locals_addr() const {
return (intptr_t**) sp_addr_at( Llocals->sp_offset_in_saved_window());
}
@@ -191,61 +185,6 @@
}
-inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
- // always allocate non-argument locals 0..5 as if they were arguments:
- int allocated_above_frame = nof_args;
- if (allocated_above_frame < callee_register_argument_save_area_words)
- allocated_above_frame = callee_register_argument_save_area_words;
- if (allocated_above_frame > max_nof_locals)
- allocated_above_frame = max_nof_locals;
-
- // Note: monitors (BasicLock blocks) are never allocated in argument slots
- //assert(local_index >= 0 && local_index < max_nof_locals, "bad local index");
- if (local_index < allocated_above_frame)
- return local_index + callee_register_argument_save_area_sp_offset;
- else
- return local_index - (max_nof_locals + max_nof_monitors*2) + compiler_frame_vm_locals_fp_offset;
-}
-
-inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
- assert(local_index >= max_nof_locals && ((local_index - max_nof_locals) & 1) && (local_index - max_nof_locals) < max_nof_monitors*2, "bad monitor index");
-
- // The compiler uses the __higher__ of two indexes allocated to the monitor.
- // Increasing local indexes are mapped to increasing memory locations,
- // so the start of the BasicLock is associated with the __lower__ index.
-
- int offset = (local_index-1) - (max_nof_locals + max_nof_monitors*2) + compiler_frame_vm_locals_fp_offset;
-
- // We allocate monitors aligned zero mod 8:
- assert((offset & 1) == 0, "monitor must be an an even address.");
- // This works because all monitors are allocated after
- // all locals, and because the highest address corresponding to any
- // monitor index is always even.
- assert((compiler_frame_vm_locals_fp_offset & 1) == 0, "end of monitors must be even address");
-
- return offset;
-}
-
-inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
- // always allocate non-argument locals 0..5 as if they were arguments:
- int allocated_above_frame = nof_args;
- if (allocated_above_frame < callee_register_argument_save_area_words)
- allocated_above_frame = callee_register_argument_save_area_words;
- if (allocated_above_frame > max_nof_locals)
- allocated_above_frame = max_nof_locals;
-
- int allocated_in_frame = (max_nof_locals + max_nof_monitors*2) - allocated_above_frame;
-
- return compiler_frame_vm_locals_fp_offset - allocated_in_frame;
-}
-
-// On SPARC, the %lN and %iN registers are non-volatile.
-inline bool frame::volatile_across_calls(Register reg) {
- // This predicate is (presently) applied only to temporary registers,
- // and so it need not recognize non-volatile globals.
- return reg->is_out() || reg->is_global();
-}
-
inline oop frame::saved_oop_result(RegisterMap* map) const {
return *((oop*) map->location(O0->as_VMReg()));
}
--- a/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -46,9 +46,9 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef COMPILER2
@@ -3665,8 +3665,8 @@
if (new_val == G0) return;
- G1SATBCardTableLoggingModRefBS* bs =
- barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+ G1BarrierSet* bs =
+ barrier_set_cast<G1BarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = bs->card_table();
if (G1RSBarrierRegionFilter) {
@@ -3706,8 +3706,8 @@
if (heap->kind() == CollectedHeap::G1CollectedHeap) {
// Only needed for G1
if (dirty_card_log_enqueue == 0) {
- G1SATBCardTableLoggingModRefBS* bs =
- barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
+ G1BarrierSet* bs =
+ barrier_set_cast<G1BarrierSet>(heap->barrier_set());
CardTable *ct = bs->card_table();
generate_dirty_card_log_enqueue(ct->byte_map_base());
assert(dirty_card_log_enqueue != 0, "postcondition.");
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -239,7 +239,7 @@
// Note: A register location is represented via a Register, not
// via an address for efficiency & simplicity reasons.
-class Address VALUE_OBJ_CLASS_SPEC {
+class Address {
private:
Register _base; // Base register.
RegisterOrConstant _index_or_disp; // Index register or constant displacement.
@@ -320,7 +320,7 @@
};
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+class AddressLiteral {
private:
address _address;
RelocationHolder _rspec;
@@ -452,7 +452,7 @@
// with the SPARC Application Binary Interface, or ABI. This is
// often referred to as the native or C calling convention.
-class Argument VALUE_OBJ_CLASS_SPEC {
+class Argument {
private:
int _number;
bool _is_in;
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
// - - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
-class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+class NativeInstruction {
friend class Relocation;
public:
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -836,7 +836,7 @@
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
Register tmp = O5;
@@ -898,7 +898,7 @@
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
// Get some new fresh output registers.
__ save_frame(0);
--- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -191,7 +191,7 @@
address entry = __ pc();
// abstract method entry
// throw exception
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), G5_method);
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
return entry;
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,7 +27,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
@@ -55,7 +55,7 @@
assert(index == noreg || offset == 0, "only one offset");
switch (barrier) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
// Load and record the previous value.
__ g1_write_barrier_pre(base, index, offset,
@@ -3137,8 +3137,10 @@
__ sub(Rindex, Method::itable_index_max, Rindex);
__ neg(Rindex);
+ // Preserve O2_Klass for throw_AbstractMethodErrorVerbose
+ __ mov(O2_Klass, O4);
__ lookup_interface_method(// inputs: rec. class, interface, itable index
- O2_Klass, Rinterface, Rindex,
+ O4, Rinterface, Rindex,
// outputs: method, scan temp reg, temp reg
G5_method, Rscratch, Rtemp,
L_no_such_interface);
@@ -3147,7 +3149,9 @@
{
Label ok;
__ br_notnull_short(G5_method, Assembler::pt, ok);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ // Pass arguments for generating a verbose error message.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
+ O2_Klass, Rmethod);
__ should_not_reach_here();
__ bind(ok);
}
@@ -3160,7 +3164,9 @@
__ call_from_interpreter(Rcall, Gargs, Rret);
__ bind(L_no_such_interface);
- call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+ // Pass arguments for generating a verbose error message.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
+ O2_Klass, Rinterface);
__ should_not_reach_here();
}
@@ -3536,7 +3542,7 @@
void TemplateTable::_breakpoint() {
// Note: We get here even if we are single stepping..
- // jbug inists on setting breakpoints at every bytecode
+ // jbug insists on setting breakpoints at every bytecode
// even if we are in single step mode.
transition(vtos, vtos);
--- a/src/hotspot/cpu/sparc/vtableStubs_sparc.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/sparc/vtableStubs_sparc.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -212,7 +212,12 @@
__ delayed()->nop();
__ bind(L_no_such_interface);
- AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
+ // Handle IncompatibleClassChangeError in itable stubs with a more
+ // detailed error message.
+ // We force resolving of the call site by jumping to the "handle
+ // wrong method" stub, and so let the interpreter runtime do all the
+ // dirty work.
+ AddressLiteral icce(SharedRuntime::get_handle_wrong_method_stub());
__ jump_to(icce, G3_scratch);
__ delayed()->restore();
--- a/src/hotspot/cpu/x86/assembler_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,8 +38,8 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
@@ -3915,6 +3915,15 @@
emit_int8((unsigned char)(0xC0 | encode));
}
+void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) {
+ assert(VM_Version::supports_vpopcntdq(), "must support vpopcntdq feature");
+ InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+ attributes.set_is_evex_instruction();
+ int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+ emit_int8(0x55);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
void Assembler::popf() {
emit_int8((unsigned char)0x9D);
}
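
The vpopcntd encoder added above emits the AVX-512 VPOPCNTDQ instruction: an independent population count in every 32-bit lane. For reference, a scalar sketch of those semantics (plain C++, not HotSpot code):

    #include <cstdint>

    // Reference semantics for vpopcntd: for each 32-bit lane of src, count the
    // set bits and store the count in the corresponding lane of dst.
    static void vpopcntd_ref(uint32_t* dst, const uint32_t* src, int lanes) {
      for (int i = 0; i < lanes; i++) {
        uint32_t x = src[i];
        uint32_t n = 0;
        while (x != 0) { x &= x - 1; n++; }  // clear lowest set bit per step
        dst[i] = n;
      }
    }
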
--- a/src/hotspot/cpu/x86/assembler_x86.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
// Contains all the definitions needed for x86 assembly code generation.
// Calling convention
-class Argument VALUE_OBJ_CLASS_SPEC {
+class Argument {
public:
enum {
#ifdef _LP64
@@ -155,7 +155,7 @@
class ArrayAddress;
-class Address VALUE_OBJ_CLASS_SPEC {
+class Address {
public:
enum ScaleFactor {
no_scale = -1,
@@ -333,7 +333,7 @@
// on the instruction and the platform. As small step on the way to merging i486/amd64
// directories.
//
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+class AddressLiteral {
friend class ArrayAddress;
RelocationHolder _rspec;
// Typically we use AddressLiterals we want to use their rval
@@ -423,7 +423,7 @@
// address amd64 can't. We create a class that expresses the concept but does extra
// magic on amd64 to get the final result
-class ArrayAddress VALUE_OBJ_CLASS_SPEC {
+class ArrayAddress {
private:
AddressLiteral _base;
@@ -1633,6 +1633,8 @@
void popcntl(Register dst, Address src);
void popcntl(Register dst, Register src);
+ void vpopcntd(XMMRegister dst, XMMRegister src, int vector_len);
+
#ifdef _LP64
void popcntq(Register dst, Address src);
void popcntq(Register dst, Register src);
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/x86/c1_FpuStackSim_x86.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_FpuStackSim_x86.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
class Compilation;
-class FpuStackSim VALUE_OBJ_CLASS_SPEC {
+class FpuStackSim {
private:
Compilation* _compilation;
int _stack_size;
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -230,11 +230,6 @@
}
-void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
- __ cmp_reg_mem(condition, reg, new LIR_Address(base, disp, type), info);
-}
-
-
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
if (tmp->is_valid() && c > 0 && c < max_jint) {
if (is_power_of_2(c + 1)) {
--- a/src/hotspot/cpu/x86/c1_LinearScan_x86.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_LinearScan_x86.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -122,7 +122,7 @@
}
-class FpuStackAllocator VALUE_OBJ_CLASS_SPEC {
+class FpuStackAllocator {
private:
Compilation* _compilation;
LinearScan* _allocator;
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -340,12 +340,6 @@
}
-void C1_MacroAssembler::unverified_entry(Register receiver, Register ic_klass) {
- if (C1Breakpoint) int3();
- inline_cache_check(receiver, ic_klass);
-}
-
-
void C1_MacroAssembler::verified_entry() {
if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
// Verified Entry first instruction should be 5 bytes long for correct
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -42,8 +42,8 @@
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -1563,7 +1563,7 @@
// arg0 : previous value of memory
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->kind() != BarrierSet::G1SATBCTLogging) {
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
__ movptr(rax, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
__ should_not_reach_here();
@@ -1632,6 +1632,13 @@
{
StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
+ __ movptr(rax, (int)id);
+ __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
+ __ should_not_reach_here();
+ break;
+ }
// arg0: store_address
Address store_addr(rbp, 2*BytesPerWord);
--- a/src/hotspot/cpu/x86/frame_x86.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -210,10 +210,6 @@
return (oop *)(fp() + interpreter_frame_oop_temp_offset);
}
-inline int frame::pd_oop_map_offset_adjustment() const {
- return 0;
-}
-
inline int frame::interpreter_frame_monitor_size() {
return BasicObjectLock::size();
}
@@ -239,22 +235,6 @@
// Compiled frames
-inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
- return (nof_args - local_index + (local_index < nof_args ? 1: -1));
-}
-
-inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
- return local_offset_for_compiler(local_index, nof_args, max_nof_locals, max_nof_monitors);
-}
-
-inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
- return (nof_args - (max_nof_locals + max_nof_monitors*2) - 1);
-}
-
-inline bool frame::volatile_across_calls(Register reg) {
- return true;
-}
-
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(rax->as_VMReg());
guarantee(result_adr != NULL, "bad register save location");
--- a/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -46,9 +46,9 @@
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#include "crc32c.h"
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -50,7 +50,7 @@
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
-class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+class NativeInstruction {
friend class Relocation;
public:
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -679,7 +679,7 @@
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!uninitialized_target) {
Register thread = rax;
@@ -729,7 +729,7 @@
assert_different_registers(start, count);
switch (bs->kind()) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
__ pusha(); // push registers
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1201,7 +1201,7 @@
void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
Label filtered;
@@ -1257,7 +1257,7 @@
assert_different_registers(start, count, scratch);
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
__ pusha(); // push registers (overkill)
if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
@@ -4425,7 +4425,7 @@
* c_rarg0 - x address
* c_rarg1 - x length
* c_rarg2 - y address
- * c_rarg3 - y lenth
+ * c_rarg3 - y length
* not Win64
* c_rarg4 - z address
* c_rarg5 - z length
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1354,7 +1354,7 @@
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
// throw exception
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorWithMethod), rbx);
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -28,7 +28,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
@@ -154,7 +154,7 @@
assert(val == noreg || val == rax, "parameter is just for looks");
switch (barrier) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
{
// flatten object address if needed
// We do it regardless of precise because we need the registers
@@ -3872,6 +3872,8 @@
Label no_such_interface, no_such_method;
+ // Preserve method for throw_AbstractMethodErrorVerbose.
+ __ mov(rcx, rbx);
// Receiver subtype check against REFC.
// Superklass in rax. Subklass in rdx. Blows rcx, rdi.
__ lookup_interface_method(// inputs: rec. class, interface, itable index
@@ -3893,8 +3895,10 @@
__ subl(rbx, Method::itable_index_max);
__ negl(rbx);
+ // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
+ __ mov(rlocals, rdx);
__ lookup_interface_method(// inputs: rec. class, interface, itable index
- rdx, rax, rbx,
+ rlocals, rax, rbx,
// outputs: method, scan temp. reg
rbx, rbcp,
no_such_interface);
@@ -3926,8 +3930,19 @@
__ pop(rbx); // pop return address (pushed by prepare_invoke)
__ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
- // the call_VM checks for exception, so we should never return here.
+ // Pass arguments for generating a verbose error message.
+#ifdef _LP64
+ Register recvKlass = c_rarg1;
+ Register method = c_rarg2;
+ if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
+ if (method != rcx) { __ movq(method, rcx); }
+#else
+ Register recvKlass = rdx;
+ Register method = rcx;
+#endif
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
+ recvKlass, method);
+ // The call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
__ bind(no_such_interface);
@@ -3935,8 +3950,10 @@
__ pop(rbx); // pop return address (pushed by prepare_invoke)
__ restore_bcp(); // rbcp must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_IncompatibleClassChangeError));
+ // Pass arguments for generating a verbose error message.
+ LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
+ recvKlass, rax);
// the call_VM checks for exception, so we should never return here.
__ should_not_reach_here();
}
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -257,6 +257,8 @@
__ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
+ __ movl(Address(rsi, 8), rcx);
+ __ movl(Address(rsi, 12), rdx);
//
// Extended cpuid(0x80000000)
@@ -662,6 +664,7 @@
_features &= ~CPU_AVX512CD;
_features &= ~CPU_AVX512BW;
_features &= ~CPU_AVX512VL;
+ _features &= ~CPU_AVX512_VPOPCNTDQ;
}
if (UseAVX < 2)
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -228,6 +228,38 @@
} bits;
};
+ union SefCpuid7Ecx {
+ uint32_t value;
+ struct {
+ uint32_t prefetchwt1 : 1,
+ avx512_vbmi : 1,
+ umip : 1,
+ pku : 1,
+ ospke : 1,
+ : 1,
+ avx512_vbmi2 : 1,
+ : 1,
+ gfni : 1,
+ vaes : 1,
+ vpclmulqdq : 1,
+ avx512_vnni : 1,
+ avx512_bitalg : 1,
+ : 1,
+ avx512_vpopcntdq : 1,
+ : 17;
+ } bits;
+ };
+
+ union SefCpuid7Edx {
+ uint32_t value;
+ struct {
+ uint32_t : 2,
+ avx512_4vnniw : 1,
+ avx512_4fmaps : 1,
+ : 28;
+ } bits;
+ };
+
union ExtCpuid1EEbx {
uint32_t value;
struct {
@@ -300,7 +332,8 @@
#define CPU_AVX512VL ((uint64_t)UCONST64(0x200000000)) // EVEX instructions with smaller vector length
#define CPU_SHA ((uint64_t)UCONST64(0x400000000)) // SHA instructions
#define CPU_FMA ((uint64_t)UCONST64(0x800000000)) // FMA instructions
-#define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000)) // Vzeroupper instruction
+#define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000)) // Vzeroupper instruction
+#define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
enum Extended_Family {
// AMD
@@ -353,8 +386,8 @@
// cpuid function 7 (structured extended features)
SefCpuid7Eax sef_cpuid7_eax;
SefCpuid7Ebx sef_cpuid7_ebx;
- uint32_t sef_cpuid7_ecx; // unused currently
- uint32_t sef_cpuid7_edx; // unused currently
+ SefCpuid7Ecx sef_cpuid7_ecx;
+ SefCpuid7Edx sef_cpuid7_edx;
// cpuid function 0xB (processor topology)
// ecx = 0
@@ -507,6 +540,8 @@
result |= CPU_AVX512BW;
if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
result |= CPU_AVX512VL;
+ if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
+ result |= CPU_AVX512_VPOPCNTDQ;
}
}
if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
@@ -783,6 +818,7 @@
static bool supports_sha() { return (_features & CPU_SHA) != 0; }
static bool supports_fma() { return (_features & CPU_FMA) != 0 && supports_avx(); }
static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
+ static bool supports_vpopcntdq() { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
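
The new SefCpuid7Ecx union maps CPUID leaf 7 (sub-leaf 0) ECX, where bit 14 reports AVX512_VPOPCNTDQ. Outside the VM the same probe can be written directly; a minimal sketch using the <cpuid.h> intrinsics of recent GCC/Clang (an illustration, not VM code):

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned eax, ebx, ecx, edx;
      // Leaf 7, sub-leaf 0: structured extended feature flags.
      if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        bool vpopcntdq = (ecx >> 14) & 1;  // avx512_vpopcntdq, as in the union above
        printf("AVX512_VPOPCNTDQ: %s\n", vpopcntdq ? "yes" : "no");
      }
      return 0;
    }
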
--- a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -212,7 +212,12 @@
__ jmp(Address(method, Method::from_compiled_offset()));
__ bind(L_no_such_interface);
- __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+ // Handle IncompatibleClassChangeError in itable stubs with a more
+ // detailed error message.
+ // We force resolving of the call site by jumping to the "handle
+ // wrong method" stub, and so let the interpreter runtime do all the
+ // dirty work.
+ __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ flush();
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -182,10 +182,10 @@
const Register method = rbx;
__ load_klass(recv_klass_reg, j_rarg0); // restore recv_klass_reg
__ lookup_interface_method(// inputs: rec. class, interface, itable index
- recv_klass_reg, holder_klass_reg, itable_index,
- // outputs: method, scan temp. reg
- method, temp_reg,
- L_no_such_interface);
+ recv_klass_reg, holder_klass_reg, itable_index,
+ // outputs: method, scan temp. reg
+ method, temp_reg,
+ L_no_such_interface);
// If we take a trap while this arg is on the stack we will not
// be able to walk the stack properly. This is not an issue except
@@ -213,7 +213,12 @@
__ jmp(Address(method, Method::from_compiled_offset()));
__ bind(L_no_such_interface);
- __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+ // Handle IncompatibleClassChangeError in itable stubs with a more
+ // detailed error message.
+ // We force resolving of the call site by jumping to the "handle
+ // wrong method" stub, and so let the interpreter runtime do all the
+ // dirty work.
+ __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ flush();
--- a/src/hotspot/cpu/x86/x86.ad Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/x86/x86.ad Tue Mar 20 04:36:44 2018 +0100
@@ -1223,6 +1223,10 @@
if (!UsePopCountInstruction)
ret_value = false;
break;
+ case Op_PopCountVI:
+ if (!UsePopCountInstruction || !VM_Version::supports_vpopcntdq())
+ ret_value = false;
+ break;
case Op_MulVI:
if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX
ret_value = false;
@@ -10788,3 +10792,49 @@
%}
ins_pipe( pipe_slow );
%}
+
+// --------------------------------- PopCount --------------------------------------
+
+instruct vpopcount2I(vecD dst, vecD src) %{
+ predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 2);
+ match(Set dst (PopCountVI src));
+ format %{ "vpopcntd $dst,$src\t! vector popcount packed2I" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vpopcount4I(vecX dst, vecX src) %{
+ predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 4);
+ match(Set dst (PopCountVI src));
+ format %{ "vpopcntd $dst,$src\t! vector popcount packed4I" %}
+ ins_encode %{
+ int vector_len = 0;
+ __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vpopcount8I(vecY dst, vecY src) %{
+ predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 8);
+ match(Set dst (PopCountVI src));
+ format %{ "vpopcntd $dst,$src\t! vector popcount packed8I" %}
+ ins_encode %{
+ int vector_len = 1;
+ __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
+instruct vpopcount16I(vecZ dst, vecZ src) %{
+ predicate(VM_Version::supports_vpopcntdq() && UsePopCountInstruction && n->as_Vector()->length() == 16);
+ match(Set dst (PopCountVI src));
+ format %{ "vpopcntd $dst,$src\t! vector popcount packed16I" %}
+ ins_encode %{
+ int vector_len = 2;
+ __ vpopcntd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+ %}
+ ins_pipe( pipe_slow );
+%}
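
These four instructs cover the packed 2I/4I/8I/16I cases of PopCountVI, with vector_len 0, 0, 1, 2 selecting the EVEX vector length (128/128/256/512 bits). The Java loop C2 vectorizes into PopCountVI is shaped like this scalar C++ equivalent; a sketch assuming GCC/Clang's __builtin_popcount, where an auto-vectorizer with AVX512_VPOPCNTDQ enabled may likewise emit vpopcntd:

    #include <cstdint>

    // Scalar equivalent of the PopCountVI pattern: per-element popcount over an
    // int array. With -O3 -mavx512vpopcntdq a compiler may emit vpopcntd here.
    void popcount_all(const uint32_t* in, uint32_t* out, int n) {
      for (int i = 0; i < n; i++) {
        out[i] = (uint32_t)__builtin_popcount(in[i]);
      }
    }
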
--- a/src/hotspot/cpu/zero/assembler_zero.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/zero/assembler_zero.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -38,8 +38,8 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -31,6 +31,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/cpu/zero/globalDefinitions_zero.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/zero/globalDefinitions_zero.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,6 +26,10 @@
#ifndef CPU_ZERO_VM_GLOBALDEFINITIONS_ZERO_HPP
#define CPU_ZERO_VM_GLOBALDEFINITIONS_ZERO_HPP
+#ifdef _LP64
+#define SUPPORTS_NATIVE_CX8
+#endif
+
#include <ffi.h>
// Indicates whether the C calling conventions require that
--- a/src/hotspot/cpu/zero/interpreterRT_zero.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/zero/interpreterRT_zero.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,7 +27,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/cpu/zero/methodHandles_zero.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/zero/methodHandles_zero.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -28,6 +28,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
--- a/src/hotspot/cpu/zero/nativeInst_zero.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/cpu/zero/nativeInst_zero.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -46,7 +46,7 @@
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
-class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+class NativeInstruction {
public:
bool is_jump() {
ShouldNotCallThis();
--- a/src/hotspot/os/linux/os_linux.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os/linux/os_linux.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -629,6 +629,10 @@
}
}
+void os::Linux::expand_stack_to(address bottom) {
+ _expand_stack_to(bottom);
+}
+
bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
assert(t!=NULL, "just checking");
assert(t->osthread()->expanding_stack(), "expand should be set");
@@ -3053,12 +3057,10 @@
return res != (uintptr_t) MAP_FAILED;
}
-// If there is no page mapped/committed, top (bottom + size) is returned
-static address get_stack_mapped_bottom(address bottom,
- size_t size,
- bool committed_only /* must have backing pages */) {
- // address used to test if the page is mapped/committed
- address test_addr = bottom + size;
+static address get_stack_commited_bottom(address bottom, size_t size) {
+ address nbot = bottom;
+ address ntop = bottom + size;
+
size_t page_sz = os::vm_page_size();
unsigned pages = size / page_sz;
@@ -3070,39 +3072,38 @@
while (imin < imax) {
imid = (imax + imin) / 2;
- test_addr = bottom + (imid * page_sz);
+ nbot = ntop - (imid * page_sz);
// Use a trick with mincore to check whether the page is mapped or not.
// mincore sets vec to 1 if the page resides in memory and to 0 if the page
// is swapped out, but if the page we are asking for is unmapped
// it returns -1 with errno set to ENOMEM.
- mincore_return_value = mincore(test_addr, page_sz, vec);
-
- if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
- // Page is not mapped/committed go up
- // to find first mapped/committed page
- if ((mincore_return_value == -1 && errno != EAGAIN)
- || (committed_only && (vec[0] & 0x01) == 0)) {
- assert(mincore_return_value != -1 || errno == ENOMEM, "Unexpected mincore errno");
-
- imin = imid + 1;
+ mincore_return_value = mincore(nbot, page_sz, vec);
+
+ if (mincore_return_value == -1) {
+ // Page is not mapped; go up
+ // to find the first mapped page
+ if (errno != EAGAIN) {
+ assert(errno == ENOMEM, "Unexpected mincore errno");
+ imax = imid;
}
} else {
- // mapped/committed, go down
- imax= imid;
+ // Page is mapped; go down
+ // to find the first unmapped page
+ imin = imid + 1;
}
}
- // Adjust stack bottom one page up if last checked page is not mapped/committed
- if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
- assert(mincore_return_value != -1 || (errno != EAGAIN && errno != ENOMEM),
- "Should not get to here");
-
- test_addr = test_addr + page_sz;
- }
-
- return test_addr;
-}
+ nbot = ntop - (imin * page_sz);
+
+ // Adjust stack bottom one page up if last checked page is not mapped
+ if (mincore_return_value == -1) {
+ nbot = nbot + page_sz;
+ }
+
+ return nbot;
+}
+
// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
@@ -3140,9 +3141,9 @@
if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
// Fallback to slow path on all errors, including EAGAIN
- stack_extent = (uintptr_t) get_stack_mapped_bottom(os::Linux::initial_thread_stack_bottom(),
- (size_t)addr - stack_extent,
- false /* committed_only */);
+ stack_extent = (uintptr_t) get_stack_commited_bottom(
+ os::Linux::initial_thread_stack_bottom(),
+ (size_t)addr - stack_extent);
}
if (stack_extent < (uintptr_t)addr) {
@@ -3169,11 +3170,6 @@
return os::uncommit_memory(addr, size);
}
-size_t os::committed_stack_size(address bottom, size_t size) {
- address bot = get_stack_mapped_bottom(bottom, size, true /* committed_only */);
- return size_t(bottom + size - bot);
-}
-
// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
--- a/src/hotspot/os/linux/os_linux.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os/linux/os_linux.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -218,6 +218,8 @@
// none present
private:
+ static void expand_stack_to(address bottom);
+
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
typedef int (*numa_max_node_func_t)(void);
@@ -258,8 +260,8 @@
static void set_numa_bitmask_isbitset(numa_bitmask_isbitset_func_t func) { _numa_bitmask_isbitset = func; }
static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
- static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = *ptr; }
- static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = *ptr; }
+ static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
+ static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
static int sched_getcpu_syscall(void);
public:
static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
@@ -303,6 +305,18 @@
static bool isnode_in_existing_nodes(unsigned int n) {
if (_numa_bitmask_isbitset != NULL && _numa_nodes_ptr != NULL) {
return _numa_bitmask_isbitset(_numa_nodes_ptr, n);
+ } else if (_numa_bitmask_isbitset != NULL && _numa_all_nodes_ptr != NULL) {
+ // Not all libnuma API v2 implementations provide numa_nodes_ptr, so the
+ // API version cannot be trusted to check for its absence. On the other
+ // hand, numa_nodes_ptr, found in libnuma 2.0.9 and above, is the only way
+ // to get a complete view of all numa nodes in the system, hence
+ // numa_nodes_ptr is used to handle CPUs and nodes on architectures (like
+ // PowerPC) where nodes with CPUs but no memory, or vice versa, may exist
+ // and the nodes may be non-contiguous. On most architectures, like
+ // x86_64, numa_nodes_ptr presents the same node set as
+ // numa_all_nodes_ptr, so numa_all_nodes_ptr can be used as a substitute.
+ return _numa_bitmask_isbitset(_numa_all_nodes_ptr, n);
} else
return 0;
}
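
A hedged sketch of the same preference order against the libnuma v2 API directly; it assumes headers and a library from libnuma 2.0.9+ so that both globals are declared, whereas HotSpot resolves them at run time with dlsym:

#include <numa.h>   // libnuma v2; link with -lnuma

// Decide whether numa node 'n' exists, preferring numa_nodes_ptr and
// falling back to numa_all_nodes_ptr where the former is unavailable.
static bool node_exists(unsigned int n) {
  if (numa_available() == -1) {
    return false;                 // no NUMA support on this system
  }
  if (numa_nodes_ptr != NULL) {
    return numa_bitmask_isbitset(numa_nodes_ptr, n) != 0;
  }
  if (numa_all_nodes_ptr != NULL) {
    return numa_bitmask_isbitset(numa_all_nodes_ptr, n) != 0;
  }
  return false;
}
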
--- a/src/hotspot/os/posix/os_posix.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os/posix/os_posix.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -23,10 +23,12 @@
*/
#include "jvm.h"
+#include "memory/allocation.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
+#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
--- a/src/hotspot/os/windows/os_windows.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os/windows/os_windows.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -363,25 +363,6 @@
return sz;
}
-size_t os::committed_stack_size(address bottom, size_t size) {
- MEMORY_BASIC_INFORMATION minfo;
- address top = bottom + size;
- size_t committed_size = 0;
-
- while (committed_size < size) {
- // top is exclusive
- VirtualQuery(top - 1, &minfo, sizeof(minfo));
- if ((minfo.State & MEM_COMMIT) != 0) {
- committed_size += minfo.RegionSize;
- top -= minfo.RegionSize;
- } else {
- break;
- }
- }
-
- return MIN2(committed_size, size);
-}
-
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
const struct tm* time_struct_ptr = localtime(clock);
if (time_struct_ptr != NULL) {
@@ -1537,7 +1518,7 @@
result = _vsnprintf(buf, len, fmt, args);
// If output (including NUL terminator) is truncated, the buffer
// won't be NUL terminated. Add the trailing NUL specified by C99.
- if ((result < 0) || (result >= len)) {
+ if ((result < 0) || ((size_t)result >= len)) {
buf[len - 1] = '\0';
}
}
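
_vsnprintf returns a negative value on error and a value >= len on truncation, and in neither case writes the terminator; the added cast is safe because the negative case is checked first, and it avoids a signed/unsigned comparison. A small wrapper restoring the same guarantee (the name is illustrative; a conforming C99 vsnprintf already terminates on truncation, while MSVC's _vsnprintf does not):

#include <cstdarg>
#include <cstddef>
#include <cstdio>

// vsnprintf that always NUL-terminates, mirroring the contract the
// code above enforces on top of Windows' _vsnprintf.
static int checked_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  int result = vsnprintf(buf, len, fmt, args);
  if (len > 0 && (result < 0 || (size_t) result >= len)) {
    buf[len - 1] = '\0';   // error or truncation: terminate manually
  }
  return result;
}
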
--- a/src/hotspot/os/windows/symbolengine.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os/windows/symbolengine.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -375,10 +375,10 @@
const int len_returned = (int)::GetModuleFileName(hMod, filebuffer, (DWORD)file_buffer_capacity);
DEBUG_ONLY(g_buffers.dir_name.check();)
if (len_returned == 0) {
- // Error. This is suspicious - this may happen if a module has just been
- // unloaded concurrently after our call to EnumProcessModules and
- // GetModuleFileName, but probably just indicates a coding error.
- assert(false, "GetModuleFileName failed (%u)", ::GetLastError());
+ // This may happen when a module gets unloaded after our call to
+ // EnumProcessModules. It should be rare but may happen sporadically.
+ // Just ignore it and continue with the next module.
+ continue;
} else if (len_returned == file_buffer_capacity) {
// Truncation. Just skip this module and continue with the next module.
continue;
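
A sketch of the enumerate-then-query pattern this code defends against: GetModuleFileNameA returns 0 on failure (for instance when the module was unloaded after enumeration) and the buffer capacity on truncation, so both cases are skipped rather than asserted on. Buffer size and output here are illustrative:

#include <windows.h>
#include <cstdio>

static void print_module_names(HMODULE* modules, int count) {
  char buffer[MAX_PATH];
  for (int i = 0; i < count; i++) {
    const DWORD len = ::GetModuleFileNameA(modules[i], buffer, sizeof(buffer));
    if (len == 0 || len == sizeof(buffer)) {
      continue;   // unloaded concurrently, or truncated: skip this module
    }
    ::printf("%s\n", buffer);
  }
}
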
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -80,7 +80,6 @@
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -78,7 +77,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -91,7 +89,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -105,7 +102,6 @@
#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -52,7 +52,6 @@
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
@@ -60,7 +59,6 @@
template<size_t byte_size>
struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
@@ -68,7 +66,6 @@
template<size_t byte_size>
struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -132,7 +132,6 @@
template<>
struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
@@ -148,7 +147,6 @@
template<>
struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
@@ -164,7 +162,6 @@
template<>
struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
@@ -180,7 +177,6 @@
template<>
struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const {
@@ -196,7 +192,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -210,7 +205,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -224,7 +218,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -238,7 +231,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -83,7 +83,6 @@
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -76,7 +76,6 @@
template<size_t byte_size>
struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; }
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -74,7 +73,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -87,7 +85,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -101,7 +98,6 @@
#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -834,6 +834,28 @@
void os::workaround_expand_exec_shield_cs_limit() {
#if defined(IA32)
size_t page_size = os::vm_page_size();
+
+ /*
+ * JDK-8197429
+ *
+ * Expand the stack mapping to the end of the initial stack before
+ * attempting to install the codebuf. This is needed because newer
+ * Linux kernels impose a distance of a megabyte between stack
+ * memory and other memory regions. If we try to install the
+ * codebuf before expanding the stack, the installation will appear
+ * to succeed, but we'll get a segfault later if we expand the stack
+ * in Java code.
+ *
+ */
+ if (os::is_primordial_thread()) {
+ address limit = Linux::initial_thread_stack_bottom();
+ if (! DisablePrimordialThreadGuardPages) {
+ limit += JavaThread::stack_red_zone_size() +
+ JavaThread::stack_yellow_zone_size();
+ }
+ os::Linux::expand_stack_to(limit);
+ }
+
/*
* Take the highest VA the OS will give us and exec
*
@@ -852,6 +874,16 @@
char* hint = (char*)(Linux::initial_thread_stack_bottom() -
(JavaThread::stack_guard_zone_size() + page_size));
char* codebuf = os::attempt_reserve_memory_at(page_size, hint);
+
+ if (codebuf == NULL) {
+ // JDK-8197429: There may be a stack gap of one megabyte between
+ // the limit of the stack and the nearest memory region: this is a
+ // Linux kernel workaround for CVE-2017-1000364. If we failed to
+ // map our codebuf, try again at an address one megabyte lower.
+ hint -= 1 * M;
+ codebuf = os::attempt_reserve_memory_at(page_size, hint);
+ }
+
if ((codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true))) {
return; // No matter, we tried, best effort.
}
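
A standalone sketch of that retry using raw mmap; MAP_FIXED_NOREPLACE (Linux 4.17+) is an assumption standing in for os::attempt_reserve_memory_at, which likewise refuses to clobber existing mappings, and PROT_NONE mirrors a reserve-only mapping:

#include <cstddef>
#include <sys/mman.h>

static char* reserve_page_near(char* hint, size_t page_size) {
  void* p = mmap(hint, page_size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
  if (p == MAP_FAILED) {
    // Newer kernels keep a ~1 MiB guard gap below the stack
    // (CVE-2017-1000364 mitigation), so retry one megabyte lower.
    hint -= 1024 * 1024;
    p = mmap(hint, page_size, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
  }
  return (p == MAP_FAILED) ? NULL : (char*) p;
}
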
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
// Implement ADD using a CAS loop.
template<size_t byte_size>
-struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
+struct Atomic::PlatformAdd {
template<typename I, typename D>
inline D operator()(I add_value, D volatile* dest) const {
D old_value = *dest;
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
#ifndef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -89,7 +88,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
@@ -103,7 +101,6 @@
template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
- VALUE_OBJ_CLASS_SPEC
{
template <typename T>
void operator()(T v, volatile T* p) const {
--- a/src/hotspot/share/adlc/arena.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/adlc/arena.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,17 +25,12 @@
#ifndef SHARE_VM_ADLC_ARENA_HPP
#define SHARE_VM_ADLC_ARENA_HPP
-// All classes in the virtual machine must be subclassed
-// by one of the following allocation classes:
-//
+// All classes in adlc may be derived
+// from one of the following allocation classes:
//
-// For objects allocated in the C-heap (managed by: free & malloc).
+// For objects allocated in the C-heap (managed by: malloc & free).
// - CHeapObj
//
-//
-// For embedded objects.
-// - ValueObj
-//
// For classes used as name spaces.
// - AllStatic
//
@@ -48,15 +43,6 @@
};
-// Base class for objects used as value objects.
-// Calling new or delete will result in fatal error.
-
-class ValueObj {
- public:
- void* operator new(size_t size) throw();
- void operator delete(void* p);
-};
-
// Base class for classes that constitute name spaces.
class AllStatic {
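
With ValueObj gone, the same property, never heap-allocate a value class, can be stated directly in C++11 by deleting the allocation operators, which turns ValueObj's run-time fatal error into a compile error. A hedged sketch, not taken from the adlc sources:

#include <cstddef>

// A value-only class: any attempt to heap-allocate it fails to compile.
class Interval {
 public:
  Interval(int lo, int hi) : _lo(lo), _hi(hi) {}
  void* operator new(std::size_t) = delete;
  void  operator delete(void*) = delete;
 private:
  int _lo;
  int _hi;
};

// Interval on_stack(0, 10);           // fine: stack or embedded use
// Interval* p = new Interval(0, 1);   // would not compile
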
--- a/src/hotspot/share/adlc/formssel.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/adlc/formssel.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -4180,7 +4180,7 @@
"URShiftVB","URShiftVS","URShiftVI","URShiftVL",
"ReplicateB","ReplicateS","ReplicateI","ReplicateL","ReplicateF","ReplicateD",
"LoadVector","StoreVector",
- "FmaVD", "FmaVF",
+ "FmaVD", "FmaVF","PopCountVI",
// Next are not supported currently.
"PackB","PackS","PackI","PackL","PackF","PackD","Pack2L","Pack2D",
"ExtractB","ExtractUB","ExtractC","ExtractS","ExtractI","ExtractL","ExtractF","ExtractD"
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,7 +34,8 @@
#include "interpreter/abstractInterpreter.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "jvmci/jvmciRuntime.hpp"
-#include "oops/method.hpp"
+#include "memory/allocation.inline.hpp"
+#include "oops/method.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_operations.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,8 @@
#include "gc/shared/gcLocker.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "jvmci/jvmciRuntime.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -448,4 +449,3 @@
}
}
}
-
--- a/src/hotspot/share/aot/aotLoader.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/aot/aotLoader.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.inline.hpp"
#include "jvmci/jvmciRuntime.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/method.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/timerTrace.hpp"
--- a/src/hotspot/share/asm/assembler.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/asm/assembler.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
* Labels may only be used within a single CodeSection. If you need
* to create references between code sections, use explicit relocations.
*/
-class Label VALUE_OBJ_CLASS_SPEC {
+class Label {
private:
enum { PatchCacheSize = 4 };
@@ -171,7 +171,7 @@
// A union type for code which has to assemble both constant and
// non-constant operands, when the distinction cannot be made
// statically.
-class RegisterOrConstant VALUE_OBJ_CLASS_SPEC {
+class RegisterOrConstant {
private:
Register _r;
intptr_t _c;
--- a/src/hotspot/share/asm/codeBuffer.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/asm/codeBuffer.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,7 @@
// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
-class CodeSection VALUE_OBJ_CLASS_SPEC {
+class CodeSection {
friend class CodeBuffer;
public:
typedef int csize_t; // code size type; would be size_t except for history
@@ -246,7 +246,7 @@
};
class CodeString;
-class CodeStrings VALUE_OBJ_CLASS_SPEC {
+class CodeStrings {
private:
#ifndef PRODUCT
CodeString* _strings;
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -41,7 +41,7 @@
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
-class BlockListBuilder VALUE_OBJ_CLASS_SPEC {
+class BlockListBuilder {
private:
Compilation* _compilation;
IRScope* _scope;
--- a/src/hotspot/share/c1/c1_GraphBuilder.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_GraphBuilder.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
class MemoryBuffer;
-class GraphBuilder VALUE_OBJ_CLASS_SPEC {
+class GraphBuilder {
private:
// Per-scope data. These are pushed and popped as we descend into
// inlined methods. Currently in order to generate good code in the
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1459,7 +1459,7 @@
// Do the pre-write barrier, if any.
switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
@@ -1475,7 +1475,7 @@
void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
G1SATBCardTableModRef_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -340,7 +340,6 @@
// machine dependent
void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info);
- void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info);
void arraycopy_helper(Intrinsic* x, int* flags, ciArrayKlass** expected_type);
--- a/src/hotspot/share/c1/c1_MacroAssembler.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_MacroAssembler.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,6 @@
void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
void remove_frame(int frame_size_in_bytes);
- void unverified_entry(Register receiver, Register ic_klass);
void verified_entry();
void verify_stack_oop(int offset) PRODUCT_RETURN;
void verify_not_null_oop(Register r) PRODUCT_RETURN;
--- a/src/hotspot/share/c1/c1_Optimizer.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_Optimizer.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "c1/c1_Instruction.hpp"
#include "memory/allocation.hpp"
-class Optimizer VALUE_OBJ_CLASS_SPEC {
+class Optimizer {
private:
IR* _ir;
--- a/src/hotspot/share/c1/c1_RangeCheckElimination.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_RangeCheckElimination.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
};
// Implementation
-class RangeCheckEliminator VALUE_OBJ_CLASS_SPEC {
+class RangeCheckEliminator {
private:
int _number_of_instructions;
bool _optimistic; // Insert predicates and deoptimize when they fail
--- a/src/hotspot/share/c1/c1_Runtime1.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1402,13 +1402,7 @@
Klass* klass_oop = src->klass();
if (klass_oop != dst->klass()) return ac_failed;
TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
- const int l2es = klass->log2_element_size();
- const int ihs = klass->array_header_in_bytes() / wordSize;
- char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
- char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
- // Potential problem: memmove is not guaranteed to be word atomic
- // Revisit in Merlin
- memmove(dst_addr, src_addr, length << l2es);
+ klass->copy_array(arrayOop(src), src_pos, arrayOop(dst), dst_pos, length, Thread::current());
return ac_ok;
} else if (src->is_objArray() && dst->is_objArray()) {
if (UseCompressedOops) {
--- a/src/hotspot/share/ci/ciConstant.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciConstant.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
// ciConstant
//
// This class represents a constant value.
-class ciConstant VALUE_OBJ_CLASS_SPEC {
+class ciConstant {
friend class VMStructs;
private:
friend class ciEnv;
--- a/src/hotspot/share/ci/ciEnv.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciEnv.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -45,7 +45,10 @@
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
+#include "oops/constantPool.inline.hpp"
+#include "oops/cpCache.inline.hpp"
+#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
--- a/src/hotspot/share/ci/ciField.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciField.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/linkResolver.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.hpp"
--- a/src/hotspot/share/ci/ciFlags.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciFlags.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
// ciFlags
//
// This class represents klass or method flags.
-class ciFlags VALUE_OBJ_CLASS_SPEC {
+class ciFlags {
private:
friend class ciInstanceKlass;
friend class ciField;
--- a/src/hotspot/share/ci/ciMethod.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciMethod.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -42,6 +42,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/deoptimization.hpp"
--- a/src/hotspot/share/ci/ciReplay.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciReplay.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"
--- a/src/hotspot/share/ci/ciTypeFlow.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/ci/ciTypeFlow.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -137,7 +137,7 @@
void print_on(outputStream* st) const PRODUCT_RETURN;
};
- class LocalSet VALUE_OBJ_CLASS_SPEC {
+ class LocalSet {
private:
enum Constants { max = 63 };
uint64_t _bits;
--- a/src/hotspot/share/classfile/bytecodeAssembler.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/bytecodeAssembler.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -28,6 +28,7 @@
#include "interpreter/bytecodes.hpp"
#include "memory/oopFactory.hpp"
#include "oops/constantPool.hpp"
+#include "runtime/handles.inline.hpp"
#include "utilities/bytes.hpp"
u2 BytecodeConstantPool::find_or_add(BytecodeCPEntry const& bcpe) {
@@ -55,6 +56,11 @@
cp->set_pool_holder(_orig->pool_holder());
_orig->copy_cp_to(1, _orig->length() - 1, cp, 1, CHECK_NULL);
+ // Preserve dynamic constant information from the original pool
+ if (_orig->has_dynamic_constant()) {
+ cp->set_has_dynamic_constant();
+ }
+
for (int i = 0; i < _entries.length(); ++i) {
BytecodeCPEntry entry = _entries.at(i);
int idx = i + _orig->length();
--- a/src/hotspot/share/classfile/bytecodeAssembler.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/bytecodeAssembler.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
};
// Entries in a yet-to-be-created constant pool. Limited types for now.
-class BytecodeCPEntry VALUE_OBJ_CLASS_SPEC {
+class BytecodeCPEntry {
public:
enum tag {
ERROR_TAG,
--- a/src/hotspot/share/classfile/classFileParser.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classFileParser.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,9 @@
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/annotations.hpp"
+#include "oops/constantPool.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
@@ -1857,7 +1858,7 @@
// Class file LocalVariableTable elements.
-class Classfile_LVT_Element VALUE_OBJ_CLASS_SPEC {
+class Classfile_LVT_Element {
public:
u2 start_bci;
u2 length;
@@ -3766,9 +3767,7 @@
int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
int next_static_double_offset = next_static_oop_offset +
((fac->count[STATIC_OOP]) * heapOopSize);
- if ( fac->count[STATIC_DOUBLE] &&
- (Universe::field_type_should_be_aligned(T_DOUBLE) ||
- Universe::field_type_should_be_aligned(T_LONG)) ) {
+ if (fac->count[STATIC_DOUBLE]) {
next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
}
@@ -4381,7 +4380,7 @@
// add super class dependency
Klass* const super = defined_klass->super();
if (super != NULL) {
- defining_loader_data->record_dependency(super, CHECK);
+ defining_loader_data->record_dependency(super);
}
// add super interface dependencies
@@ -4389,7 +4388,7 @@
if (local_interfaces != NULL) {
const int length = local_interfaces->length();
for (int i = 0; i < length; i++) {
- defining_loader_data->record_dependency(local_interfaces->at(i), CHECK);
+ defining_loader_data->record_dependency(local_interfaces->at(i));
}
}
}
@@ -5362,6 +5361,16 @@
void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loadhook, TRAPS) {
assert(ik != NULL, "invariant");
+ // Set name and CLD before adding to CLD
+ ik->set_class_loader_data(_loader_data);
+ ik->set_name(_class_name);
+
+ // Add all classes to our internal class loader list here,
+ // including classes in the bootstrap (NULL) class loader.
+ const bool publicize = !is_internal();
+
+ _loader_data->add_class(ik, publicize);
+
set_klass_to_deallocate(ik);
assert(_field_info != NULL, "invariant");
@@ -5376,7 +5385,6 @@
ik->set_should_verify_class(_need_verify);
// Not yet: supers are done below to support the new subtype-checking fields
- ik->set_class_loader_data(_loader_data);
ik->set_nonstatic_field_size(_field_info->nonstatic_field_size);
ik->set_has_nonstatic_fields(_field_info->has_nonstatic_fields);
assert(_fac != NULL, "invariant");
@@ -5407,8 +5415,6 @@
// has to be changed accordingly.
ik->set_initial_method_idnum(ik->methods()->length());
- ik->set_name(_class_name);
-
if (is_anonymous()) {
// _this_class_index is a CONSTANT_Class entry that refers to this
// anonymous class itself. If this class needs to refer to its own methods or
--- a/src/hotspot/share/classfile/classFileParser.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classFileParser.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
//
// The bytes describing the class file structure is read from a Stream object
-class ClassFileParser VALUE_OBJ_CLASS_SPEC {
+class ClassFileParser {
class ClassAnnotationCollector;
class FieldAllocationCount;
--- a/src/hotspot/share/classfile/classListParser.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classListParser.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,6 +34,7 @@
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/fieldType.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
--- a/src/hotspot/share/classfile/classLoader.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoader.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,7 @@
#include "jvm.h"
#include "jimage.hpp"
#include "classfile/classFileStream.hpp"
-#include "classfile/classLoader.hpp"
+#include "classfile/classLoader.inline.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.hpp"
@@ -48,9 +48,10 @@
#include "memory/filemap.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
+#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
@@ -1456,9 +1457,6 @@
if (has_jrt_entry()) {
e = _jrt_entry;
stream = _jrt_entry->open_stream(file_name, CHECK_NULL);
- if (!context.check(stream, classpath_index)) {
- return NULL;
- }
} else {
// Exploded build - attempt to locate class in its defining module's location.
assert(_exploded_entries != NULL, "No exploded build entries present");
@@ -1477,9 +1475,6 @@
e = _first_append_entry;
while (e != NULL) {
stream = e->open_stream(file_name, CHECK_NULL);
- if (!context.check(stream, classpath_index)) {
- return NULL;
- }
if (NULL != stream) {
break;
}
--- a/src/hotspot/share/classfile/classLoader.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoader.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,7 +27,6 @@
#include "jimage.hpp"
#include "runtime/handles.hpp"
-#include "runtime/orderAccess.hpp"
#include "runtime/perfData.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
@@ -49,13 +48,9 @@
private:
ClassPathEntry* volatile _next;
public:
- // Next entry in class path
- ClassPathEntry* next() const { return OrderAccess::load_acquire(&_next); }
+ ClassPathEntry* next() const;
virtual ~ClassPathEntry() {}
- void set_next(ClassPathEntry* next) {
- // may have unlocked readers, so ensure visibility.
- OrderAccess::release_store(&_next, next);
- }
+ void set_next(ClassPathEntry* next);
virtual bool is_modules_image() const = 0;
virtual bool is_jar_file() const = 0;
virtual const char* name() const = 0;
@@ -396,25 +391,7 @@
static int compute_Object_vtable();
- static ClassPathEntry* classpath_entry(int n) {
- assert(n >= 0, "sanity");
- if (n == 0) {
- assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
- return ClassLoader::_jrt_entry;
- } else {
- // The java runtime image is always the first entry
- // in the FileMapInfo::_classpath_entry_table. Even though
- // the _jrt_entry is not included in the _first_append_entry
- // linked list, it must be accounted for when comparing the
- // class path vs. the shared archive class path.
- ClassPathEntry* e = ClassLoader::_first_append_entry;
- while (--n >= 1) {
- assert(e != NULL, "Not that many classpath entries.");
- e = e->next();
- }
- return e;
- }
- }
+ static ClassPathEntry* classpath_entry(int n);
static bool is_in_patch_mod_entries(Symbol* module_name);
@@ -423,38 +400,13 @@
// Helper function used by CDS code to get the number of boot classpath
// entries during shared classpath setup time.
- static int num_boot_classpath_entries() {
- assert(DumpSharedSpaces, "Should only be called at CDS dump time");
- assert(has_jrt_entry(), "must have a java runtime image");
- int num_entries = 1; // count the runtime image
- ClassPathEntry* e = ClassLoader::_first_append_entry;
- while (e != NULL) {
- num_entries ++;
- e = e->next();
- }
- return num_entries;
- }
+ static int num_boot_classpath_entries();
- static ClassPathEntry* get_next_boot_classpath_entry(ClassPathEntry* e) {
- if (e == ClassLoader::_jrt_entry) {
- return ClassLoader::_first_append_entry;
- } else {
- return e->next();
- }
- }
+ static ClassPathEntry* get_next_boot_classpath_entry(ClassPathEntry* e);
// Helper function used by CDS code to get the number of app classpath
// entries during shared classpath setup time.
- static int num_app_classpath_entries() {
- assert(DumpSharedSpaces, "Should only be called at CDS dump time");
- int num_entries = 0;
- ClassPathEntry* e= ClassLoader::_app_classpath_entries;
- while (e != NULL) {
- num_entries ++;
- e = e->next();
- }
- return num_entries;
- }
+ static int num_app_classpath_entries();
static void check_shared_classpath(const char *path);
static void finalize_shared_paths_misc_info();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/classfile/classLoader.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_CLASSLOADER_INLINE_HPP
+#define SHARE_VM_CLASSFILE_CLASSLOADER_INLINE_HPP
+
+#include "classfile/classLoader.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+// Next entry in class path
+inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); }
+
+inline void ClassPathEntry::set_next(ClassPathEntry* next) {
+ // may have unlocked readers, so ensure visibility.
+ OrderAccess::release_store(&_next, next);
+}
+
+inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
+ assert(n >= 0, "sanity");
+ if (n == 0) {
+ assert(has_jrt_entry(), "No class path entry at 0 for exploded module builds");
+ return ClassLoader::_jrt_entry;
+ } else {
+ // The java runtime image is always the first entry
+ // in the FileMapInfo::_classpath_entry_table. Even though
+ // the _jrt_entry is not included in the _first_append_entry
+ // linked list, it must be accounted for when comparing the
+ // class path vs. the shared archive class path.
+ ClassPathEntry* e = ClassLoader::_first_append_entry;
+ while (--n >= 1) {
+ assert(e != NULL, "Not that many classpath entries.");
+ e = e->next();
+ }
+ return e;
+ }
+}
+
+#if INCLUDE_CDS
+
+// Helper function used by CDS code to get the number of boot classpath
+// entries during shared classpath setup time.
+
+inline int ClassLoader::num_boot_classpath_entries() {
+ assert(DumpSharedSpaces, "Should only be called at CDS dump time");
+ assert(has_jrt_entry(), "must have a java runtime image");
+ int num_entries = 1; // count the runtime image
+ ClassPathEntry* e = ClassLoader::_first_append_entry;
+ while (e != NULL) {
+ num_entries ++;
+ e = e->next();
+ }
+ return num_entries;
+}
+
+inline ClassPathEntry* ClassLoader::get_next_boot_classpath_entry(ClassPathEntry* e) {
+ if (e == ClassLoader::_jrt_entry) {
+ return ClassLoader::_first_append_entry;
+ } else {
+ return e->next();
+ }
+}
+
+// Helper function used by CDS code to get the number of app classpath
+// entries during shared classpath setup time.
+inline int ClassLoader::num_app_classpath_entries() {
+ assert(DumpSharedSpaces, "Should only be called at CDS dump time");
+ int num_entries = 0;
+ ClassPathEntry* e = ClassLoader::_app_classpath_entries;
+ while (e != NULL) {
+ num_entries ++;
+ e = e->next();
+ }
+ return num_entries;
+}
+
+#endif // INCLUDE_CDS
+
+#endif // SHARE_VM_CLASSFILE_CLASSLOADER_INLINE_HPP
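
Moving these bodies into an .inline.hpp keeps the orderAccess dependency out of classLoader.hpp itself. The underlying pattern is a release-store publish paired with an acquire-load read on a lock-free list link; a standalone sketch with standard C++ atomics (HotSpot's OrderAccess is analogous, not identical):

#include <atomic>

// Singly linked list node whose 'next' may be read by lock-free readers
// while a writer appends: the writer publishes with release, readers
// observe with acquire, so a fully constructed node is always seen.
struct Node {
  int value;
  std::atomic<Node*> next{nullptr};

  Node* next_acquire() const { return next.load(std::memory_order_acquire); }
  void set_next_release(Node* n) { next.store(n, std::memory_order_release); }
};

// A reader traversal is then simply:
//   for (Node* n = head; n != nullptr; n = n->next_acquire()) { ... }
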
--- a/src/hotspot/share/classfile/classLoaderData.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -59,6 +59,7 @@
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
@@ -67,6 +68,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/mutex.hpp"
@@ -85,7 +87,25 @@
ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
-ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
+void ClassLoaderData::init_null_class_loader_data() {
+ assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
+ assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
+
+ _the_null_class_loader_data = new ClassLoaderData(Handle(), false);
+ ClassLoaderDataGraph::_head = _the_null_class_loader_data;
+ assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
+
+ LogTarget(Debug, class, loader, data) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ ls.print("create ");
+ _the_null_class_loader_data->print_value_on(&ls);
+ ls.cr();
+ }
+}
+
+ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
_class_loader(h_class_loader()),
_is_anonymous(is_anonymous),
// An anonymous class loader data doesn't have anything to keep
@@ -96,7 +116,7 @@
_modules(NULL), _packages(NULL),
_claimed(0), _modified_oops(true), _accumulated_modified_oops(false),
_jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
- _next(NULL), _dependencies(dependencies),
+ _next(NULL),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)) {
@@ -112,28 +132,16 @@
// Create unnamed module for all other loaders
_unnamed_module = ModuleEntry::create_unnamed_module(this);
}
- } else {
- _unnamed_module = NULL;
- }
-
- if (!is_anonymous) {
_dictionary = create_dictionary();
} else {
+ _packages = NULL;
+ _unnamed_module = NULL;
_dictionary = NULL;
}
- TRACE_INIT_ID(this);
-}
-void ClassLoaderData::init_dependencies(TRAPS) {
- assert(!Universe::is_fully_initialized(), "should only be called when initializing");
- assert(is_the_null_class_loader_data(), "should only call this for the null class loader");
- _dependencies.init(CHECK);
-}
+ NOT_PRODUCT(_dependency_count = 0); // number of class loader dependencies
-void ClassLoaderData::Dependencies::init(TRAPS) {
- // Create empty dependencies array to add to. CMS requires this to be
- // an oop so that it can track additions via card marks. We think.
- _list_head = oopFactory::new_objectArray(2, CHECK);
+ TRACE_INIT_ID(this);
}
ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() {
@@ -156,6 +164,16 @@
return handle;
}
+int ClassLoaderData::ChunkedHandleList::count() const {
+ int count = 0;
+ Chunk* chunk = _head;
+ while (chunk != NULL) {
+ count += chunk->_size;
+ chunk = chunk->_next;
+ }
+ return count;
+}
+
inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chunk* c, const juint size) {
for (juint i = 0; i < size; i++) {
if (c->_data[i] != NULL) {
@@ -175,16 +193,15 @@
}
}
-#ifdef ASSERT
class VerifyContainsOopClosure : public OopClosure {
- oop* _target;
+ oop _target;
bool _found;
public:
- VerifyContainsOopClosure(oop* target) : _target(target), _found(false) {}
+ VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
void do_oop(oop* p) {
- if (p == _target) {
+ if (p != NULL && *p == _target) {
_found = true;
}
}
@@ -199,12 +216,24 @@
}
};
-bool ClassLoaderData::ChunkedHandleList::contains(oop* p) {
+bool ClassLoaderData::ChunkedHandleList::contains(oop p) {
VerifyContainsOopClosure cl(p);
oops_do(&cl);
return cl.found();
}
-#endif // ASSERT
+
+#ifndef PRODUCT
+bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
+ Chunk* chunk = _head;
+ while (chunk != NULL) {
+ if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[chunk->_size])) {
+ return true;
+ }
+ chunk = chunk->_next;
+ }
+ return false;
+}
+#endif // PRODUCT
bool ClassLoaderData::claim() {
if (_claimed == 1) {
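
The new owner_of test above is a raw address-range walk: an oop* belongs to the list exactly when it points into the used portion of some chunk's _data array. A simplified sketch of that containment test (the chunk layout here is illustrative, not HotSpot's):

#include <cstddef>

struct Chunk {
  static const std::size_t CAPACITY = 32;
  int*        _data[CAPACITY];   // slots handed out as handles
  std::size_t _size;             // slots currently in use
  Chunk*      _next;
};

// True iff 'handle' points into the used part of any chunk in the list.
static bool list_owns(Chunk* head, int** handle) {
  for (Chunk* c = head; c != NULL; c = c->_next) {
    if (&c->_data[0] <= handle && handle < &c->_data[c->_size]) {
      return true;
    }
  }
  return false;
}
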
@@ -244,14 +273,9 @@
}
f->do_oop(&_class_loader);
- _dependencies.oops_do(f);
_handles.oops_do(f);
}
-void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
- f->do_oop((oop*)&_list_head);
-}
-
void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
@@ -326,7 +350,7 @@
}
}
-void ClassLoaderData::record_dependency(const Klass* k, TRAPS) {
+void ClassLoaderData::record_dependency(const Klass* k) {
assert(k != NULL, "invariant");
ClassLoaderData * const from_cld = this;
@@ -361,77 +385,27 @@
}
}
- // It's a dependency we won't find through GC, add it. This is relatively rare.
- // Must handle over GC point.
- Handle dependency(THREAD, to);
- from_cld->_dependencies.add(dependency, CHECK);
-
- // Added a potentially young gen oop to the ClassLoaderData
- record_modified_oops();
+ // It's a dependency we won't find through GC, add it.
+ if (!_handles.contains(to)) {
+ NOT_PRODUCT(Atomic::inc(&_dependency_count));
+ LogTarget(Trace, class, loader, data) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ ls.print("adding dependency from ");
+ print_value_on(&ls);
+ ls.print(" to ");
+ to_cld->print_value_on(&ls);
+ ls.cr();
+ }
+ Handle dependency(Thread::current(), to);
+ add_handle(dependency);
+ // Added a potentially young gen oop to the ClassLoaderData
+ record_modified_oops();
+ }
}
-void ClassLoaderData::Dependencies::add(Handle dependency, TRAPS) {
- // Check first if this dependency is already in the list.
- // Save a pointer to the last to add to under the lock.
- objArrayOop ok = _list_head;
- objArrayOop last = NULL;
- while (ok != NULL) {
- last = ok;
- if (ok->obj_at(0) == dependency()) {
- // Don't need to add it
- return;
- }
- ok = (objArrayOop)ok->obj_at(1);
- }
-
- // Must handle over GC points
- assert (last != NULL, "dependencies should be initialized");
- objArrayHandle last_handle(THREAD, last);
-
- // Create a new dependency node with fields for (class_loader or mirror, next)
- objArrayOop deps = oopFactory::new_objectArray(2, CHECK);
- deps->obj_at_put(0, dependency());
-
- // Must handle over GC points
- objArrayHandle new_dependency(THREAD, deps);
-
- // Add the dependency under lock
- locked_add(last_handle, new_dependency, THREAD);
-}
-
-void ClassLoaderData::Dependencies::locked_add(objArrayHandle last_handle,
- objArrayHandle new_dependency,
- Thread* THREAD) {
-
- // Have to lock and put the new dependency on the end of the dependency
- // array so the card mark for CMS sees that this dependency is new.
- // Can probably do this lock free with some effort.
- ObjectLocker ol(Handle(THREAD, _list_head), THREAD);
-
- oop loader_or_mirror = new_dependency->obj_at(0);
-
- // Since the dependencies are only added, add to the end.
- objArrayOop end = last_handle();
- objArrayOop last = NULL;
- while (end != NULL) {
- last = end;
- // check again if another thread added it to the end.
- if (end->obj_at(0) == loader_or_mirror) {
- // Don't need to add it
- return;
- }
- end = (objArrayOop)end->obj_at(1);
- }
- assert (last != NULL, "dependencies should be initialized");
- // fill in the first element with the oop in new_dependency.
- if (last->obj_at(0) == NULL) {
- last->obj_at_put(0, new_dependency->obj_at(0));
- } else {
- last->obj_at_put(1, new_dependency());
- }
-}
-
void ClassLoaderDataGraph::clear_claimed_marks() {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->clear_claimed();
@@ -453,15 +427,15 @@
}
}
- if (publicize && k->class_loader_data() != NULL) {
- ResourceMark rm;
- log_trace(class, loader, data)("Adding k: " PTR_FORMAT " %s to CLD: "
- PTR_FORMAT " loader: " PTR_FORMAT " %s",
- p2i(k),
- k->external_name(),
- p2i(k->class_loader_data()),
- p2i((void *)k->class_loader()),
- loader_name());
+ if (publicize) {
+ LogTarget(Trace, class, loader, data) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ ls.print("Adding k: " PTR_FORMAT " %s to ", p2i(k), k->external_name());
+ print_value_on(&ls);
+ ls.cr();
+ }
}
}
@@ -578,12 +552,8 @@
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
- ls.print(": unload loader data " INTPTR_FORMAT, p2i(this));
- ls.print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)class_loader()),
- loader_name());
- if (is_anonymous()) {
- ls.print(" for anonymous class " INTPTR_FORMAT " ", p2i(_klasses));
- }
+ ls.print("unload ");
+ print_value_on(&ls);
ls.cr();
}
@@ -710,7 +680,7 @@
}
// release the metaspace
- Metaspace *m = _metaspace;
+ ClassLoaderMetaspace *m = _metaspace;
if (m != NULL) {
_metaspace = NULL;
delete m;
@@ -764,32 +734,26 @@
return is_builtin_class_loader_data() && !is_anonymous();
}
-Metaspace* ClassLoaderData::metaspace_non_null() {
+ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
// If the metaspace has not been allocated, create a new one. Might want
// to create smaller arena for Reflection class loaders also.
// The reason for the delayed allocation is because some class loaders are
// simply for delegating with no metadata of their own.
// Lock-free access requires load_acquire.
- Metaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
+ ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
if (metaspace == NULL) {
MutexLockerEx ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
// Check if _metaspace got allocated while we were waiting for this lock.
if ((metaspace = _metaspace) == NULL) {
if (this == the_null_class_loader_data()) {
assert (class_loader() == NULL, "Must be");
- metaspace = new Metaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
+ metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
} else if (is_anonymous()) {
- if (class_loader() != NULL) {
- log_trace(class, loader, data)("is_anonymous: %s", class_loader()->klass()->internal_name());
- }
- metaspace = new Metaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+ metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
} else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
- if (class_loader() != NULL) {
- log_trace(class, loader, data)("is_reflection: %s", class_loader()->klass()->internal_name());
- }
- metaspace = new Metaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
+ metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
} else {
- metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
+ metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
}
// Ensure _metaspace is stable, since it is examined without a lock
OrderAccess::release_store(&_metaspace, metaspace);
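
The lazy-initialization pattern above is classic double-checked locking: an
acquire-load fast path, a mutex for the slow path, a re-check under the lock,
and a release-store to publish the fully constructed object. A minimal
standalone sketch of the same ordering discipline, using std::atomic in place
of HotSpot's OrderAccess (names here are illustrative, not HotSpot APIs):

    #include <atomic>
    #include <mutex>

    struct Metadata { int payload = 0; };

    static std::atomic<Metadata*> g_meta{nullptr};
    static std::mutex g_lock;

    Metadata* metadata_non_null() {
      // Fast path: the acquire-load pairs with the release-store below, so a
      // non-null result guarantees the object's fields are visible.
      Metadata* m = g_meta.load(std::memory_order_acquire);
      if (m == nullptr) {
        std::lock_guard<std::mutex> guard(g_lock);
        // Re-check: another thread may have initialized it while we waited.
        m = g_meta.load(std::memory_order_relaxed);
        if (m == nullptr) {
          m = new Metadata();
          // Publish only after construction is complete.
          g_meta.store(m, std::memory_order_release);
        }
      }
      return m;
    }
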
@@ -808,7 +772,7 @@
assert(!is_unloading(), "Do not remove a handle for a CLD that is unloading");
oop* ptr = h.ptr_raw();
if (ptr != NULL) {
- assert(_handles.contains(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
+ assert(_handles.owner_of(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
// This root is not walked in safepoints, and hence requires an appropriate
// decorator that e.g. maintains the SATB invariant in SATB collectors.
RootAccess<IN_CONCURRENT_ROOT>::oop_store(ptr, oop(NULL));
@@ -902,49 +866,44 @@
}
// These anonymous class loaders are to contain classes used for JSR292
-ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
+ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(Handle loader) {
// Add a new class loader data to the graph.
- Handle lh(THREAD, loader);
- return ClassLoaderDataGraph::add(lh, true, THREAD);
+ return ClassLoaderDataGraph::add(loader, true);
}
-const char* ClassLoaderData::loader_name() {
+const char* ClassLoaderData::loader_name() const {
// Handles null class loader
return SystemDictionary::loader_name(class_loader());
}
-#ifndef PRODUCT
-// Define to dump klasses
-#undef CLD_DUMP_KLASSES
-void ClassLoaderData::dump(outputStream * const out) {
- out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: " PTR_FORMAT " %s {",
- p2i(this), p2i((void *)class_loader()),
- p2i(class_loader() != NULL ? class_loader()->klass() : NULL), loader_name());
- if (claimed()) out->print(" claimed ");
- if (is_unloading()) out->print(" unloading ");
- out->cr();
- if (metaspace_or_null() != NULL) {
- out->print_cr("metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null()));
- metaspace_or_null()->dump(out);
+void ClassLoaderData::print_value_on(outputStream* out) const {
+ if (class_loader() != NULL) {
+ out->print("loader data: " INTPTR_FORMAT " for instance ", p2i(this));
+ class_loader()->print_value_on(out); // includes loader_name() and address of class loader instance
} else {
- out->print_cr("metaspace: NULL");
+ // loader data: 0xsomeaddr of <bootloader>
+ out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name());
}
+ if (is_anonymous()) {
+ out->print(" anonymous");
+ }
+}
-#ifdef CLD_DUMP_KLASSES
- if (Verbose) {
- Klass* k = _klasses;
- while (k != NULL) {
- out->print_cr("klass " PTR_FORMAT ", %s", p2i(k), k->name()->as_C_string());
- assert(k != k->next_link(), "no loops!");
- k = k->next_link();
- }
- }
-#endif // CLD_DUMP_KLASSES
-#undef CLD_DUMP_KLASSES
+#ifndef PRODUCT
+void ClassLoaderData::print_on(outputStream* out) const {
+ out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: %s {",
+ p2i(this), p2i((void *)class_loader()), loader_name());
+ if (is_anonymous()) out->print(" anonymous");
+ if (claimed()) out->print(" claimed");
+ if (is_unloading()) out->print(" unloading");
+ out->print(" metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null()));
+
if (_jmethod_ids != NULL) {
Method::print_jmethod_ids(this, out);
}
+ out->print(" handles count %d", _handles.count());
+ out->print(" dependencies %d", _dependency_count);
out->print_cr("}");
}
#endif // PRODUCT
@@ -988,16 +947,12 @@
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
- // We need to allocate all the oops for the ClassLoaderData before allocating the
- // actual ClassLoaderData object.
- ClassLoaderData::Dependencies dependencies(CHECK_NULL);
-
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
// ClassLoaderData in the graph since the CLD
// contains unhandled oops
- ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);
+ ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
if (!is_anonymous) {
@@ -1021,9 +976,11 @@
if (exchanged == next) {
LogTarget(Debug, class, loader, data) lt;
if (lt.is_enabled()) {
- PauseNoSafepointVerifier pnsv(&no_safepoints); // Need safe points for JavaCalls::call_virtual
- LogStream ls(lt);
- print_creation(&ls, loader, cld, CHECK_NULL);
+ ResourceMark rm;
+ LogStream ls(lt);
+ ls.print("create ");
+ cld->print_value_on(&ls);
+ ls.cr();
}
return cld;
}
@@ -1031,36 +988,6 @@
} while (true);
}
-void ClassLoaderDataGraph::print_creation(outputStream* out, Handle loader, ClassLoaderData* cld, TRAPS) {
- Handle string;
- if (loader.not_null()) {
- // Include the result of loader.toString() in the output. This allows
- // the user of the log to identify the class loader instance.
- JavaValue result(T_OBJECT);
- Klass* spec_klass = SystemDictionary::ClassLoader_klass();
- JavaCalls::call_virtual(&result,
- loader,
- spec_klass,
- vmSymbols::toString_name(),
- vmSymbols::void_string_signature(),
- CHECK);
- assert(result.get_type() == T_OBJECT, "just checking");
- string = Handle(THREAD, (oop)result.get_jobject());
- }
-
- ResourceMark rm;
- out->print("create class loader data " INTPTR_FORMAT, p2i(cld));
- out->print(" for instance " INTPTR_FORMAT " of %s", p2i((void *)cld->class_loader()),
- cld->loader_name());
-
- if (string.not_null()) {
- out->print(": ");
- java_lang_String::print(string(), out);
- }
- out->cr();
-}
-
-
void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->oops_do(f, must_claim);
@@ -1266,7 +1193,8 @@
bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
- if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
+ // Needs fixing, see JDK-8199007.
+ if (cld->metaspace_or_null() != NULL && Metaspace::contains(x)) {
return true;
}
}
@@ -1477,7 +1405,8 @@
#ifndef PRODUCT
// callable from debugger
extern "C" int print_loader_data_graph() {
- ClassLoaderDataGraph::dump_on(tty);
+ ResourceMark rm;
+ ClassLoaderDataGraph::print_on(tty);
return 0;
}
@@ -1487,32 +1416,13 @@
}
}
-void ClassLoaderDataGraph::dump_on(outputStream * const out) {
+void ClassLoaderDataGraph::print_on(outputStream * const out) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
- data->dump(out);
+ data->print_on(out);
}
- MetaspaceAux::dump(out);
}
#endif // PRODUCT
-void ClassLoaderData::print_value_on(outputStream* out) const {
- if (class_loader() == NULL) {
- out->print("NULL class loader");
- } else {
- out->print("class loader " INTPTR_FORMAT " ", p2i(this));
- class_loader()->print_value_on(out);
- }
-}
-
-void ClassLoaderData::print_on(outputStream* out) const {
- if (class_loader() == NULL) {
- out->print("NULL class loader");
- } else {
- out->print("class loader " INTPTR_FORMAT " ", p2i(this));
- class_loader()->print_on(out);
- }
-}
-
#if INCLUDE_TRACE
Ticks ClassLoaderDataGraph::_class_unload_time;
--- a/src/hotspot/share/classfile/classLoaderData.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -83,10 +83,10 @@
static volatile size_t _num_instance_classes;
static volatile size_t _num_array_classes;
- static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
+ static ClassLoaderData* add(Handle class_loader, bool anonymous);
static void post_class_unload_events();
public:
- static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
+ static ClassLoaderData* find_or_create(Handle class_loader);
static void purge();
static void clear_claimed_marks();
// oops do
@@ -151,10 +151,9 @@
static bool has_metaspace_oom() { return _metaspace_oom; }
static void set_metaspace_oom(bool value) { _metaspace_oom = value; }
- static void dump_on(outputStream * const out) PRODUCT_RETURN;
- static void dump() { dump_on(tty); }
+ static void print_on(outputStream * const out) PRODUCT_RETURN;
+ static void print() { print_on(tty); }
static void verify();
- static void print_creation(outputStream* out, Handle loader, ClassLoaderData* cld, TRAPS);
static bool unload_list_contains(const void* x);
@@ -181,23 +180,9 @@
class ClassLoaderData : public CHeapObj<mtClass> {
friend class VMStructs;
+
private:
- class Dependencies VALUE_OBJ_CLASS_SPEC {
- objArrayOop _list_head;
- void locked_add(objArrayHandle last,
- objArrayHandle new_dependency,
- Thread* THREAD);
- public:
- Dependencies() : _list_head(NULL) {}
- Dependencies(TRAPS) : _list_head(NULL) {
- init(CHECK);
- }
- void add(Handle dependency, TRAPS);
- void init(TRAPS);
- void oops_do(OopClosure* f);
- };
-
- class ChunkedHandleList VALUE_OBJ_CLASS_SPEC {
+ class ChunkedHandleList {
struct Chunk : public CHeapObj<mtClass> {
static const size_t CAPACITY = 32;
@@ -219,10 +204,11 @@
// Only one thread at a time can add, guarded by ClassLoaderData::metaspace_lock().
// However, multiple threads can execute oops_do concurrently with add.
oop* add(oop o);
-#ifdef ASSERT
- bool contains(oop* p);
-#endif
+ bool contains(oop p);
+ NOT_PRODUCT(bool owner_of(oop* p);)
void oops_do(OopClosure* f);
+
+ int count() const;
};
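
The ChunkedHandleList contract noted above (a single writer serialized by the
metaspace lock, with oops_do running concurrently) works because chunks are
append-only and sizes are published with release semantics. A sketch of that
shape under those assumptions; std::atomic stands in for HotSpot's primitives
and all names are illustrative:

    #include <atomic>
    #include <cstddef>

    template <typename T, size_t CAPACITY = 32>
    class ChunkedList {
      struct Chunk {
        T _data[CAPACITY];
        std::atomic<size_t> _size{0}; // entries published so far
        Chunk* const _next;           // older chunk, fixed at construction
        explicit Chunk(Chunk* next) : _next(next) {}
      };
      std::atomic<Chunk*> _head{nullptr};

     public:
      // Caller must serialize add() calls (e.g. under a mutex).
      T* add(const T& value) {
        Chunk* head = _head.load(std::memory_order_acquire);
        if (head == nullptr ||
            head->_size.load(std::memory_order_relaxed) == CAPACITY) {
          Chunk* next = new Chunk(head);
          _head.store(next, std::memory_order_release); // publish empty chunk
          head = next;
        }
        size_t i = head->_size.load(std::memory_order_relaxed);
        head->_data[i] = value;
        // Release-store makes the slot visible before readers see the new size.
        head->_size.store(i + 1, std::memory_order_release);
        return &head->_data[i];
      }

      // Safe to run concurrently with add(): only published slots are visited.
      template <typename F>
      void for_each(F f) {
        for (Chunk* c = _head.load(std::memory_order_acquire);
             c != nullptr; c = c->_next) {
          size_t n = c->_size.load(std::memory_order_acquire);
          for (size_t i = 0; i < n; i++) f(c->_data[i]);
        }
      }
    };
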
friend class ClassLoaderDataGraph;
@@ -237,10 +223,8 @@
oop _class_loader; // oop used to uniquely identify a class loader
// class loader or a canonical class path
- Dependencies _dependencies; // holds dependencies from this class loader
- // data to others.
- Metaspace * volatile _metaspace; // Meta-space where meta-data defined by the
+ ClassLoaderMetaspace * volatile _metaspace; // Meta-space where meta-data defined by the
// classes in the class loader are allocated.
Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup.
bool _unloading; // true if this class loader goes away
@@ -261,6 +245,8 @@
ChunkedHandleList _handles; // Handles to constant pool arrays, Modules, etc, which
// have the same life cycle of the corresponding ClassLoader.
+ NOT_PRODUCT(volatile int _dependency_count;) // number of class loader dependencies
+
Klass* volatile _klasses; // The classes defined by the class loader.
PackageEntryTable* volatile _packages; // The packages defined by the class loader.
ModuleEntryTable* volatile _modules; // The modules defined by the class loader.
@@ -279,17 +265,12 @@
// Support for walking class loader data objects
ClassLoaderData* _next; /// Next loader_datas created
- // ReadOnly and ReadWrite metaspaces (static because only on the null
- // class loader for now).
- static Metaspace* _ro_metaspace;
- static Metaspace* _rw_metaspace;
-
TRACE_DEFINE_TRACE_ID_FIELD;
void set_next(ClassLoaderData* next) { _next = next; }
ClassLoaderData* next() const { return _next; }
- ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
+ ClassLoaderData(Handle h_class_loader, bool is_anonymous);
~ClassLoaderData();
// The CLD are not placed in the Heap, so the Card Table or
@@ -331,7 +312,7 @@
bool is_alive(BoolObjectClosure* is_alive_closure) const;
// Accessors
- Metaspace* metaspace_or_null() const { return _metaspace; }
+ ClassLoaderMetaspace* metaspace_or_null() const { return _metaspace; }
static ClassLoaderData* the_null_class_loader_data() {
return _the_null_class_loader_data;
@@ -341,20 +322,18 @@
bool is_anonymous() const { return _is_anonymous; }
- static void init_null_class_loader_data() {
- assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
- assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
-
- // We explicitly initialize the Dependencies object at a later phase in the initialization
- _the_null_class_loader_data = new ClassLoaderData(Handle(), false, Dependencies());
- ClassLoaderDataGraph::_head = _the_null_class_loader_data;
- assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
- }
+ static void init_null_class_loader_data();
bool is_the_null_class_loader_data() const {
return this == _the_null_class_loader_data;
}
+
+ // Returns true if this class loader data is for the system class loader.
+ // (Note that the class loader data may be anonymous.)
bool is_system_class_loader_data() const;
+
+ // Returns true if this class loader data is for the platform class loader.
+ // (Note that the class loader data may be anonymous.)
bool is_platform_class_loader_data() const;
// Returns true if this class loader data is for the boot class loader.
@@ -368,7 +347,7 @@
// The Metaspace is created lazily so may be NULL. This
// method will allocate a Metaspace if needed.
- Metaspace* metaspace_non_null();
+ ClassLoaderMetaspace* metaspace_non_null();
oop class_loader() const { return _class_loader; }
@@ -397,12 +376,11 @@
void set_jmethod_ids(JNIMethodBlock* new_block) { _jmethod_ids = new_block; }
void print() { print_on(tty); }
- void print_on(outputStream* out) const;
+ void print_on(outputStream* out) const PRODUCT_RETURN;
void print_value() { print_value_on(tty); }
void print_value_on(outputStream* out) const;
- void dump(outputStream * const out) PRODUCT_RETURN;
void verify();
- const char* loader_name();
+ const char* loader_name() const;
OopHandle add_handle(Handle h);
void remove_handle(OopHandle h);
@@ -410,8 +388,7 @@
void add_class(Klass* k, bool publicize = true);
void remove_class(Klass* k);
bool contains_klass(Klass* k);
- void record_dependency(const Klass* to, TRAPS);
- void init_dependencies(TRAPS);
+ void record_dependency(const Klass* to);
PackageEntryTable* packages() { return _packages; }
ModuleEntry* unnamed_module() { return _unnamed_module; }
ModuleEntryTable* modules();
@@ -424,8 +401,7 @@
static ClassLoaderData* class_loader_data(oop loader);
static ClassLoaderData* class_loader_data_or_null(oop loader);
- static ClassLoaderData* anonymous_class_loader_data(oop loader, TRAPS);
- static void print_loader(ClassLoaderData *loader_data, outputStream *out);
+ static ClassLoaderData* anonymous_class_loader_data(Handle loader);
TRACE_DEFINE_TRACE_ID_METHODS;
};
@@ -446,9 +422,9 @@
ClassLoaderDataGraphMetaspaceIterator();
~ClassLoaderDataGraphMetaspaceIterator();
bool repeat() { return _data != NULL; }
- Metaspace* get_next() {
+ ClassLoaderMetaspace* get_next() {
assert(_data != NULL, "Should not be NULL in call to the iterator");
- Metaspace* result = _data->metaspace_or_null();
+ ClassLoaderMetaspace* result = _data->metaspace_or_null();
_data = _data->next();
// This result might be NULL for class loaders without metaspace
// yet. It would be nice to return only non-null results but
--- a/src/hotspot/share/classfile/classLoaderData.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -22,6 +22,9 @@
*
*/
+#ifndef SHARE_VM_CLASSFILE_CLASSLOADERDATA_INLINE_HPP
+#define SHARE_VM_CLASSFILE_CLASSLOADERDATA_INLINE_HPP
+
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "oops/oop.inline.hpp"
@@ -40,7 +43,7 @@
}
-inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAPS) {
+inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader) {
guarantee(loader() != NULL && oopDesc::is_oop(loader()), "Loader must be oop");
// Gets the class loader data out of the java/lang/ClassLoader object, if non-null
// it's already in the loader_data, so no need to add
@@ -48,7 +51,7 @@
if (loader_data) {
return loader_data;
}
- return ClassLoaderDataGraph::add(loader, false, THREAD);
+ return ClassLoaderDataGraph::add(loader, false);
}
size_t ClassLoaderDataGraph::num_instance_classes() {
@@ -76,3 +79,5 @@
assert(count <= _num_array_classes, "Sanity");
Atomic::sub(count, &_num_array_classes);
}
+
+#endif // SHARE_VM_CLASSFILE_CLASSLOADERDATA_INLINE_HPP
--- a/src/hotspot/share/classfile/classLoaderExt.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classListParser.hpp"
-#include "classfile/classLoader.hpp"
+#include "classfile/classLoader.inline.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/klassFactory.hpp"
@@ -197,28 +197,6 @@
Thread* ClassLoaderExt::Context::_dump_thread = NULL;
-bool ClassLoaderExt::check(ClassLoaderExt::Context *context,
- const ClassFileStream* stream,
- const int classpath_index) {
- if (stream != NULL) {
- // Ignore any App classes from signed JAR file during CDS archiving
- // dumping
- if (DumpSharedSpaces &&
- SharedClassUtil::is_classpath_entry_signed(classpath_index) &&
- classpath_index >= _app_paths_start_index) {
- tty->print_cr("Preload Warning: Skipping %s from signed JAR",
- context->class_name());
- return false;
- }
- if (classpath_index >= _app_paths_start_index) {
- _has_app_classes = true;
- _has_platform_classes = true;
- }
- }
-
- return true;
-}
-
void ClassLoaderExt::record_result(ClassLoaderExt::Context *context,
Symbol* class_name,
const s2 classpath_index,
--- a/src/hotspot/share/classfile/classLoaderExt.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,11 +65,6 @@
#endif
}
- bool check(const ClassFileStream* stream, const int classpath_index) {
- CDS_ONLY(return ClassLoaderExt::check(this, stream, classpath_index);)
- NOT_CDS(return true;)
- }
-
bool should_verify(int classpath_index) {
CDS_ONLY(return (classpath_index >= _app_paths_start_index);)
NOT_CDS(return false;)
@@ -156,10 +151,6 @@
return _has_app_classes || _has_platform_classes;
}
- static bool check(class ClassLoaderExt::Context *context,
- const ClassFileStream* stream,
- const int classpath_index);
-
static void record_result(class ClassLoaderExt::Context *context,
Symbol* class_name,
const s2 classpath_index,
--- a/src/hotspot/share/classfile/classLoaderStats.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderStats.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -76,7 +76,7 @@
}
_total_classes += csc._num_classes;
- Metaspace* ms = cld->metaspace_or_null();
+ ClassLoaderMetaspace* ms = cld->metaspace_or_null();
if (ms != NULL) {
if(cld->is_anonymous()) {
cls->_anon_chunk_sz += ms->allocated_chunks_bytes();
--- a/src/hotspot/share/classfile/compactHashtable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/compactHashtable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
class SerializeClosure;
// Stats for symbol tables in the CDS archive
-class CompactHashtableStats VALUE_OBJ_CLASS_SPEC {
+class CompactHashtableStats {
public:
int hashentry_count;
int hashentry_bytes;
@@ -71,7 +71,7 @@
//
class CompactHashtableWriter: public StackObj {
public:
- class Entry VALUE_OBJ_CLASS_SPEC {
+ class Entry {
unsigned int _hash;
u4 _value;
@@ -194,7 +194,7 @@
// See CompactHashtableWriter::dump() for how the table is written at CDS
// dump time.
//
-class SimpleCompactHashtable VALUE_OBJ_CLASS_SPEC {
+class SimpleCompactHashtable {
protected:
address _base_address;
u4 _bucket_count;
@@ -281,7 +281,7 @@
// Because the dump file may be big (hundred of MB in extreme cases),
// we use mmap for fast access when reading it.
//
-class HashtableTextDump VALUE_OBJ_CLASS_SPEC {
+class HashtableTextDump {
int _fd;
const char* _base;
const char* _p;
--- a/src/hotspot/share/classfile/dictionary.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/dictionary.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/sharedClassUtil.hpp"
-#include "classfile/dictionary.hpp"
+#include "classfile/dictionary.inline.hpp"
#include "classfile/protectionDomainCache.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
@@ -481,6 +481,7 @@
void Dictionary::reorder_dictionary_for_sharing() {
// Copy all the dictionary entries into a single master list.
+ assert(DumpSharedSpaces, "Should only be used at dump time");
DictionaryEntry* master_list = NULL;
for (int i = 0; i < table_size(); ++i) {
@@ -488,7 +489,7 @@
while (p != NULL) {
DictionaryEntry* next = p->next();
InstanceKlass*ik = p->instance_klass();
- if (ik->signers() != NULL) {
+ if (ik->has_signer_and_not_archived()) {
// We cannot include signed classes in the archive because the certificates
// used during dump time may be different than those used during
// runtime (due to expiration, etc).
@@ -604,13 +605,16 @@
Klass* e = probe->instance_klass();
bool is_defining_class =
(loader_data() == e->class_loader_data());
- st->print("%4d: %s%s, loader ", index, is_defining_class ? " " : "^", e->external_name());
- ClassLoaderData* loader_data = e->class_loader_data();
- if (loader_data == NULL) {
+ st->print("%4d: %s%s", index, is_defining_class ? " " : "^", e->external_name());
+ ClassLoaderData* cld = e->class_loader_data();
+ if (cld == NULL) {
// Shared class not restored yet in shared dictionary
- st->print("<shared, not restored>");
- } else {
- loader_data->print_value_on(st);
+ st->print(", loader data <shared, not restored>");
+ } else if (!loader_data()->is_the_null_class_loader_data()) {
+ // Class loader output for the dictionary for the null class loader data is
+ // redundant and obvious.
+ st->print(", ");
+ cld->print_value_on(st);
}
st->cr();
}
--- a/src/hotspot/share/classfile/dictionary.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/dictionary.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,7 +29,6 @@
#include "classfile/systemDictionary.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
-#include "runtime/orderAccess.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/ostream.hpp"
@@ -170,12 +169,8 @@
ProtectionDomainEntry* pd_set() const { return _pd_set; }
void set_pd_set(ProtectionDomainEntry* new_head) { _pd_set = new_head; }
- ProtectionDomainEntry* pd_set_acquire() const {
- return OrderAccess::load_acquire(&_pd_set);
- }
- void release_set_pd_set(ProtectionDomainEntry* new_head) {
- OrderAccess::release_store(&_pd_set, new_head);
- }
+ ProtectionDomainEntry* pd_set_acquire() const;
+ void release_set_pd_set(ProtectionDomainEntry* new_head);
// Tells whether the initiating class' protection domain can access the klass in this entry
bool is_valid_protection_domain(Handle protection_domain) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/classfile/dictionary.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_DICTIONARY_INLINE_HPP
+#define SHARE_VM_CLASSFILE_DICTIONARY_INLINE_HPP
+
+#include "classfile/dictionary.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+inline ProtectionDomainEntry* DictionaryEntry::pd_set_acquire() const {
+ return OrderAccess::load_acquire(&_pd_set);
+}
+
+inline void DictionaryEntry::release_set_pd_set(ProtectionDomainEntry* new_head) {
+ OrderAccess::release_store(&_pd_set, new_head);
+}
+
+#endif // SHARE_VM_CLASSFILE_DICTIONARY_INLINE_HPP
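
pd_set_acquire()/release_set_pd_set() exist so readers can traverse the
protection-domain list without a lock while a writer prepends entries. A
minimal sketch of that publish/traverse pairing, assuming writers are
otherwise serialized (illustrative names, std::atomic rather than OrderAccess):

    #include <atomic>

    struct PDEntryModel {
      int domain_id;
      PDEntryModel* next;  // fixed before the node is published
    };

    static std::atomic<PDEntryModel*> pd_set_head{nullptr};

    // Writer (assumed externally serialized): build the node fully, then
    // release-store it so readers never see a half-initialized entry.
    void prepend(int id) {
      PDEntryModel* e =
          new PDEntryModel{id, pd_set_head.load(std::memory_order_relaxed)};
      pd_set_head.store(e, std::memory_order_release);
    }

    // Reader: acquire-load the head and walk the list lock-free.
    bool contains(int id) {
      for (PDEntryModel* e = pd_set_head.load(std::memory_order_acquire);
           e != nullptr; e = e->next) {
        if (e->domain_id == id) return true;
      }
      return false;
    }
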
--- a/src/hotspot/share/classfile/javaClasses.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/javaClasses.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -36,13 +36,14 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
+#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/klass.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
@@ -67,6 +68,11 @@
#define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java) \
klass::_##name##_offset = JavaClasses::compute_injected_offset(JavaClasses::klass##_##name##_enum);
+#if INCLUDE_CDS
+#define INJECTED_FIELD_SERIALIZE_OFFSET(klass, name, signature, may_be_java) \
+ f->do_u4((u4*)&_##name##_offset);
+#endif
+
#define DECLARE_INJECTED_FIELD(klass, name, signature, may_be_java) \
{ SystemDictionary::WK_KLASS_ENUM_NAME(klass), vmSymbols::VM_SYMBOL_ENUM_NAME(name##_name), vmSymbols::VM_SYMBOL_ENUM_NAME(signature), may_be_java },
@@ -170,17 +176,43 @@
return is_instance_inlined(obj);
}
+#if INCLUDE_CDS
+#define FIELD_SERIALIZE_OFFSET(offset, klass, name, signature, is_static) \
+ f->do_u4((u4*)&offset)
+
+#define FIELD_SERIALIZE_OFFSET_OPTIONAL(offset, klass, name, signature) \
+ f->do_u4((u4*)&offset)
+#endif
+
+#define FIELD_COMPUTE_OFFSET(offset, klass, name, signature, is_static) \
+ compute_offset(offset, klass, name, vmSymbols::signature(), is_static)
+
+#define FIELD_COMPUTE_OFFSET_OPTIONAL(offset, klass, name, signature) \
+ compute_optional_offset(offset, klass, name, vmSymbols::signature())
+
+#define STRING_FIELDS_DO(macro) \
+ macro(value_offset, k, vmSymbols::value_name(), byte_array_signature, false); \
+ macro(hash_offset, k, "hash", int_signature, false); \
+ macro(coder_offset, k, "coder", byte_signature, false)
+
void java_lang_String::compute_offsets() {
- assert(!initialized, "offsets should be initialized only once");
+ if (initialized) {
+ return;
+ }
InstanceKlass* k = SystemDictionary::String_klass();
- compute_offset(value_offset, k, vmSymbols::value_name(), vmSymbols::byte_array_signature());
- compute_offset(hash_offset, k, "hash", vmSymbols::int_signature());
- compute_offset(coder_offset, k, "coder", vmSymbols::byte_signature());
+ STRING_FIELDS_DO(FIELD_COMPUTE_OFFSET);
initialized = true;
}
+#if INCLUDE_CDS
+void java_lang_String::serialize(SerializeClosure* f) {
+ STRING_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+ f->do_u4((u4*)&initialized);
+}
+#endif
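
STRING_FIELDS_DO (and the later *_FIELDS_DO lists) are X-macros: each field is
written down once, and the same list is expanded with FIELD_COMPUTE_OFFSET at
startup and FIELD_SERIALIZE_OFFSET for CDS, so the two can never drift apart;
the macro##_OPTIONAL trick used further down selects a different expansion for
optional fields. A self-contained sketch of the pattern (hypothetical Point
fields, printf standing in for SerializeClosure::do_u4):

    #include <cstdio>

    // Single source of truth for the field list.
    #define POINT_FIELDS_DO(macro) \
      macro(x);                    \
      macro(y)

    // Expansion 1: declare one offset variable per field.
    #define DECLARE_OFFSET(name) static int name##_offset = -1
    POINT_FIELDS_DO(DECLARE_OFFSET);

    // Expansion 2: compute the offsets (stand-in for compute_offset()).
    static int next_offset = 0;
    #define COMPUTE_OFFSET(name) name##_offset = next_offset++
    void compute_offsets() { POINT_FIELDS_DO(COMPUTE_OFFSET); }

    // Expansion 3: serialize the same list (stand-in for f->do_u4()).
    #define SERIALIZE_OFFSET(name) \
      std::printf(#name "_offset=%d\n", name##_offset)
    void serialize() { POINT_FIELDS_DO(SERIALIZE_OFFSET); }
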
+
class CompactStringsFixup : public FieldClosure {
private:
bool _value;
@@ -657,10 +689,10 @@
assert(str2->klass() == SystemDictionary::String_klass(),
"must be java String");
typeArrayOop value1 = java_lang_String::value_no_keepalive(str1);
- int length1 = java_lang_String::length(value1);
+ int length1 = java_lang_String::length(str1);
bool is_latin1 = java_lang_String::is_latin1(str1);
typeArrayOop value2 = java_lang_String::value_no_keepalive(str2);
- int length2 = java_lang_String::length(value2);
+ int length2 = java_lang_String::length(str2);
bool is_latin2 = java_lang_String::is_latin1(str2);
if ((length1 != length2) || (is_latin1 != is_latin2)) {
@@ -731,12 +763,17 @@
break;
case T_OBJECT:
{
- #ifdef ASSERT
- TempNewSymbol sym = SymbolTable::new_symbol("Ljava/lang/String;", CHECK);
- assert(fd->signature() == sym, "just checking");
- #endif
- oop string = fd->string_initial_value(CHECK);
- mirror()->obj_field_put(fd->offset(), string);
+ assert(fd->signature() == vmSymbols::string_signature(),
+ "just checking");
+ if (DumpSharedSpaces && oopDesc::is_archive_object(mirror())) {
+ // Archive the String field and update the pointer.
+ oop s = mirror()->obj_field(fd->offset());
+ oop archived_s = StringTable::create_archived_string(s, CHECK);
+ mirror()->obj_field_put(fd->offset(), archived_s);
+ } else {
+ oop string = fd->string_initial_value(CHECK);
+ mirror()->obj_field_put(fd->offset(), string);
+ }
}
break;
default:
@@ -764,6 +801,21 @@
}
}
}
+
+ if (k->is_shared() && k->has_raw_archived_mirror()) {
+ if (MetaspaceShared::open_archive_heap_region_mapped()) {
+ oop m = k->archived_java_mirror();
+ assert(m != NULL, "archived mirror is NULL");
+ assert(oopDesc::is_archive_object(m), "must be archived mirror object");
+ Handle m_h(THREAD, m);
+ // restore_archived_mirror() clears the klass' _has_raw_archived_mirror flag
+ restore_archived_mirror(k, m_h, Handle(), Handle(), Handle(), CHECK);
+ return;
+ } else {
+ k->set_java_mirror_handle(NULL);
+ k->clear_has_raw_archived_mirror();
+ }
+ }
create_mirror(k, Handle(), Handle(), Handle(), CHECK);
}
@@ -916,6 +968,277 @@
}
}
+#if INCLUDE_CDS_JAVA_HEAP
+// Clears mirror fields. Static final fields with initial values are reloaded
+// from constant pool. The object identity hash is in the object header and is
+// not affected.
+class ResetMirrorField: public FieldClosure {
+ private:
+ Handle _m;
+
+ public:
+ ResetMirrorField(Handle mirror) : _m(mirror) {}
+
+ void do_field(fieldDescriptor* fd) {
+ assert(DumpSharedSpaces, "dump time only");
+ assert(_m.not_null(), "Mirror cannot be NULL");
+
+ if (fd->is_static() && fd->has_initial_value()) {
+ initialize_static_field(fd, _m, Thread::current());
+ return;
+ }
+
+ BasicType ft = fd->field_type();
+ switch (ft) {
+ case T_BYTE:
+ _m()->byte_field_put(fd->offset(), 0);
+ break;
+ case T_CHAR:
+ _m()->char_field_put(fd->offset(), 0);
+ break;
+ case T_DOUBLE:
+ _m()->double_field_put(fd->offset(), 0);
+ break;
+ case T_FLOAT:
+ _m()->float_field_put(fd->offset(), 0);
+ break;
+ case T_INT:
+ _m()->int_field_put(fd->offset(), 0);
+ break;
+ case T_LONG:
+ _m()->long_field_put(fd->offset(), 0);
+ break;
+ case T_SHORT:
+ _m()->short_field_put(fd->offset(), 0);
+ break;
+ case T_BOOLEAN:
+ _m()->bool_field_put(fd->offset(), false);
+ break;
+ case T_ARRAY:
+ case T_OBJECT: {
+ // It might be useful to cache the String field, but
+ // for now just clear out any reference field
+ oop o = _m()->obj_field(fd->offset());
+ _m()->obj_field_put(fd->offset(), NULL);
+ break;
+ }
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+ }
+};
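
ResetMirrorField follows HotSpot's FieldClosure idiom: the field iteration
lives in one place (do_nonstatic_fields / do_local_static_fields) and the
per-field behavior is supplied as a closure object. A stripped-down sketch of
the shape; all names here are hypothetical, not the real InstanceKlass API:

    #include <cstdio>
    #include <vector>

    struct FieldDesc { const char* name; int offset; };

    struct FieldClosure {
      virtual ~FieldClosure() = default;
      virtual void do_field(const FieldDesc& fd) = 0;
    };

    struct KlassModel {
      std::vector<FieldDesc> fields;
      // Iteration logic is written once; behavior is injected.
      void do_nonstatic_fields(FieldClosure* cl) {
        for (const FieldDesc& fd : fields) cl->do_field(fd);
      }
    };

    struct PrintField : FieldClosure {
      void do_field(const FieldDesc& fd) override {
        std::printf("%s @ %d\n", fd.name, fd.offset);
      }
    };

    int main() {
      KlassModel k{{{"value", 12}, {"hash", 16}}};
      PrintField p;
      k.do_nonstatic_fields(&p);
    }
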
+
+void java_lang_Class::archive_basic_type_mirrors(TRAPS) {
+ assert(MetaspaceShared::is_heap_object_archiving_allowed(),
+ "MetaspaceShared::is_heap_object_archiving_allowed() must be true");
+
+ for (int t = 0; t <= T_VOID; t++) {
+ oop m = Universe::_mirrors[t];
+ if (m != NULL) {
+ // Update the field at _array_klass_offset to point to the relocated array klass.
+ oop archived_m = MetaspaceShared::archive_heap_object(m, THREAD);
+ Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
+ assert(ak != NULL || t == T_VOID, "should not be NULL");
+ if (ak != NULL) {
+ Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak);
+ archived_m->metadata_field_put(_array_klass_offset, reloc_ak);
+ }
+
+ // Clear the fields, just to be safe.
+ Klass *k = m->klass();
+ Handle archived_mirror_h(THREAD, archived_m);
+ ResetMirrorField reset(archived_mirror_h);
+ InstanceKlass::cast(k)->do_nonstatic_fields(&reset);
+
+ log_trace(cds, mirror)("Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
+ type2name((BasicType)t), p2i(Universe::_mirrors[t]), p2i(archived_m));
+
+ Universe::_mirrors[t] = archived_m;
+ }
+ }
+
+ assert(Universe::_mirrors[T_INT] != NULL &&
+ Universe::_mirrors[T_FLOAT] != NULL &&
+ Universe::_mirrors[T_DOUBLE] != NULL &&
+ Universe::_mirrors[T_BYTE] != NULL &&
+ Universe::_mirrors[T_BOOLEAN] != NULL &&
+ Universe::_mirrors[T_CHAR] != NULL &&
+ Universe::_mirrors[T_LONG] != NULL &&
+ Universe::_mirrors[T_SHORT] != NULL &&
+ Universe::_mirrors[T_VOID] != NULL, "sanity");
+
+ Universe::set_int_mirror(Universe::_mirrors[T_INT]);
+ Universe::set_float_mirror(Universe::_mirrors[T_FLOAT]);
+ Universe::set_double_mirror(Universe::_mirrors[T_DOUBLE]);
+ Universe::set_byte_mirror(Universe::_mirrors[T_BYTE]);
+ Universe::set_bool_mirror(Universe::_mirrors[T_BOOLEAN]);
+ Universe::set_char_mirror(Universe::_mirrors[T_CHAR]);
+ Universe::set_long_mirror(Universe::_mirrors[T_LONG]);
+ Universe::set_short_mirror(Universe::_mirrors[T_SHORT]);
+ Universe::set_void_mirror(Universe::_mirrors[T_VOID]);
+}
+
+//
+// After the mirror object is successfully archived, the archived
+// klass is set with the _has_raw_archived_mirror flag.
+//
+// The _has_raw_archived_mirror flag is cleared at runtime when the
+// archived mirror is restored. If the archived java heap data cannot
+// be used at runtime, a new mirror object is created for the shared
+// class, and _has_raw_archived_mirror is also cleared during that process.
+oop java_lang_Class::archive_mirror(Klass* k, TRAPS) {
+ assert(MetaspaceShared::is_heap_object_archiving_allowed(),
+ "MetaspaceShared::is_heap_object_archiving_allowed() must be true");
+
+ // Mirror is already archived
+ if (k->has_raw_archived_mirror()) {
+ assert(k->archived_java_mirror_raw() != NULL, "no archived mirror");
+ return k->archived_java_mirror_raw();
+ }
+
+ // No mirror
+ oop mirror = k->java_mirror();
+ if (mirror == NULL) {
+ return NULL;
+ }
+
+ if (k->is_instance_klass()) {
+ InstanceKlass *ik = InstanceKlass::cast(k);
+ assert(ik->signers() == NULL && !k->has_signer_and_not_archived(),
+ "class with signer cannot be supported");
+
+ if (!(ik->is_shared_boot_class() || ik->is_shared_platform_class() ||
+ ik->is_shared_app_class())) {
+ // Archiving mirror for classes from non-builtin loaders is not
+ // supported. Clear the _java_mirror within the archived class.
+ k->set_java_mirror_handle(NULL);
+ return NULL;
+ }
+ }
+
+ // Now start archiving the mirror object
+ oop archived_mirror = MetaspaceShared::archive_heap_object(mirror, THREAD);
+ if (archived_mirror == NULL) {
+ return NULL;
+ }
+
+ archived_mirror = process_archived_mirror(k, mirror, archived_mirror, THREAD);
+ if (archived_mirror == NULL) {
+ return NULL;
+ }
+
+ k->set_archived_java_mirror_raw(archived_mirror);
+
+ k->set_has_raw_archived_mirror();
+
+ ResourceMark rm;
+ log_trace(cds, mirror)("Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
+ k->external_name(), p2i(mirror), p2i(archived_mirror));
+
+ return archived_mirror;
+}
+
+// The process is based on create_mirror().
+oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror,
+ oop archived_mirror,
+ Thread *THREAD) {
+ // Clear nonstatic fields in archived mirror. Some of the fields will be set
+ // to archived metadata and objects below.
+ Klass *c = archived_mirror->klass();
+ Handle archived_mirror_h(THREAD, archived_mirror);
+ ResetMirrorField reset(archived_mirror_h);
+ InstanceKlass::cast(c)->do_nonstatic_fields(&reset);
+
+ if (k->is_array_klass()) {
+ oop archived_comp_mirror;
+ if (k->is_typeArray_klass()) {
+ // The primitive type mirrors are already archived. Get the archived mirror.
+ oop comp_mirror = java_lang_Class::component_mirror(mirror);
+ archived_comp_mirror = MetaspaceShared::find_archived_heap_object(comp_mirror);
+ assert(archived_comp_mirror != NULL, "Must be");
+ } else {
+ assert(k->is_objArray_klass(), "Must be");
+ Klass* element_klass = ObjArrayKlass::cast(k)->element_klass();
+ assert(element_klass != NULL, "Must have an element klass");
+ archived_comp_mirror = archive_mirror(element_klass, THREAD);
+ if (archived_comp_mirror == NULL) {
+ return NULL;
+ }
+ }
+ java_lang_Class::set_component_mirror(archived_mirror, archived_comp_mirror);
+ } else {
+ assert(k->is_instance_klass(), "Must be");
+
+ // Reset local static fields in the mirror
+ InstanceKlass::cast(k)->do_local_static_fields(&reset);
+
+ java_lang_Class::set_init_lock(archived_mirror, NULL);
+
+ set_protection_domain(archived_mirror, NULL);
+ }
+
+ // clear class loader and mirror_module_field
+ set_class_loader(archived_mirror, NULL);
+ set_module(archived_mirror, NULL);
+
+ // The archived mirror's field at _klass_offset is still pointing to the original
+ // klass. Update the field in the archived mirror to point to the relocated
+ // klass in the archive.
+ Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror));
+ log_debug(cds, mirror)("Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
+ p2i(as_Klass(mirror)), p2i(reloc_k));
+ archived_mirror->metadata_field_put(_klass_offset, reloc_k);
+
+ // The field at _array_klass_offset points to the original one-dimension-higher
+ // array klass, if one exists. Relocate the pointer.
+ Klass *arr = array_klass_acquire(mirror);
+ if (arr != NULL) {
+ Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr);
+ log_debug(cds, mirror)("Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
+ p2i(arr), p2i(reloc_arr));
+ archived_mirror->metadata_field_put(_array_klass_offset, reloc_arr);
+ }
+ return archived_mirror;
+}
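
MetaspaceShared::get_relocated_klass() maps a Klass* in the running VM to the
address its copy will have in the mapped archive. A sketch of the arithmetic,
under the simplifying assumption that the copied metadata forms one contiguous
region shifted by a constant delta (an assumption for illustration; this patch
does not spell out the real mapping):

    #include <cassert>
    #include <cstdint>

    struct RegionRelocator {
      uintptr_t src_base;  // start of the copied region at dump time
      uintptr_t src_end;   // end of the copied region
      uintptr_t dst_base;  // where the region will be mapped at runtime

      template <typename T>
      T* relocate(T* p) const {
        uintptr_t a = reinterpret_cast<uintptr_t>(p);
        assert(a >= src_base && a < src_end && "pointer outside copied region");
        return reinterpret_cast<T*>(dst_base + (a - src_base));
      }
    };
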
+
+// After the archived mirror object is restored, the shared klass'
+// _has_raw_archived_mirror flag is cleared
+void java_lang_Class::restore_archived_mirror(Klass *k, Handle mirror,
+ Handle class_loader, Handle module,
+ Handle protection_domain, TRAPS) {
+
+ // The java.lang.Class field offsets were archived and reloaded from the archive.
+ // No need to put classes on the fixup_mirror_list before java.lang.Class
+ // is loaded.
+
+ if (!k->is_array_klass()) {
+ // - local static final fields with initial values were initialized at dump time
+
+ // create the init_lock
+ typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+ set_init_lock(mirror(), r);
+
+ if (protection_domain.not_null()) {
+ set_protection_domain(mirror(), protection_domain());
+ }
+ }
+
+ assert(class_loader() == k->class_loader(), "should be same");
+ if (class_loader.not_null()) {
+ set_class_loader(mirror(), class_loader());
+ }
+
+ k->set_java_mirror(mirror);
+ k->clear_has_raw_archived_mirror();
+
+ set_mirror_module_field(k, mirror, module, THREAD);
+
+ ResourceMark rm;
+ log_trace(cds, mirror)("Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
+}
+#endif // INCLUDE_CDS_JAVA_HEAP
+
void java_lang_Class::fixup_module_field(Klass* k, Handle module) {
assert(_module_offset != 0, "must have been computed already");
java_lang_Class::set_module(k->java_mirror(), module());
@@ -1167,15 +1490,21 @@
bool java_lang_Class::offsets_computed = false;
int java_lang_Class::classRedefinedCount_offset = -1;
+#define CLASS_FIELDS_DO(macro) \
+ macro(classRedefinedCount_offset, k, "classRedefinedCount", int_signature, false); \
+ macro(_class_loader_offset, k, "classLoader", classloader_signature, false); \
+ macro(_component_mirror_offset, k, "componentType", class_signature, false); \
+ macro(_module_offset, k, "module", module_signature, false)
+
void java_lang_Class::compute_offsets() {
- assert(!offsets_computed, "offsets should be initialized only once");
+ if (offsets_computed) {
+ return;
+ }
+
offsets_computed = true;
InstanceKlass* k = SystemDictionary::Class_klass();
- compute_offset(classRedefinedCount_offset, k, "classRedefinedCount", vmSymbols::int_signature());
- compute_offset(_class_loader_offset, k, "classLoader", vmSymbols::classloader_signature());
- compute_offset(_component_mirror_offset, k, "componentType", vmSymbols::class_signature());
- compute_offset(_module_offset, k, "module", vmSymbols::module_signature());
+ CLASS_FIELDS_DO(FIELD_COMPUTE_OFFSET);
// Init lock is a C union with component_mirror. Only instanceKlass mirrors have
// init_lock and only ArrayKlass mirrors have component_mirror. Since both are oops
@@ -1185,6 +1514,17 @@
CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_Class::serialize(SerializeClosure* f) {
+ f->do_u4((u4*)&offsets_computed);
+ f->do_u4((u4*)&_init_lock_offset);
+
+ CLASS_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+
+ CLASS_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
if (classRedefinedCount_offset == -1) {
// If we don't have an offset for it then just return -1 as a marker.
@@ -1226,28 +1566,33 @@
int java_lang_Thread::_park_blocker_offset = 0;
int java_lang_Thread::_park_event_offset = 0 ;
+#define THREAD_FIELDS_DO(macro) \
+ macro(_name_offset, k, vmSymbols::name_name(), string_signature, false); \
+ macro(_group_offset, k, vmSymbols::group_name(), threadgroup_signature, false); \
+ macro(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), classloader_signature, false); \
+ macro(_inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), accesscontrolcontext_signature, false); \
+ macro(_priority_offset, k, vmSymbols::priority_name(), int_signature, false); \
+ macro(_daemon_offset, k, vmSymbols::daemon_name(), bool_signature, false); \
+ macro(_eetop_offset, k, "eetop", long_signature, false); \
+ macro(_stillborn_offset, k, "stillborn", bool_signature, false); \
+ macro(_stackSize_offset, k, "stackSize", long_signature, false); \
+ macro(_tid_offset, k, "tid", long_signature, false); \
+ macro(_thread_status_offset, k, "threadStatus", int_signature, false); \
+ macro(_park_blocker_offset, k, "parkBlocker", object_signature, false); \
+ macro(_park_event_offset, k, "nativeParkEventPointer", long_signature, false)
void java_lang_Thread::compute_offsets() {
assert(_group_offset == 0, "offsets should be initialized only once");
InstanceKlass* k = SystemDictionary::Thread_klass();
- compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
- compute_offset(_group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature());
- compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(),
- vmSymbols::classloader_signature());
- compute_offset(_inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(),
- vmSymbols::accesscontrolcontext_signature());
- compute_offset(_priority_offset, k, vmSymbols::priority_name(), vmSymbols::int_signature());
- compute_offset(_daemon_offset, k, vmSymbols::daemon_name(), vmSymbols::bool_signature());
- compute_offset(_eetop_offset, k, "eetop", vmSymbols::long_signature());
- compute_offset(_stillborn_offset, k, "stillborn", vmSymbols::bool_signature());
- compute_offset(_stackSize_offset, k, "stackSize", vmSymbols::long_signature());
- compute_offset(_tid_offset, k, "tid", vmSymbols::long_signature());
- compute_offset(_thread_status_offset, k, "threadStatus", vmSymbols::int_signature());
- compute_offset(_park_blocker_offset, k, "parkBlocker", vmSymbols::object_signature());
- compute_offset(_park_event_offset, k, "nativeParkEventPointer", vmSymbols::long_signature());
-}
-
+ THREAD_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_Thread::serialize(SerializeClosure* f) {
+ THREAD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
JavaThread* java_lang_Thread::thread(oop java_thread) {
return (JavaThread*)java_thread->address_field(_eetop_offset);
@@ -1477,32 +1822,47 @@
return java_thread_group->bool_field(_daemon_offset) != 0;
}
+#define THREADGROUP_FIELDS_DO(macro) \
+ macro(_parent_offset, k, vmSymbols::parent_name(), threadgroup_signature, false); \
+ macro(_name_offset, k, vmSymbols::name_name(), string_signature, false); \
+ macro(_threads_offset, k, vmSymbols::threads_name(), thread_array_signature, false); \
+ macro(_groups_offset, k, vmSymbols::groups_name(), threadgroup_array_signature, false); \
+ macro(_maxPriority_offset, k, vmSymbols::maxPriority_name(), int_signature, false); \
+ macro(_destroyed_offset, k, vmSymbols::destroyed_name(), bool_signature, false); \
+ macro(_daemon_offset, k, vmSymbols::daemon_name(), bool_signature, false); \
+ macro(_nthreads_offset, k, vmSymbols::nthreads_name(), int_signature, false); \
+ macro(_ngroups_offset, k, vmSymbols::ngroups_name(), int_signature, false)
+
void java_lang_ThreadGroup::compute_offsets() {
assert(_parent_offset == 0, "offsets should be initialized only once");
InstanceKlass* k = SystemDictionary::ThreadGroup_klass();
-
- compute_offset(_parent_offset, k, vmSymbols::parent_name(), vmSymbols::threadgroup_signature());
- compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
- compute_offset(_threads_offset, k, vmSymbols::threads_name(), vmSymbols::thread_array_signature());
- compute_offset(_groups_offset, k, vmSymbols::groups_name(), vmSymbols::threadgroup_array_signature());
- compute_offset(_maxPriority_offset, k, vmSymbols::maxPriority_name(), vmSymbols::int_signature());
- compute_offset(_destroyed_offset, k, vmSymbols::destroyed_name(), vmSymbols::bool_signature());
- compute_offset(_daemon_offset, k, vmSymbols::daemon_name(), vmSymbols::bool_signature());
- compute_offset(_nthreads_offset, k, vmSymbols::nthreads_name(), vmSymbols::int_signature());
- compute_offset(_ngroups_offset, k, vmSymbols::ngroups_name(), vmSymbols::int_signature());
-}
-
+ THREADGROUP_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_ThreadGroup::serialize(SerializeClosure* f) {
+ THREADGROUP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define THROWABLE_FIELDS_DO(macro) \
+ macro(backtrace_offset, k, "backtrace", object_signature, false); \
+ macro(detailMessage_offset, k, "detailMessage", string_signature, false); \
+ macro(stackTrace_offset, k, "stackTrace", java_lang_StackTraceElement_array, false); \
+ macro(depth_offset, k, "depth", int_signature, false); \
+ macro(static_unassigned_stacktrace_offset, k, "UNASSIGNED_STACK", java_lang_StackTraceElement_array, true)
void java_lang_Throwable::compute_offsets() {
InstanceKlass* k = SystemDictionary::Throwable_klass();
- compute_offset(backtrace_offset, k, "backtrace", vmSymbols::object_signature());
- compute_offset(detailMessage_offset, k, "detailMessage", vmSymbols::string_signature());
- compute_offset(stackTrace_offset, k, "stackTrace", vmSymbols::java_lang_StackTraceElement_array());
- compute_offset(depth_offset, k, "depth", vmSymbols::int_signature());
- compute_offset(static_unassigned_stacktrace_offset, k, "UNASSIGNED_STACK", vmSymbols::java_lang_StackTraceElement_array(),
- /*is_static*/true);
-}
+ THROWABLE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_Throwable::serialize(SerializeClosure* f) {
+ THROWABLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
oop java_lang_Throwable::unassigned_stacktrace() {
InstanceKlass* ik = SystemDictionary::Throwable_klass();
@@ -2268,25 +2628,53 @@
java_lang_StackTraceElement::fill_in(stack_trace_element, holder, method, version, bci, name, CHECK);
}
+#define STACKFRAMEINFO_FIELDS_DO(macro) \
+ macro(_memberName_offset, k, "memberName", object_signature, false); \
+ macro(_bci_offset, k, "bci", short_signature, false)
+
void java_lang_StackFrameInfo::compute_offsets() {
InstanceKlass* k = SystemDictionary::StackFrameInfo_klass();
- compute_offset(_memberName_offset, k, "memberName", vmSymbols::object_signature());
- compute_offset(_bci_offset, k, "bci", vmSymbols::short_signature());
+ STACKFRAMEINFO_FIELDS_DO(FIELD_COMPUTE_OFFSET);
STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_StackFrameInfo::serialize(SerializeClosure* f) {
+ STACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+ STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define LIVESTACKFRAMEINFO_FIELDS_DO(macro) \
+ macro(_monitors_offset, k, "monitors", object_array_signature, false); \
+ macro(_locals_offset, k, "locals", object_array_signature, false); \
+ macro(_operands_offset, k, "operands", object_array_signature, false); \
+ macro(_mode_offset, k, "mode", int_signature, false)
+
void java_lang_LiveStackFrameInfo::compute_offsets() {
InstanceKlass* k = SystemDictionary::LiveStackFrameInfo_klass();
- compute_offset(_monitors_offset, k, "monitors", vmSymbols::object_array_signature());
- compute_offset(_locals_offset, k, "locals", vmSymbols::object_array_signature());
- compute_offset(_operands_offset, k, "operands", vmSymbols::object_array_signature());
- compute_offset(_mode_offset, k, "mode", vmSymbols::int_signature());
-}
+ LIVESTACKFRAMEINFO_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_LiveStackFrameInfo::serialize(SerializeClosure* f) {
+ LIVESTACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define ACCESSIBLEOBJECT_FIELDS_DO(macro) \
+ macro(override_offset, k, "override", bool_signature, false)
void java_lang_reflect_AccessibleObject::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_AccessibleObject_klass();
- compute_offset(override_offset, k, "override", vmSymbols::bool_signature());
-}
+ ACCESSIBLEOBJECT_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_reflect_AccessibleObject::serialize(SerializeClosure* f) {
+ ACCESSIBLEOBJECT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
jboolean java_lang_reflect_AccessibleObject::override(oop reflect) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
@@ -2298,27 +2686,36 @@
reflect->bool_field_put(override_offset, (int) value);
}
+#define METHOD_FIELDS_DO(macro) \
+ macro(clazz_offset, k, vmSymbols::clazz_name(), class_signature, false); \
+ macro(name_offset, k, vmSymbols::name_name(), string_signature, false); \
+ macro(returnType_offset, k, vmSymbols::returnType_name(), class_signature, false); \
+ macro(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), class_array_signature, false); \
+ macro(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), class_array_signature, false); \
+ macro(slot_offset, k, vmSymbols::slot_name(), int_signature, false); \
+ macro(modifiers_offset, k, vmSymbols::modifiers_name(), int_signature, false); \
+ macro##_OPTIONAL(signature_offset, k, vmSymbols::signature_name(), string_signature); \
+ macro##_OPTIONAL(annotations_offset, k, vmSymbols::annotations_name(), byte_array_signature); \
+ macro##_OPTIONAL(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), byte_array_signature); \
+ macro##_OPTIONAL(annotation_default_offset, k, vmSymbols::annotation_default_name(), byte_array_signature); \
+ macro##_OPTIONAL(type_annotations_offset, k, vmSymbols::type_annotations_name(), byte_array_signature)
+
void java_lang_reflect_Method::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_Method_klass();
- compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
- compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
- compute_offset(returnType_offset, k, vmSymbols::returnType_name(), vmSymbols::class_signature());
- compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
- compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
- compute_offset(slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature());
- compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature());
// The generic signature and annotations fields are only present in 1.5
signature_offset = -1;
annotations_offset = -1;
parameter_annotations_offset = -1;
annotation_default_offset = -1;
type_annotations_offset = -1;
- compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature());
- compute_optional_offset(annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature());
- compute_optional_offset(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
- compute_optional_offset(annotation_default_offset, k, vmSymbols::annotation_default_name(), vmSymbols::byte_array_signature());
- compute_optional_offset(type_annotations_offset, k, vmSymbols::type_annotations_name(), vmSymbols::byte_array_signature());
-}
+ METHOD_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_reflect_Method::serialize(SerializeClosure* f) {
+ METHOD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
Handle java_lang_reflect_Method::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
@@ -2479,23 +2876,33 @@
method->obj_field_put(type_annotations_offset, value);
}
+#define CONSTRUCTOR_FIELDS_DO(macro) \
+ macro(clazz_offset, k, vmSymbols::clazz_name(), class_signature, false); \
+ macro(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), class_array_signature, false); \
+ macro(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), class_array_signature, false); \
+ macro(slot_offset, k, vmSymbols::slot_name(), int_signature, false); \
+ macro(modifiers_offset, k, vmSymbols::modifiers_name(), int_signature, false); \
+ macro##_OPTIONAL(signature_offset, k, vmSymbols::signature_name(), string_signature); \
+ macro##_OPTIONAL(annotations_offset, k, vmSymbols::annotations_name(), byte_array_signature); \
+ macro##_OPTIONAL(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), byte_array_signature); \
+ macro##_OPTIONAL(type_annotations_offset, k, vmSymbols::type_annotations_name(), byte_array_signature)
+
+
void java_lang_reflect_Constructor::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_Constructor_klass();
- compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
- compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
- compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
- compute_offset(slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature());
- compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature());
// The generic signature and annotations fields are only present in 1.5
signature_offset = -1;
annotations_offset = -1;
parameter_annotations_offset = -1;
type_annotations_offset = -1;
- compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature());
- compute_optional_offset(annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature());
- compute_optional_offset(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
- compute_optional_offset(type_annotations_offset, k, vmSymbols::type_annotations_name(), vmSymbols::byte_array_signature());
-}
+ CONSTRUCTOR_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_reflect_Constructor::serialize(SerializeClosure* f) {
+ CONSTRUCTOR_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
Handle java_lang_reflect_Constructor::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
@@ -2621,21 +3028,30 @@
constructor->obj_field_put(type_annotations_offset, value);
}
+#define FIELD_FIELDS_DO(macro) \
+ macro(clazz_offset, k, vmSymbols::clazz_name(), class_signature, false); \
+ macro(name_offset, k, vmSymbols::name_name(), string_signature, false); \
+ macro(type_offset, k, vmSymbols::type_name(), class_signature, false); \
+ macro(slot_offset, k, vmSymbols::slot_name(), int_signature, false); \
+ macro(modifiers_offset, k, vmSymbols::modifiers_name(), int_signature, false); \
+ macro##_OPTIONAL(signature_offset, k, vmSymbols::signature_name(), string_signature); \
+ macro##_OPTIONAL(annotations_offset, k, vmSymbols::annotations_name(), byte_array_signature); \
+ macro##_OPTIONAL(type_annotations_offset, k, vmSymbols::type_annotations_name(), byte_array_signature)
+
void java_lang_reflect_Field::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_Field_klass();
- compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
- compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
- compute_offset(type_offset, k, vmSymbols::type_name(), vmSymbols::class_signature());
- compute_offset(slot_offset, k, vmSymbols::slot_name(), vmSymbols::int_signature());
- compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature());
// The generic signature and annotations fields are only present in 1.5
signature_offset = -1;
annotations_offset = -1;
type_annotations_offset = -1;
- compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature());
- compute_optional_offset(annotations_offset, k, vmSymbols::annotations_name(), vmSymbols::byte_array_signature());
- compute_optional_offset(type_annotations_offset, k, vmSymbols::type_annotations_name(), vmSymbols::byte_array_signature());
-}
+ FIELD_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_reflect_Field::serialize(SerializeClosure* f) {
+ FIELD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
Handle java_lang_reflect_Field::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
@@ -2745,19 +3161,37 @@
field->obj_field_put(type_annotations_offset, value);
}
+#define CONSTANTPOOL_FIELDS_DO(macro) \
+ macro(_oop_offset, k, "constantPoolOop", object_signature, false)
+
void reflect_ConstantPool::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_ConstantPool_klass();
// The field is called ConstantPool* in the sun.reflect.ConstantPool class.
- compute_offset(_oop_offset, k, "constantPoolOop", vmSymbols::object_signature());
-}
+ CONSTANTPOOL_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void reflect_ConstantPool::serialize(SerializeClosure* f) {
+ CONSTANTPOOL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define PARAMETER_FIELDS_DO(macro) \
+ macro(name_offset, k, vmSymbols::name_name(), string_signature, false); \
+ macro(modifiers_offset, k, vmSymbols::modifiers_name(), int_signature, false); \
+ macro(index_offset, k, vmSymbols::index_name(), int_signature, false); \
+ macro(executable_offset, k, vmSymbols::executable_name(), executable_signature, false)
+
void java_lang_reflect_Parameter::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_Parameter_klass();
- compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
- compute_offset(modifiers_offset, k, vmSymbols::modifiers_name(), vmSymbols::int_signature());
- compute_offset(index_offset, k, vmSymbols::index_name(), vmSymbols::int_signature());
- compute_offset(executable_offset, k, vmSymbols::executable_name(), vmSymbols::executable_signature());
-}
+ PARAMETER_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_reflect_Parameter::serialize(SerializeClosure* f) {
+ PARAMETER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
Handle java_lang_reflect_Parameter::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
@@ -2829,13 +3263,22 @@
return jlmh;
}
+#define MODULE_FIELDS_DO(macro) \
+ macro(loader_offset, k, vmSymbols::loader_name(), classloader_signature, false); \
+ macro(name_offset, k, vmSymbols::name_name(), string_signature, false)
+
void java_lang_Module::compute_offsets() {
InstanceKlass* k = SystemDictionary::Module_klass();
- compute_offset(loader_offset, k, vmSymbols::loader_name(), vmSymbols::classloader_signature());
- compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
+ MODULE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
MODULE_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_Module::serialize(SerializeClosure* f) {
+ MODULE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+ MODULE_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
oop java_lang_Module::loader(oop module) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
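// Note: VM-injected fields (fields that exist only in the VM's view of a
// class, like Module's module_entry) get the same dual expansion through the
// *_INJECTED_FIELDS lists. A sketch of the serialize side, assuming the
// _<name>_offset naming used elsewhere in this file:
//
//   #define INJECTED_FIELD_SERIALIZE_OFFSET(klass, name, signature, may_be_java) \
//     f->do_u4((u4*)&_##name##_offset)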
@@ -2857,7 +3300,7 @@
module->obj_field_put(name_offset, value);
}
-ModuleEntry* java_lang_Module::module_entry(oop module, TRAPS) {
+ModuleEntry* java_lang_Module::module_entry(oop module) {
assert(_module_entry_offset != -1, "Uninitialized module_entry_offset");
assert(module != NULL, "module can't be null");
assert(oopDesc::is_oop(module), "module must be oop");
@@ -2867,8 +3310,8 @@
// If the inject field containing the ModuleEntry* is null then return the
// class loader's unnamed module.
oop loader = java_lang_Module::loader(module);
- Handle h_loader = Handle(THREAD, loader);
- ClassLoaderData* loader_cld = SystemDictionary::register_loader(h_loader, CHECK_NULL);
+ Handle h_loader = Handle(Thread::current(), loader);
+ ClassLoaderData* loader_cld = SystemDictionary::register_loader(h_loader);
return loader_cld->unnamed_module();
}
return module_entry;
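// Note: module_entry() drops its TRAPS parameter because its one throwing
// callee, SystemDictionary::register_loader(), no longer throws (see the
// systemDictionary.cpp hunk further down). With no exception path left, the
// CHECK_NULL plumbing disappears and the handle can be built on the current
// thread directly:
//
//   Handle h_loader = Handle(Thread::current(), loader);  // no THREAD param needed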
@@ -2912,10 +3355,19 @@
return InstanceKlass::cast(k)->constants();
}
+#define UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(macro) \
+ macro(_base_offset, k, "base", object_signature, false)
+
void reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
InstanceKlass* k = SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass();
- compute_offset(_base_offset, k, "base", vmSymbols::object_signature());
-}
+ UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void reflect_UnsafeStaticFieldAccessorImpl::serialize(SerializeClosure* f) {
+ UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
oop java_lang_boxing_object::initialize_and_allocate(BasicType type, TRAPS) {
Klass* k = SystemDictionary::box_klass(type);
@@ -3074,11 +3526,20 @@
// Support for java_lang_ref_SoftReference
//
+#define SOFTREFERENCE_FIELDS_DO(macro) \
+ macro(timestamp_offset, k, "timestamp", long_signature, false); \
+ macro(static_clock_offset, k, "clock", long_signature, true)
+
void java_lang_ref_SoftReference::compute_offsets() {
InstanceKlass* k = SystemDictionary::SoftReference_klass();
- compute_offset(timestamp_offset, k, "timestamp", vmSymbols::long_signature());
- compute_offset(static_clock_offset, k, "clock", vmSymbols::long_signature(), true);
-}
+ SOFTREFERENCE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_ref_SoftReference::serialize(SerializeClosure* f) {
+ SOFTREFERENCE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
jlong java_lang_ref_SoftReference::timestamp(oop ref) {
return ref->long_field(timestamp_offset);
@@ -3107,10 +3568,19 @@
return dmh->obj_field(member_offset_in_bytes());
}
+#define DIRECTMETHODHANDLE_FIELDS_DO(macro) \
+ macro(_member_offset, k, "member", java_lang_invoke_MemberName_signature, false)
+
void java_lang_invoke_DirectMethodHandle::compute_offsets() {
InstanceKlass* k = SystemDictionary::DirectMethodHandle_klass();
- compute_offset(_member_offset, k, "member", vmSymbols::java_lang_invoke_MemberName_signature());
-}
+ DIRECTMETHODHANDLE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_invoke_DirectMethodHandle::serialize(SerializeClosure* f) {
+ DIRECTMETHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
// Support for java_lang_invoke_MethodHandle
@@ -3129,33 +3599,67 @@
int java_lang_invoke_LambdaForm::_vmentry_offset;
+#define METHODHANDLE_FIELDS_DO(macro) \
+ macro(_type_offset, k, vmSymbols::type_name(), java_lang_invoke_MethodType_signature, false); \
+ macro(_form_offset, k, "form", java_lang_invoke_LambdaForm_signature, false)
+
void java_lang_invoke_MethodHandle::compute_offsets() {
InstanceKlass* k = SystemDictionary::MethodHandle_klass();
- compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_lang_invoke_MethodType_signature());
- compute_offset(_form_offset, k, "form", vmSymbols::java_lang_invoke_LambdaForm_signature());
-}
+ METHODHANDLE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_invoke_MethodHandle::serialize(SerializeClosure* f) {
+ METHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define MEMBERNAME_FIELDS_DO(macro) \
+ macro(_clazz_offset, k, vmSymbols::clazz_name(), class_signature, false); \
+ macro(_name_offset, k, vmSymbols::name_name(), string_signature, false); \
+ macro(_type_offset, k, vmSymbols::type_name(), object_signature, false); \
+ macro(_flags_offset, k, vmSymbols::flags_name(), int_signature, false); \
+ macro(_method_offset, k, vmSymbols::method_name(), java_lang_invoke_ResolvedMethodName_signature, false)
+
void java_lang_invoke_MemberName::compute_offsets() {
InstanceKlass* k = SystemDictionary::MemberName_klass();
- compute_offset(_clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
- compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
- compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::object_signature());
- compute_offset(_flags_offset, k, vmSymbols::flags_name(), vmSymbols::int_signature());
- compute_offset(_method_offset, k, vmSymbols::method_name(), vmSymbols::java_lang_invoke_ResolvedMethodName_signature());
+ MEMBERNAME_FIELDS_DO(FIELD_COMPUTE_OFFSET);
MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_invoke_MemberName::serialize(SerializeClosure* f) {
+ MEMBERNAME_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+ MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
void java_lang_invoke_ResolvedMethodName::compute_offsets() {
InstanceKlass* k = SystemDictionary::ResolvedMethodName_klass();
assert(k != NULL, "jdk mismatch");
RESOLVEDMETHOD_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_invoke_ResolvedMethodName::serialize(SerializeClosure* f) {
+ RESOLVEDMETHOD_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define LAMBDAFORM_FIELDS_DO(macro) \
+ macro(_vmentry_offset, k, "vmentry", java_lang_invoke_MemberName_signature, false)
+
void java_lang_invoke_LambdaForm::compute_offsets() {
InstanceKlass* k = SystemDictionary::LambdaForm_klass();
assert (k != NULL, "jdk mismatch");
- compute_offset(_vmentry_offset, k, "vmentry", vmSymbols::java_lang_invoke_MemberName_signature());
-}
+ LAMBDAFORM_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_invoke_LambdaForm::serialize(SerializeClosure* f) {
+ LAMBDAFORM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
bool java_lang_invoke_LambdaForm::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
@@ -3294,11 +3798,20 @@
int java_lang_invoke_MethodType::_rtype_offset;
int java_lang_invoke_MethodType::_ptypes_offset;
+#define METHODTYPE_FIELDS_DO(macro) \
+ macro(_rtype_offset, k, "rtype", class_signature, false); \
+ macro(_ptypes_offset, k, "ptypes", class_array_signature, false)
+
void java_lang_invoke_MethodType::compute_offsets() {
InstanceKlass* k = SystemDictionary::MethodType_klass();
- compute_offset(_rtype_offset, k, "rtype", vmSymbols::class_signature());
- compute_offset(_ptypes_offset, k, "ptypes", vmSymbols::class_array_signature());
-}
+ METHODTYPE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_invoke_MethodType::serialize(SerializeClosure* f) {
+ METHODTYPE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
void java_lang_invoke_MethodType::print_signature(oop mt, outputStream* st) {
st->print("(");
@@ -3379,12 +3892,20 @@
int java_lang_invoke_CallSite::_target_offset;
int java_lang_invoke_CallSite::_context_offset;
+#define CALLSITE_FIELDS_DO(macro) \
+ macro(_target_offset, k, "target", java_lang_invoke_MethodHandle_signature, false); \
+ macro(_context_offset, k, "context", java_lang_invoke_MethodHandleNatives_CallSiteContext_signature, false)
+
void java_lang_invoke_CallSite::compute_offsets() {
InstanceKlass* k = SystemDictionary::CallSite_klass();
- compute_offset(_target_offset, k, "target", vmSymbols::java_lang_invoke_MethodHandle_signature());
- compute_offset(_context_offset, k, "context",
- vmSymbols::java_lang_invoke_MethodHandleNatives_CallSiteContext_signature());
-}
+ CALLSITE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_invoke_CallSite::serialize(SerializeClosure* f) {
+ CALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
oop java_lang_invoke_CallSite::context(oop call_site) {
assert(java_lang_invoke_CallSite::is_instance(call_site), "");
@@ -3402,6 +3923,12 @@
CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(SerializeClosure* f) {
+ CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
intptr_t* vmdeps_addr = (intptr_t*)call_site->field_addr(_vmdependencies_offset);
@@ -3416,16 +3943,23 @@
int java_security_AccessControlContext::_isPrivileged_offset = 0;
int java_security_AccessControlContext::_isAuthorized_offset = -1;
+#define ACCESSCONTROLCONTEXT_FIELDS_DO(macro) \
+ macro(_context_offset, k, "context", protectiondomain_signature, false); \
+ macro(_privilegedContext_offset, k, "privilegedContext", accesscontrolcontext_signature, false); \
+ macro(_isPrivileged_offset, k, "isPrivileged", bool_signature, false); \
+ macro(_isAuthorized_offset, k, "isAuthorized", bool_signature, false)
+
void java_security_AccessControlContext::compute_offsets() {
assert(_isPrivileged_offset == 0, "offsets should be initialized only once");
InstanceKlass* k = SystemDictionary::AccessControlContext_klass();
-
- compute_offset(_context_offset, k, "context", vmSymbols::protectiondomain_signature());
- compute_offset(_privilegedContext_offset, k, "privilegedContext", vmSymbols::accesscontrolcontext_signature());
- compute_offset(_isPrivileged_offset, k, "isPrivileged", vmSymbols::bool_signature());
- compute_offset(_isAuthorized_offset, k, "isAuthorized", vmSymbols::bool_signature());
-}
-
+ ACCESSCONTROLCONTEXT_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_security_AccessControlContext::serialize(SerializeClosure* f) {
+ ACCESSCONTROLCONTEXT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
bool java_security_AccessControlContext::is_authorized(Handle context) {
assert(context.not_null() && context->klass() == SystemDictionary::AccessControlContext_klass(), "Invalid type");
@@ -3469,25 +4003,29 @@
return HeapAccess<>::atomic_cmpxchg_at(new_data, loader, _loader_data_offset, expected_data);
}
+#define CLASSLOADER_FIELDS_DO(macro) \
+ macro(parallelCapable_offset, k1, "parallelLockMap", concurrenthashmap_signature, false); \
+ macro(name_offset, k1, vmSymbols::name_name(), string_signature, false); \
+ macro(unnamedModule_offset, k1, "unnamedModule", module_signature, false); \
+ macro(parent_offset, k1, "parent", classloader_signature, false)
+
void java_lang_ClassLoader::compute_offsets() {
assert(!offsets_computed, "offsets should be initialized only once");
offsets_computed = true;
InstanceKlass* k1 = SystemDictionary::ClassLoader_klass();
- compute_offset(parallelCapable_offset,
- k1, "parallelLockMap", vmSymbols::concurrenthashmap_signature());
-
- compute_offset(name_offset,
- k1, vmSymbols::name_name(), vmSymbols::string_signature());
-
- compute_offset(unnamedModule_offset,
- k1, "unnamedModule", vmSymbols::module_signature());
-
- compute_offset(parent_offset, k1, "parent", vmSymbols::classloader_signature());
+ CLASSLOADER_FIELDS_DO(FIELD_COMPUTE_OFFSET);
CLASSLOADER_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
}
+#if INCLUDE_CDS
+void java_lang_ClassLoader::serialize(SerializeClosure* f) {
+ CLASSLOADER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+ CLASSLOADER_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
oop java_lang_ClassLoader::parent(oop loader) {
assert(is_instance(loader), "loader must be oop");
return loader->obj_field(parent_offset);
@@ -3571,13 +4109,22 @@
// Support for java_lang_System
//
+#define SYSTEM_FIELDS_DO(macro) \
+ macro(static_in_offset, k, "in", input_stream_signature, true); \
+ macro(static_out_offset, k, "out", print_stream_signature, true); \
+ macro(static_err_offset, k, "err", print_stream_signature, true); \
+ macro(static_security_offset, k, "security", security_manager_signature, true)
+
void java_lang_System::compute_offsets() {
InstanceKlass* k = SystemDictionary::System_klass();
- compute_offset(static_in_offset, k, "in", vmSymbols::input_stream_signature(), true);
- compute_offset(static_out_offset, k, "out", vmSymbols::print_stream_signature(), true);
- compute_offset(static_err_offset, k, "err", vmSymbols::print_stream_signature(), true);
- compute_offset(static_security_offset, k, "security", vmSymbols::security_manager_signature(), true);
-}
+ SYSTEM_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_System::serialize(SerializeClosure* f) {
+ SYSTEM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
int java_lang_System::in_offset_in_bytes() { return static_in_offset; }
int java_lang_System::out_offset_in_bytes() { return static_out_offset; }
@@ -3683,19 +4230,27 @@
int reflect_ConstantPool::_oop_offset;
int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
+#define STACKTRACEELEMENT_FIELDS_DO(macro) \
+ macro(declaringClassObject_offset, k, "declaringClassObject", class_signature, false); \
+ macro(classLoaderName_offset, k, "classLoaderName", string_signature, false); \
+ macro(moduleName_offset, k, "moduleName", string_signature, false); \
+ macro(moduleVersion_offset, k, "moduleVersion", string_signature, false); \
+ macro(declaringClass_offset, k, "declaringClass", string_signature, false); \
+ macro(methodName_offset, k, "methodName", string_signature, false); \
+ macro(fileName_offset, k, "fileName", string_signature, false); \
+ macro(lineNumber_offset, k, "lineNumber", int_signature, false)
+
// Support for java_lang_StackTraceElement
void java_lang_StackTraceElement::compute_offsets() {
InstanceKlass* k = SystemDictionary::StackTraceElement_klass();
- compute_offset(declaringClassObject_offset, k, "declaringClassObject", vmSymbols::class_signature());
- compute_offset(classLoaderName_offset, k, "classLoaderName", vmSymbols::string_signature());
- compute_offset(moduleName_offset, k, "moduleName", vmSymbols::string_signature());
- compute_offset(moduleVersion_offset, k, "moduleVersion", vmSymbols::string_signature());
- compute_offset(declaringClass_offset, k, "declaringClass", vmSymbols::string_signature());
- compute_offset(methodName_offset, k, "methodName", vmSymbols::string_signature());
- compute_offset(fileName_offset, k, "fileName", vmSymbols::string_signature());
- compute_offset(lineNumber_offset, k, "lineNumber", vmSymbols::int_signature());
-}
+ STACKTRACEELEMENT_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_StackTraceElement::serialize(SerializeClosure* f) {
+ STACKTRACEELEMENT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
void java_lang_StackTraceElement::set_fileName(oop element, oop value) {
element->obj_field_put(fileName_offset, value);
@@ -3754,16 +4309,23 @@
}
// Support for java Assertions - java_lang_AssertionStatusDirectives.
+#define ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(macro) \
+ macro(classes_offset, k, "classes", string_array_signature, false); \
+ macro(classEnabled_offset, k, "classEnabled", bool_array_signature, false); \
+ macro(packages_offset, k, "packages", string_array_signature, false); \
+ macro(packageEnabled_offset, k, "packageEnabled", bool_array_signature, false); \
+ macro(deflt_offset, k, "deflt", bool_signature, false)
+
void java_lang_AssertionStatusDirectives::compute_offsets() {
InstanceKlass* k = SystemDictionary::AssertionStatusDirectives_klass();
- compute_offset(classes_offset, k, "classes", vmSymbols::string_array_signature());
- compute_offset(classEnabled_offset, k, "classEnabled", vmSymbols::bool_array_signature());
- compute_offset(packages_offset, k, "packages", vmSymbols::string_array_signature());
- compute_offset(packageEnabled_offset, k, "packageEnabled", vmSymbols::bool_array_signature());
- compute_offset(deflt_offset, k, "deflt", vmSymbols::bool_signature());
-}
-
+ ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_AssertionStatusDirectives::serialize(SerializeClosure* f) {
+ ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
void java_lang_AssertionStatusDirectives::set_classes(oop o, oop val) {
o->obj_field_put(classes_offset, val);
@@ -3791,12 +4353,20 @@
return _limit_offset;
}
+#define BUFFER_FIELDS_DO(macro) \
+ macro(_limit_offset, k, "limit", int_signature, false)
+
void java_nio_Buffer::compute_offsets() {
InstanceKlass* k = SystemDictionary::nio_Buffer_klass();
assert(k != NULL, "must be loaded in 1.4+");
- compute_offset(_limit_offset, k, "limit", vmSymbols::int_signature());
-}
+ BUFFER_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_nio_Buffer::serialize(SerializeClosure* f) {
+ BUFFER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
if (_owner_offset != 0) return;
@@ -3835,6 +4405,10 @@
// Compute non-hard-coded field offsets of all the classes in this file
void JavaClasses::compute_offsets() {
+ if (UseSharedSpaces) {
+ return; // field offsets are loaded from archive
+ }
+
// java_lang_Class::compute_offsets was called earlier in bootstrap
java_lang_System::compute_offsets();
java_lang_ClassLoader::compute_offsets();
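// Note: the UseSharedSpaces early return is the payoff of the serialize()
// functions added throughout this file: with a CDS archive mapped in, every
// field offset is read back from the archive rather than recomputed by
// reflective lookup. A SerializeClosure is direction-agnostic, so the same
// do_u4(&offset) statement writes the static at dump time and reads it at
// startup. A minimal sketch, with hypothetical closure names and helpers:
//
//   void WriteClosure::do_u4(u4* p) { archive_put(*p); }    // -Xshare:dump
//   void ReadClosure::do_u4(u4* p)  { *p = archive_get(); } // -Xshare:on
//
// (archive_put/archive_get stand in for whatever buffer access the real
// closures use.)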
--- a/src/hotspot/share/classfile/javaClasses.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/javaClasses.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -71,6 +71,7 @@
};
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Instance creation
static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
@@ -222,6 +223,15 @@
static void fixup_mirror(Klass* k, TRAPS);
static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
+ // Archiving
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+ static void archive_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+ static oop archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
+ static oop process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
+ NOT_CDS_JAVA_HEAP_RETURN_(NULL);
+ static void restore_archived_mirror(Klass *k, Handle mirror, Handle class_loader, Handle module,
+ Handle protection_domain, TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+
static void fixup_module_field(Klass* k, Handle module);
// Conversion
@@ -306,6 +316,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Instance creation
static oop create();
// Returns the JavaThread associated with the thread obj
@@ -406,6 +418,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// parent ThreadGroup
static oop parent(oop java_thread_group);
// name
@@ -485,6 +499,7 @@
static void print_stack_usage(Handle stream);
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Allocate space for backtrace (created but stack trace not filled in)
static void allocate_backtrace(Handle throwable, TRAPS);
@@ -515,6 +530,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Accessors
static jboolean override(oop reflect);
static void set_override(oop reflect, jboolean value);
@@ -546,6 +563,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Allocation
static Handle create(TRAPS);
@@ -615,6 +634,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Allocation
static Handle create(TRAPS);
@@ -673,6 +694,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Allocation
static Handle create(TRAPS);
@@ -728,6 +751,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Allocation
static Handle create(TRAPS);
@@ -758,6 +783,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Allocation
static Handle create(Handle loader, Handle module_name, TRAPS);
@@ -771,7 +798,7 @@
static oop name(oop module);
static void set_name(oop module, oop value);
- static ModuleEntry* module_entry(oop module, TRAPS);
+ static ModuleEntry* module_entry(oop module);
static void set_module_entry(oop module, ModuleEntry* module_entry);
friend class JavaClasses;
@@ -787,6 +814,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Allocation
static Handle create(TRAPS);
@@ -809,6 +838,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
static int base_offset() {
return _base_offset;
}
@@ -910,6 +941,7 @@
static void set_clock(jlong value);
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
// Interface to java.lang.invoke.MethodHandle objects
@@ -926,6 +958,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Accessors
static oop type(oop mh);
static void set_type(oop mh, oop mtype);
@@ -955,6 +989,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Accessors
static oop member(oop mh);
@@ -980,6 +1016,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
// Accessors
static oop vmentry(oop lform);
static void set_vmentry(oop lform, oop invoker);
@@ -1011,6 +1049,8 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+
static int vmtarget_offset_in_bytes() { return _vmtarget_offset; }
static Method* vmtarget(oop resolved_method);
@@ -1048,6 +1088,7 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop clazz(oop mname);
static void set_clazz(oop mname, oop clazz);
@@ -1112,6 +1153,7 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop rtype(oop mt);
static objArrayOop ptypes(oop mt);
@@ -1147,6 +1189,7 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop target( oop site);
static void set_target( oop site, oop target);
@@ -1180,6 +1223,7 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static DependencyContext vmdependencies(oop context);
@@ -1203,6 +1247,7 @@
static void compute_offsets();
public:
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);
static bool is_authorized(Handle context);
@@ -1228,6 +1273,7 @@
public:
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static ClassLoaderData* loader_data(oop loader);
static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data);
@@ -1279,6 +1325,7 @@
static bool has_security_manager();
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@@ -1316,6 +1363,7 @@
int version, int bci, Symbol* name, TRAPS);
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@@ -1359,6 +1407,7 @@
static void set_version(oop info, short value);
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS);
@@ -1380,6 +1429,7 @@
static void set_mode(oop info, int value);
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@@ -1404,6 +1454,7 @@
static void set_deflt(oop obj, bool val);
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@@ -1417,6 +1468,7 @@
public:
static int limit_offset();
static void compute_offsets();
+ static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
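// Note: each serialize() declaration above carries NOT_CDS_RETURN, following
// the usual utilities/macros.hpp convention: in CDS builds it expands to
// nothing (a plain declaration, defined in the .cpp), and with INCLUDE_CDS
// off it expands to an empty body, so non-CDS builds need no definition:
//
//   #if INCLUDE_CDS
//   #define NOT_CDS_RETURN
//   #else
//   #define NOT_CDS_RETURN   {}
//   #endif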
--- a/src/hotspot/share/classfile/modules.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/modules.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -85,27 +85,27 @@
return java_lang_String::as_utf8_string(JNIHandles::resolve_non_null(version));
}
-static ModuleEntryTable* get_module_entry_table(Handle h_loader, TRAPS) {
+static ModuleEntryTable* get_module_entry_table(Handle h_loader) {
// This code can be called during start-up, before the classLoader's classLoader data got
// created. So, call register_loader() to make sure the classLoader data gets created.
- ClassLoaderData *loader_cld = SystemDictionary::register_loader(h_loader, CHECK_NULL);
+ ClassLoaderData *loader_cld = SystemDictionary::register_loader(h_loader);
return loader_cld->modules();
}
-static PackageEntryTable* get_package_entry_table(Handle h_loader, TRAPS) {
+static PackageEntryTable* get_package_entry_table(Handle h_loader) {
// This code can be called during start-up, before the classLoader's classLoader data got
// created. So, call register_loader() to make sure the classLoader data gets created.
- ClassLoaderData *loader_cld = SystemDictionary::register_loader(h_loader, CHECK_NULL);
+ ClassLoaderData *loader_cld = SystemDictionary::register_loader(h_loader);
return loader_cld->packages();
}
static ModuleEntry* get_module_entry(jobject module, TRAPS) {
- Handle module_h(THREAD, JNIHandles::resolve(module));
- if (!java_lang_Module::is_instance(module_h())) {
+ oop m = JNIHandles::resolve(module);
+ if (!java_lang_Module::is_instance(m)) {
THROW_MSG_NULL(vmSymbols::java_lang_IllegalArgumentException(),
"module is not an instance of type java.lang.Module");
}
- return java_lang_Module::module_entry(module_h(), CHECK_NULL);
+ return java_lang_Module::module_entry(m);
}
static PackageEntry* get_package_entry(ModuleEntry* module_entry, const char* package_name, TRAPS) {
@@ -124,7 +124,7 @@
ResourceMark rm(THREAD);
if (Modules::verify_package_name(package->as_C_string())) {
PackageEntryTable* const package_entry_table =
- get_package_entry_table(h_loader, CHECK_NULL);
+ get_package_entry_table(h_loader);
assert(package_entry_table != NULL, "Unexpected null package entry table");
return package_entry_table->lookup_only(package);
}
@@ -186,7 +186,7 @@
Handle h_loader(THREAD, loader);
// Ensure the boot loader's PackageEntryTable has been created
- PackageEntryTable* package_table = get_package_entry_table(h_loader, CHECK);
+ PackageEntryTable* package_table = get_package_entry_table(h_loader);
assert(pkg_list->length() == 0 || package_table != NULL, "Bad package_table");
// Ensure java.base's ModuleEntry has been created
@@ -346,7 +346,7 @@
pkg_list->append(pkg_symbol);
}
- ModuleEntryTable* module_table = get_module_entry_table(h_loader, CHECK);
+ ModuleEntryTable* module_table = get_module_entry_table(h_loader);
assert(module_table != NULL, "module entry table shouldn't be null");
// Create symbol* entry for module name.
@@ -382,7 +382,7 @@
MutexLocker ml(Module_lock, THREAD);
if (num_packages > 0) {
- package_table = get_package_entry_table(h_loader, CHECK);
+ package_table = get_package_entry_table(h_loader);
assert(package_table != NULL, "Missing package_table");
// Check that none of the packages exist in the class loader's package table.
--- a/src/hotspot/share/classfile/stringTable.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/stringTable.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,6 +39,7 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/diagnosticCommand.hpp"
#include "utilities/hashtable.inline.hpp"
@@ -703,7 +704,6 @@
assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
Thread* THREAD = Thread::current();
- G1CollectedHeap::heap()->begin_archive_alloc_range();
for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<oop, mtSymbol>* bucket = the_table()->bucket(i);
for ( ; bucket != NULL; bucket = bucket->next()) {
@@ -727,7 +727,6 @@
}
}
- G1CollectedHeap::heap()->end_archive_alloc_range(string_space, os::vm_allocation_granularity());
return true;
}
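// Note: removing the begin/end_archive_alloc_range() bracketing here
// suggests the G1 archive range is now opened and closed once by a common
// caller, so string archiving can share it with the mirror archiving added
// in this changeset; the copy loop between the two calls is untouched.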
--- a/src/hotspot/share/classfile/systemDictionary.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -56,6 +56,7 @@
#include "oops/instanceKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/klass.inline.hpp"
+#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
@@ -149,9 +150,9 @@
CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
}
-ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
+ClassLoaderData* SystemDictionary::register_loader(Handle class_loader) {
if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
- return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
+ return ClassLoaderDataGraph::find_or_create(class_loader);
}
// ----------------------------------------------------------------------------
@@ -663,7 +664,7 @@
// Fix for 4474172; see evaluation for more details
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
- ClassLoaderData *loader_data = register_loader(class_loader, CHECK_NULL);
+ ClassLoaderData* loader_data = register_loader(class_loader);
Dictionary* dictionary = loader_data->dictionary();
unsigned int d_hash = dictionary->compute_hash(name);
@@ -988,7 +989,7 @@
// Create a new CLD for anonymous class, that uses the same class loader
// as the host_klass
guarantee(host_klass->class_loader() == class_loader(), "should be the same");
- loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
+ loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
} else {
loader_data = ClassLoaderData::class_loader_data(class_loader());
}
@@ -1056,15 +1057,6 @@
Handle protection_domain,
ClassFileStream* st,
TRAPS) {
-#if INCLUDE_CDS
- ResourceMark rm(THREAD);
- if (DumpSharedSpaces && !class_loader.is_null() &&
- !UseAppCDS && strcmp(class_name->as_C_string(), "Unnamed") != 0) {
- // If AppCDS is not enabled, don't define the class at dump time (except for the "Unnamed"
- // class, which is used by MethodHandles).
- THROW_MSG_NULL(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());
- }
-#endif
HandleMark hm(THREAD);
@@ -1075,7 +1067,7 @@
DoObjectLock = false;
}
- ClassLoaderData* loader_data = register_loader(class_loader, CHECK_NULL);
+ ClassLoaderData* loader_data = register_loader(class_loader);
// Make sure we are synchronized on the class loader before we proceed
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
@@ -2513,11 +2505,10 @@
}
(*appendix_result) = Handle(THREAD, appendix);
// the target is stored in the cpCache and if a reference to this
- // MethodName is dropped we need a way to make sure the
+ // MemberName is dropped we need a way to make sure the
// class_loader containing this method is kept alive.
- // FIXME: the appendix might also preserve this dependency.
ClassLoaderData* this_key = accessing_klass->class_loader_data();
- this_key->record_dependency(m->method_holder(), CHECK_NULL); // Can throw OOM
+ this_key->record_dependency(m->method_holder());
return methodHandle(THREAD, m);
}
}
@@ -3052,6 +3043,9 @@
_master_dictionary(master_dictionary) {}
void do_cld(ClassLoaderData* cld) {
ResourceMark rm;
+ if (cld->is_anonymous()) {
+ return;
+ }
if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {
for (int i = 0; i < cld->dictionary()->table_size(); ++i) {
Dictionary* curr_dictionary = cld->dictionary();
@@ -3079,13 +3073,17 @@
}
};
-// Combining platform and system loader dictionaries into boot loader dictionaries.
+// Combining platform and system loader dictionaries into boot loader dictionary.
// During run time, we only have one shared dictionary.
void SystemDictionary::combine_shared_dictionaries() {
assert(DumpSharedSpaces, "dump time only");
- Dictionary* master_dictionary = ClassLoaderData::the_null_class_loader_data()->dictionary();
- CombineDictionariesClosure cdc(master_dictionary);
- ClassLoaderDataGraph::cld_do(&cdc);
+ // If AppCDS isn't enabled, we only dump the classes in the boot loader dictionary
+ // into the shared archive.
+ if (UseAppCDS) {
+ Dictionary* master_dictionary = ClassLoaderData::the_null_class_loader_data()->dictionary();
+ CombineDictionariesClosure cdc(master_dictionary);
+ ClassLoaderDataGraph::cld_do(&cdc);
+ }
// These tables are no longer valid or necessary. Keeping them around will
// cause SystemDictionary::verify() to fail. Let's empty them.
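// Note: the UseAppCDS guard means the platform/system dictionaries are only
// merged into the boot dictionary when their classes are actually dumped;
// without AppCDS the boot dictionary is the only source, so the merge would
// be a no-op. The new is_anonymous() early-out presumably skips VM-anonymous
// classes because they are not findable by name and so cannot be shared.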
--- a/src/hotspot/share/classfile/systemDictionary.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -493,7 +493,7 @@
static void compute_java_loaders(TRAPS);
// Register a new class loader
- static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
+ static ClassLoaderData* register_loader(Handle class_loader);
protected:
// Mirrors for primitive classes (created eagerly)
static oop check_mirror(oop m) {
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -49,6 +49,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
@@ -497,7 +498,7 @@
// Fix for 4474172; see evaluation for more details
class_loader = Handle(
THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
- ClassLoaderData *loader_data = register_loader(class_loader, CHECK_NULL);
+ ClassLoaderData *loader_data = register_loader(class_loader);
Dictionary* dictionary = loader_data->dictionary();
unsigned int d_hash = dictionary->compute_hash(name);
--- a/src/hotspot/share/classfile/verificationType.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/verificationType.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
+#include "runtime/handles.inline.hpp"
VerificationType VerificationType::from_tag(u1 tag) {
switch (tag) {
--- a/src/hotspot/share/classfile/verificationType.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/verificationType.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
class ClassVerifier;
-class VerificationType VALUE_OBJ_CLASS_SPEC {
+class VerificationType {
private:
// Least significant bits of _handle are always 0, so we use these as
// the indicator that the _handle is valid. Otherwise, the _data field
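// Note: the VALUE_OBJ_CLASS_SPEC deletions that recur through the rest of
// this changeset all have the same shape. The macro historically expanded,
// in debug builds, to ": public _ValueObj", a base class whose operator
// new/delete abort in order to keep value types off the heap; with that
// mechanism retired from allocation.hpp, every use site becomes a plain
// class declaration:
//
//   class VerificationType VALUE_OBJ_CLASS_SPEC {  // before
//   class VerificationType {                       // after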
--- a/src/hotspot/share/classfile/verifier.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/verifier.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,6 +38,7 @@
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/constantPool.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.hpp"
--- a/src/hotspot/share/classfile/verifier.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/verifier.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,7 @@
#define CHECK_VERIFY_(verifier, result) \
CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0
-class TypeOrigin VALUE_OBJ_CLASS_SPEC {
+class TypeOrigin {
private:
typedef enum {
CF_LOCALS, // Comes from the current frame locals
@@ -146,7 +146,7 @@
#endif
};
-class ErrorContext VALUE_OBJ_CLASS_SPEC {
+class ErrorContext {
private:
typedef enum {
INVALID_BYTECODE, // There was a problem with the bytecode
--- a/src/hotspot/share/classfile/vmSymbols.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "jvm.h"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilerDirectives.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/code/codeBlob.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/codeBlob.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,7 +82,7 @@
class CodeBlobLayout;
-class CodeBlob VALUE_OBJ_CLASS_SPEC {
+class CodeBlob {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class CodeCacheDumper;
--- a/src/hotspot/share/code/codeCache.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/codeCache.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -36,7 +36,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
--- a/src/hotspot/share/code/compiledIC.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/compiledIC.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,9 +34,10 @@
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/code/compiledMethod.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/compiledMethod.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -28,8 +28,9 @@
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "prims/methodHandles.hpp"
-#include "interpreter/bytecode.hpp"
+#include "interpreter/bytecode.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/method.inline.hpp"
#include "runtime/mutexLocker.hpp"
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
--- a/src/hotspot/share/code/compiledMethod.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/compiledMethod.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,9 +54,9 @@
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
- int count() { return OrderAccess::load_acquire(&_count); }
+ int count();
// increment_count is only called under lock, but there may be concurrent readers.
- void increment_count() { OrderAccess::release_store(&_count, _count + 1); }
+ void increment_count();
public:
@@ -75,7 +75,7 @@
class nmethod;
// cache pc descs found in earlier inquiries
-class PcDescCache VALUE_OBJ_CLASS_SPEC {
+class PcDescCache {
friend class VMStructs;
private:
enum { cache_size = 4 };
@@ -109,7 +109,7 @@
PcDesc* scopes_pcs_end() const { return _upper; }
};
-class PcDescContainer VALUE_OBJ_CLASS_SPEC {
+class PcDescContainer {
private:
PcDescCache _pc_desc_cache;
public:
@@ -290,7 +290,7 @@
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
- void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
+ void release_set_exception_cache(ExceptionCache *ec);
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
--- a/src/hotspot/share/code/compiledMethod.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/compiledMethod.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,9 @@
;
}
+inline void CompiledMethod::release_set_exception_cache(ExceptionCache *ec) {
+ OrderAccess::release_store(&_exception_cache, ec);
+}
// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
@@ -56,4 +59,13 @@
return NULL;
}
+
+// class ExceptionCache methods
+
+inline int ExceptionCache::count() { return OrderAccess::load_acquire(&_count); }
+
+// increment_count is only called under lock, but there may be concurrent readers.
+inline void ExceptionCache::increment_count() { OrderAccess::release_store(&_count, _count + 1); }
+
+
#endif //SHARE_VM_CODE_COMPILEDMETHOD_INLINE_HPP
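// Note: this file shows the pattern behind the many .hpp to .inline.hpp
// moves in this changeset: member functions whose bodies need a heavyweight
// header (here OrderAccess) keep only their declaration in the .hpp, and
// callers switch to the .inline.hpp include, as the nmethod.cpp and
// compileBroker.cpp hunks below do with oops/method.inline.hpp. A minimal
// sketch of the idiom, with hypothetical names:
//
//   // widget.hpp: declaration only, no orderAccess dependency
//   class Widget { volatile int _count; public: int count(); };
//
//   // widget.inline.hpp: included only by translation units that call it
//   inline int Widget::count() { return OrderAccess::load_acquire(&_count); }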
--- a/src/hotspot/share/code/dependencies.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/dependencies.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -204,7 +204,7 @@
#if INCLUDE_JVMCI
// A Metadata* or object value recorded in an OopRecorder
- class DepValue VALUE_OBJ_CLASS_SPEC {
+ class DepValue {
private:
// Unique identifier of the value within the associated OopRecorder that
// encodes both the category of the value (0: invalid, positive: metadata, negative: object)
--- a/src/hotspot/share/code/exceptionHandlerTable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/exceptionHandlerTable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,7 @@
// modified.
class nmethod;
-class ExceptionHandlerTable VALUE_OBJ_CLASS_SPEC {
+class ExceptionHandlerTable {
private:
HandlerTableEntry* _table; // the table
int _length; // the current length of the table
@@ -140,7 +140,7 @@
// Use 32-bit representation for offsets
typedef uint implicit_null_entry;
-class ImplicitExceptionTable VALUE_OBJ_CLASS_SPEC {
+class ImplicitExceptionTable {
uint _size;
uint _len;
implicit_null_entry *_data;
--- a/src/hotspot/share/code/icBuffer.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/icBuffer.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,10 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/code/location.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/location.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "code/debugInfo.hpp"
#include "code/location.hpp"
+#include "runtime/handles.inline.hpp"
void Location::print_on(outputStream* st) const {
if(type() == invalid) {
--- a/src/hotspot/share/code/location.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/location.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
// Where: [4]
// Offset: [31..5]
-class Location VALUE_OBJ_CLASS_SPEC {
+class Location {
friend class VMStructs;
public:
enum Where {
--- a/src/hotspot/share/code/nmethod.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/nmethod.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -40,6 +40,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
--- a/src/hotspot/share/code/pcDesc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/pcDesc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
class CompiledMethod;
-class PcDesc VALUE_OBJ_CLASS_SPEC {
+class PcDesc {
friend class VMStructs;
private:
int _pc_offset; // offset from start of nmethod
--- a/src/hotspot/share/code/relocInfo.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/relocInfo.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_CODE_RELOCINFO_HPP
#define SHARE_VM_CODE_RELOCINFO_HPP
-#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
@@ -48,7 +47,7 @@
// oops in the code stream (strings, class loaders)
// Also, the source of relocation specs (oop_Relocation::spec, ...).
// RelocationHolder
-// A ValueObj type which acts as a union holding a Relocation object.
+// A value type which acts as a union holding a Relocation object.
// Represents a relocation spec passed into a CodeBuffer during assembly.
// RelocIterator
// A StackObj which iterates over the relocations associated with
@@ -252,7 +251,7 @@
class CodeSection;
class RelocIterator;
-class relocInfo VALUE_OBJ_CLASS_SPEC {
+class relocInfo {
friend class RelocIterator;
public:
enum relocType {
@@ -469,7 +468,7 @@
// Holder for flyweight relocation objects.
// Although the flyweight subclasses are of varying sizes,
// the holder is "one size fits all".
-class RelocationHolder VALUE_OBJ_CLASS_SPEC {
+class RelocationHolder {
friend class Relocation;
friend class CodeSection;
@@ -640,7 +639,7 @@
// It represents the relocation data of relocation record.
// So, the RelocIterator unpacks relocInfos into Relocations.
-class Relocation VALUE_OBJ_CLASS_SPEC {
+class Relocation {
friend class RelocationHolder;
friend class RelocIterator;
--- a/src/hotspot/share/code/stubs.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/code/stubs.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@
// <--+
-class Stub VALUE_OBJ_CLASS_SPEC {
+class Stub {
public:
// Initialization/finalization
void initialize(int size,
--- a/src/hotspot/share/compiler/abstractCompiler.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/compiler/abstractCompiler.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,10 +33,10 @@
#if INCLUDE_JVMCI
// Per-compiler statistics
-class CompilerStatistics VALUE_OBJ_CLASS_SPEC {
+class CompilerStatistics {
friend class VMStructs;
- class Data VALUE_OBJ_CLASS_SPEC {
+ class Data {
friend class VMStructs;
public:
elapsedTimer _time; // time spent compiling
--- a/src/hotspot/share/compiler/compileBroker.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,7 +39,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
--- a/src/hotspot/share/compiler/compilerDirectives.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/compiler/compilerDirectives.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/compilerOracle.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
CompilerDirectives::CompilerDirectives() :_match(NULL), _next(NULL), _ref_count(0) {
--- a/src/hotspot/share/compiler/oopMap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/compiler/oopMap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
--- a/src/hotspot/share/gc/cms/allocationStats.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/allocationStats.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,11 +27,10 @@
#include "gc/shared/gcUtil.hpp"
#include "logging/log.hpp"
-#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-class AllocationStats VALUE_OBJ_CLASS_SPEC {
+class AllocationStats {
// A duration threshold (in ms) used to filter
// possibly unreliable samples.
static float _threshold;
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -36,7 +36,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -48,7 +48,7 @@
class ObjectClosureCareful;
class Klass;
-class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
+class LinearAllocBlock {
public:
LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
_allocation_size_limit(0) {}
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -628,6 +628,7 @@
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
_gc_counters = new CollectorCounters("CMS", 1);
+ _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
_completed_initialization = true;
_inter_sweep_timer.start(); // start of time
}
@@ -5553,18 +5554,18 @@
void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
GCTraceCPUTime tcpu;
- TraceCollectorStats tcs(counters());
+ TraceCollectorStats tcs_cgc(cgc_counters());
switch (op) {
case CMS_op_checkpointRootsInitial: {
GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
- SvcGCMarker sgcm(SvcGCMarker::OTHER);
+ SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
checkpointRootsInitial();
break;
}
case CMS_op_checkpointRootsFinal: {
GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
- SvcGCMarker sgcm(SvcGCMarker::OTHER);
+ SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
checkpointRootsFinal();
break;
}
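
The CMS changes give the stop-the-world phases (initial mark and remark) their
own CollectorCounters instance, so the performance counters no longer fold
those pauses into the full-collection numbers, and retag the pauses as
SvcGCMarker::CONCURRENT for service-level GC notifications. A runnable toy
model of the counter split (stand-in types, not HotSpot code):

    #include <cstdio>

    struct CollectorCounters {
      const char* name;
      long invocations;
      explicit CollectorCounters(const char* n) : name(n), invocations(0) {}
    };

    struct TraceCollectorStats {             // RAII: one per GC pause
      CollectorCounters* c;
      explicit TraceCollectorStats(CollectorCounters* cc) : c(cc) { ++c->invocations; }
      ~TraceCollectorStats() { /* the real class also records elapsed time */ }
    };

    int main() {
      CollectorCounters full("CMS");                       // full collections
      CollectorCounters stw("CMS stop-the-world phases");  // initial mark / remark
      { TraceCollectorStats t(&stw); /* checkpointRootsInitial() */ }
      { TraceCollectorStats t(&stw); /* checkpointRootsFinal() */ }
      std::printf("%s: %ld pauses\n", stw.name, stw.invocations);
      return 0;
    }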
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -79,7 +79,7 @@
// we have _shifter == 0. and for the mod union table we have
// shifter == CardTable::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
-class CMSBitMap VALUE_OBJ_CLASS_SPEC {
+class CMSBitMap {
friend class VMStructs;
HeapWord* _bmStartWord; // base address of range covered by map
@@ -331,7 +331,7 @@
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
-class CMSStats VALUE_OBJ_CLASS_SPEC {
+class CMSStats {
private:
ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.
@@ -553,6 +553,7 @@
// Performance Counters
CollectorCounters* _gc_counters;
+ CollectorCounters* _cgc_counters;
// Initialization Errors
bool _completed_initialization;
@@ -927,7 +928,8 @@
NOT_PRODUCT(bool is_cms_reachable(HeapWord* addr);)
// Performance Counter Support
- CollectorCounters* counters() { return _gc_counters; }
+ CollectorCounters* counters() { return _gc_counters; }
+ CollectorCounters* cgc_counters() { return _cgc_counters; }
// Timer stuff
void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
--- a/src/hotspot/share/gc/cms/freeChunk.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/freeChunk.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_CMS_FREECHUNK_HPP
#define SHARE_VM_GC_CMS_FREECHUNK_HPP
-#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutex.hpp"
@@ -55,7 +54,7 @@
// but are not part of the free list and should not be coalesced into larger
// free blocks. These free blocks have their two LSB's set.
-class FreeChunk VALUE_OBJ_CLASS_SPEC {
+class FreeChunk {
friend class VMStructs;
// For 64 bit compressed oops, the markOop encodes both the size and the
// indication that this is a FreeChunk and not an object.
--- a/src/hotspot/share/gc/cms/promotionInfo.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/cms/promotionInfo.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,12 +26,11 @@
#define SHARE_VM_GC_CMS_PROMOTIONINFO_HPP
#include "gc/cms/freeChunk.hpp"
-#include "memory/allocation.hpp"
// Forward declarations
class CompactibleFreeListSpace;
-class PromotedObject VALUE_OBJ_CLASS_SPEC {
+class PromotedObject {
private:
enum {
promoted_mask = right_n_bits(2), // i.e. 0x3
@@ -114,7 +113,7 @@
void print() const { print_on(tty); }
};
-class PromotionInfo VALUE_OBJ_CLASS_SPEC {
+class PromotionInfo {
bool _tracking; // set if tracking
CompactibleFreeListSpace* _space; // the space to which this belongs
PromotedObject* _promoHead; // head of list of promoted objects
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/concurrentMarkThread.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,6 +38,7 @@
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/debug.hpp"
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -250,7 +250,7 @@
}
bool DirtyCardQueueSet::apply_closure_during_gc(CardTableEntryClosure* cl, uint worker_i) {
- assert_at_safepoint(false);
+ assert_at_safepoint();
return apply_closure_to_completed_buffer(cl, worker_i, 0, true);
}
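
The boolean-parameter forms assert_at_safepoint(false) and
assert_at_safepoint(true /* should_be_vm_thread */) read poorly at call sites,
so the patch switches to two named macros; the same substitution recurs
throughout the G1 files below. Their shape is roughly the following
(paraphrased, not copied from the patch):

    #define assert_at_safepoint()                                             \
      assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint")

    #define assert_at_safepoint_on_vm_thread()                                \
      do {                                                                    \
        assert_at_safepoint();                                                \
        assert(Thread::current_or_null() != NULL, "no current thread");       \
        assert(Thread::current()->is_VM_thread(), "should be the VM thread"); \
      } while (0)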
--- a/src/hotspot/share/gc/g1/g1AllocRegion.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -183,7 +183,6 @@
assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
_alloc_region = alloc_region;
- _alloc_region->set_allocation_context(allocation_context());
_count += 1;
trace("updated");
}
@@ -246,8 +245,7 @@
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
: _name(name), _bot_updates(bot_updates),
- _alloc_region(NULL), _count(0), _used_bytes_before(0),
- _allocation_context(AllocationContext::system()) { }
+ _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
--- a/src/hotspot/share/gc/g1/g1AllocRegion.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
// and a lock will need to be taken when the active region needs to be
// replaced.
-class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
+class G1AllocRegion {
private:
// The active allocating region we are currently allocating out
@@ -53,9 +53,6 @@
// correct use of init() and release()).
HeapRegion* volatile _alloc_region;
- // Allocation context associated with this alloc region.
- AllocationContext_t _allocation_context;
-
// It keeps track of the distinct number of regions that are used
// for allocation in the active interval of this object, i.e.,
// between a call to init() and a call to release(). The count
@@ -140,9 +137,6 @@
return (hr == _dummy_region) ? NULL : hr;
}
- void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
- AllocationContext_t allocation_context() { return _allocation_context; }
-
uint count() { return _count; }
// The following two are the building blocks for the allocation method.
--- a/src/hotspot/share/gc/g1/g1AllocationContext.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1ALLOCATIONCONTEXT_HPP
-#define SHARE_VM_GC_G1_G1ALLOCATIONCONTEXT_HPP
-
-#include "memory/allocation.hpp"
-
-typedef unsigned char AllocationContext_t;
-
-class AllocationContext : AllStatic {
-public:
- // Currently used context
- static AllocationContext_t current() {
- return 0;
- }
- // System wide default context
- static AllocationContext_t system() {
- return 0;
- }
-};
-
-#endif // SHARE_VM_GC_G1_G1ALLOCATIONCONTEXT_HPP
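
The deleted header explains why this long series of G1 hunks is safe:
AllocationContext was evidently a hook for out-of-tree variants (compare the
parallel deletion of g1Allocator_ext.cpp below), but in the stock sources both
current() and system() hard-code 0, so every AllocationContext_t parameter
threaded through the allocation paths carried no information. The net effect
on the APIs, in sketch form:

    typedef unsigned char AllocationContext_t;   // always 0 in stock builds

    // Before: callers fetched AllocationContext::current() and passed it down
    // through every layer:
    //   HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
    //
    // After: the parameter is simply gone, here and in every caller/override:
    //   HeapWord* attempt_allocation(size_t word_size);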
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@
}
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
_survivor_is_full = false;
_old_is_full = false;
@@ -100,41 +100,40 @@
}
void G1DefaultAllocator::release_gc_alloc_regions(EvacuationInfo& evacuation_info) {
- AllocationContext_t context = AllocationContext::current();
- evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
- old_gc_alloc_region(context)->count());
- survivor_gc_alloc_region(context)->release();
+ evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
+ old_gc_alloc_region()->count());
+ survivor_gc_alloc_region()->release();
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
// _retained_old_gc_alloc_region will become NULL. This is what we
// want either way so no reason to check explicitly for either
// condition.
- _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+ _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}
void G1DefaultAllocator::abandon_gc_alloc_regions() {
- assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
- assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+ assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
+ assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
_retained_old_gc_alloc_region = NULL;
}
-bool G1DefaultAllocator::survivor_is_full(AllocationContext_t context) const {
+bool G1DefaultAllocator::survivor_is_full() const {
return _survivor_is_full;
}
-bool G1DefaultAllocator::old_is_full(AllocationContext_t context) const {
+bool G1DefaultAllocator::old_is_full() const {
return _old_is_full;
}
-void G1DefaultAllocator::set_survivor_full(AllocationContext_t context) {
+void G1DefaultAllocator::set_survivor_full() {
_survivor_is_full = true;
}
-void G1DefaultAllocator::set_old_full(AllocationContext_t context) {
+void G1DefaultAllocator::set_old_full() {
_old_is_full = true;
}
-size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
+size_t G1Allocator::unsafe_max_tlab_alloc() {
// Return the remaining space in the cur alloc region, but not less than
// the min TLAB size.
@@ -142,7 +141,7 @@
// since we can't allow tlabs to grow big enough to accommodate
// humongous objects.
- HeapRegion* hr = mutator_alloc_region(context)->get();
+ HeapRegion* hr = mutator_alloc_region()->get();
size_t max_tlab = _g1h->max_tlab_size() * wordSize;
if (hr == NULL) {
return max_tlab;
@@ -152,10 +151,9 @@
}
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
- size_t word_size,
- AllocationContext_t context) {
+ size_t word_size) {
size_t temp = 0;
- HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, context);
+ HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
assert(result == NULL || temp == word_size,
"Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
word_size, temp, p2i(result));
@@ -165,13 +163,12 @@
HeapWord* G1Allocator::par_allocate_during_gc(InCSetState dest,
size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- AllocationContext_t context) {
+ size_t* actual_word_size) {
switch (dest.value()) {
case InCSetState::Young:
- return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
+ return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
case InCSetState::Old:
- return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size, context);
+ return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
@@ -180,21 +177,20 @@
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- AllocationContext_t context) {
+ size_t* actual_word_size) {
assert(!_g1h->is_humongous(desired_word_size),
"we should not be seeing humongous-size allocations in this path");
- HeapWord* result = survivor_gc_alloc_region(context)->attempt_allocation(min_word_size,
- desired_word_size,
- actual_word_size);
- if (result == NULL && !survivor_is_full(context)) {
+ HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
+ desired_word_size,
+ actual_word_size);
+ if (result == NULL && !survivor_is_full()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
- result = survivor_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
- desired_word_size,
- actual_word_size);
+ result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
+ desired_word_size,
+ actual_word_size);
if (result == NULL) {
- set_survivor_full(context);
+ set_survivor_full();
}
}
if (result != NULL) {
@@ -205,21 +201,20 @@
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- AllocationContext_t context) {
+ size_t* actual_word_size) {
assert(!_g1h->is_humongous(desired_word_size),
"we should not be seeing humongous-size allocations in this path");
- HeapWord* result = old_gc_alloc_region(context)->attempt_allocation(min_word_size,
- desired_word_size,
- actual_word_size);
- if (result == NULL && !old_is_full(context)) {
+ HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
+ desired_word_size,
+ actual_word_size);
+ if (result == NULL && !old_is_full()) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
- result = old_gc_alloc_region(context)->attempt_allocation_locked(min_word_size,
- desired_word_size,
- actual_word_size);
+ result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
+ desired_word_size,
+ actual_word_size);
if (result == NULL) {
- set_old_full(context);
+ set_old_full();
}
}
return result;
@@ -240,7 +235,6 @@
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
- AllocationContext_t context,
bool* plab_refill_failed) {
size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
@@ -250,15 +244,14 @@
if ((required_in_plab <= plab_word_size) &&
may_throw_away_buffer(required_in_plab, plab_word_size)) {
- PLAB* alloc_buf = alloc_buffer(dest, context);
+ PLAB* alloc_buf = alloc_buffer(dest);
alloc_buf->retire();
size_t actual_plab_size = 0;
HeapWord* buf = _allocator->par_allocate_during_gc(dest,
required_in_plab,
plab_word_size,
- &actual_plab_size,
- context);
+ &actual_plab_size);
assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
"Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
@@ -277,15 +270,15 @@
*plab_refill_failed = true;
}
// Try direct allocation.
- HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, context);
+ HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
if (result != NULL) {
_direct_allocated[dest.value()] += word_sz;
}
return result;
}
-void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
- alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
+void G1PLABAllocator::undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz) {
+ alloc_buffer(dest)->undo_allocation(obj, word_sz);
}
G1DefaultPLABAllocator::G1DefaultPLABAllocator(G1Allocator* allocator) :
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,6 @@
#define SHARE_VM_GC_G1_G1ALLOCATOR_HPP
#include "gc/g1/g1AllocRegion.hpp"
-#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/plab.hpp"
@@ -41,37 +40,33 @@
protected:
G1CollectedHeap* _g1h;
- virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
+ virtual MutatorAllocRegion* mutator_alloc_region() = 0;
- virtual bool survivor_is_full(AllocationContext_t context) const = 0;
- virtual bool old_is_full(AllocationContext_t context) const = 0;
+ virtual bool survivor_is_full() const = 0;
+ virtual bool old_is_full() const = 0;
- virtual void set_survivor_full(AllocationContext_t context) = 0;
- virtual void set_old_full(AllocationContext_t context) = 0;
+ virtual void set_survivor_full() = 0;
+ virtual void set_old_full() = 0;
// Accessors to the allocation regions.
- virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
- virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
+ virtual SurvivorGCAllocRegion* survivor_gc_alloc_region() = 0;
+ virtual OldGCAllocRegion* old_gc_alloc_region() = 0;
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- AllocationContext_t context);
+ size_t* actual_word_size);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- AllocationContext_t context);
+ size_t* actual_word_size);
public:
G1Allocator(G1CollectedHeap* heap) : _g1h(heap) { }
virtual ~G1Allocator() { }
- static G1Allocator* create_allocator(G1CollectedHeap* g1h);
-
#ifdef ASSERT
// Do we currently have an active mutator region to allocate into?
- bool has_mutator_alloc_region(AllocationContext_t context) { return mutator_alloc_region(context)->get() != NULL; }
+ bool has_mutator_alloc_region() { return mutator_alloc_region()->get() != NULL; }
#endif
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;
@@ -89,25 +84,23 @@
// Allocate blocks of memory during mutator time.
- inline HeapWord* attempt_allocation(size_t word_size, AllocationContext_t context);
- inline HeapWord* attempt_allocation_locked(size_t word_size, AllocationContext_t context);
- inline HeapWord* attempt_allocation_force(size_t word_size, AllocationContext_t context);
+ inline HeapWord* attempt_allocation(size_t word_size);
+ inline HeapWord* attempt_allocation_locked(size_t word_size);
+ inline HeapWord* attempt_allocation_force(size_t word_size);
- size_t unsafe_max_tlab_alloc(AllocationContext_t context);
+ size_t unsafe_max_tlab_alloc();
// Allocate blocks of memory during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(InCSetState dest,
- size_t word_size,
- AllocationContext_t context);
+ size_t word_size);
HeapWord* par_allocate_during_gc(InCSetState dest,
size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- AllocationContext_t context);
+ size_t* actual_word_size);
virtual size_t used_in_alloc_regions() = 0;
};
@@ -135,11 +128,11 @@
public:
G1DefaultAllocator(G1CollectedHeap* heap);
- virtual bool survivor_is_full(AllocationContext_t context) const;
- virtual bool old_is_full(AllocationContext_t context) const ;
+ virtual bool survivor_is_full() const;
+ virtual bool old_is_full() const;
- virtual void set_survivor_full(AllocationContext_t context);
- virtual void set_old_full(AllocationContext_t context);
+ virtual void set_survivor_full();
+ virtual void set_old_full();
virtual void init_mutator_alloc_region();
virtual void release_mutator_alloc_region();
@@ -152,15 +145,15 @@
return _retained_old_gc_alloc_region == hr;
}
- virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
+ virtual MutatorAllocRegion* mutator_alloc_region() {
return &_mutator_alloc_region;
}
- virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
+ virtual SurvivorGCAllocRegion* survivor_gc_alloc_region() {
return &_survivor_gc_alloc_region;
}
- virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
+ virtual OldGCAllocRegion* old_gc_alloc_region() {
return &_old_gc_alloc_region;
}
@@ -170,7 +163,7 @@
size_t result = 0;
// Read only once in case it is set to NULL concurrently
- HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+ HeapRegion* hr = mutator_alloc_region()->get();
if (hr != NULL) {
result += hr->used();
}
@@ -198,7 +191,7 @@
size_t _direct_allocated[InCSetState::Num];
virtual void flush_and_retire_stats() = 0;
- virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
+ virtual PLAB* alloc_buffer(InCSetState dest) = 0;
// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
@@ -215,16 +208,13 @@
}
HeapWord* allocate_new_plab(InCSetState dest,
- size_t word_sz,
- AllocationContext_t context);
+ size_t word_sz);
bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;
public:
G1PLABAllocator(G1Allocator* allocator);
virtual ~G1PLABAllocator() { }
- static G1PLABAllocator* create_allocator(G1Allocator* allocator);
-
virtual void waste(size_t& wasted, size_t& undo_wasted) = 0;
// Allocate word_sz words in dest, either directly into the regions or by
@@ -233,27 +223,24 @@
// PLAB failed or not.
HeapWord* allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
- AllocationContext_t context,
bool* plab_refill_failed);
// Allocate word_sz words in the PLAB of dest. Returns the address of the
// allocated memory, NULL if not successful.
inline HeapWord* plab_allocate(InCSetState dest,
- size_t word_sz,
- AllocationContext_t context);
+ size_t word_sz);
HeapWord* allocate(InCSetState dest,
size_t word_sz,
- AllocationContext_t context,
bool* refill_failed) {
- HeapWord* const obj = plab_allocate(dest, word_sz, context);
+ HeapWord* const obj = plab_allocate(dest, word_sz);
if (obj != NULL) {
return obj;
}
- return allocate_direct_or_new_plab(dest, word_sz, context, refill_failed);
+ return allocate_direct_or_new_plab(dest, word_sz, refill_failed);
}
- void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context);
+ void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz);
};
// The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor
@@ -266,7 +253,7 @@
public:
G1DefaultPLABAllocator(G1Allocator* _allocator);
- virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+ virtual PLAB* alloc_buffer(InCSetState dest) {
assert(dest.is_valid(),
"Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value());
assert(_alloc_buffers[dest.value()] != NULL,
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,25 +29,24 @@
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/shared/plab.inline.hpp"
-HeapWord* G1Allocator::attempt_allocation(size_t word_size, AllocationContext_t context) {
- return mutator_alloc_region(context)->attempt_allocation(word_size);
+HeapWord* G1Allocator::attempt_allocation(size_t word_size) {
+ return mutator_alloc_region()->attempt_allocation(word_size);
}
-HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size, AllocationContext_t context) {
- HeapWord* result = mutator_alloc_region(context)->attempt_allocation_locked(word_size);
- assert(result != NULL || mutator_alloc_region(context)->get() == NULL,
- "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(context)->get()));
+HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
+ HeapWord* result = mutator_alloc_region()->attempt_allocation_locked(word_size);
+ assert(result != NULL || mutator_alloc_region()->get() == NULL,
+ "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region()->get()));
return result;
}
-HeapWord* G1Allocator::attempt_allocation_force(size_t word_size, AllocationContext_t context) {
- return mutator_alloc_region(context)->attempt_allocation_force(word_size);
+HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
+ return mutator_alloc_region()->attempt_allocation_force(word_size);
}
inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest,
- size_t word_sz,
- AllocationContext_t context) {
- PLAB* buffer = alloc_buffer(dest, context);
+ size_t word_sz) {
+ PLAB* buffer = alloc_buffer(dest);
if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
return buffer->allocate(word_sz);
} else {
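
plab_allocate is the GC worker's fast path: a bump-pointer allocation in the
current PLAB, with allocate_direct_or_new_plab as the fallback when the buffer
cannot satisfy the request. A minimal model of that fast path (toy code,
ignoring the survivor-alignment branch shown above):

    #include <cstddef>

    struct ToyPLAB {
      char* top;   // next free byte
      char* end;   // one past the end of the buffer
      // Bump-pointer allocation; NULL tells the caller to refill or go direct.
      void* allocate(std::size_t bytes) {
        if (static_cast<std::size_t>(end - top) < bytes) return nullptr;
        void* result = top;
        top += bytes;
        return result;
      }
    };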
--- a/src/hotspot/share/gc/g1/g1Allocator_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1Allocator.inline.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-
-G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
- return new G1DefaultAllocator(g1h);
-}
-
-G1PLABAllocator* G1PLABAllocator::create_allocator(G1Allocator* allocator) {
- return new G1DefaultPLABAllocator(allocator);
-}
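
g1Allocator_ext.cpp existed only so closed extensions could substitute their
own classes behind the create_allocator factories; the matching static
declarations are also removed from g1Allocator.hpp above. With the hooks gone,
the heap can construct the default implementations directly; presumably the
call sites now read along these lines (sketch, not quoted from the patch):

    // Before: indirect, overridable construction.
    //   _allocator = G1Allocator::create_allocator(this);
    // After: direct construction of the only remaining implementation.
    //   _allocator = new G1DefaultAllocator(this);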
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/satbMarkQueue.hpp"
+#include "logging/log.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/thread.inline.hpp"
+
+G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
+ CardTableModRefBS(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
+ _dcqs(JavaThread::dirty_card_queue_set())
+{ }
+
+void G1BarrierSet::enqueue(oop pre_val) {
+ // Nulls should have been already filtered.
+ assert(oopDesc::is_oop(pre_val, true), "Error");
+
+ if (!JavaThread::satb_mark_queue_set().is_active()) return;
+ Thread* thr = Thread::current();
+ if (thr->is_Java_thread()) {
+ JavaThread* jt = (JavaThread*)thr;
+ jt->satb_mark_queue().enqueue(pre_val);
+ } else {
+ MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
+ JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
+ }
+}
+
+template <class T> void
+G1BarrierSet::write_ref_array_pre_work(T* dst, int count) {
+ if (!JavaThread::satb_mark_queue_set().is_active()) return;
+ T* elem_ptr = dst;
+ for (int i = 0; i < count; i++, elem_ptr++) {
+ T heap_oop = oopDesc::load_heap_oop(elem_ptr);
+ if (!oopDesc::is_null(heap_oop)) {
+ enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+ }
+ }
+}
+
+void G1BarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
+ if (!dest_uninitialized) {
+ write_ref_array_pre_work(dst, count);
+ }
+}
+
+void G1BarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
+ if (!dest_uninitialized) {
+ write_ref_array_pre_work(dst, count);
+ }
+}
+
+void G1BarrierSet::write_ref_field_post_slow(volatile jbyte* byte) {
+ // In the slow path, we know a card is not young
+ assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
+ OrderAccess::storeload();
+ if (*byte != G1CardTable::dirty_card_val()) {
+ *byte = G1CardTable::dirty_card_val();
+ Thread* thr = Thread::current();
+ if (thr->is_Java_thread()) {
+ JavaThread* jt = (JavaThread*)thr;
+ jt->dirty_card_queue().enqueue(byte);
+ } else {
+ MutexLockerEx x(Shared_DirtyCardQ_lock,
+ Mutex::_no_safepoint_check_flag);
+ _dcqs.shared_dirty_card_queue()->enqueue(byte);
+ }
+ }
+}
+
+void G1BarrierSet::invalidate(MemRegion mr) {
+ if (mr.is_empty()) {
+ return;
+ }
+ volatile jbyte* byte = _card_table->byte_for(mr.start());
+ jbyte* last_byte = _card_table->byte_for(mr.last());
+ Thread* thr = Thread::current();
+ // skip all consecutive young cards
+ for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
+
+ if (byte <= last_byte) {
+ OrderAccess::storeload();
+ // Enqueue if necessary.
+ if (thr->is_Java_thread()) {
+ JavaThread* jt = (JavaThread*)thr;
+ for (; byte <= last_byte; byte++) {
+ if (*byte == G1CardTable::g1_young_card_val()) {
+ continue;
+ }
+ if (*byte != G1CardTable::dirty_card_val()) {
+ *byte = G1CardTable::dirty_card_val();
+ jt->dirty_card_queue().enqueue(byte);
+ }
+ }
+ } else {
+ MutexLockerEx x(Shared_DirtyCardQ_lock,
+ Mutex::_no_safepoint_check_flag);
+ for (; byte <= last_byte; byte++) {
+ if (*byte == G1CardTable::g1_young_card_val()) {
+ continue;
+ }
+ if (*byte != G1CardTable::dirty_card_val()) {
+ *byte = G1CardTable::dirty_card_val();
+ _dcqs.shared_dirty_card_queue()->enqueue(byte);
+ }
+ }
+ }
+ }
+}
+
+void G1BarrierSet::on_thread_attach(JavaThread* thread) {
+ // This method initializes the SATB and dirty card queues before a
+ // JavaThread is added to the Java thread list. Right now, we don't
+ // have to do anything to the dirty card queue (it should have been
+ // activated when the thread was created), but we have to activate
+ // the SATB queue if the thread is created while a marking cycle is
+ // in progress. The activation / de-activation of the SATB queues at
+ // the beginning / end of a marking cycle is done during safepoints
+ // so we have to make sure this method is called outside one to be
+ // able to safely read the active field of the SATB queue set. Right
+ // now, it is called just before the thread is added to the Java
+ // thread list in the Threads::add() method. That method is holding
+ // the Threads_lock which ensures we are outside a safepoint. We
+ // cannot do the obvious and set the active field of the SATB queue
+ // when the thread is created given that, in some cases, safepoints
+ // might happen between the JavaThread constructor being called and the
+ // thread being added to the Java thread list (an example of this is
+ // when the structure for the DestroyJavaVM thread is created).
+ assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
+ assert(!thread->satb_mark_queue().is_active(), "SATB queue should not be active");
+ assert(thread->satb_mark_queue().is_empty(), "SATB queue should be empty");
+ assert(thread->dirty_card_queue().is_active(), "Dirty card queue should be active");
+
+ // If we are creating the thread during a marking cycle, we should
+ // set the active field of the SATB queue to true.
+ if (thread->satb_mark_queue_set().is_active()) {
+ thread->satb_mark_queue().set_active(true);
+ }
+}
+
+void G1BarrierSet::on_thread_detach(JavaThread* thread) {
+ // Flush any deferred card marks, SATB buffers and dirty card queue buffers
+ CardTableModRefBS::on_thread_detach(thread);
+ thread->satb_mark_queue().flush();
+ thread->dirty_card_queue().flush();
+}
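
The new file gathers G1's barrier machinery into one class: a SATB pre-barrier
(enqueue) that records overwritten references while concurrent marking is
active, and a card-table post-barrier (write_ref_field_post_slow, invalidate)
that dirties cards and queues them for refinement, skipping young cards since
young regions are always collected. A self-contained toy model of the two
halves (illustration only, not HotSpot code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct ToyG1Barriers {
      bool marking_active = false;
      std::vector<void*>    satb_queue;    // pre-barrier output
      std::vector<uint8_t*> dirty_cards;   // post-barrier output
      uint8_t cards[1024] = {0};           // 0 = clean, 1 = dirty, 2 = young

      // Pre-barrier: remember the value about to be overwritten.
      void pre_write(void* old_val) {
        if (marking_active && old_val != nullptr) satb_queue.push_back(old_val);
      }
      // Post-barrier: dirty the card covering the updated field.
      void post_write(std::size_t card_index) {
        uint8_t* card = &cards[card_index];
        if (*card == 2) return;                    // young: never refined
        if (*card != 1) { *card = 1; dirty_cards.push_back(card); }
      }
    };

The real post-barrier additionally issues a StoreLoad fence before re-reading
the card, as the hunk above shows, so the card re-read is properly ordered
against the reference store.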
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1BARRIERSET_HPP
+#define SHARE_VM_GC_G1_G1BARRIERSET_HPP
+
+#include "gc/shared/cardTableModRefBS.hpp"
+
+class DirtyCardQueueSet;
+class CardTable;
+class G1CardTable;
+
+// This barrier is specialized to use a logging barrier to support
+// snapshot-at-the-beginning marking.
+
+class G1BarrierSet: public CardTableModRefBS {
+ friend class VMStructs;
+ private:
+ DirtyCardQueueSet& _dcqs;
+
+ public:
+ G1BarrierSet(G1CardTable* table);
+ ~G1BarrierSet() { }
+
+ // Add "pre_val" to a set of objects that may have been disconnected from the
+ // pre-marking object graph.
+ static void enqueue(oop pre_val);
+
+ static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
+
+ template <class T> void write_ref_array_pre_work(T* dst, int count);
+ virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
+ virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
+
+ template <DecoratorSet decorators, typename T>
+ void write_ref_field_pre(T* field);
+
+ // NB: if you do a whole-heap invalidation, the "usual invariant" defined
+ // above no longer applies.
+ void invalidate(MemRegion mr);
+
+ void write_region(MemRegion mr) { invalidate(mr); }
+ void write_ref_array_work(MemRegion mr) { invalidate(mr); }
+
+ template <DecoratorSet decorators, typename T>
+ void write_ref_field_post(T* field, oop new_val);
+ void write_ref_field_post_slow(volatile jbyte* byte);
+
+ virtual void on_thread_attach(JavaThread* thread);
+ virtual void on_thread_detach(JavaThread* thread);
+
+ // Callbacks for runtime accesses.
+ template <DecoratorSet decorators, typename BarrierSetT = G1BarrierSet>
+ class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
+ typedef ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> ModRef;
+ typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
+
+ public:
+ // Needed for loads on non-heap weak references
+ template <typename T>
+ static oop oop_load_not_in_heap(T* addr);
+
+ // Needed for non-heap stores
+ template <typename T>
+ static void oop_store_not_in_heap(T* addr, oop new_value);
+
+ // Needed for weak references
+ static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
+
+ // Defensive: will catch weak oops at addresses in heap
+ template <typename T>
+ static oop oop_load_in_heap(T* addr);
+ };
+};
+
+template<>
+struct BarrierSet::GetName<G1BarrierSet> {
+ static const BarrierSet::Name value = BarrierSet::G1BarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::G1BarrierSet> {
+ typedef ::G1BarrierSet type;
+};
+
+#endif // SHARE_VM_GC_G1_G1BARRIERSET_HPP
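
The two template specializations at the end plug G1BarrierSet into
BarrierSet's hand-rolled RTTI: they map the concrete class to its enum tag and
back, which is what lets barrier_set_cast<G1BarrierSet>() verify the stored
tag before downcasting, without enabling C++ RTTI. A compact standalone model
of the idea (names simplified):

    #include <cassert>

    struct BarrierSetBase {
      enum Name { CardTableModRef, G1 };
      explicit BarrierSetBase(Name n) : _name(n) {}
      Name _name;
      template <class T> struct GetName;       // specialized per concrete set
    };

    struct G1Set : BarrierSetBase {
      G1Set() : BarrierSetBase(G1) {}
    };

    template <> struct BarrierSetBase::GetName<G1Set> {
      static const Name value = G1;
    };

    template <class T> T* barrier_set_cast(BarrierSetBase* bs) {
      assert(bs->_name == BarrierSetBase::GetName<T>::value);  // checked downcast
      return static_cast<T*>(bs);
    }

    int main() {
      G1Set g1;
      G1Set* checked = barrier_set_cast<G1Set>(&g1);   // tag matches: OK
      (void)checked;
      return 0;
    }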
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1BARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_G1_G1BARRIERSET_INLINE_HPP
+
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/shared/accessBarrierSupport.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+template <DecoratorSet decorators, typename T>
+inline void G1BarrierSet::write_ref_field_pre(T* field) {
+ if (HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value ||
+ HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
+ return;
+ }
+
+ T heap_oop = RawAccess<MO_VOLATILE>::oop_load(field);
+ if (!oopDesc::is_null(heap_oop)) {
+ enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+ }
+}
+
+template <DecoratorSet decorators, typename T>
+inline void G1BarrierSet::write_ref_field_post(T* field, oop new_val) {
+ volatile jbyte* byte = _card_table->byte_for(field);
+ if (*byte != G1CardTable::g1_young_card_val()) {
+ // Take a slow path for cards in old
+ write_ref_field_post_slow(byte);
+ }
+}
+
+inline void G1BarrierSet::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
+ assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
+ // Archive roots need to be enqueued since they add subgraphs to the
+ // Java heap that were not there at the snapshot when marking started.
+ // Weak and phantom references also need enqueueing for similar reasons.
+ const bool in_archive_root = (decorators & IN_ARCHIVE_ROOT) != 0;
+ const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
+ const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
+ const bool needs_enqueue = in_archive_root || (!peek && !on_strong_oop_ref);
+
+ if (needs_enqueue && value != NULL) {
+ enqueue(value);
+ }
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_load_not_in_heap(T* addr) {
+ oop value = ModRef::oop_load_not_in_heap(addr);
+ enqueue_if_weak_or_archive(decorators, value);
+ return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_load_in_heap(T* addr) {
+ oop value = ModRef::oop_load_in_heap(addr);
+ enqueue_if_weak_or_archive(decorators, value);
+ return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_load_in_heap_at(oop base, ptrdiff_t offset) {
+ oop value = ModRef::oop_load_in_heap_at(base, offset);
+ enqueue_if_weak_or_archive(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
+ return value;
+}
+
+template <DecoratorSet decorators, typename BarrierSetT>
+template <typename T>
+inline void G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
+oop_store_not_in_heap(T* addr, oop new_value) {
+ if (HasDecorator<decorators, IN_CONCURRENT_ROOT>::value) {
+ // For roots not scanned in a safepoint, we have to apply SATB barriers
+ // even for roots.
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->write_ref_field_pre<decorators>(addr);
+ }
+ Raw::oop_store(addr, new_value);
+}
+
+#endif // SHARE_VM_GC_G1_G1BARRIERSET_INLINE_HPP
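
enqueue_if_weak_or_archive encodes a small decision table: loads from archive
roots and through non-strong (weak/phantom) references must feed the SATB
queue, because they can hand out objects the marking snapshot never saw, while
strong loads and explicit AS_NO_KEEPALIVE "peeks" must not. The predicate,
isolated as a standalone function (same logic as the needs_enqueue expression
above):

    // Mirrors: in_archive_root || (!peek && !on_strong_oop_ref)
    bool needs_enqueue(bool in_archive_root, bool on_strong_oop_ref, bool peek) {
      if (in_archive_root) return true;   // subgraph added after the snapshot
      if (peek)            return false;  // AS_NO_KEEPALIVE: caller opted out
      return !on_strong_oop_ref;          // weak/phantom loads resurrect objects
    }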
--- a/src/hotspot/share/gc/g1/g1BiasedArray.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BiasedArray.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,13 @@
#ifndef SHARE_VM_GC_G1_G1BIASEDARRAY_HPP
#define SHARE_VM_GC_G1_G1BIASEDARRAY_HPP
-#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "utilities/debug.hpp"
// Implements the common base functionality for arrays that contain provisions
// for accessing its elements using a biased index.
// The element type is defined by the instantiating the template.
-class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
+class G1BiasedMappedArrayBase {
friend class VMStructs;
public:
typedef size_t idx_t;
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,7 +109,7 @@
}
};
-class G1BlockOffsetTablePart VALUE_OBJ_CLASS_SPEC {
+class G1BlockOffsetTablePart {
friend class G1BlockOffsetTable;
friend class VMStructs;
private:
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -94,7 +94,7 @@
// Helper class that provides functionality to generate the Live Data Count
// information.
-class G1CardLiveDataHelper VALUE_OBJ_CLASS_SPEC {
+class G1CardLiveDataHelper {
private:
BitMapView _region_bm;
BitMapView _card_bm;
--- a/src/hotspot/share/gc/g1/g1CardLiveData.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
// Can be used for various purposes, like as remembered set for completely
// coarsened remembered sets, scrubbing remembered sets or estimating liveness.
// This information is created as part of the concurrent marking cycle.
-class G1CardLiveData VALUE_OBJ_CLASS_SPEC {
+class G1CardLiveData {
friend class G1CardLiveDataHelper;
friend class G1VerifyCardLiveDataTask;
private:
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,6 @@
#ifndef SHARE_VM_GC_G1_G1CODECACHEREMSET_HPP
#define SHARE_VM_GC_G1_G1CODECACHEREMSET_HPP
-#include "memory/allocation.hpp"
-
class CodeBlobClosure;
class G1CodeRootSetTable;
class HeapRegion;
@@ -34,7 +32,7 @@
// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
-class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
+class G1CodeRootSet {
friend class G1CodeRootSetTest;
private:
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -52,7 +52,6 @@
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@@ -81,6 +80,7 @@
#include "oops/oop.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/threadSMR.hpp"
@@ -237,8 +237,7 @@
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
- size_t word_size,
- AllocationContext_t context) {
+ size_t word_size) {
assert(first != G1_NO_HRM_INDEX, "pre-condition");
assert(is_humongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
@@ -302,14 +301,12 @@
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_starts_humongous(obj_top, word_fill_size);
- first_hr->set_allocation_context(context);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (uint i = first + 1; i <= last; ++i) {
hr = region_at(i);
hr->set_continues_humongous(first_hr);
- hr->set_allocation_context(context);
}
// Up to this point no concurrent thread would have been able to
@@ -362,7 +359,7 @@
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
_verifier->verify_region_sets_optional();
@@ -428,8 +425,7 @@
HeapWord* result = NULL;
if (first != G1_NO_HRM_INDEX) {
- result = humongous_obj_allocate_initialize_regions(first, obj_regions,
- word_size, context);
+ result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
assert(result != NULL, "it should always return a valid result");
// A successful humongous object allocation changes the used space
@@ -461,8 +457,7 @@
return attempt_allocation(word_size);
}
-HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
- AllocationContext_t context) {
+HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
// Make sure you read the note in attempt_allocation_humongous().
@@ -485,7 +480,7 @@
{
MutexLockerEx x(Heap_lock);
- result = _allocator->attempt_allocation_locked(word_size, context);
+ result = _allocator->attempt_allocation_locked(word_size);
if (result != NULL) {
return result;
}
@@ -496,7 +491,7 @@
if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
- result = _allocator->attempt_allocation_force(word_size, context);
+ result = _allocator->attempt_allocation_force(word_size);
if (result != NULL) {
return result;
}
@@ -553,7 +548,7 @@
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
- result = _allocator->attempt_allocation(word_size, context);
+ result = _allocator->attempt_allocation(word_size);
if (result != NULL) {
return result;
}
@@ -571,7 +566,7 @@
}
void G1CollectedHeap::begin_archive_alloc_range(bool open) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
if (_archive_allocator == NULL) {
_archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
}
@@ -585,7 +580,7 @@
}
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
if (is_archive_alloc_too_large(word_size)) {
return NULL;
@@ -595,7 +590,7 @@
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
// Call complete_archive to do the real work, filling in the MemRegion
@@ -685,7 +680,7 @@
}
// Mark each G1 region touched by the range as archive, add it to
- // the old set, and set the allocation context and top.
+ // the old set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
prev_last_region = last_region;
@@ -693,7 +688,6 @@
while (curr_region != NULL) {
assert(curr_region->is_empty() && !curr_region->is_pinned(),
"Region already in use (index %u)", curr_region->hrm_index());
- curr_region->set_allocation_context(AllocationContext::system());
if (open) {
curr_region->set_open_archive();
} else {
@@ -788,11 +782,10 @@
assert(!is_humongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
- AllocationContext_t context = AllocationContext::current();
- HeapWord* result = _allocator->attempt_allocation(word_size, context);
+ HeapWord* result = _allocator->attempt_allocation(word_size);
if (result == NULL) {
- result = attempt_allocation_slow(word_size, context);
+ result = attempt_allocation_slow(word_size);
}
assert_heap_not_locked();
if (result != NULL) {
@@ -917,7 +910,7 @@
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
- result = humongous_obj_allocate(word_size, AllocationContext::current());
+ result = humongous_obj_allocate(word_size);
if (result != NULL) {
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
@@ -989,16 +982,15 @@
}
HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
- AllocationContext_t context,
bool expect_null_mutator_alloc_region) {
- assert_at_safepoint(true /* should_be_vm_thread */);
- assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
+ assert_at_safepoint_on_vm_thread();
+ assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-NULL");
if (!is_humongous(word_size)) {
- return _allocator->attempt_allocation_locked(word_size, context);
+ return _allocator->attempt_allocation_locked(word_size);
} else {
- HeapWord* result = humongous_obj_allocate(word_size, context);
+ HeapWord* result = humongous_obj_allocate(word_size);
if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
collector_state()->set_initiate_conc_mark_if_possible(true);
}
@@ -1082,7 +1074,7 @@
void G1CollectedHeap::prepare_heap_for_mutators() {
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
- MetaspaceAux::verify_metrics();
+ MetaspaceUtils::verify_metrics();
// Prepare heap for normal collections.
assert(num_free_regions() == 0, "we should not have added any free regions");
@@ -1162,7 +1154,7 @@
bool G1CollectedHeap::do_full_collection(bool explicit_gc,
bool clear_all_soft_refs) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
if (GCLocker::check_active_before_gc()) {
// Full GC was not completed.
@@ -1270,7 +1262,6 @@
}
HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
- AllocationContext_t context,
bool do_gc,
bool clear_all_soft_refs,
bool expect_null_mutator_alloc_region,
@@ -1279,7 +1270,6 @@
// Let's attempt the allocation first.
HeapWord* result =
attempt_allocation_at_safepoint(word_size,
- context,
expect_null_mutator_alloc_region);
if (result != NULL) {
return result;
@@ -1289,7 +1279,7 @@
// incremental pauses. Therefore, at least for now, we'll favor
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
- result = expand_and_allocate(word_size, context);
+ result = expand_and_allocate(word_size);
if (result != NULL) {
return result;
}
@@ -1304,14 +1294,12 @@
}
HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
- AllocationContext_t context,
bool* succeeded) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
// Attempts to allocate followed by Full GC.
HeapWord* result =
satisfy_failed_allocation_helper(word_size,
- context,
true, /* do_gc */
false, /* clear_all_soft_refs */
false, /* expect_null_mutator_alloc_region */
@@ -1323,7 +1311,6 @@
// Attempts to allocate followed by Full GC that will collect all soft references.
result = satisfy_failed_allocation_helper(word_size,
- context,
true, /* do_gc */
true, /* clear_all_soft_refs */
true, /* expect_null_mutator_alloc_region */
@@ -1335,7 +1322,6 @@
// Attempts to allocate, no GC
result = satisfy_failed_allocation_helper(word_size,
- context,
false, /* do_gc */
false, /* clear_all_soft_refs */
true, /* expect_null_mutator_alloc_region */
@@ -1360,8 +1346,8 @@
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
-HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+ assert_at_safepoint_on_vm_thread();
_verifier->verify_region_sets_optional();
@@ -1374,7 +1360,6 @@
_hrm.verify_optional();
_verifier->verify_region_sets_optional();
return attempt_allocation_at_safepoint(word_size,
- context,
false /* expect_null_mutator_alloc_region */);
}
return NULL;
@@ -1474,7 +1459,7 @@
_old_pool(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
- _g1_policy(create_g1_policy(_gc_timer_stw)),
+ _g1_policy(new G1Policy(_gc_timer_stw)),
_collection_set(this, _g1_policy),
_dirty_card_queue_set(false),
_is_alive_closure_cm(this),
@@ -1509,7 +1494,7 @@
_workers->initialize_workers();
_verifier = new G1HeapVerifier(this);
- _allocator = G1Allocator::create_allocator(this);
+ _allocator = new G1DefaultAllocator(this);
_heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
@@ -1620,9 +1605,9 @@
// Create the barrier set for the entire reserved region.
G1CardTable* ct = new G1CardTable(reserved_region());
ct->initialize();
- G1SATBCardTableLoggingModRefBS* bs = new G1SATBCardTableLoggingModRefBS(ct);
+ G1BarrierSet* bs = new G1BarrierSet(ct);
bs->initialize();
- assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
+ assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
set_barrier_set(bs);
_card_table = ct;
@@ -2025,8 +2010,7 @@
for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
// Let's use the existing mechanism for the allocation
- HeapWord* dummy_obj = humongous_obj_allocate(word_size,
- AllocationContext::system());
+ HeapWord* dummy_obj = humongous_obj_allocate(word_size);
if (dummy_obj != NULL) {
MemRegion mr(dummy_obj, word_size);
CollectedHeap::fill_with_object(mr);
@@ -2124,8 +2108,7 @@
gc_count_before,
cause,
true, /* should_initiate_conc_mark */
- g1_policy()->max_pause_time_ms(),
- AllocationContext::current());
+ g1_policy()->max_pause_time_ms());
VMThread::execute(&op);
if (!op.pause_succeeded()) {
if (old_marking_count_before == _old_marking_cycles_started) {
@@ -2152,8 +2135,7 @@
gc_count_before,
cause,
false, /* should_initiate_conc_mark */
- g1_policy()->max_pause_time_ms(),
- AllocationContext::current());
+ g1_policy()->max_pause_time_ms());
VMThread::execute(&op);
} else {
// Schedule a Full GC.
@@ -2266,8 +2248,7 @@
}
size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
- AllocationContext_t context = AllocationContext::current();
- return _allocator->unsafe_max_tlab_alloc(context);
+ return _allocator->unsafe_max_tlab_alloc();
}
size_t G1CollectedHeap::max_capacity() const {
@@ -2364,14 +2345,13 @@
st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
(size_t) survivor_regions * HeapRegion::GrainBytes / K);
st->cr();
- MetaspaceAux::print_on(st);
+ MetaspaceUtils::print_on(st);
}
void G1CollectedHeap::print_regions_on(outputStream* st) const {
st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, A=archive, TS=gc time stamp, "
- "AC=allocation context, "
"TAMS=top-at-mark-start (previous, next)");
PrintRegionClosure blk(st);
heap_region_iterate(&blk);
@@ -2566,8 +2546,7 @@
gc_count_before,
gc_cause,
false, /* should_initiate_conc_mark */
- g1_policy()->max_pause_time_ms(),
- AllocationContext::current());
+ g1_policy()->max_pause_time_ms());
VMThread::execute(&op);
HeapWord* result = op.result();
@@ -2838,7 +2817,7 @@
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
guarantee(!is_gc_active(), "collection is not reentrant");
if (GCLocker::check_active_before_gc()) {
@@ -3027,7 +3006,7 @@
pre_evacuate_collection_set();
// Actually do the work...
- evacuate_collection_set(evacuation_info, &per_thread_states);
+ evacuate_collection_set(&per_thread_states);
post_evacuate_collection_set(evacuation_info, &per_thread_states);
@@ -3412,7 +3391,7 @@
size_t symbols_removed() const { return (size_t)_symbols_removed; }
};
-class G1CodeCacheUnloadingTask VALUE_OBJ_CLASS_SPEC {
+class G1CodeCacheUnloadingTask {
private:
static Monitor* _lock;
@@ -4274,7 +4253,7 @@
}
}
-void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+void G1CollectedHeap::evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states) {
// Should G1EvacuationFailureALot be in effect for this GC?
NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
@@ -4868,7 +4847,7 @@
};
void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
if (!G1EagerReclaimHumongousObjects ||
(!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
@@ -5024,7 +5003,7 @@
};
void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
if (!free_list_only) {
TearDownRegionSetsClosure cl(&_old_set);
@@ -5075,7 +5054,6 @@
if (r->is_empty()) {
// Add free regions to the free list
r->set_free();
- r->set_allocation_context(AllocationContext::system());
_hrm->insert_into_free_list(r);
} else if (!_free_list_only) {
@@ -5099,7 +5077,7 @@
};
void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
- assert_at_safepoint(true /* should_be_vm_thread */);
+ assert_at_safepoint_on_vm_thread();
if (!free_list_only) {
_eden.clear();
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,7 @@
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_HPP
#include "gc/g1/evacuationInfo.hpp"
-#include "gc/g1/g1AllocationContext.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
@@ -40,7 +40,6 @@
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1InCSetState.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/heapRegionManager.hpp"
@@ -301,9 +300,6 @@
// this method will be found dead by the marking cycle).
void allocate_dummy_regions() PRODUCT_RETURN;
- // Clear RSets after a compaction. It also resets the GC time stamps.
- void clear_rsets_post_compaction();
-
// If the HR printer is active, dump the state of the regions in the
// heap after a compaction.
void print_hrm_post_compaction();
@@ -314,8 +310,6 @@
size_t size,
size_t translation_factor);
- static G1Policy* create_g1_policy(STWGCTimer* gc_timer);
-
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
// These are macros so that, if the assert fires, we get the correct
@@ -365,21 +359,13 @@
"should not be at a safepoint")); \
} while (0)
-#define assert_at_safepoint(_should_be_vm_thread_) \
+#define assert_at_safepoint_on_vm_thread() \
do { \
- assert(SafepointSynchronize::is_at_safepoint() && \
- ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
- heap_locking_asserts_params("should be at a safepoint")); \
+ assert_at_safepoint(); \
+ assert(Thread::current_or_null() != NULL, "no current thread"); \
+ assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
} while (0)
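The new macro folds the old boolean argument into its name, so call sites read as a statement of intent rather than a bare true. A self-contained sketch with stand-in predicates (in HotSpot the real checks come from SafepointSynchronize and Thread):

  #include <cassert>

  static bool is_at_safepoint() { return true; }  // stand-in
  static bool is_vm_thread()    { return true; }  // stand-in

  #define assert_at_safepoint_on_vm_thread()                   \
    do {                                                       \
      assert(is_at_safepoint() && "should be at a safepoint"); \
      assert(is_vm_thread() && "should be in the VM thread");  \
    } while (0)

  void vm_operation_helper() {
    assert_at_safepoint_on_vm_thread();  // was: assert_at_safepoint(true /* should_be_vm_thread */);
  }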
-#define assert_not_at_safepoint() \
- do { \
- assert(!SafepointSynchronize::is_at_safepoint(), \
- heap_locking_asserts_params("should not be at a safepoint")); \
- } while (0)
-
-protected:
-
// The young region list.
G1EdenRegions _eden;
G1SurvivorRegions _survivor;
@@ -413,12 +399,11 @@
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
- size_t word_size,
- AllocationContext_t context);
+ size_t word_size);
// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
- HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
+ HeapWord* humongous_obj_allocate(size_t word_size);
// The following two methods, allocate_new_tlab() and
// mem_allocate(), are the two main entry points from the runtime
@@ -462,8 +447,7 @@
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
- HeapWord* attempt_allocation_slow(size_t word_size,
- AllocationContext_t context);
+ HeapWord* attempt_allocation_slow(size_t word_size);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
@@ -474,7 +458,6 @@
// specifies whether the mutator alloc region is expected to be NULL
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
- AllocationContext_t context,
bool expect_null_mutator_alloc_region);
// These methods are the "callbacks" from the G1AllocRegion class.
@@ -509,9 +492,7 @@
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t word_size,
- AllocationContext_t context,
bool* succeeded);
-private:
// Internal helpers used during full GC to split it up to
// increase readability.
void abort_concurrent_cycle();
@@ -524,18 +505,16 @@
// Helper method for satisfy_failed_allocation()
HeapWord* satisfy_failed_allocation_helper(size_t word_size,
- AllocationContext_t context,
bool do_gc,
bool clear_all_soft_refs,
bool expect_null_mutator_alloc_region,
bool* gc_succeeded);
-protected:
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
- HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
+ HeapWord* expand_and_allocate(size_t word_size);
// Preserve any referents discovered by concurrent marking that have not yet been
// copied by the STW pause.
@@ -607,9 +586,6 @@
void register_old_region_with_cset(HeapRegion* r) {
_in_cset_fast_test.set_in_old(r->hrm_index());
}
- inline void register_ext_region_with_cset(HeapRegion* r) {
- _in_cset_fast_test.set_ext(r->hrm_index());
- }
void clear_in_cset(const HeapRegion* hr) {
_in_cset_fast_test.clear(hr);
}
@@ -728,11 +704,11 @@
// mapping failed, with the same non-overlapping and sorted MemRegion array.
void dealloc_archive_regions(MemRegion* range, size_t count);
-protected:
+private:
// Shrink the garbage-first heap by at most the given size (in bytes!).
// (Rounds down to a HeapRegion boundary.)
- virtual void shrink(size_t expand_bytes);
+ void shrink(size_t expand_bytes);
void shrink_helper(size_t expand_bytes);
#if TASKQUEUE_STATS
@@ -764,7 +740,7 @@
bool do_collection_pause_at_safepoint(double target_pause_time_ms);
// Actually do the work of evacuating the collection set.
- virtual void evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states);
+ void evacuate_collection_set(G1ParScanThreadStateSet* per_thread_states);
void pre_evacuate_collection_set();
void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
@@ -1174,10 +1150,6 @@
virtual bool is_in_closed_subset(const void* p) const;
- G1SATBCardTableLoggingModRefBS* g1_barrier_set() {
- return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
- }
-
G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
G1CardTable* card_table() const {
@@ -1472,7 +1444,7 @@
public:
size_t pending_card_num();
-protected:
+private:
size_t _max_heap_capacity;
};
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,10 +25,10 @@
#ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.hpp"
@@ -85,12 +85,12 @@
}
inline void G1CollectedHeap::reset_gc_time_stamp() {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
_gc_time_stamp = 0;
}
inline void G1CollectedHeap::increment_gc_time_stamp() {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
++_gc_time_stamp;
}
--- a/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1DefaultPolicy.hpp"
-#include "gc/g1/g1ParScanThreadState.hpp"
-#include "gc/g1/heapRegion.inline.hpp"
-
-class STWGCTimer;
-
-G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
- return new G1DefaultPolicy(gc_timer);
-}
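This factory was the deleted file's only content: an extension hook that let alternate builds substitute their own policy. With G1DefaultPolicy folded into G1Policy, the heap constructor builds the policy directly (see the new G1Policy(_gc_timer_stw) hunk above), removing the indirection:

  // Before: resolved per build variant in g1CollectedHeap_ext.cpp.
  //   _g1_policy(create_g1_policy(_gc_timer_stw))
  // After: direct construction, no extension point.
  //   _g1_policy(new G1Policy(_gc_timer_stw))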
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length) {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
_eden_region_length = eden_cset_region_length;
_survivor_region_length = survivor_cset_region_length;
@@ -103,7 +103,7 @@
// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
assert(_inc_build_state == Active, "Precondition");
assert(hr->is_old(), "the region should be old");
@@ -167,7 +167,7 @@
}
void G1CollectionSet::clear() {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
_collection_set_cur_length = 0;
}
@@ -314,7 +314,7 @@
};
bool G1CollectionSet::verify_young_ages() {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
G1VerifyYoungAgesClosure cl;
iterate(&cl);
@@ -541,7 +541,7 @@
};
void G1CollectionSet::verify_young_cset_indices() const {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
iterate(&cl);
--- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
#include "gc/g1/collectionSetChooser.hpp"
-#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -37,7 +36,7 @@
class G1SurvivorRegions;
class HeapRegion;
-class G1CollectionSet VALUE_OBJ_CLASS_SPEC {
+class G1CollectionSet {
G1CollectedHeap* _g1;
G1Policy* _policy;
--- a/src/hotspot/share/gc/g1/g1CollectorState.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectorState.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,12 +26,11 @@
#define SHARE_VM_GC_G1_G1COLLECTORSTATE_HPP
#include "gc/g1/g1YCTypes.hpp"
-#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
// Various state variables that indicate
// the phase of the G1 collection.
-class G1CollectorState VALUE_OBJ_CLASS_SPEC {
+class G1CollectorState {
// Indicates whether we are in "full young" or "mixed" GC mode.
bool _gcs_are_young;
// Was the last GC "young"?
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1013,8 +1013,6 @@
return;
}
- SvcGCMarker sgcm(SvcGCMarker::OTHER);
-
if (VerifyDuringGC) {
g1h->verifier()->verify(G1HeapVerifier::G1VerifyRemark, VerifyOption_G1UsePrevMarking, "During GC (before)");
}
@@ -1861,7 +1859,7 @@
}
#ifndef PRODUCT
-class VerifyNoCSetOops VALUE_OBJ_CLASS_SPEC {
+class VerifyNoCSetOops {
private:
G1CollectedHeap* _g1h;
const char* _phase;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/taskqueue.hpp"
+#include "memory/allocation.hpp"
class ConcurrentGCTimer;
class ConcurrentMarkThread;
@@ -47,7 +48,7 @@
// This is a container class for either an oop or a continuation address for
// mark stack entries. Both are pushed onto the mark stack.
-class G1TaskQueueEntry VALUE_OBJ_CLASS_SPEC {
+class G1TaskQueueEntry {
private:
void* _holder;
@@ -127,7 +128,7 @@
// Memory management is done using a mix of tracking a high water-mark indicating
// that all chunks at a lower address are valid chunks, and a singly linked free
// list connecting all empty chunks.
-class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
+class G1CMMarkStack {
public:
// Number of TaskQueueEntries that can fit in a single chunk.
static const size_t EntriesPerChunk = 1024 - 1 /* One reference for the next pointer */;
@@ -227,7 +228,7 @@
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
-class G1CMRootRegions VALUE_OBJ_CLASS_SPEC {
+class G1CMRootRegions {
private:
const G1SurvivorRegions* _survivors;
G1ConcurrentMark* _cm;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
#ifndef PRODUCT
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
- assert_at_safepoint(true);
+ assert_at_safepoint_on_vm_thread();
size_t num_chunks = 0;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_HPP
#include "gc/g1/g1RegionToSpaceMapper.hpp"
-#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/bitMap.hpp"
@@ -39,7 +38,7 @@
class HeapRegion;
// Closure for iteration over bitmaps
-class G1CMBitMapClosure VALUE_OBJ_CLASS_SPEC {
+class G1CMBitMapClosure {
private:
G1ConcurrentMark* const _cm;
G1CMTask* const _task;
@@ -62,7 +61,7 @@
// A generic mark bitmap for concurrent marking. This is essentially a wrapper
// around the BitMap class that is based on HeapWords, with one bit per (1 << _shifter) HeapWords.
-class G1CMBitMap VALUE_OBJ_CLASS_SPEC {
+class G1CMBitMap {
private:
MemRegion _covered; // The heap area covered by this bitmap.
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define SHARE_VM_GC_G1_G1CONCURRENTMARKOBJARRAYPROCESSOR_HPP
#include "oops/oopsHierarchy.hpp"
-#include "memory/allocation.hpp"
class G1CMTask;
@@ -34,7 +33,7 @@
// Instead of pushing large object arrays, we push continuations onto the
// mark stack. These continuations are identified by having their LSB set.
// This allows incremental processing of large objects.
-class G1CMObjArrayProcessor VALUE_OBJ_CLASS_SPEC {
+class G1CMObjArrayProcessor {
private:
// Reference to the task for doing the actual work.
G1CMTask* _task;
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
// Helper class for refinement thread management. Used to start, stop and
// iterate over them.
-class G1ConcurrentRefineThreadControl VALUE_OBJ_CLASS_SPEC {
+class G1ConcurrentRefineThreadControl {
G1ConcurrentRefine* _cr;
G1ConcurrentRefineThread** _threads;
--- a/src/hotspot/share/gc/g1/g1DefaultPolicy.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1165 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectionSet.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-#include "gc/g1/g1ConcurrentRefine.hpp"
-#include "gc/g1/g1DefaultPolicy.hpp"
-#include "gc/g1/g1HotCardCache.hpp"
-#include "gc/g1/g1IHOPControl.hpp"
-#include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/g1/g1SurvivorRegions.hpp"
-#include "gc/g1/g1YoungGenSizer.hpp"
-#include "gc/g1/heapRegion.inline.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "logging/logStream.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/growableArray.hpp"
-#include "utilities/pair.hpp"
-
-G1DefaultPolicy::G1DefaultPolicy(STWGCTimer* gc_timer) :
- _predictor(G1ConfidencePercent / 100.0),
- _analytics(new G1Analytics(&_predictor)),
- _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
- _ihop_control(create_ihop_control(&_predictor)),
- _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
- _young_list_fixed_length(0),
- _short_lived_surv_rate_group(new SurvRateGroup()),
- _survivor_surv_rate_group(new SurvRateGroup()),
- _reserve_factor((double) G1ReservePercent / 100.0),
- _reserve_regions(0),
- _rs_lengths_prediction(0),
- _bytes_allocated_in_old_since_last_gc(0),
- _initial_mark_to_mixed(),
- _collection_set(NULL),
- _g1(NULL),
- _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
- _tenuring_threshold(MaxTenuringThreshold),
- _max_survivor_regions(0),
- _survivors_age_table(true),
- _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }
-
-G1DefaultPolicy::~G1DefaultPolicy() {
- delete _ihop_control;
-}
-
-G1CollectorState* G1DefaultPolicy::collector_state() const { return _g1->collector_state(); }
-
-void G1DefaultPolicy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
- _g1 = g1h;
- _collection_set = collection_set;
-
- assert(Heap_lock->owned_by_self(), "Locking discipline.");
-
- if (!adaptive_young_list_length()) {
- _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
- }
- _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
-
- _free_regions_at_end_of_collection = _g1->num_free_regions();
-
- update_young_list_max_and_target_length();
- // We may immediately start allocating regions and placing them on the
- // collection set list. Initialize the per-collection set info
- _collection_set->start_incremental_building();
-}
-
-void G1DefaultPolicy::note_gc_start() {
- phase_times()->note_gc_start();
-}
-
-class G1YoungLengthPredictor VALUE_OBJ_CLASS_SPEC {
- const bool _during_cm;
- const double _base_time_ms;
- const double _base_free_regions;
- const double _target_pause_time_ms;
- const G1DefaultPolicy* const _policy;
-
- public:
- G1YoungLengthPredictor(bool during_cm,
- double base_time_ms,
- double base_free_regions,
- double target_pause_time_ms,
- const G1DefaultPolicy* policy) :
- _during_cm(during_cm),
- _base_time_ms(base_time_ms),
- _base_free_regions(base_free_regions),
- _target_pause_time_ms(target_pause_time_ms),
- _policy(policy) {}
-
- bool will_fit(uint young_length) const {
- if (young_length >= _base_free_regions) {
- // end condition 1: not enough space for the young regions
- return false;
- }
-
- const double accum_surv_rate = _policy->accum_yg_surv_rate_pred((int) young_length - 1);
- const size_t bytes_to_copy =
- (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
- const double copy_time_ms =
- _policy->analytics()->predict_object_copy_time_ms(bytes_to_copy, _during_cm);
- const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
- const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
- if (pause_time_ms > _target_pause_time_ms) {
- // end condition 2: prediction is over the target pause time
- return false;
- }
-
- const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
-
- // When copying, we will likely need more bytes free than is live in the region.
- // Add some safety margin to factor in the confidence of our guess, and the
- // natural expected waste.
- // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
- // of the calculation: the lower the confidence, the more headroom.
- // (100 + TargetPLABWastePct) represents the increase in expected bytes during
- // copying due to anticipated waste in the PLABs.
- const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
- const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
-
- if (expected_bytes_to_copy > free_bytes) {
- // end condition 3: out-of-space
- return false;
- }
-
- // success!
- return true;
- }
-};
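The safety margin in will_fit() is worth seeing with the default flag values (G1ConfidencePercent = 50, TargetPLABWastePct = 10). A standalone illustration; the local constants stand in for the HotSpot flags:

  #include <cstdio>

  int main() {
    const double G1ConfidencePercent = 50.0;  // JDK default
    const double TargetPLABWastePct  = 10.0;  // JDK default
    const double safety_factor =
        (100.0 / G1ConfidencePercent) * (100.0 + TargetPLABWastePct) / 100.0;
    // (100/50) * (110/100) = 2.2: the predictor wants ~2.2x the predicted
    // live bytes free before it accepts a candidate young length.
    std::printf("safety_factor = %.2f\n", safety_factor);
    return 0;
  }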
-
-void G1DefaultPolicy::record_new_heap_size(uint new_number_of_regions) {
- // re-calculate the necessary reserve
- double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
- // We use ceiling so that if reserve_regions_d is > 0.0 (but
- // smaller than 1.0) we'll get 1.
- _reserve_regions = (uint) ceil(reserve_regions_d);
-
- _young_gen_sizer.heap_size_changed(new_number_of_regions);
-
- _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
-}
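A quick worked instance of the ceiling above, assuming the default G1ReservePercent of 10 and a hypothetical 2048-region heap:

  #include <cmath>
  #include <cstdio>

  int main() {
    const double reserve_factor = 10 / 100.0;  // default G1ReservePercent
    const unsigned regions = 2048;             // hypothetical heap size
    const unsigned reserve = (unsigned) std::ceil(regions * reserve_factor);
    std::printf("%u\n", reserve);              // 205; never 0 while the factor is > 0
    return 0;
  }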
-
-uint G1DefaultPolicy::calculate_young_list_desired_min_length(uint base_min_length) const {
- uint desired_min_length = 0;
- if (adaptive_young_list_length()) {
- if (_analytics->num_alloc_rate_ms() > 3) {
- double now_sec = os::elapsedTime();
- double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
- double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
- desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
- } else {
- // otherwise we don't have enough info to make the prediction
- }
- }
- desired_min_length += base_min_length;
- // make sure we don't go below any user-defined minimum bound
- return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
-}
-
-uint G1DefaultPolicy::calculate_young_list_desired_max_length() const {
- // Here, we might want to also take into account any additional
- // constraints (i.e., user-defined minimum bound). Currently, we
- // effectively don't set this bound.
- return _young_gen_sizer.max_desired_young_length();
-}
-
-uint G1DefaultPolicy::update_young_list_max_and_target_length() {
- return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
-}
-
-uint G1DefaultPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
- uint unbounded_target_length = update_young_list_target_length(rs_lengths);
- update_max_gc_locker_expansion();
- return unbounded_target_length;
-}
-
-uint G1DefaultPolicy::update_young_list_target_length(size_t rs_lengths) {
- YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
- _young_list_target_length = young_lengths.first;
- return young_lengths.second;
-}
-
-G1DefaultPolicy::YoungTargetLengths G1DefaultPolicy::young_list_target_lengths(size_t rs_lengths) const {
- YoungTargetLengths result;
-
- // Calculate the absolute and desired min bounds first.
-
- // This is how many young regions we already have (currently: the survivors).
- const uint base_min_length = _g1->survivor_regions_count();
- uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
- // This is the absolute minimum young length. Ensure that we
- // will at least have one eden region available for allocation.
- uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
- // If we shrank the young list target it should not shrink below the current size.
- desired_min_length = MAX2(desired_min_length, absolute_min_length);
- // Calculate the absolute and desired max bounds.
-
- uint desired_max_length = calculate_young_list_desired_max_length();
-
- uint young_list_target_length = 0;
- if (adaptive_young_list_length()) {
- if (collector_state()->gcs_are_young()) {
- young_list_target_length =
- calculate_young_list_target_length(rs_lengths,
- base_min_length,
- desired_min_length,
- desired_max_length);
- } else {
- // Don't calculate anything and let the code below bound it to
- // the desired_min_length, i.e., do the next GC as soon as
- // possible to maximize how many old regions we can add to it.
- }
- } else {
- // The user asked for a fixed young gen so we'll fix the young gen
- // whether the next GC is young or mixed.
- young_list_target_length = _young_list_fixed_length;
- }
-
- result.second = young_list_target_length;
-
- // We will try our best not to "eat" into the reserve.
- uint absolute_max_length = 0;
- if (_free_regions_at_end_of_collection > _reserve_regions) {
- absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
- }
- if (desired_max_length > absolute_max_length) {
- desired_max_length = absolute_max_length;
- }
-
- // Make sure we don't go over the desired max length, nor under the
- // desired min length. In case they clash, desired_min_length wins
- // which is why that test is second.
- if (young_list_target_length > desired_max_length) {
- young_list_target_length = desired_max_length;
- }
- if (young_list_target_length < desired_min_length) {
- young_list_target_length = desired_min_length;
- }
-
- assert(young_list_target_length > base_min_length,
- "we should be able to allocate at least one eden region");
- assert(young_list_target_length >= absolute_min_length, "post-condition");
-
- result.first = young_list_target_length;
- return result;
-}
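The clamp ordering above matters; a hedged numeric example of the clash the comment warns about:

  // Suppose desired_min_length = 20, desired_max_length = 15 (after the
  // reserve capped it), and the computed target = 30.
  //   max-clamp first:  30 -> 15
  //   min-clamp second: 15 -> 20
  // The min test runs last, which is why desired_min_length wins.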
-
-uint
-G1DefaultPolicy::calculate_young_list_target_length(size_t rs_lengths,
- uint base_min_length,
- uint desired_min_length,
- uint desired_max_length) const {
- assert(adaptive_young_list_length(), "pre-condition");
- assert(collector_state()->gcs_are_young(), "only call this for young GCs");
-
- // In case some edge-condition makes the desired max length too small...
- if (desired_max_length <= desired_min_length) {
- return desired_min_length;
- }
-
- // We'll adjust min_young_length and max_young_length not to include
- // the already allocated young regions (i.e., so they reflect the
- // min and max eden regions we'll allocate). The base_min_length
- // will be reflected in the predictions by the
- // survivor_regions_evac_time prediction.
- assert(desired_min_length > base_min_length, "invariant");
- uint min_young_length = desired_min_length - base_min_length;
- assert(desired_max_length > base_min_length, "invariant");
- uint max_young_length = desired_max_length - base_min_length;
-
- const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
- const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
- const size_t pending_cards = _analytics->predict_pending_cards();
- const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
- const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
- const double base_time_ms =
- predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
- survivor_regions_evac_time;
- const uint available_free_regions = _free_regions_at_end_of_collection;
- const uint base_free_regions =
- available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
-
- // Here, we will make sure that the shortest young length that
- // makes sense fits within the target pause time.
-
- G1YoungLengthPredictor p(collector_state()->during_concurrent_mark(),
- base_time_ms,
- base_free_regions,
- target_pause_time_ms,
- this);
- if (p.will_fit(min_young_length)) {
- // The shortest young length will fit into the target pause time;
- // we'll now check whether the absolute maximum number of young
- // regions will fit in the target pause time. If not, we'll do
- // a binary search between min_young_length and max_young_length.
- if (p.will_fit(max_young_length)) {
- // The maximum young length will fit into the target pause time.
- // We are done so set min young length to the maximum length (as
- // the result is assumed to be returned in min_young_length).
- min_young_length = max_young_length;
- } else {
- // The maximum possible number of young regions will not fit within
- // the target pause time so we'll search for the optimal
- // length. The loop invariants are:
- //
- // min_young_length < max_young_length
- // min_young_length is known to fit into the target pause time
- // max_young_length is known not to fit into the target pause time
- //
- // Going into the loop we know the above hold as we've just
- // checked them. Every time around the loop we check whether
- // the middle value between min_young_length and
- // max_young_length fits into the target pause time. If it
- // does, it becomes the new min. If it doesn't, it becomes
- // the new max. This way we maintain the loop invariants.
-
- assert(min_young_length < max_young_length, "invariant");
- uint diff = (max_young_length - min_young_length) / 2;
- while (diff > 0) {
- uint young_length = min_young_length + diff;
- if (p.will_fit(young_length)) {
- min_young_length = young_length;
- } else {
- max_young_length = young_length;
- }
- assert(min_young_length < max_young_length, "invariant");
- diff = (max_young_length - min_young_length) / 2;
- }
- // The result is min_young_length which, according to the
- // loop invariants, should fit within the target pause time.
-
- // These are the post-conditions of the binary search above:
- assert(min_young_length < max_young_length,
- "otherwise we should have discovered that max_young_length "
- "fits into the pause target and not done the binary search");
- assert(p.will_fit(min_young_length),
- "min_young_length, the result of the binary search, should "
- "fit into the pause target");
- assert(!p.will_fit(min_young_length + 1),
- "min_young_length, the result of the binary search, should be "
- "optimal, so no larger length should fit into the pause target");
- }
- } else {
- // Even the minimum length doesn't fit into the pause time
- // target, return it as the result nevertheless.
- }
- return base_min_length + min_young_length;
-}
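The loop above is a plain bounded binary search over a monotone predicate. A self-contained sketch with the same halving structure and invariants (fits() is a stand-in for will_fit(); lo always fits, hi never does):

  #include <cstdio>

  static bool fits(unsigned len) { return len <= 37; }  // stand-in predicate

  unsigned max_fitting_length(unsigned lo, unsigned hi) {
    // Precondition: fits(lo) && !fits(hi) && lo < hi.
    unsigned diff = (hi - lo) / 2;
    while (diff > 0) {
      const unsigned mid = lo + diff;
      if (fits(mid)) {
        lo = mid;  // mid fits: raise the known-good lower bound
      } else {
        hi = mid;  // mid does not fit: lower the known-bad upper bound
      }
      diff = (hi - lo) / 2;
    }
    return lo;     // the largest length known to fit
  }

  int main() {
    std::printf("%u\n", max_fitting_length(1, 100));  // prints 37
    return 0;
  }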
-
-double G1DefaultPolicy::predict_survivor_regions_evac_time() const {
- double survivor_regions_evac_time = 0.0;
- const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();
-
- for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
- it != survivor_regions->end();
- ++it) {
- survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->gcs_are_young());
- }
- return survivor_regions_evac_time;
-}
-
-void G1DefaultPolicy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
- guarantee( adaptive_young_list_length(), "should not call this otherwise" );
-
- if (rs_lengths > _rs_lengths_prediction) {
- // add 10% to avoid having to recalculate often
- size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
- update_rs_lengths_prediction(rs_lengths_prediction);
-
- update_young_list_max_and_target_length(rs_lengths_prediction);
- }
-}
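The 10% pad stays in integer arithmetic to avoid a float round-trip on size_t:

  // rs_lengths * 1100 / 1000 == rs_lengths + 10% (integer division).
  // e.g. rs_lengths = 25000 entries: 25000 * 1100 / 1000 = 27500, so the
  // target length is not recomputed until a sample exceeds 27500.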
-
-void G1DefaultPolicy::update_rs_lengths_prediction() {
- update_rs_lengths_prediction(_analytics->predict_rs_lengths());
-}
-
-void G1DefaultPolicy::update_rs_lengths_prediction(size_t prediction) {
- if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
- _rs_lengths_prediction = prediction;
- }
-}
-
-void G1DefaultPolicy::record_full_collection_start() {
- _full_collection_start_sec = os::elapsedTime();
- // Release the future to-space so that it is available for compaction into.
- collector_state()->set_full_collection(true);
-}
-
-void G1DefaultPolicy::record_full_collection_end() {
- // Consider this like a collection pause for the purposes of allocation
- // since last pause.
- double end_sec = os::elapsedTime();
- double full_gc_time_sec = end_sec - _full_collection_start_sec;
- double full_gc_time_ms = full_gc_time_sec * 1000.0;
-
- _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
-
- collector_state()->set_full_collection(false);
-
- // "Nuke" the heuristics that control the young/mixed GC
- // transitions and make sure we start with young GCs after the Full GC.
- collector_state()->set_gcs_are_young(true);
- collector_state()->set_last_young_gc(false);
- collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
- collector_state()->set_during_initial_mark_pause(false);
- collector_state()->set_in_marking_window(false);
- collector_state()->set_in_marking_window_im(false);
-
- _short_lived_surv_rate_group->start_adding_regions();
- // also call this on any additional surv rate groups
-
- _free_regions_at_end_of_collection = _g1->num_free_regions();
- // Reset survivors SurvRateGroup.
- _survivor_surv_rate_group->reset();
- update_young_list_max_and_target_length();
- update_rs_lengths_prediction();
- cset_chooser()->clear();
-
- _bytes_allocated_in_old_since_last_gc = 0;
-
- record_pause(FullGC, _full_collection_start_sec, end_sec);
-}
-
-void G1DefaultPolicy::record_collection_pause_start(double start_time_sec) {
- // We only need to do this here as the policy will only be applied
- // to the GC we're about to start. So there is no point in calculating this
- // every time we calculate / recalculate the target young length.
- update_survivors_policy();
-
- assert(_g1->used() == _g1->recalculate_used(),
- "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
- _g1->used(), _g1->recalculate_used());
-
- phase_times()->record_cur_collection_start_sec(start_time_sec);
- _pending_cards = _g1->pending_card_num();
-
- _collection_set->reset_bytes_used_before();
- _bytes_copied_during_gc = 0;
-
- collector_state()->set_last_gc_was_young(false);
-
- // do that for any other surv rate groups
- _short_lived_surv_rate_group->stop_adding_regions();
- _survivors_age_table.clear();
-
- assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
-}
-
-void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
- collector_state()->set_during_marking(true);
- assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
- collector_state()->set_during_initial_mark_pause(false);
-}
-
-void G1DefaultPolicy::record_concurrent_mark_remark_start() {
- _mark_remark_start_sec = os::elapsedTime();
- collector_state()->set_during_marking(false);
-}
-
-void G1DefaultPolicy::record_concurrent_mark_remark_end() {
- double end_time_sec = os::elapsedTime();
- double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
- _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
- _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
-
- record_pause(Remark, _mark_remark_start_sec, end_time_sec);
-}
-
-void G1DefaultPolicy::record_concurrent_mark_cleanup_start() {
- _mark_cleanup_start_sec = os::elapsedTime();
-}
-
-void G1DefaultPolicy::record_concurrent_mark_cleanup_completed() {
- bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
- "skip last young-only gc");
- collector_state()->set_last_young_gc(should_continue_with_reclaim);
- // We skip the marking phase.
- if (!should_continue_with_reclaim) {
- abort_time_to_mixed_tracking();
- }
- collector_state()->set_in_marking_window(false);
-}
-
-double G1DefaultPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
- return phase_times()->average_time_ms(phase);
-}
-
-double G1DefaultPolicy::young_other_time_ms() const {
- return phase_times()->young_cset_choice_time_ms() +
- phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
-}
-
-double G1DefaultPolicy::non_young_other_time_ms() const {
- return phase_times()->non_young_cset_choice_time_ms() +
- phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
-}
-
-double G1DefaultPolicy::other_time_ms(double pause_time_ms) const {
- return pause_time_ms - phase_times()->cur_collection_par_time_ms();
-}
-
-double G1DefaultPolicy::constant_other_time_ms(double pause_time_ms) const {
- return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
-}
-
-CollectionSetChooser* G1DefaultPolicy::cset_chooser() const {
- return _collection_set->cset_chooser();
-}
-
-bool G1DefaultPolicy::about_to_start_mixed_phase() const {
- return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
-}
-
-bool G1DefaultPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
- if (about_to_start_mixed_phase()) {
- return false;
- }
-
- size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
-
- size_t cur_used_bytes = _g1->non_young_capacity_bytes();
- size_t alloc_byte_size = alloc_word_size * HeapWordSize;
- size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
-
- bool result = false;
- if (marking_request_bytes > marking_initiating_used_threshold) {
- result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
- log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
- result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
- cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
- }
-
- return result;
-}
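A worked instance of the trigger above, with assumed round numbers and the default InitiatingHeapOccupancyPercent of 45 under a static IHOP control:

  // Hypothetical 1024 MB heap, static IHOP:
  //   threshold             = 45% of 1024 MB           = ~460 MB
  //   cur_used_bytes        = 450 MB (non-young usage)
  //   alloc_byte_size       =  16 MB (pending humongous allocation)
  //   marking_request_bytes = 466 MB > 460 MB
  // => request initiation, provided the collector is still in the
  //    young-only phase and this is not already the last young-only GC.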
-
-// Anything below that is considered to be zero
-#define MIN_TIMER_GRANULARITY 0.0000001
-
-void G1DefaultPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
- double end_time_sec = os::elapsedTime();
-
- size_t cur_used_bytes = _g1->used();
- assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
- bool last_pause_included_initial_mark = false;
- bool update_stats = !_g1->evacuation_failed();
-
- record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
-
- _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-
- last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
- if (last_pause_included_initial_mark) {
- record_concurrent_mark_init_end(0.0);
- } else {
- maybe_start_marking();
- }
-
- double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
- if (app_time_ms < MIN_TIMER_GRANULARITY) {
- // This usually happens due to the timer not having the required
- // granularity. Some Linuxes are the usual culprits.
- // We'll just set it to something (arbitrarily) small.
- app_time_ms = 1.0;
- }
-
- if (update_stats) {
- // We maintain the invariant that all objects allocated by mutator
- // threads will be allocated out of eden regions. So, we can use
- // the eden region number allocated since the previous GC to
- // calculate the application's allocate rate. The only exception
- // to that is humongous objects that are allocated separately. But
- // given that humongous object allocations do not really affect
- // neither the pause's duration nor when the next pause will take
- // place we can safely ignore them here.
- uint regions_allocated = _collection_set->eden_region_length();
- double alloc_rate_ms = (double) regions_allocated / app_time_ms;
- _analytics->report_alloc_rate_ms(alloc_rate_ms);
-
- double interval_ms =
- (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
- _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
- _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
- }
-
- bool new_in_marking_window = collector_state()->in_marking_window();
- bool new_in_marking_window_im = false;
- if (last_pause_included_initial_mark) {
- new_in_marking_window = true;
- new_in_marking_window_im = true;
- }
-
- if (collector_state()->last_young_gc()) {
- // This is supposed to be the "last young GC" before we start
- // doing mixed GCs. Here we decide whether to start mixed GCs or not.
- assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
-
- if (next_gc_should_be_mixed("start mixed GCs",
- "do not start mixed GCs")) {
- collector_state()->set_gcs_are_young(false);
- } else {
- // We aborted the mixed GC phase early.
- abort_time_to_mixed_tracking();
- }
-
- collector_state()->set_last_young_gc(false);
- }
-
- if (!collector_state()->last_gc_was_young()) {
- // This is a mixed GC. Here we decide whether to continue doing
- // mixed GCs or not.
- if (!next_gc_should_be_mixed("continue mixed GCs",
- "do not continue mixed GCs")) {
- collector_state()->set_gcs_are_young(true);
-
- maybe_start_marking();
- }
- }
-
- _short_lived_surv_rate_group->start_adding_regions();
- // Do that for any other surv rate groups
-
- double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
-
- if (update_stats) {
- double cost_per_card_ms = 0.0;
- if (_pending_cards > 0) {
- cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
- _analytics->report_cost_per_card_ms(cost_per_card_ms);
- }
- _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
-
- double cost_per_entry_ms = 0.0;
- if (cards_scanned > 10) {
- cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
- _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
- }
-
- if (_max_rs_lengths > 0) {
- double cards_per_entry_ratio =
- (double) cards_scanned / (double) _max_rs_lengths;
- _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
- }
-
- // This is defensive. For a while _max_rs_lengths could get
- // smaller than _recorded_rs_lengths which was causing
- // rs_length_diff to get very large and mess up the RSet length
- // predictions. The reason was unsafe concurrent updates to the
- // _inc_cset_recorded_rs_lengths field which the code below guards
- // against (see CR 7118202). This bug has now been fixed (see CR
- // 7119027). However, I'm still worried that
- // _inc_cset_recorded_rs_lengths might still end up somewhat
- // inaccurate. The concurrent refinement thread calculates an
- // RSet's length concurrently with other CR threads updating it
- // which might cause it to calculate the length incorrectly (if,
- // say, it's in mid-coarsening). So I'll leave in the defensive
- // conditional below just in case.
- size_t rs_length_diff = 0;
- size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
- if (_max_rs_lengths > recorded_rs_lengths) {
- rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
- }
- _analytics->report_rs_length_diff((double) rs_length_diff);
-
- size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
- size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
- double cost_per_byte_ms = 0.0;
-
- if (copied_bytes > 0) {
- cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
- _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
- }
-
- if (_collection_set->young_region_length() > 0) {
- _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
- _collection_set->young_region_length());
- }
-
- if (_collection_set->old_region_length() > 0) {
- _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
- _collection_set->old_region_length());
- }
-
- _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
-
- _analytics->report_pending_cards((double) _pending_cards);
- _analytics->report_rs_lengths((double) _max_rs_lengths);
- }
-
- collector_state()->set_in_marking_window(new_in_marking_window);
- collector_state()->set_in_marking_window_im(new_in_marking_window_im);
- _free_regions_at_end_of_collection = _g1->num_free_regions();
- // IHOP control wants to know the expected young gen length if it were not
- // restrained by the heap reserve. Using the actual length would make the
- // prediction too small and limit the young gen every time we get to the
- // predicted target occupancy.
- size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
- update_rs_lengths_prediction();
-
- update_ihop_prediction(app_time_ms / 1000.0,
- _bytes_allocated_in_old_since_last_gc,
- last_unrestrained_young_length * HeapRegion::GrainBytes);
- _bytes_allocated_in_old_since_last_gc = 0;
-
- _ihop_control->send_trace_event(_g1->gc_tracer_stw());
-
- // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
- double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
-
- if (update_rs_time_goal_ms < scan_hcc_time_ms) {
- log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
- "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
- update_rs_time_goal_ms, scan_hcc_time_ms);
-
- update_rs_time_goal_ms = 0;
- } else {
- update_rs_time_goal_ms -= scan_hcc_time_ms;
- }
- _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
- phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
- update_rs_time_goal_ms);
-
- cset_chooser()->verify();
-}
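The update RS time goal above scales the MMU pause budget (in seconds) by G1RSetUpdatingPauseTimePercent and then carves out the HCC scan time. A minimal standalone sketch of that arithmetic, assuming MILLIUNITS converts seconds to milliseconds and the commonly cited flag default of 10; the pause budget and HCC scan time are made-up numbers:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double MILLIUNITS = 1000.0;                    // seconds -> milliseconds (assumed)
      const double max_gc_time_sec = 0.2;                  // hypothetical 200 ms MMU pause budget
      const double rs_updating_pause_time_percent = 10.0;  // assumed G1RSetUpdatingPauseTimePercent default
      const double scan_hcc_time_ms = 3.0;                 // hypothetical measured HCC scan time

      double goal_ms = max_gc_time_sec * MILLIUNITS * rs_updating_pause_time_percent / 100.0;
      // As in the policy code above, the HCC scan time is deducted from the goal,
      // clamping at zero when scanning the HCC alone is expected to exceed it.
      goal_ms = std::max(goal_ms - scan_hcc_time_ms, 0.0);
      std::printf("Update RS time goal: %1.2fms\n", goal_ms);  // prints 17.00ms
      return 0;
    }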
-
-G1IHOPControl* G1DefaultPolicy::create_ihop_control(const G1Predictions* predictor){
- if (G1UseAdaptiveIHOP) {
- return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
- predictor,
- G1ReservePercent,
- G1HeapWastePercent);
- } else {
- return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
- }
-}
-
-void G1DefaultPolicy::update_ihop_prediction(double mutator_time_s,
- size_t mutator_alloc_bytes,
- size_t young_gen_size) {
- // Always try to update IHOP prediction. Even evacuation failures give information
- // about e.g. whether to start IHOP earlier next time.
-
- // Avoid using really small application times that might create samples with
- // very high or very low values. They may be caused by e.g. back-to-back gcs.
- double const min_valid_time = 1e-6;
-
- bool report = false;
-
- double marking_to_mixed_time = -1.0;
- if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
- marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
- assert(marking_to_mixed_time > 0.0,
- "Initial mark to mixed time must be larger than zero but is %.3f",
- marking_to_mixed_time);
- if (marking_to_mixed_time > min_valid_time) {
- _ihop_control->update_marking_length(marking_to_mixed_time);
- report = true;
- }
- }
-
- // As an approximation for the young gc promotion rates during marking we use
- // all of them. In many applications there are only a few, if any, young gcs
- // during marking, so restricting the samples to that window would make any
- // prediction useless; using all samples increases its accuracy.
- if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
- _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
- report = true;
- }
-
- if (report) {
- report_ihop_statistics();
- }
-}
-
-void G1DefaultPolicy::report_ihop_statistics() {
- _ihop_control->print();
-}
-
-void G1DefaultPolicy::print_phases() {
- phase_times()->print();
-}
-
-double G1DefaultPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
- TruncatedSeq* seq = surv_rate_group->get_seq(age);
- guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
- double pred = _predictor.get_new_prediction(seq);
- if (pred > 1.0) {
- pred = 1.0;
- }
- return pred;
-}
-
-double G1DefaultPolicy::accum_yg_surv_rate_pred(int age) const {
- return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
-}
-
-double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
- size_t scanned_cards) const {
- return
- _analytics->predict_rs_update_time_ms(pending_cards) +
- _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
- _analytics->predict_constant_other_time_ms();
-}
-
-double G1DefaultPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
- size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
- size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
- return predict_base_elapsed_time_ms(pending_cards, card_num);
-}
-
-size_t G1DefaultPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
- size_t bytes_to_copy;
- if (hr->is_marked()) {
- bytes_to_copy = hr->max_live_bytes();
- } else {
- assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
- int age = hr->age_in_surv_rate_group();
- double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
- bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
- }
- return bytes_to_copy;
-}
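predict_bytes_to_copy() above uses two models: live bytes for marked regions, and used bytes scaled by a predicted survival rate for young regions. A minimal sketch of the young-region branch, with hypothetical numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t region_used_bytes = 8u * 1024 * 1024;  // hypothetical 8M used in a young region
      const double predicted_surv_rate = 0.35;  // hypothetical predict_yg_surv_rate() result (capped at 1.0)
      std::size_t bytes_to_copy = (std::size_t)(region_used_bytes * predicted_surv_rate);
      std::printf("predicted bytes to copy: %zu\n", bytes_to_copy);  // ~2.8M
      return 0;
    }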
-
-double G1DefaultPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
- bool for_young_gc) const {
- size_t rs_length = hr->rem_set()->occupied();
- // Predicting the number of cards is based on which type of GC
- // we're predicting for.
- size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
- size_t bytes_to_copy = predict_bytes_to_copy(hr);
-
- double region_elapsed_time_ms =
- _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
- _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
-
- // The prediction of the "other" time for this region is based
- // upon the region type and NOT the GC type.
- if (hr->is_young()) {
- region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
- } else {
- region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
- }
- return region_elapsed_time_ms;
-}
-
-bool G1DefaultPolicy::should_allocate_mutator_region() const {
- uint young_list_length = _g1->young_regions_count();
- uint young_list_target_length = _young_list_target_length;
- return young_list_length < young_list_target_length;
-}
-
-bool G1DefaultPolicy::can_expand_young_list() const {
- uint young_list_length = _g1->young_regions_count();
- uint young_list_max_length = _young_list_max_length;
- return young_list_length < young_list_max_length;
-}
-
-bool G1DefaultPolicy::adaptive_young_list_length() const {
- return _young_gen_sizer.adaptive_young_list_length();
-}
-
-size_t G1DefaultPolicy::desired_survivor_size() const {
- size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
- return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
-}
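desired_survivor_size() above is the survivor capacity in words scaled by TargetSurvivorRatio. A worked sketch, assuming the commonly cited default of 50 and a made-up region word size:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t grain_words = (1024 * 1024) / 8;  // hypothetical 1M region, 8-byte words
      const unsigned max_survivor_regions = 5;
      const double target_survivor_ratio = 50.0;          // assumed TargetSurvivorRatio default
      std::size_t survivor_capacity = grain_words * max_survivor_regions;
      std::size_t desired = (std::size_t)(((double)survivor_capacity) * target_survivor_ratio / 100.0);
      std::printf("capacity: %zu words, desired: %zu words\n", survivor_capacity, desired);
      return 0;
    }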
-
-void G1DefaultPolicy::print_age_table() {
- _survivors_age_table.print_age_table(_tenuring_threshold);
-}
-
-void G1DefaultPolicy::update_max_gc_locker_expansion() {
- uint expansion_region_num = 0;
- if (GCLockerEdenExpansionPercent > 0) {
- double perc = (double) GCLockerEdenExpansionPercent / 100.0;
- double expansion_region_num_d = perc * (double) _young_list_target_length;
- // We use ceiling so that if expansion_region_num_d is > 0.0 (but
- // less than 1.0) we'll get 1.
- expansion_region_num = (uint) ceil(expansion_region_num_d);
- } else {
- assert(expansion_region_num == 0, "sanity");
- }
- _young_list_max_length = _young_list_target_length + expansion_region_num;
- assert(_young_list_target_length <= _young_list_max_length, "post-condition");
-}
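The ceiling in update_max_gc_locker_expansion() above guarantees at least one extra eden region whenever GCLockerEdenExpansionPercent is non-zero, however small the product comes out. A sketch with hypothetical values:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double gclocker_eden_expansion_percent = 5.0;  // hypothetical flag value
      const unsigned young_list_target_length = 10;
      double expansion_regions_d =
          (gclocker_eden_expansion_percent / 100.0) * young_list_target_length;
      unsigned expansion_regions = (unsigned)std::ceil(expansion_regions_d);  // 0.5 -> 1
      std::printf("max young length: %u\n", young_list_target_length + expansion_regions);  // 11
      return 0;
    }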
-
-// Calculates survivor space parameters.
-void G1DefaultPolicy::update_survivors_policy() {
- double max_survivor_regions_d =
- (double) _young_list_target_length / (double) SurvivorRatio;
- // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
- // smaller than 1.0) we'll get 1.
- _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
-
- _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
- if (UsePerfData) {
- _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
- _policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
- }
-}
-
-bool G1DefaultPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
- // We actually check whether we are marking here and not if we are in a
- // reclamation phase. This means that we will schedule a concurrent mark
- // even while we are still in the process of reclaiming memory.
- bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
- if (!during_cycle) {
- log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
- collector_state()->set_initiate_conc_mark_if_possible(true);
- return true;
- } else {
- log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
- return false;
- }
-}
-
-void G1DefaultPolicy::initiate_conc_mark() {
- collector_state()->set_during_initial_mark_pause(true);
- collector_state()->set_initiate_conc_mark_if_possible(false);
-}
-
-void G1DefaultPolicy::decide_on_conc_mark_initiation() {
- // We are about to decide on whether this pause will be an
- // initial-mark pause.
-
- // First, collector_state()->during_initial_mark_pause() should not be already set. We
- // will set it here if we have to. However, it should be cleared by
- // the end of the pause (it's only set for the duration of an
- // initial-mark pause).
- assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
-
- if (collector_state()->initiate_conc_mark_if_possible()) {
- // We had noticed on a previous pause that the heap occupancy has
- // gone over the initiating threshold and we should start a
- // concurrent marking cycle. So we might initiate one.
-
- if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
- // Initiate a new initial mark if there is no marking or reclamation going on.
- initiate_conc_mark();
- log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
- } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
- // Initiate a user requested initial mark. An initial mark must be a young-only
- // GC, so the collector state must be updated to reflect this.
- collector_state()->set_gcs_are_young(true);
- collector_state()->set_last_young_gc(false);
-
- abort_time_to_mixed_tracking();
- initiate_conc_mark();
- log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
- } else {
- // The concurrent marking thread is still finishing up the
- // previous cycle. If we start one right now the two cycles
- // overlap. In particular, the concurrent marking thread might
- // be in the process of clearing the next marking bitmap (which
- // we will use for the next cycle if we start one). Starting a
- // cycle now will be bad given that parts of the marking
- // information might get cleared by the marking thread. And we
- // cannot wait for the marking thread to finish the cycle as it
- // periodically yields while clearing the next marking bitmap
- // and, if it's in a yield point, it's waiting for us to
- // finish. So, at this point we will not start a cycle and we'll
- // let the concurrent marking thread complete the last one.
- log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
- }
- }
-}
-
-void G1DefaultPolicy::record_concurrent_mark_cleanup_end() {
- cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
-
- double end_sec = os::elapsedTime();
- double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
- _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
- _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
-
- record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
-}
-
-double G1DefaultPolicy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
- return percent_of(reclaimable_bytes, _g1->capacity());
-}
-
-void G1DefaultPolicy::maybe_start_marking() {
- if (need_to_start_conc_mark("end of GC")) {
- // Note: this might have already been set, if during the last
- // pause we decided to start a cycle but at the beginning of
- // this pause we decided to postpone it. That's OK.
- collector_state()->set_initiate_conc_mark_if_possible(true);
- }
-}
-
-G1DefaultPolicy::PauseKind G1DefaultPolicy::young_gc_pause_kind() const {
- assert(!collector_state()->full_collection(), "must be");
- if (collector_state()->during_initial_mark_pause()) {
- assert(collector_state()->last_gc_was_young(), "must be");
- assert(!collector_state()->last_young_gc(), "must be");
- return InitialMarkGC;
- } else if (collector_state()->last_young_gc()) {
- assert(!collector_state()->during_initial_mark_pause(), "must be");
- assert(collector_state()->last_gc_was_young(), "must be");
- return LastYoungGC;
- } else if (!collector_state()->last_gc_was_young()) {
- assert(!collector_state()->during_initial_mark_pause(), "must be");
- assert(!collector_state()->last_young_gc(), "must be");
- return MixedGC;
- } else {
- assert(collector_state()->last_gc_was_young(), "must be");
- assert(!collector_state()->during_initial_mark_pause(), "must be");
- assert(!collector_state()->last_young_gc(), "must be");
- return YoungOnlyGC;
- }
-}
-
-void G1DefaultPolicy::record_pause(PauseKind kind, double start, double end) {
- // Manage the MMU tracker. For some reason it ignores Full GCs.
- if (kind != FullGC) {
- _mmu_tracker->add_pause(start, end);
- }
- // Manage the mutator time tracking from initial mark to first mixed gc.
- switch (kind) {
- case FullGC:
- abort_time_to_mixed_tracking();
- break;
- case Cleanup:
- case Remark:
- case YoungOnlyGC:
- case LastYoungGC:
- _initial_mark_to_mixed.add_pause(end - start);
- break;
- case InitialMarkGC:
- _initial_mark_to_mixed.record_initial_mark_end(end);
- break;
- case MixedGC:
- _initial_mark_to_mixed.record_mixed_gc_start(start);
- break;
- default:
- ShouldNotReachHere();
- }
-}
-
-void G1DefaultPolicy::abort_time_to_mixed_tracking() {
- _initial_mark_to_mixed.reset();
-}
-
-bool G1DefaultPolicy::next_gc_should_be_mixed(const char* true_action_str,
- const char* false_action_str) const {
- if (cset_chooser()->is_empty()) {
- log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
- return false;
- }
-
- // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
- size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
- double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
- double threshold = (double) G1HeapWastePercent;
- if (reclaimable_percent <= threshold) {
- log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
- false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
- return false;
- }
- log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
- true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
- return true;
-}
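next_gc_should_be_mixed() above keeps collecting old regions only while the uncollected reclaimable space stays above G1HeapWastePercent of heap capacity. A sketch of that test, assuming the commonly cited default of 5 and hypothetical sizes:

    #include <cstdint>
    #include <cstdio>

    static double percent_of(uint64_t part, uint64_t total) {
      return 100.0 * (double)part / (double)total;
    }

    int main() {
      const uint64_t heap_capacity = 4ull * 1024 * 1024 * 1024;  // hypothetical 4G heap
      const uint64_t reclaimable_bytes = 300ull * 1024 * 1024;   // hypothetical 300M in candidates
      const double threshold = 5.0;                              // assumed G1HeapWastePercent default
      double reclaimable_percent = percent_of(reclaimable_bytes, heap_capacity);
      std::printf("reclaimable: %1.2f%% -> %s\n", reclaimable_percent,
                  reclaimable_percent > threshold ? "continue mixed GCs" : "do not continue");
      return 0;
    }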
-
-uint G1DefaultPolicy::calc_min_old_cset_length() const {
- // The min old CSet region bound is based on the maximum desired
- // number of mixed GCs after a cycle. I.e., even if some old regions
- // look expensive, we should add them to the CSet anyway to make
- // sure we go through the available old regions in no more than the
- // maximum desired number of mixed GCs.
- //
- // The calculation is based on the number of marked regions we added
- // to the CSet chooser in the first place, not how many remain, so
- // that the result is the same during all mixed GCs that follow a cycle.
-
- const size_t region_num = (size_t) cset_chooser()->length();
- const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
- size_t result = region_num / gc_num;
- // emulate ceiling
- if (result * gc_num < region_num) {
- result += 1;
- }
- return (uint) result;
-}
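The "emulate ceiling" idiom in calc_min_old_cset_length() above (and again in calc_max_old_cset_length() below) rounds an integer division up without floating point. A small sketch checking it against the usual (a + b - 1) / b form, which agrees for positive operands:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static std::size_t ceil_div_emulated(std::size_t region_num, std::size_t gc_num) {
      std::size_t result = region_num / gc_num;
      if (result * gc_num < region_num) {
        result += 1;  // leftover regions: round up
      }
      return result;
    }

    int main() {
      // E.g. 10 candidate regions over a target of 4 mixed GCs -> at least 3 per GC.
      assert(ceil_div_emulated(10, 4) == 3);
      assert(ceil_div_emulated(10, 4) == (10 + 4 - 1) / 4);
      std::printf("min old CSet length: %zu\n", ceil_div_emulated(10, 4));
      return 0;
    }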
-
-uint G1DefaultPolicy::calc_max_old_cset_length() const {
- // The max old CSet region bound is based on the threshold expressed
- // as a percentage of the heap size. I.e., it should bound the
- // number of old regions added to the CSet irrespective of how many
- // of them are available.
-
- const G1CollectedHeap* g1h = G1CollectedHeap::heap();
- const size_t region_num = g1h->num_regions();
- const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
- size_t result = region_num * perc / 100;
- // emulate ceiling
- if (100 * result < region_num * perc) {
- result += 1;
- }
- return (uint) result;
-}
-
-void G1DefaultPolicy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
- double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
- _collection_set->finalize_old_part(time_remaining_ms);
-}
-
-void G1DefaultPolicy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
-
- // Add survivor regions to SurvRateGroup.
- note_start_adding_survivor_regions();
- finished_recalculating_age_indexes(true /* is_survivors */);
-
- HeapRegion* last = NULL;
- for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
- it != survivors->regions()->end();
- ++it) {
- HeapRegion* curr = *it;
- set_region_survivor(curr);
-
- // The region is a non-empty survivor so let's add it to
- // the incremental collection set for the next evacuation
- // pause.
- _collection_set->add_survivor_regions(curr);
-
- last = curr;
- }
- note_stop_adding_survivor_regions();
-
- // Don't clear the survivor list handles until the start of
- // the next evacuation pause - we need it in order to re-tag
- // the survivor regions from this evacuation pause as 'young'
- // at the start of the next.
-
- finished_recalculating_age_indexes(false /* is_survivors */);
-}
--- a/src/hotspot/share/gc/g1/g1DefaultPolicy.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,399 +0,0 @@
-/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1DEFAULTPOLICY_HPP
-#define SHARE_VM_GC_G1_G1DEFAULTPOLICY_HPP
-
-#include "gc/g1/g1CollectorState.hpp"
-#include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1InCSetState.hpp"
-#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp"
-#include "gc/g1/g1MMUTracker.hpp"
-#include "gc/g1/g1Predictions.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/g1/g1YoungGenSizer.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "utilities/pair.hpp"
-
-class HeapRegion;
-class G1CollectionSet;
-class CollectionSetChooser;
-class G1IHOPControl;
-class G1Analytics;
-class G1SurvivorRegions;
-class G1YoungGenSizer;
-class GCPolicyCounters;
-class STWGCTimer;
-
-class G1DefaultPolicy: public G1Policy {
- private:
-
- static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
- // Update the IHOP control with necessary statistics.
- void update_ihop_prediction(double mutator_time_s,
- size_t mutator_alloc_bytes,
- size_t young_gen_size);
- void report_ihop_statistics();
-
- G1Predictions _predictor;
- G1Analytics* _analytics;
- G1MMUTracker* _mmu_tracker;
- G1IHOPControl* _ihop_control;
-
- GCPolicyCounters* _policy_counters;
-
- double _full_collection_start_sec;
-
- jlong _collection_pause_end_millis;
-
- uint _young_list_target_length;
- uint _young_list_fixed_length;
-
- // The max number of regions we can extend the eden by while the GC
- // locker is active. This should be >= _young_list_target_length.
- uint _young_list_max_length;
-
- // SurvRateGroups below must be initialized after the predictor because they
- // indirectly use it through this object passed to their constructor.
- SurvRateGroup* _short_lived_surv_rate_group;
- SurvRateGroup* _survivor_surv_rate_group;
-
- double _reserve_factor;
- // This will be set when the heap is expanded
- // for the first time during initialization.
- uint _reserve_regions;
-
- G1YoungGenSizer _young_gen_sizer;
-
- uint _free_regions_at_end_of_collection;
-
- size_t _max_rs_lengths;
-
- size_t _rs_lengths_prediction;
-
- size_t _pending_cards;
-
- // The amount of allocated bytes in old gen during the last mutator and the following
- // young GC phase.
- size_t _bytes_allocated_in_old_since_last_gc;
-
- G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
-public:
- const G1Predictions& predictor() const { return _predictor; }
- const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }
-
- void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
-
- void set_region_eden(HeapRegion* hr) {
- hr->set_eden();
- hr->install_surv_rate_group(_short_lived_surv_rate_group);
- }
-
- void set_region_survivor(HeapRegion* hr) {
- assert(hr->is_survivor(), "pre-condition");
- hr->install_surv_rate_group(_survivor_surv_rate_group);
- }
-
- void record_max_rs_lengths(size_t rs_lengths) {
- _max_rs_lengths = rs_lengths;
- }
-
-
- double predict_base_elapsed_time_ms(size_t pending_cards) const;
- double predict_base_elapsed_time_ms(size_t pending_cards,
- size_t scanned_cards) const;
- size_t predict_bytes_to_copy(HeapRegion* hr) const;
- double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
-
- double predict_survivor_regions_evac_time() const;
-
- bool should_update_surv_rate_group_predictors() {
- return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
- }
-
- void cset_regions_freed() {
- bool update = should_update_surv_rate_group_predictors();
-
- _short_lived_surv_rate_group->all_surviving_words_recorded(predictor(), update);
- _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update);
- }
-
- G1MMUTracker* mmu_tracker() {
- return _mmu_tracker;
- }
-
- const G1MMUTracker* mmu_tracker() const {
- return _mmu_tracker;
- }
-
- double max_pause_time_ms() const {
- return _mmu_tracker->max_gc_time() * 1000.0;
- }
-
- double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
-
- double predict_yg_surv_rate(int age) const;
-
- double accum_yg_surv_rate_pred(int age) const;
-
-protected:
- G1CollectionSet* _collection_set;
- virtual double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
- virtual double other_time_ms(double pause_time_ms) const;
-
- double young_other_time_ms() const;
- double non_young_other_time_ms() const;
- double constant_other_time_ms(double pause_time_ms) const;
-
- CollectionSetChooser* cset_chooser() const;
-private:
-
- // The number of bytes copied during the GC.
- size_t _bytes_copied_during_gc;
-
- // Stash a pointer to the g1 heap.
- G1CollectedHeap* _g1;
-
- G1GCPhaseTimes* _phase_times;
-
- // This set of variables tracks the collector efficiency, in order to
- // determine whether we should initiate a new marking.
- double _mark_remark_start_sec;
- double _mark_cleanup_start_sec;
-
- // Updates the internal young list maximum and target lengths. Returns the
- // unbounded young list target length.
- uint update_young_list_max_and_target_length();
- uint update_young_list_max_and_target_length(size_t rs_lengths);
-
- // Update the young list target length either by setting it to the
- // desired fixed value or by calculating it using G1's pause
- // prediction model. If no rs_lengths parameter is passed, predict
- // the RS lengths using the prediction model, otherwise use the
- // given rs_lengths as the prediction.
- // Returns the unbounded young list target length.
- uint update_young_list_target_length(size_t rs_lengths);
-
- // Calculate and return the minimum desired young list target
- // length. This is the minimum desired young list length according
- // to the user's inputs.
- uint calculate_young_list_desired_min_length(uint base_min_length) const;
-
- // Calculate and return the maximum desired young list target
- // length. This is the maximum desired young list length according
- // to the user's inputs.
- uint calculate_young_list_desired_max_length() const;
-
- // Calculate and return the maximum young list target length that
- // can fit into the pause time goal. The parameters are: rs_lengths
- // represent the prediction of how large the young RSet lengths will
- // be, base_min_length is the already existing number of regions in
- // the young list, min_length and max_length are the desired min and
- // max young list length according to the user's inputs.
- uint calculate_young_list_target_length(size_t rs_lengths,
- uint base_min_length,
- uint desired_min_length,
- uint desired_max_length) const;
-
- // Result of the young_list_target_lengths() method, containing both the
- // bounded as well as the unbounded young list target lengths in this order.
- typedef Pair<uint, uint, StackObj> YoungTargetLengths;
- YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
-
- void update_rs_lengths_prediction();
- void update_rs_lengths_prediction(size_t prediction);
-
- // Check whether a given young length (young_length) fits into the
- // given target pause time and whether the prediction for the amount
- // of objects to be copied for the given length will fit into the
- // given free space (expressed by base_free_regions). It is used by
- // calculate_young_list_target_length().
- bool predict_will_fit(uint young_length, double base_time_ms,
- uint base_free_regions, double target_pause_time_ms) const;
-
-public:
- size_t pending_cards() const { return _pending_cards; }
-
- uint calc_min_old_cset_length() const;
- uint calc_max_old_cset_length() const;
-
- // Returns the given amount of reclaimable bytes (that represents
- // the amount of reclaimable space still to be collected) as a
- // percentage of the current heap capacity.
- double reclaimable_bytes_percent(size_t reclaimable_bytes) const;
-
- jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
-
-private:
- // Sets up marking if proper conditions are met.
- void maybe_start_marking();
-
- // The kind of STW pause.
- enum PauseKind {
- FullGC,
- YoungOnlyGC,
- MixedGC,
- LastYoungGC,
- InitialMarkGC,
- Cleanup,
- Remark
- };
-
- // Calculate PauseKind from internal state.
- PauseKind young_gc_pause_kind() const;
- // Record the given STW pause with the given start and end times (in s).
- void record_pause(PauseKind kind, double start, double end);
- // Indicate that we aborted marking before doing any mixed GCs.
- void abort_time_to_mixed_tracking();
-public:
-
- G1DefaultPolicy(STWGCTimer* gc_timer);
-
- virtual ~G1DefaultPolicy();
-
- G1CollectorState* collector_state() const;
-
- G1GCPhaseTimes* phase_times() const { return _phase_times; }
-
- void revise_young_list_target_length_if_necessary(size_t rs_lengths);
-
- void record_new_heap_size(uint new_number_of_regions);
-
- void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
-
- virtual void note_gc_start();
-
- bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
-
- bool about_to_start_mixed_phase() const;
-
- void record_collection_pause_start(double start_time_sec);
- void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
-
- void record_full_collection_start();
- void record_full_collection_end();
-
- void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
-
- void record_concurrent_mark_remark_start();
- void record_concurrent_mark_remark_end();
-
- void record_concurrent_mark_cleanup_start();
- void record_concurrent_mark_cleanup_end();
- void record_concurrent_mark_cleanup_completed();
-
- virtual void print_phases();
-
- void record_bytes_copied_during_gc(size_t bytes) {
- _bytes_copied_during_gc += bytes;
- }
-
- size_t bytes_copied_during_gc() const {
- return _bytes_copied_during_gc;
- }
-
- bool next_gc_should_be_mixed(const char* true_action_str,
- const char* false_action_str) const;
-
- virtual void finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
-private:
- // Set the state to start a concurrent marking cycle and clear
- // _initiate_conc_mark_if_possible because it has now been
- // acted on.
- void initiate_conc_mark();
-
-public:
- bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
-
- void decide_on_conc_mark_initiation();
-
- void finished_recalculating_age_indexes(bool is_survivors) {
- if (is_survivors) {
- _survivor_surv_rate_group->finished_recalculating_age_indexes();
- } else {
- _short_lived_surv_rate_group->finished_recalculating_age_indexes();
- }
- }
-
- size_t young_list_target_length() const { return _young_list_target_length; }
-
- bool should_allocate_mutator_region() const;
-
- bool can_expand_young_list() const;
-
- uint young_list_max_length() const {
- return _young_list_max_length;
- }
-
- bool adaptive_young_list_length() const;
-
- virtual bool should_process_references() const {
- return true;
- }
-
- void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
-
-private:
- //
- // Survivor regions policy.
- //
-
- // Current tenuring threshold, set to 0 if the collector reaches the
- // maximum number of survivor regions.
- uint _tenuring_threshold;
-
- // The limit on the number of regions allocated for survivors.
- uint _max_survivor_regions;
-
- AgeTable _survivors_age_table;
-
-protected:
- size_t desired_survivor_size() const;
-public:
- uint tenuring_threshold() const { return _tenuring_threshold; }
-
- uint max_survivor_regions() {
- return _max_survivor_regions;
- }
-
- void note_start_adding_survivor_regions() {
- _survivor_surv_rate_group->start_adding_regions();
- }
-
- void note_stop_adding_survivor_regions() {
- _survivor_surv_rate_group->stop_adding_regions();
- }
-
- void record_age_table(AgeTable* age_table) {
- _survivors_age_table.merge(age_table);
- }
-
- void print_age_table();
-
- void update_max_gc_locker_expansion();
-
- void update_survivors_policy();
-};
-
-#endif // SHARE_VM_GC_G1_G1DEFAULTPOLICY_HPP
--- a/src/hotspot/share/gc/g1/g1EdenRegions.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1EdenRegions.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,10 @@
#define SHARE_VM_GC_G1_G1EDENREGIONS_HPP
#include "gc/g1/heapRegion.hpp"
-#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
-class G1EdenRegions VALUE_OBJ_CLASS_SPEC {
+class G1EdenRegions {
private:
int _length;
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,6 +43,7 @@
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"
static void clear_and_activate_derived_pointers() {
@@ -199,7 +200,8 @@
scope()->tracer()->report_object_count_after_gc(&_is_alive);
}
-void G1FullCollector::prepare_compaction_common() {
+void G1FullCollector::phase2_prepare_compaction() {
+ GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
G1FullGCPrepareTask task(this);
run_task(&task);
@@ -209,11 +211,6 @@
}
}
-void G1FullCollector::phase2_prepare_compaction() {
- GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
- prepare_compaction_ext(); // Will call prepare_compaction_common() above.
-}
-
void G1FullCollector::phase3_adjust_pointers() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers and remembered sets", scope()->timer());
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -85,10 +85,6 @@
void verify_after_marking();
void run_task(AbstractGangTask* task);
-
- // Prepare compaction extension support.
- void prepare_compaction_ext();
- void prepare_compaction_common();
};
--- a/src/hotspot/share/gc/g1/g1FullCollector_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1FullCollector.hpp"
-
-void G1FullCollector::prepare_compaction_ext() {
- prepare_compaction_common();
-}
--- a/src/hotspot/share/gc/g1/g1HRPrinter.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1HRPrinter.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,11 +27,10 @@
#include "gc/g1/heapRegion.hpp"
#include "logging/log.hpp"
-#include "memory/allocation.hpp"
#define SKIP_RETIRED_FULL_REGIONS 1
-class G1HRPrinter VALUE_OBJ_CLASS_SPEC {
+class G1HRPrinter {
private:
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,7 +34,7 @@
_survivor_length = g1_heap->survivor_regions_count();
_old_length = g1_heap->old_regions_count();
_humongous_length = g1_heap->humongous_regions_count();
- _metaspace_used_bytes = MetaspaceAux::used_bytes();
+ _metaspace_used_bytes = MetaspaceUtils::used_bytes();
}
G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }
@@ -117,5 +117,5 @@
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);
- MetaspaceAux::print_metaspace_change(_before._metaspace_used_bytes);
+ MetaspaceUtils::print_metaspace_change(_before._metaspace_used_bytes);
}
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,6 +39,7 @@
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
class VerifyRootsClosure: public OopClosure {
private:
--- a/src/hotspot/share/gc/g1/g1InCSetState.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1InCSetState.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -58,7 +58,6 @@
// used to index into arrays.
// The negative values are used for objects requiring various special cases,
// for example eager reclamation of humongous objects.
- Ext = -2, // Extension point
Humongous = -1, // The region is humongous
NotInCSet = 0, // The region is not in the collection set.
Young = 1, // The region is in the collection set and a young region.
@@ -80,11 +79,10 @@
bool is_humongous() const { return _value == Humongous; }
bool is_young() const { return _value == Young; }
bool is_old() const { return _value == Old; }
- bool is_ext() const { return _value == Ext; }
#ifdef ASSERT
bool is_default() const { return _value == NotInCSet; }
- bool is_valid() const { return (_value >= Ext) && (_value < Num); }
+ bool is_valid() const { return (_value >= Humongous) && (_value < Num); }
bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
#endif
};
@@ -110,12 +108,6 @@
set_by_index(index, InCSetState::Humongous);
}
- void set_ext(uintptr_t index) {
- assert(get_by_index(index).is_default(),
- "State at index " INTPTR_FORMAT " should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value());
- set_by_index(index, InCSetState::Ext);
- }
-
void clear_humongous(uintptr_t index) {
set_by_index(index, InCSetState::NotInCSet);
}
--- a/src/hotspot/share/gc/g1/g1InitialMarkToMixedTimeTracker.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1InitialMarkToMixedTimeTracker.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
// After calling the initial mark/mixed gc notifications, the result can be
// obtained in last_marking_time() once, after which the tracking resets.
// Any pauses recorded by add_pause() will be subtracted from that result.
-class G1InitialMarkToMixedTimeTracker VALUE_OBJ_CLASS_SPEC {
+class G1InitialMarkToMixedTimeTracker {
private:
bool _active;
double _initial_mark_end_time;
--- a/src/hotspot/share/gc/g1/g1MMUTracker.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1MMUTracker.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@
}
};
-class G1MMUTrackerQueueElem VALUE_OBJ_CLASS_SPEC {
+class G1MMUTrackerQueueElem {
private:
double _start_time;
double _end_time;
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,6 +77,7 @@
_g1h(g1h),
_incremental_collection_counters(NULL),
_full_collection_counters(NULL),
+ _conc_collection_counters(NULL),
_old_collection_counters(NULL),
_old_space_counters(NULL),
_young_collection_counters(NULL),
@@ -105,6 +106,10 @@
// old generation collection.
_full_collection_counters =
new CollectorCounters("G1 stop-the-world full collections", 1);
+ // name "collector.2". In a generational collector this would be the
+ // STW phases in concurrent collection.
+ _conc_collection_counters =
+ new CollectorCounters("G1 stop-the-world phases", 2);
// timer sampling for all counters supporting sampling only update the
// used value. See the take_sample() method. G1 requires both used and
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,6 +124,8 @@
CollectorCounters* _incremental_collection_counters;
// full stop-the-world collections
CollectorCounters* _full_collection_counters;
+ // stop-the-world phases in G1
+ CollectorCounters* _conc_collection_counters;
// young collection set counters. The _eden_counters,
// _from_counters, and _to_counters are associated with
// this "generational" counter.
@@ -212,6 +214,9 @@
CollectorCounters* full_collection_counters() {
return _full_collection_counters;
}
+ CollectorCounters* conc_collection_counters() {
+ return _conc_collection_counters;
+ }
GenerationCounters* young_collection_counters() {
return _young_collection_counters;
}
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -139,7 +139,7 @@
G1MarkPromotedFromRoot
};
-template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
+template <G1Barrier barrier, G1Mark do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -61,8 +61,6 @@
inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
- } else if (state.is_ext()) {
- _par_scan_state->do_oop_ext(p);
}
}
@@ -218,9 +216,9 @@
_cm->mark_in_next_bitmap(to_obj);
}
-template <G1Barrier barrier, G1Mark do_mark_object, bool use_ext>
+template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
-void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (oopDesc::is_null(heap_oop)) {
@@ -256,9 +254,6 @@
_g1->set_humongous_is_live(obj);
}
- if (use_ext && state.is_ext()) {
- _par_scan_state->do_oop_ext(p);
- }
// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause then attempt to mark the object.
if (do_mark_object == G1MarkFromRoot) {
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_G1_G1PAGEBASEDVIRTUALSPACE_HPP
#define SHARE_VM_GC_G1_G1PAGEBASEDVIRTUALSPACE_HPP
-#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/align.hpp"
@@ -45,7 +44,7 @@
// be committed using OS small pages.
// The implementation gives an error when trying to commit or uncommit pages that
// have already been committed or uncommitted.
-class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
+class G1PageBasedVirtualSpace {
friend class VMStructs;
private:
// Reserved area addresses.
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -65,7 +65,7 @@
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, real_length * sizeof(size_t));
- _plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());
+ _plab_allocator = new G1DefaultPLABAllocator(_g1h->allocator());
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
// The dest for Young is used when the objects are aged enough to
@@ -153,7 +153,6 @@
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
InCSetState* dest,
size_t word_sz,
- AllocationContext_t const context,
bool previous_plab_refill_failed) {
assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
@@ -164,7 +163,6 @@
bool plab_refill_in_old_failed = false;
HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
word_sz,
- context,
&plab_refill_in_old_failed);
// Make sure that we won't attempt to copy any other objects out
// of a survivor region (given that apparently we cannot allocate
@@ -204,9 +202,8 @@
void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
- HeapWord * const obj_ptr,
- const AllocationContext_t context) const {
- PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
+ HeapWord * const obj_ptr) const {
+ PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
if (alloc_buf->contains(obj_ptr)) {
_g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
dest_state.value() == InCSetState::Old,
@@ -226,7 +223,6 @@
const int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
(!from_region->is_young() && young_index == 0), "invariant" );
- const AllocationContext_t context = from_region->allocation_context();
uint age = 0;
InCSetState dest_state = next_state(state, old_mark, age);
@@ -235,15 +231,15 @@
if (_old_gen_is_full && dest_state.is_old()) {
return handle_evacuation_failure_par(old, old_mark);
}
- HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
+ HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);
// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
bool plab_refill_failed = false;
- obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
+ obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
if (obj_ptr == NULL) {
- obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
+ obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
@@ -252,7 +248,7 @@
}
if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
// The events are checked individually as part of the actual commit
- report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
+ report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
}
}
@@ -264,7 +260,7 @@
if (_g1h->evacuation_should_fail()) {
// Doing this after all the allocation attempts also tests the
// undo_allocation() method too.
- _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+ _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
return handle_evacuation_failure_par(old, old_mark);
}
#endif // !PRODUCT
@@ -325,7 +321,7 @@
}
return obj;
} else {
- _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+ _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
return forward_ptr;
}
}
@@ -333,7 +329,7 @@
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
assert(worker_id < _n_workers, "out of bounds access");
if (_states[worker_id] == NULL) {
- _states[worker_id] = new_par_scan_state(worker_id, _young_cset_length);
+ _states[worker_id] = new G1ParScanThreadState(_g1h, worker_id, _young_cset_length);
}
return _states[worker_id];
}
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,11 +26,11 @@
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
#include "gc/g1/dirtyCardQueue.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
@@ -175,14 +175,13 @@
HeapWord* allocate_in_next_plab(InCSetState const state,
InCSetState* dest,
size_t word_sz,
- AllocationContext_t const context,
bool previous_plab_refill_failed);
inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
void report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
- HeapWord * const obj_ptr, const AllocationContext_t context) const;
+ HeapWord * const obj_ptr) const;
public:
oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -50,8 +50,8 @@
} else if (in_cset_state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else {
- assert(in_cset_state.is_default() || in_cset_state.is_ext(),
- "In_cset_state must be NotInCSet or Ext here, but is " CSETSTATE_FORMAT, in_cset_state.value());
+ assert(in_cset_state.is_default(),
+ "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
}
assert(obj != NULL, "Must be");
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "gc/g1/g1ParScanThreadState.hpp"
-
-G1ParScanThreadState* G1ParScanThreadStateSet::new_par_scan_state(uint worker_id, size_t young_cset_length) {
- return new G1ParScanThreadState(_g1h, worker_id, young_cset_length);
-}
-
-template <typename T>
-void G1ParScanThreadState::do_oop_ext(T* ref) {
-}
-
-template void G1ParScanThreadState::do_oop_ext<oop>(oop* ref);
-template void G1ParScanThreadState::do_oop_ext<narrowOop>(narrowOop* ref);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,1164 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/concurrentMarkThread.inline.hpp"
+#include "gc/g1/g1Analytics.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1CollectionSet.hpp"
+#include "gc/g1/g1ConcurrentMark.hpp"
+#include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/g1IHOPControl.hpp"
+#include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1SurvivorRegions.hpp"
+#include "gc/g1/g1YoungGenSizer.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/shared/gcPolicyCounters.hpp"
+#include "logging/logStream.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/java.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/pair.hpp"
+
+G1Policy::G1Policy(STWGCTimer* gc_timer) :
+ _predictor(G1ConfidencePercent / 100.0),
+ _analytics(new G1Analytics(&_predictor)),
+ _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
+ _ihop_control(create_ihop_control(&_predictor)),
+ _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
+ _young_list_fixed_length(0),
+ _short_lived_surv_rate_group(new SurvRateGroup()),
+ _survivor_surv_rate_group(new SurvRateGroup()),
+ _reserve_factor((double) G1ReservePercent / 100.0),
+ _reserve_regions(0),
+ _rs_lengths_prediction(0),
+ _bytes_allocated_in_old_since_last_gc(0),
+ _initial_mark_to_mixed(),
+ _collection_set(NULL),
+ _g1(NULL),
+ _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
+ _tenuring_threshold(MaxTenuringThreshold),
+ _max_survivor_regions(0),
+ _survivors_age_table(true),
+ _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }
+
+G1Policy::~G1Policy() {
+ delete _ihop_control;
+}
+
+G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
+
+void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
+ _g1 = g1h;
+ _collection_set = collection_set;
+
+ assert(Heap_lock->owned_by_self(), "Locking discipline.");
+
+ if (!adaptive_young_list_length()) {
+ _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
+ }
+ _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
+
+ _free_regions_at_end_of_collection = _g1->num_free_regions();
+
+ update_young_list_max_and_target_length();
+ // We may immediately start allocating regions and placing them on the
+ // collection set list. Initialize the per-collection set info.
+ _collection_set->start_incremental_building();
+}
+
+void G1Policy::note_gc_start() {
+ phase_times()->note_gc_start();
+}
+
+class G1YoungLengthPredictor {
+ const bool _during_cm;
+ const double _base_time_ms;
+ const double _base_free_regions;
+ const double _target_pause_time_ms;
+ const G1Policy* const _policy;
+
+ public:
+ G1YoungLengthPredictor(bool during_cm,
+ double base_time_ms,
+ double base_free_regions,
+ double target_pause_time_ms,
+ const G1Policy* policy) :
+ _during_cm(during_cm),
+ _base_time_ms(base_time_ms),
+ _base_free_regions(base_free_regions),
+ _target_pause_time_ms(target_pause_time_ms),
+ _policy(policy) {}
+
+ bool will_fit(uint young_length) const {
+ if (young_length >= _base_free_regions) {
+ // end condition 1: not enough space for the young regions
+ return false;
+ }
+
+ const double accum_surv_rate = _policy->accum_yg_surv_rate_pred((int) young_length - 1);
+ const size_t bytes_to_copy =
+ (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
+ const double copy_time_ms =
+ _policy->analytics()->predict_object_copy_time_ms(bytes_to_copy, _during_cm);
+ const double young_other_time_ms = _policy->analytics()->predict_young_other_time_ms(young_length);
+ const double pause_time_ms = _base_time_ms + copy_time_ms + young_other_time_ms;
+ if (pause_time_ms > _target_pause_time_ms) {
+ // end condition 2: prediction is over the target pause time
+ return false;
+ }
+
+ const size_t free_bytes = (_base_free_regions - young_length) * HeapRegion::GrainBytes;
+
+ // When copying, we will likely need more bytes free than are live in the region.
+ // Add some safety margin to factor in the confidence of our guess, and the
+ // natural expected waste.
+ // (100.0 / G1ConfidencePercent) is a scale factor that expresses the uncertainty
+ // of the calculation: the lower the confidence, the more headroom.
+ // (100 + TargetPLABWastePct) represents the increase in expected bytes during
+ // copying due to anticipated waste in the PLABs.
+ const double safety_factor = (100.0 / G1ConfidencePercent) * (100 + TargetPLABWastePct) / 100.0;
+ const size_t expected_bytes_to_copy = (size_t)(safety_factor * bytes_to_copy);
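+ // As a worked example (assuming the default values G1ConfidencePercent = 50
+ // and TargetPLABWastePct = 10): safety_factor = (100.0 / 50) * (110 / 100.0)
+ // = 2.2, so a predicted 10 MB of copied bytes requires ~22 MB of free space.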
+
+ if (expected_bytes_to_copy > free_bytes) {
+ // end condition 3: out-of-space
+ return false;
+ }
+
+ // success!
+ return true;
+ }
+};
+
+void G1Policy::record_new_heap_size(uint new_number_of_regions) {
+ // re-calculate the necessary reserve
+ double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
+ // We use ceiling so that if reserve_regions_d is > 0.0 (but
+ // smaller than 1.0) we'll get 1.
+ _reserve_regions = (uint) ceil(reserve_regions_d);
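+ // E.g. with the default G1ReservePercent of 10, a heap resized to 100
+ // regions reserves ceil(100 * 0.10) = 10 regions.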
+
+ _young_gen_sizer.heap_size_changed(new_number_of_regions);
+
+ _ihop_control->update_target_occupancy(new_number_of_regions * HeapRegion::GrainBytes);
+}
+
+uint G1Policy::calculate_young_list_desired_min_length(uint base_min_length) const {
+ uint desired_min_length = 0;
+ if (adaptive_young_list_length()) {
+ if (_analytics->num_alloc_rate_ms() > 3) {
+ double now_sec = os::elapsedTime();
+ double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
+ double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
+ desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
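+ // E.g. (hypothetical numbers) a predicted allocation rate of 0.05
+ // regions/ms with the next max-GC point 200 ms away gives
+ // ceil(0.05 * 200) = 10 regions as the desired minimum.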
+ } else {
+ // otherwise we don't have enough info to make the prediction
+ }
+ }
+ desired_min_length += base_min_length;
+ // make sure we don't go below any user-defined minimum bound
+ return MAX2(_young_gen_sizer.min_desired_young_length(), desired_min_length);
+}
+
+uint G1Policy::calculate_young_list_desired_max_length() const {
+ // Here, we might want to also take into account any additional
+ // constraints (i.e., user-defined minimum bound). Currently, we
+ // effectively don't set this bound.
+ return _young_gen_sizer.max_desired_young_length();
+}
+
+uint G1Policy::update_young_list_max_and_target_length() {
+ return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
+}
+
+uint G1Policy::update_young_list_max_and_target_length(size_t rs_lengths) {
+ uint unbounded_target_length = update_young_list_target_length(rs_lengths);
+ update_max_gc_locker_expansion();
+ return unbounded_target_length;
+}
+
+uint G1Policy::update_young_list_target_length(size_t rs_lengths) {
+ YoungTargetLengths young_lengths = young_list_target_lengths(rs_lengths);
+ _young_list_target_length = young_lengths.first;
+ return young_lengths.second;
+}
+
+G1Policy::YoungTargetLengths G1Policy::young_list_target_lengths(size_t rs_lengths) const {
+ YoungTargetLengths result;
+
+ // Calculate the absolute and desired min bounds first.
+
+ // This is how many young regions we already have (currently: the survivors).
+ const uint base_min_length = _g1->survivor_regions_count();
+ uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
+ // This is the absolute minimum young length. Ensure that we
+ // will at least have one eden region available for allocation.
+ uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
+ // If we shrank the young list target it should not shrink below the current size.
+ desired_min_length = MAX2(desired_min_length, absolute_min_length);
+ // Calculate the absolute and desired max bounds.
+
+ uint desired_max_length = calculate_young_list_desired_max_length();
+
+ uint young_list_target_length = 0;
+ if (adaptive_young_list_length()) {
+ if (collector_state()->gcs_are_young()) {
+ young_list_target_length =
+ calculate_young_list_target_length(rs_lengths,
+ base_min_length,
+ desired_min_length,
+ desired_max_length);
+ } else {
+ // Don't calculate anything and let the code below bound it to
+ // the desired_min_length, i.e., do the next GC as soon as
+ // possible to maximize how many old regions we can add to it.
+ }
+ } else {
+ // The user asked for a fixed young gen so we'll fix the young gen
+ // whether the next GC is young or mixed.
+ young_list_target_length = _young_list_fixed_length;
+ }
+
+ result.second = young_list_target_length;
+
+ // We will try our best not to "eat" into the reserve.
+ uint absolute_max_length = 0;
+ if (_free_regions_at_end_of_collection > _reserve_regions) {
+ absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
+ }
+ if (desired_max_length > absolute_max_length) {
+ desired_max_length = absolute_max_length;
+ }
+
+ // Make sure we don't go over the desired max length, nor under the
+ // desired min length. In case they clash, desired_min_length wins
+ // which is why that test is second.
+ if (young_list_target_length > desired_max_length) {
+ young_list_target_length = desired_max_length;
+ }
+ if (young_list_target_length < desired_min_length) {
+ young_list_target_length = desired_min_length;
+ }
+
+ assert(young_list_target_length > base_min_length,
+ "we should be able to allocate at least one eden region");
+ assert(young_list_target_length >= absolute_min_length, "post-condition");
+
+ result.first = young_list_target_length;
+ return result;
+}
+
+uint
+G1Policy::calculate_young_list_target_length(size_t rs_lengths,
+ uint base_min_length,
+ uint desired_min_length,
+ uint desired_max_length) const {
+ assert(adaptive_young_list_length(), "pre-condition");
+ assert(collector_state()->gcs_are_young(), "only call this for young GCs");
+
+ // In case some edge-condition makes the desired max length too small...
+ if (desired_max_length <= desired_min_length) {
+ return desired_min_length;
+ }
+
+ // We'll adjust min_young_length and max_young_length not to include
+ // the already allocated young regions (i.e., so they reflect the
+ // min and max eden regions we'll allocate). The base_min_length
+ // will be reflected in the predictions by the
+ // survivor_regions_evac_time prediction.
+ assert(desired_min_length > base_min_length, "invariant");
+ uint min_young_length = desired_min_length - base_min_length;
+ assert(desired_max_length > base_min_length, "invariant");
+ uint max_young_length = desired_max_length - base_min_length;
+
+ const double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+ const double survivor_regions_evac_time = predict_survivor_regions_evac_time();
+ const size_t pending_cards = _analytics->predict_pending_cards();
+ const size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
+ const size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
+ const double base_time_ms =
+ predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
+ survivor_regions_evac_time;
+ const uint available_free_regions = _free_regions_at_end_of_collection;
+ const uint base_free_regions =
+ available_free_regions > _reserve_regions ? available_free_regions - _reserve_regions : 0;
+
+ // Here, we will make sure that the shortest young length that
+ // makes sense fits within the target pause time.
+
+ G1YoungLengthPredictor p(collector_state()->during_concurrent_mark(),
+ base_time_ms,
+ base_free_regions,
+ target_pause_time_ms,
+ this);
+ if (p.will_fit(min_young_length)) {
+ // The shortest young length will fit into the target pause time;
+ // we'll now check whether the absolute maximum number of young
+ // regions will fit in the target pause time. If not, we'll do
+ // a binary search between min_young_length and max_young_length.
+ if (p.will_fit(max_young_length)) {
+ // The maximum young length will fit into the target pause time.
+ // We are done so set min young length to the maximum length (as
+ // the result is assumed to be returned in min_young_length).
+ min_young_length = max_young_length;
+ } else {
+ // The maximum possible number of young regions will not fit within
+ // the target pause time so we'll search for the optimal
+ // length. The loop invariants are:
+ //
+ // min_young_length < max_young_length
+ // min_young_length is known to fit into the target pause time
+ // max_young_length is known not to fit into the target pause time
+ //
+ // Going into the loop we know the above hold as we've just
+ // checked them. Every time around the loop we check whether
+ // the middle value between min_young_length and
+ // max_young_length fits into the target pause time. If it
+ // does, it becomes the new min. If it doesn't, it becomes
+ // the new max. This way we maintain the loop invariants.
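+ //
+ // Hypothetical trace: min = 10, max = 50 -> probe 30 fits, min = 30;
+ // probe 40 does not fit, max = 40; probe 35 fits, min = 35; and so on
+ // until diff becomes 0.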
+
+ assert(min_young_length < max_young_length, "invariant");
+ uint diff = (max_young_length - min_young_length) / 2;
+ while (diff > 0) {
+ uint young_length = min_young_length + diff;
+ if (p.will_fit(young_length)) {
+ min_young_length = young_length;
+ } else {
+ max_young_length = young_length;
+ }
+ assert(min_young_length < max_young_length, "invariant");
+ diff = (max_young_length - min_young_length) / 2;
+ }
+ // The result is min_young_length which, according to the
+ // loop invariants, should fit within the target pause time.
+
+ // These are the post-conditions of the binary search above:
+ assert(min_young_length < max_young_length,
+ "otherwise we should have discovered that max_young_length "
+ "fits into the pause target and not done the binary search");
+ assert(p.will_fit(min_young_length),
+ "min_young_length, the result of the binary search, should "
+ "fit into the pause target");
+ assert(!p.will_fit(min_young_length + 1),
+ "min_young_length, the result of the binary search, should be "
+ "optimal, so no larger length should fit into the pause target");
+ }
+ } else {
+ // Even the minimum length doesn't fit into the pause time
+ // target, return it as the result nevertheless.
+ }
+ return base_min_length + min_young_length;
+}
+
+double G1Policy::predict_survivor_regions_evac_time() const {
+ double survivor_regions_evac_time = 0.0;
+ const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();
+
+ for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
+ it != survivor_regions->end();
+ ++it) {
+ survivor_regions_evac_time += predict_region_elapsed_time_ms(*it, collector_state()->gcs_are_young());
+ }
+ return survivor_regions_evac_time;
+}
+
+void G1Policy::revise_young_list_target_length_if_necessary(size_t rs_lengths) {
+ guarantee(adaptive_young_list_length(), "should not call this otherwise");
+
+ if (rs_lengths > _rs_lengths_prediction) {
+ // add 10% to avoid having to recalculate often
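+ // (integer arithmetic: e.g. rs_lengths == 10000 gives a prediction of 11000)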
+ size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
+ update_rs_lengths_prediction(rs_lengths_prediction);
+
+ update_young_list_max_and_target_length(rs_lengths_prediction);
+ }
+}
+
+void G1Policy::update_rs_lengths_prediction() {
+ update_rs_lengths_prediction(_analytics->predict_rs_lengths());
+}
+
+void G1Policy::update_rs_lengths_prediction(size_t prediction) {
+ if (collector_state()->gcs_are_young() && adaptive_young_list_length()) {
+ _rs_lengths_prediction = prediction;
+ }
+}
+
+void G1Policy::record_full_collection_start() {
+ _full_collection_start_sec = os::elapsedTime();
+ // Release the future to-space so that it is available for compaction into.
+ collector_state()->set_full_collection(true);
+}
+
+void G1Policy::record_full_collection_end() {
+ // Consider this like a collection pause for the purposes of tracking
+ // allocation since the last pause.
+ double end_sec = os::elapsedTime();
+ double full_gc_time_sec = end_sec - _full_collection_start_sec;
+ double full_gc_time_ms = full_gc_time_sec * 1000.0;
+
+ _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
+
+ collector_state()->set_full_collection(false);
+
+ // "Nuke" the heuristics that control the young/mixed GC
+ // transitions and make sure we start with young GCs after the Full GC.
+ collector_state()->set_gcs_are_young(true);
+ collector_state()->set_last_young_gc(false);
+ collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
+ collector_state()->set_during_initial_mark_pause(false);
+ collector_state()->set_in_marking_window(false);
+ collector_state()->set_in_marking_window_im(false);
+
+ _short_lived_surv_rate_group->start_adding_regions();
+ // also call this on any additional surv rate groups
+
+ _free_regions_at_end_of_collection = _g1->num_free_regions();
+ // Reset survivors SurvRateGroup.
+ _survivor_surv_rate_group->reset();
+ update_young_list_max_and_target_length();
+ update_rs_lengths_prediction();
+ cset_chooser()->clear();
+
+ _bytes_allocated_in_old_since_last_gc = 0;
+
+ record_pause(FullGC, _full_collection_start_sec, end_sec);
+}
+
+void G1Policy::record_collection_pause_start(double start_time_sec) {
+ // We only need to do this here as the policy will only be applied
+ // to the GC we're about to start, so there is no point in calculating this
+ // every time we calculate / recalculate the target young length.
+ update_survivors_policy();
+
+ assert(_g1->used() == _g1->recalculate_used(),
+ "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
+ _g1->used(), _g1->recalculate_used());
+
+ phase_times()->record_cur_collection_start_sec(start_time_sec);
+ _pending_cards = _g1->pending_card_num();
+
+ _collection_set->reset_bytes_used_before();
+ _bytes_copied_during_gc = 0;
+
+ collector_state()->set_last_gc_was_young(false);
+
+ // do that for any other surv rate groups
+ _short_lived_surv_rate_group->stop_adding_regions();
+ _survivors_age_table.clear();
+
+ assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
+}
+
+void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
+ collector_state()->set_during_marking(true);
+ assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
+ collector_state()->set_during_initial_mark_pause(false);
+}
+
+void G1Policy::record_concurrent_mark_remark_start() {
+ _mark_remark_start_sec = os::elapsedTime();
+ collector_state()->set_during_marking(false);
+}
+
+void G1Policy::record_concurrent_mark_remark_end() {
+ double end_time_sec = os::elapsedTime();
+ double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
+ _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
+ _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
+
+ record_pause(Remark, _mark_remark_start_sec, end_time_sec);
+}
+
+void G1Policy::record_concurrent_mark_cleanup_start() {
+ _mark_cleanup_start_sec = os::elapsedTime();
+}
+
+void G1Policy::record_concurrent_mark_cleanup_completed() {
+ bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
+ "skip last young-only gc");
+ collector_state()->set_last_young_gc(should_continue_with_reclaim);
+ // We skip the marking phase.
+ if (!should_continue_with_reclaim) {
+ abort_time_to_mixed_tracking();
+ }
+ collector_state()->set_in_marking_window(false);
+}
+
+double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
+ return phase_times()->average_time_ms(phase);
+}
+
+double G1Policy::young_other_time_ms() const {
+ return phase_times()->young_cset_choice_time_ms() +
+ phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
+}
+
+double G1Policy::non_young_other_time_ms() const {
+ return phase_times()->non_young_cset_choice_time_ms() +
+ phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
+}
+
+double G1Policy::other_time_ms(double pause_time_ms) const {
+ return pause_time_ms - phase_times()->cur_collection_par_time_ms();
+}
+
+double G1Policy::constant_other_time_ms(double pause_time_ms) const {
+ return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
+}
+
+CollectionSetChooser* G1Policy::cset_chooser() const {
+ return _collection_set->cset_chooser();
+}
+
+bool G1Policy::about_to_start_mixed_phase() const {
+ return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->last_young_gc();
+}
+
+bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
+ if (about_to_start_mixed_phase()) {
+ return false;
+ }
+
+ size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
+
+ size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+ size_t alloc_byte_size = alloc_word_size * HeapWordSize;
+ size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
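+ // E.g. assuming the static IHOP control and the default
+ // InitiatingHeapOccupancyPercent of 45, a 1024 MB heap starts marking once
+ // this request exceeds roughly 460 MB of non-young occupancy.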
+
+ bool result = false;
+ if (marking_request_bytes > marking_initiating_used_threshold) {
+ result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
+ log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
+ result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
+ cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
+ }
+
+ return result;
+}
+
+// Anything below this value is considered to be zero.
+#define MIN_TIMER_GRANULARITY 0.0000001
+
+void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
+ double end_time_sec = os::elapsedTime();
+
+ size_t cur_used_bytes = _g1->used();
+ assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
+ bool last_pause_included_initial_mark = false;
+ bool update_stats = !_g1->evacuation_failed();
+
+ record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
+
+ _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+
+ last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
+ if (last_pause_included_initial_mark) {
+ record_concurrent_mark_init_end(0.0);
+ } else {
+ maybe_start_marking();
+ }
+
+ double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
+ if (app_time_ms < MIN_TIMER_GRANULARITY) {
+ // This usually happens due to the timer not having the required
+ // granularity. Some Linuxes are the usual culprits.
+ // We'll just set it to something (arbitrarily) small.
+ app_time_ms = 1.0;
+ }
+
+ if (update_stats) {
+ // We maintain the invariant that all objects allocated by mutator
+ // threads will be allocated out of eden regions. So, we can use
+ // the eden region number allocated since the previous GC to
+ // calculate the application's allocate rate. The only exception
+ // to that is humongous objects that are allocated separately. But
+ // given that humongous object allocations do not really affect
+ // either the pause's duration or when the next pause will take
+ // place we can safely ignore them here.
+ uint regions_allocated = _collection_set->eden_region_length();
+ double alloc_rate_ms = (double) regions_allocated / app_time_ms;
+ _analytics->report_alloc_rate_ms(alloc_rate_ms);
+
+ double interval_ms =
+ (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
+ _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
+ _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
+ }
+
+ bool new_in_marking_window = collector_state()->in_marking_window();
+ bool new_in_marking_window_im = false;
+ if (last_pause_included_initial_mark) {
+ new_in_marking_window = true;
+ new_in_marking_window_im = true;
+ }
+
+ if (collector_state()->last_young_gc()) {
+ // This is supposed to be the "last young GC" before we start
+ // doing mixed GCs. Here we decide whether to start mixed GCs or not.
+ assert(!last_pause_included_initial_mark, "The last young GC is not allowed to be an initial mark GC");
+
+ if (next_gc_should_be_mixed("start mixed GCs",
+ "do not start mixed GCs")) {
+ collector_state()->set_gcs_are_young(false);
+ } else {
+ // We aborted the mixed GC phase early.
+ abort_time_to_mixed_tracking();
+ }
+
+ collector_state()->set_last_young_gc(false);
+ }
+
+ if (!collector_state()->last_gc_was_young()) {
+ // This is a mixed GC. Here we decide whether to continue doing
+ // mixed GCs or not.
+ if (!next_gc_should_be_mixed("continue mixed GCs",
+ "do not continue mixed GCs")) {
+ collector_state()->set_gcs_are_young(true);
+
+ maybe_start_marking();
+ }
+ }
+
+ _short_lived_surv_rate_group->start_adding_regions();
+ // Do that for any other surv rate groups
+
+ double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::ScanHCC) : 0.0;
+
+ if (update_stats) {
+ double cost_per_card_ms = 0.0;
+ if (_pending_cards > 0) {
+ cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
+ _analytics->report_cost_per_card_ms(cost_per_card_ms);
+ }
+ _analytics->report_cost_scan_hcc(scan_hcc_time_ms);
+
+ double cost_per_entry_ms = 0.0;
+ if (cards_scanned > 10) {
+ cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
+ _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
+ }
+
+ if (_max_rs_lengths > 0) {
+ double cards_per_entry_ratio =
+ (double) cards_scanned / (double) _max_rs_lengths;
+ _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
+ }
+
+ // This is defensive. For a while _max_rs_lengths could get
+ // smaller than _recorded_rs_lengths which was causing
+ // rs_length_diff to get very large and mess up the RSet length
+ // predictions. The reason was unsafe concurrent updates to the
+ // _inc_cset_recorded_rs_lengths field which the code below guards
+ // against (see CR 7118202). This bug has now been fixed (see CR
+ // 7119027). However, I'm still worried that
+ // _inc_cset_recorded_rs_lengths might still end up somewhat
+ // inaccurate. The concurrent refinement thread calculates an
+ // RSet's length concurrently with other CR threads updating it
+ // which might cause it to calculate the length incorrectly (if,
+ // say, it's in mid-coarsening). So I'll leave in the defensive
+ // conditional below just in case.
+ size_t rs_length_diff = 0;
+ size_t recorded_rs_lengths = _collection_set->recorded_rs_lengths();
+ if (_max_rs_lengths > recorded_rs_lengths) {
+ rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
+ }
+ _analytics->report_rs_length_diff((double) rs_length_diff);
+
+ size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
+ size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
+ double cost_per_byte_ms = 0.0;
+
+ if (copied_bytes > 0) {
+ cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
+ _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
+ }
+
+ if (_collection_set->young_region_length() > 0) {
+ _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
+ _collection_set->young_region_length());
+ }
+
+ if (_collection_set->old_region_length() > 0) {
+ _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
+ _collection_set->old_region_length());
+ }
+
+ _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
+
+ _analytics->report_pending_cards((double) _pending_cards);
+ _analytics->report_rs_lengths((double) _max_rs_lengths);
+ }
+
+ collector_state()->set_in_marking_window(new_in_marking_window);
+ collector_state()->set_in_marking_window_im(new_in_marking_window_im);
+ _free_regions_at_end_of_collection = _g1->num_free_regions();
+ // IHOP control wants to know the expected young gen length if it were not
+ // restrained by the heap reserve. Using the actual length would make the
+ // prediction too small, which in turn would limit the young gen every time
+ // we get to the predicted target occupancy.
+ size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
+ update_rs_lengths_prediction();
+
+ update_ihop_prediction(app_time_ms / 1000.0,
+ _bytes_allocated_in_old_since_last_gc,
+ last_unrestrained_young_length * HeapRegion::GrainBytes);
+ _bytes_allocated_in_old_since_last_gc = 0;
+
+ _ihop_control->send_trace_event(_g1->gc_tracer_stw());
+
+ // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
+ double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
+
+ if (update_rs_time_goal_ms < scan_hcc_time_ms) {
+ log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
+ "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
+ update_rs_time_goal_ms, scan_hcc_time_ms);
+
+ update_rs_time_goal_ms = 0;
+ } else {
+ update_rs_time_goal_ms -= scan_hcc_time_ms;
+ }
+ _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
+ phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+ update_rs_time_goal_ms);
+
+ cset_chooser()->verify();
+}
+
+G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor) {
+ if (G1UseAdaptiveIHOP) {
+ return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
+ predictor,
+ G1ReservePercent,
+ G1HeapWastePercent);
+ } else {
+ return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
+ }
+}
+
+void G1Policy::update_ihop_prediction(double mutator_time_s,
+ size_t mutator_alloc_bytes,
+ size_t young_gen_size) {
+ // Always try to update IHOP prediction. Even evacuation failures give information
+ // about e.g. whether to start IHOP earlier next time.
+
+ // Avoid using really small application times that might create samples with
+ // very high or very low values. They may be caused by e.g. back-to-back gcs.
+ double const min_valid_time = 1e-6;
+
+ bool report = false;
+
+ double marking_to_mixed_time = -1.0;
+ if (!collector_state()->last_gc_was_young() && _initial_mark_to_mixed.has_result()) {
+ marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
+ assert(marking_to_mixed_time > 0.0,
+ "Initial mark to mixed time must be larger than zero but is %.3f",
+ marking_to_mixed_time);
+ if (marking_to_mixed_time > min_valid_time) {
+ _ihop_control->update_marking_length(marking_to_mixed_time);
+ report = true;
+ }
+ }
+
+ // As an approximation for the young gc promotion rates during marking we use
+ // all of them. In many applications there are only a few, if any, young gcs
+ // during marking, which would make a prediction based on them alone useless;
+ // using all samples increases the accuracy of the prediction.
+ if (collector_state()->last_gc_was_young() && mutator_time_s > min_valid_time) {
+ _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
+ report = true;
+ }
+
+ if (report) {
+ report_ihop_statistics();
+ }
+}
+
+void G1Policy::report_ihop_statistics() {
+ _ihop_control->print();
+}
+
+void G1Policy::print_phases() {
+ phase_times()->print();
+}
+
+double G1Policy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
+ TruncatedSeq* seq = surv_rate_group->get_seq(age);
+ guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
+ double pred = _predictor.get_new_prediction(seq);
+ if (pred > 1.0) {
+ pred = 1.0;
+ }
+ return pred;
+}
+
+double G1Policy::accum_yg_surv_rate_pred(int age) const {
+ return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
+}
+
+double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards,
+ size_t scanned_cards) const {
+ return
+ _analytics->predict_rs_update_time_ms(pending_cards) +
+ _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
+ _analytics->predict_constant_other_time_ms();
+}
+
+double G1Policy::predict_base_elapsed_time_ms(size_t pending_cards) const {
+ size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
+ size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
+ return predict_base_elapsed_time_ms(pending_cards, card_num);
+}
+
+size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const {
+ size_t bytes_to_copy;
+ if (hr->is_marked()) {
+ bytes_to_copy = hr->max_live_bytes();
+ } else {
+ assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
+ int age = hr->age_in_surv_rate_group();
+ double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
+ bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
+ }
+ return bytes_to_copy;
+}
+
+double G1Policy::predict_region_elapsed_time_ms(HeapRegion* hr,
+ bool for_young_gc) const {
+ size_t rs_length = hr->rem_set()->occupied();
+ // Predicting the number of cards is based on which type of GC
+ // we're predicting for.
+ size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
+ size_t bytes_to_copy = predict_bytes_to_copy(hr);
+
+ double region_elapsed_time_ms =
+ _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
+ _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());
+
+ // The prediction of the "other" time for this region is based
+ // upon the region type and NOT the GC type.
+ if (hr->is_young()) {
+ region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
+ } else {
+ region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
+ }
+ return region_elapsed_time_ms;
+}
+
+bool G1Policy::should_allocate_mutator_region() const {
+ uint young_list_length = _g1->young_regions_count();
+ uint young_list_target_length = _young_list_target_length;
+ return young_list_length < young_list_target_length;
+}
+
+bool G1Policy::can_expand_young_list() const {
+ uint young_list_length = _g1->young_regions_count();
+ uint young_list_max_length = _young_list_max_length;
+ return young_list_length < young_list_max_length;
+}
+
+bool G1Policy::adaptive_young_list_length() const {
+ return _young_gen_sizer.adaptive_young_list_length();
+}
+
+size_t G1Policy::desired_survivor_size() const {
+ size_t const survivor_capacity = HeapRegion::GrainWords * _max_survivor_regions;
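+ // With the default TargetSurvivorRatio of 50, the desired size works out to
+ // half of the committed survivor capacity.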
+ return (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
+}
+
+void G1Policy::print_age_table() {
+ _survivors_age_table.print_age_table(_tenuring_threshold);
+}
+
+void G1Policy::update_max_gc_locker_expansion() {
+ uint expansion_region_num = 0;
+ if (GCLockerEdenExpansionPercent > 0) {
+ double perc = (double) GCLockerEdenExpansionPercent / 100.0;
+ double expansion_region_num_d = perc * (double) _young_list_target_length;
+ // We use ceiling so that if expansion_region_num_d is > 0.0 (but
+ // less than 1.0) we'll get 1.
+ expansion_region_num = (uint) ceil(expansion_region_num_d);
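+ // E.g. with the default GCLockerEdenExpansionPercent of 5 and a target
+ // length of 30 regions: ceil(30 * 0.05) = 2 extra regions.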
+ } else {
+ assert(expansion_region_num == 0, "sanity");
+ }
+ _young_list_max_length = _young_list_target_length + expansion_region_num;
+ assert(_young_list_target_length <= _young_list_max_length, "post-condition");
+}
+
+// Calculates survivor space parameters.
+void G1Policy::update_survivors_policy() {
+ double max_survivor_regions_d =
+ (double) _young_list_target_length / (double) SurvivorRatio;
+ // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
+ // smaller than 1.0) we'll get 1.
+ _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
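+ // E.g. a young target length of 20 regions with the default SurvivorRatio
+ // of 8 allows ceil(20 / 8.0) = 3 survivor regions.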
+
+ _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
+ if (UsePerfData) {
+ _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
+ _policy_counters->desired_survivor_size()->set_value(desired_survivor_size() * oopSize);
+ }
+}
+
+bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
+ // We actually check whether we are marking here and not if we are in a
+ // reclamation phase. This means that we will schedule a concurrent mark
+ // even while we are still in the process of reclaiming memory.
+ bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
+ if (!during_cycle) {
+ log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
+ collector_state()->set_initiate_conc_mark_if_possible(true);
+ return true;
+ } else {
+ log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
+ return false;
+ }
+}
+
+void G1Policy::initiate_conc_mark() {
+ collector_state()->set_during_initial_mark_pause(true);
+ collector_state()->set_initiate_conc_mark_if_possible(false);
+}
+
+void G1Policy::decide_on_conc_mark_initiation() {
+ // We are about to decide whether this pause will be an
+ // initial-mark pause.
+
+ // First, collector_state()->during_initial_mark_pause() should not already be set. We
+ // will set it here if we have to. However, it should be cleared by
+ // the end of the pause (it's only set for the duration of an
+ // initial-mark pause).
+ assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
+
+ if (collector_state()->initiate_conc_mark_if_possible()) {
+ // We had noticed on a previous pause that the heap occupancy has
+ // gone over the initiating threshold and we should start a
+ // concurrent marking cycle. So we might initiate one.
+
+ if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
+ // Initiate a new initial mark if there is no marking or reclamation going on.
+ initiate_conc_mark();
+ log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
+ } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
+ // Initiate a user requested initial mark. An initial mark must be a
+ // young-only GC, so the collector state must be updated to reflect this.
+ collector_state()->set_gcs_are_young(true);
+ collector_state()->set_last_young_gc(false);
+
+ abort_time_to_mixed_tracking();
+ initiate_conc_mark();
+ log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
+ } else {
+ // The concurrent marking thread is still finishing up the
+ // previous cycle. If we start one right now the two cycles
+ // would overlap. In particular, the concurrent marking thread might
+ // be in the process of clearing the next marking bitmap (which
+ // we will use for the next cycle if we start one). Starting a
+ // cycle now will be bad given that parts of the marking
+ // information might get cleared by the marking thread. And we
+ // cannot wait for the marking thread to finish the cycle as it
+ // periodically yields while clearing the next marking bitmap
+ // and, if it's in a yield point, it's waiting for us to
+ // finish. So, at this point we will not start a cycle and we'll
+ // let the concurrent marking thread complete the last one.
+ log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
+ }
+ }
+}
+
+void G1Policy::record_concurrent_mark_cleanup_end() {
+ cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
+
+ double end_sec = os::elapsedTime();
+ double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
+ _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
+ _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
+
+ record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
+}
+
+double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
+ return percent_of(reclaimable_bytes, _g1->capacity());
+}
+
+void G1Policy::maybe_start_marking() {
+ if (need_to_start_conc_mark("end of GC")) {
+ // Note: this might have already been set, if during the last
+ // pause we decided to start a cycle but at the beginning of
+ // this pause we decided to postpone it. That's OK.
+ collector_state()->set_initiate_conc_mark_if_possible(true);
+ }
+}
+
+G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
+ assert(!collector_state()->full_collection(), "must be");
+ if (collector_state()->during_initial_mark_pause()) {
+ assert(collector_state()->last_gc_was_young(), "must be");
+ assert(!collector_state()->last_young_gc(), "must be");
+ return InitialMarkGC;
+ } else if (collector_state()->last_young_gc()) {
+ assert(!collector_state()->during_initial_mark_pause(), "must be");
+ assert(collector_state()->last_gc_was_young(), "must be");
+ return LastYoungGC;
+ } else if (!collector_state()->last_gc_was_young()) {
+ assert(!collector_state()->during_initial_mark_pause(), "must be");
+ assert(!collector_state()->last_young_gc(), "must be");
+ return MixedGC;
+ } else {
+ assert(collector_state()->last_gc_was_young(), "must be");
+ assert(!collector_state()->during_initial_mark_pause(), "must be");
+ assert(!collector_state()->last_young_gc(), "must be");
+ return YoungOnlyGC;
+ }
+}
+
+void G1Policy::record_pause(PauseKind kind, double start, double end) {
+ // Manage the MMU tracker. For some reason it ignores Full GCs.
+ if (kind != FullGC) {
+ _mmu_tracker->add_pause(start, end);
+ }
+ // Manage the mutator time tracking from initial mark to first mixed gc.
+ switch (kind) {
+ case FullGC:
+ abort_time_to_mixed_tracking();
+ break;
+ case Cleanup:
+ case Remark:
+ case YoungOnlyGC:
+ case LastYoungGC:
+ _initial_mark_to_mixed.add_pause(end - start);
+ break;
+ case InitialMarkGC:
+ _initial_mark_to_mixed.record_initial_mark_end(end);
+ break;
+ case MixedGC:
+ _initial_mark_to_mixed.record_mixed_gc_start(start);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+void G1Policy::abort_time_to_mixed_tracking() {
+ _initial_mark_to_mixed.reset();
+}
+
+bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
+ const char* false_action_str) const {
+ if (cset_chooser()->is_empty()) {
+ log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
+ return false;
+ }
+
+ // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
+ size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
+ double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
+ double threshold = (double) G1HeapWastePercent;
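+ // With the default G1HeapWastePercent of 5, mixed GCs continue only while
+ // more than 5% of the heap capacity is still reclaimable.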
+ if (reclaimable_percent <= threshold) {
+ log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+ false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
+ return false;
+ }
+ log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+ true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
+ return true;
+}
+
+uint G1Policy::calc_min_old_cset_length() const {
+ // The min old CSet region bound is based on the maximum desired
+ // number of mixed GCs after a cycle. I.e., even if some old regions
+ // look expensive, we should add them to the CSet anyway to make
+ // sure we go through the available old regions in no more than the
+ // maximum desired number of mixed GCs.
+ //
+ // The calculation is based on the number of marked regions we added
+ // to the CSet chooser in the first place, not how many remain, so
+ // that the result is the same during all mixed GCs that follow a cycle.
+
+ const size_t region_num = (size_t) cset_chooser()->length();
+ const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
+ size_t result = region_num / gc_num;
+ // emulate ceiling
+ if (result * gc_num < region_num) {
+ result += 1;
+ }
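+ // E.g. 100 candidate regions with the default G1MixedGCCountTarget of 8
+ // gives ceil(100 / 8.0) = 13 old regions per mixed GC.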
+ return (uint) result;
+}
+
+uint G1Policy::calc_max_old_cset_length() const {
+ // The max old CSet region bound is based on the threshold expressed
+ // as a percentage of the heap size. I.e., it should bound the
+ // number of old regions added to the CSet irrespective of how many
+ // of them are available.
+
+ const G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ const size_t region_num = g1h->num_regions();
+ const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+ size_t result = region_num * perc / 100;
+ // emulate ceiling
+ if (100 * result < region_num * perc) {
+ result += 1;
+ }
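+ // E.g. a 2048-region heap with the default G1OldCSetRegionThresholdPercent
+ // of 10 yields ceil(2048 * 0.10) = 205 regions.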
+ return (uint) result;
+}
+
+void G1Policy::finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) {
+ double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms, survivor);
+ _collection_set->finalize_old_part(time_remaining_ms);
+}
+
+void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
+
+ // Add survivor regions to SurvRateGroup.
+ note_start_adding_survivor_regions();
+ finished_recalculating_age_indexes(true /* is_survivors */);
+
+ HeapRegion* last = NULL;
+ for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
+ it != survivors->regions()->end();
+ ++it) {
+ HeapRegion* curr = *it;
+ set_region_survivor(curr);
+
+ // The region is a non-empty survivor so let's add it to
+ // the incremental collection set for the next evacuation
+ // pause.
+ _collection_set->add_survivor_regions(curr);
+
+ last = curr;
+ }
+ note_stop_adding_survivor_regions();
+
+ // Don't clear the survivor list handles until the start of
+ // the next evacuation pause - we need it in order to re-tag
+ // the survivor regions from this evacuation pause as 'young'
+ // at the start of the next.
+
+ finished_recalculating_age_indexes(false /* is_survivors */);
+}
--- a/src/hotspot/share/gc/g1/g1Policy.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,108 +47,309 @@
class G1Analytics;
class G1SurvivorRegions;
class G1YoungGenSizer;
+class GCPolicyCounters;
+class STWGCTimer;
class G1Policy: public CHeapObj<mtGC> {
+ private:
+
+ static G1IHOPControl* create_ihop_control(const G1Predictions* predictor);
+ // Update the IHOP control with necessary statistics.
+ void update_ihop_prediction(double mutator_time_s,
+ size_t mutator_alloc_bytes,
+ size_t young_gen_size);
+ void report_ihop_statistics();
+
+ G1Predictions _predictor;
+ G1Analytics* _analytics;
+ G1MMUTracker* _mmu_tracker;
+ G1IHOPControl* _ihop_control;
+
+ GCPolicyCounters* _policy_counters;
+
+ double _full_collection_start_sec;
+
+ jlong _collection_pause_end_millis;
+
+ uint _young_list_target_length;
+ uint _young_list_fixed_length;
+
+ // The max number of regions we can extend the eden by while the GC
+ // locker is active. This should be >= _young_list_target_length.
+ uint _young_list_max_length;
+
+ // SurvRateGroups below must be initialized after the predictor because they
+ // indirectly use it through this object passed to their constructor.
+ SurvRateGroup* _short_lived_surv_rate_group;
+ SurvRateGroup* _survivor_surv_rate_group;
+
+ double _reserve_factor;
+ // This will be set when the heap is expanded
+ // for the first time during initialization.
+ uint _reserve_regions;
+
+ G1YoungGenSizer _young_gen_sizer;
+
+ uint _free_regions_at_end_of_collection;
+
+ size_t _max_rs_lengths;
+
+ size_t _rs_lengths_prediction;
+
+ size_t _pending_cards;
+
+ // The number of bytes allocated in the old gen during the last mutator
+ // phase and the following young GC.
+ size_t _bytes_allocated_in_old_since_last_gc;
+
+ G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
- virtual const G1Predictions& predictor() const = 0;
- virtual const G1Analytics* analytics() const = 0;
+ const G1Predictions& predictor() const { return _predictor; }
+ const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }
// Add the given number of bytes to the total number of allocated bytes in the old gen.
- virtual void add_bytes_allocated_in_old_since_last_gc(size_t bytes) = 0;
+ void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
+
+ void set_region_eden(HeapRegion* hr) {
+ hr->set_eden();
+ hr->install_surv_rate_group(_short_lived_surv_rate_group);
+ }
+
+ void set_region_survivor(HeapRegion* hr) {
+ assert(hr->is_survivor(), "pre-condition");
+ hr->install_surv_rate_group(_survivor_surv_rate_group);
+ }
- // Accessors
+ void record_max_rs_lengths(size_t rs_lengths) {
+ _max_rs_lengths = rs_lengths;
+ }
+
+ double predict_base_elapsed_time_ms(size_t pending_cards) const;
+ double predict_base_elapsed_time_ms(size_t pending_cards,
+ size_t scanned_cards) const;
+ size_t predict_bytes_to_copy(HeapRegion* hr) const;
+ double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const;
+
+ double predict_survivor_regions_evac_time() const;
+
+ bool should_update_surv_rate_group_predictors() {
+ return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
+ }
+
+ void cset_regions_freed() {
+ bool update = should_update_surv_rate_group_predictors();
- virtual void set_region_eden(HeapRegion* hr) = 0;
- virtual void set_region_survivor(HeapRegion* hr) = 0;
+ _short_lived_surv_rate_group->all_surviving_words_recorded(predictor(), update);
+ _survivor_surv_rate_group->all_surviving_words_recorded(predictor(), update);
+ }
+
+ G1MMUTracker* mmu_tracker() {
+ return _mmu_tracker;
+ }
+
+ const G1MMUTracker* mmu_tracker() const {
+ return _mmu_tracker;
+ }
+
+ double max_pause_time_ms() const {
+ return _mmu_tracker->max_gc_time() * 1000.0;
+ }
- virtual void record_max_rs_lengths(size_t rs_lengths) = 0;
+ double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
+
+ double predict_yg_surv_rate(int age) const;
+
+ double accum_yg_surv_rate_pred(int age) const;
+
+protected:
+ G1CollectionSet* _collection_set;
+ double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
+ double other_time_ms(double pause_time_ms) const;
+
+ double young_other_time_ms() const;
+ double non_young_other_time_ms() const;
+ double constant_other_time_ms(double pause_time_ms) const;
+
+ CollectionSetChooser* cset_chooser() const;
+private:
- virtual double predict_base_elapsed_time_ms(size_t pending_cards) const = 0;
- virtual double predict_base_elapsed_time_ms(size_t pending_cards,
- size_t scanned_cards) const = 0;
+ // The number of bytes copied during the GC.
+ size_t _bytes_copied_during_gc;
+
+ // Stash a pointer to the g1 heap.
+ G1CollectedHeap* _g1;
+
+ G1GCPhaseTimes* _phase_times;
+
+ // This set of variables tracks the collector efficiency, in order to
+ // determine whether we should initiate a new marking.
+ double _mark_remark_start_sec;
+ double _mark_cleanup_start_sec;
- virtual double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc) const = 0;
+ // Updates the internal young list maximum and target lengths. Returns the
+ // unbounded young list target length.
+ uint update_young_list_max_and_target_length();
+ uint update_young_list_max_and_target_length(size_t rs_lengths);
- virtual void cset_regions_freed() = 0;
+ // Update the young list target length either by setting it to the
+ // desired fixed value or by calculating it using G1's pause
+ // prediction model. If no rs_lengths parameter is passed, predict
+ // the RS lengths using the prediction model, otherwise use the
+ // given rs_lengths as the prediction.
+ // Returns the unbounded young list target length.
+ uint update_young_list_target_length(size_t rs_lengths);
+
+ // Calculate and return the minimum desired young list target
+ // length. This is the minimum desired young list length according
+ // to the user's inputs.
+ uint calculate_young_list_desired_min_length(uint base_min_length) const;
- virtual G1MMUTracker* mmu_tracker() = 0;
+ // Calculate and return the maximum desired young list target
+ // length. This is the maximum desired young list length according
+ // to the user's inputs.
+ uint calculate_young_list_desired_max_length() const;
- virtual const G1MMUTracker* mmu_tracker() const = 0;
+ // Calculate and return the maximum young list target length that
+ // can fit into the pause time goal. The parameters are: rs_lengths is
+ // the prediction of how large the young RSet lengths will be;
+ // base_min_length is the number of already existing regions in the
+ // young list; desired_min_length and desired_max_length are the desired
+ // min and max young list lengths according to the user's inputs.
+ uint calculate_young_list_target_length(size_t rs_lengths,
+ uint base_min_length,
+ uint desired_min_length,
+ uint desired_max_length) const;
- virtual double max_pause_time_ms() const = 0;
+ // Result of the young_list_target_lengths() method, containing both the
+ // bounded as well as the unbounded young list target lengths in this order.
+ typedef Pair<uint, uint, StackObj> YoungTargetLengths;
+ YoungTargetLengths young_list_target_lengths(size_t rs_lengths) const;
+
+ void update_rs_lengths_prediction();
+ void update_rs_lengths_prediction(size_t prediction);
- virtual size_t pending_cards() const = 0;
+ // Check whether a given young length (young_length) fits into the
+ // given target pause time and whether the prediction for the amount
+ // of objects to be copied for the given length will fit into the
+ // given free space (expressed by base_free_regions). It is used by
+ // calculate_young_list_target_length().
+ bool predict_will_fit(uint young_length, double base_time_ms,
+ uint base_free_regions, double target_pause_time_ms) const;
+
+public:
+ size_t pending_cards() const { return _pending_cards; }
// Calculate the minimum number of old regions we'll add to the CSet
// during a mixed GC.
- virtual uint calc_min_old_cset_length() const = 0;
+ uint calc_min_old_cset_length() const;
// Calculate the maximum number of old regions we'll add to the CSet
// during a mixed GC.
- virtual uint calc_max_old_cset_length() const = 0;
+ uint calc_max_old_cset_length() const;
- // Returns the given amount of uncollected reclaimable space
- // as a percentage of the current heap capacity.
- virtual double reclaimable_bytes_percent(size_t reclaimable_bytes) const = 0;
+ // Returns the given amount of reclaimable bytes (that represents
+ // the amount of reclaimable space still to be collected) as a
+ // percentage of the current heap capacity.
+ double reclaimable_bytes_percent(size_t reclaimable_bytes) const;
+
+ jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
+
+private:
+ // Sets up marking if proper conditions are met.
+ void maybe_start_marking();
- virtual ~G1Policy() {}
+ // The kind of STW pause.
+ enum PauseKind {
+ FullGC,
+ YoungOnlyGC,
+ MixedGC,
+ LastYoungGC,
+ InitialMarkGC,
+ Cleanup,
+ Remark
+ };
- virtual G1CollectorState* collector_state() const = 0;
+ // Calculate PauseKind from internal state.
+ PauseKind young_gc_pause_kind() const;
+ // Record the given STW pause with the given start and end times (in s).
+ void record_pause(PauseKind kind, double start, double end);
+ // Indicate that we aborted marking before doing any mixed GCs.
+ void abort_time_to_mixed_tracking();
+public:
- virtual G1GCPhaseTimes* phase_times() const = 0;
+ G1Policy(STWGCTimer* gc_timer);
+
+ virtual ~G1Policy();
+
+ G1CollectorState* collector_state() const;
+
+ G1GCPhaseTimes* phase_times() const { return _phase_times; }
// Check the current value of the young list RSet lengths and
// compare it against the last prediction. If the current value is
// higher, recalculate the young list target length prediction.
- virtual void revise_young_list_target_length_if_necessary(size_t rs_lengths) = 0;
+ void revise_young_list_target_length_if_necessary(size_t rs_lengths);
// This should be called after the heap is resized.
- virtual void record_new_heap_size(uint new_number_of_regions) = 0;
+ void record_new_heap_size(uint new_number_of_regions);
- virtual void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) = 0;
+ void init(G1CollectedHeap* g1h, G1CollectionSet* collection_set);
- virtual void note_gc_start() = 0;
+ void note_gc_start();
- virtual bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0) = 0;
+ bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
+
+ bool about_to_start_mixed_phase() const;
// Record the start and end of an evacuation pause.
- virtual void record_collection_pause_start(double start_time_sec) = 0;
- virtual void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) = 0;
+ void record_collection_pause_start(double start_time_sec);
+ void record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc);
// Record the start and end of a full collection.
- virtual void record_full_collection_start() = 0;
- virtual void record_full_collection_end() = 0;
-
- virtual jlong collection_pause_end_millis() = 0;
+ void record_full_collection_start();
+ void record_full_collection_end();
// Must currently be called while the world is stopped.
- virtual void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) = 0;
+ void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
// Record start and end of remark.
- virtual void record_concurrent_mark_remark_start() = 0;
- virtual void record_concurrent_mark_remark_end() = 0;
+ void record_concurrent_mark_remark_start();
+ void record_concurrent_mark_remark_end();
// Record start, end, and completion of cleanup.
- virtual void record_concurrent_mark_cleanup_start() = 0;
- virtual void record_concurrent_mark_cleanup_end() = 0;
- virtual void record_concurrent_mark_cleanup_completed() = 0;
+ void record_concurrent_mark_cleanup_start();
+ void record_concurrent_mark_cleanup_end();
+ void record_concurrent_mark_cleanup_completed();
- virtual void print_phases() = 0;
+ void print_phases();
// Record how much space we copied during a GC. This is typically
// called when a GC alloc region is being retired.
- virtual void record_bytes_copied_during_gc(size_t bytes) = 0;
+ void record_bytes_copied_during_gc(size_t bytes) {
+ _bytes_copied_during_gc += bytes;
+ }
// The amount of space we copied during a GC.
- virtual size_t bytes_copied_during_gc() const = 0;
+ size_t bytes_copied_during_gc() const {
+ return _bytes_copied_during_gc;
+ }
+
+ bool next_gc_should_be_mixed(const char* true_action_str,
+ const char* false_action_str) const;
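+ // The two strings are log fragments; a plausible call site (an assumption
+ // based on the parameter names) is
+ //   next_gc_should_be_mixed("start mixed GCs", "do not start mixed GCs")
+ // with the selected string emitted by the GC ergonomics logging.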
- virtual void finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor) = 0;
+ void finalize_collection_set(double target_pause_time_ms, G1SurvivorRegions* survivor);
+private:
+ // Set the state to start a concurrent marking cycle and clear
+ // _initiate_conc_mark_if_possible because it has now been
+ // acted on.
+ void initiate_conc_mark();
+public:
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It is best called
// during a safepoint, when the check of whether a cycle is in
// progress is stable.
- virtual bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) = 0;
+ bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
// This is called at the very beginning of an evacuation pause (it
// has to be the first thing that the pause does). If
@@ -156,36 +357,74 @@
// marking thread has completed its work during the previous cycle,
// it will set during_initial_mark_pause() so that the pause does
// the initial-mark work and start a marking cycle.
- virtual void decide_on_conc_mark_initiation() = 0;
-
+ void decide_on_conc_mark_initiation();
- virtual void finished_recalculating_age_indexes(bool is_survivors) = 0;
+ void finished_recalculating_age_indexes(bool is_survivors) {
+ if (is_survivors) {
+ _survivor_surv_rate_group->finished_recalculating_age_indexes();
+ } else {
+ _short_lived_surv_rate_group->finished_recalculating_age_indexes();
+ }
+ }
- virtual void transfer_survivors_to_cset(const G1SurvivorRegions* survivors) = 0;
+ size_t young_list_target_length() const { return _young_list_target_length; }
+
+ bool should_allocate_mutator_region() const;
+
+ bool can_expand_young_list() const;
- virtual size_t young_list_target_length() const = 0;
+ uint young_list_max_length() const {
+ return _young_list_max_length;
+ }
- virtual bool should_allocate_mutator_region() const = 0;
+ bool adaptive_young_list_length() const;
- virtual bool can_expand_young_list() const = 0;
+ bool should_process_references() const {
+ return true;
+ }
- virtual uint young_list_max_length() const = 0;
+ void transfer_survivors_to_cset(const G1SurvivorRegions* survivors);
+
+private:
+ //
+ // Survivor regions policy.
+ //
- virtual bool adaptive_young_list_length() const = 0;
+ // Current tenuring threshold, set to 0 if the collector reaches the
+ // maximum number of survivor regions.
+ uint _tenuring_threshold;
+
+ // The limit on the number of regions allocated for survivors.
+ uint _max_survivor_regions;
+
+ AgeTable _survivors_age_table;
- virtual bool should_process_references() const = 0;
+protected:
+ size_t desired_survivor_size() const;
+public:
+ uint tenuring_threshold() const { return _tenuring_threshold; }
- virtual uint tenuring_threshold() const = 0;
- virtual uint max_survivor_regions() = 0;
+ uint max_survivor_regions() {
+ return _max_survivor_regions;
+ }
- virtual void note_start_adding_survivor_regions() = 0;
+ void note_start_adding_survivor_regions() {
+ _survivor_surv_rate_group->start_adding_regions();
+ }
- virtual void note_stop_adding_survivor_regions() = 0;
+ void note_stop_adding_survivor_regions() {
+ _survivor_surv_rate_group->stop_adding_regions();
+ }
- virtual void record_age_table(AgeTable* age_table) = 0;
- virtual void print_age_table() = 0;
-protected:
- virtual size_t desired_survivor_size() const = 0;
+ void record_age_table(AgeTable* age_table) {
+ _survivors_age_table.merge(age_table);
+ }
+
+ void print_age_table();
+
+ void update_max_gc_locker_expansion();
+
+ void update_survivors_policy();
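+
+ // Minimal sketch of the usual G1 sizing logic (assumed, not part of this
+ // hunk; the callee names are assumptions):
+ //   void G1Policy::update_survivors_policy() {
+ //     double max_regions_d = (double) _young_list_target_length / SurvivorRatio;
+ //     _max_survivor_regions = (uint) ceil(max_regions_d); // > 0.0 rounds up to 1
+ //     _tenuring_threshold =
+ //       _survivors_age_table.compute_tenuring_threshold(desired_survivor_size());
+ //   }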
};
#endif // SHARE_VM_GC_G1_G1POLICY_HPP
--- a/src/hotspot/share/gc/g1/g1Predictions.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Predictions.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "utilities/numberSeq.hpp"
// Utility class containing various helper methods for prediction.
-class G1Predictions VALUE_OBJ_CLASS_SPEC {
+class G1Predictions {
private:
double _sigma;
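+ // Orientation note (an assumption, not introduced by this change): the
+ // helpers here typically predict a value as seq->davg() + _sigma * seq->dsd()
+ // over a TruncatedSeq, i.e. a decaying average padded by a confidence
+ // multiple (_sigma) of the decaying standard deviation.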
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
class WorkGang;
-class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC {
+class G1MappingChangedListener {
public:
// Fired after commit of the memory, i.e. the memory this listener is registered
// for can be accessed.
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -33,7 +33,6 @@
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
@@ -587,20 +586,6 @@
return;
}
- // While we are processing RSet buffers during the collection, we
- // actually don't want to scan any cards on the collection set,
- // since we don't want to update remembered sets with entries that
- // point into the collection set, given that live objects from the
- // collection set are about to move and such entries will be stale
- // very soon. This change also deals with a reliability issue which
- // involves scanning a card in the collection set and coming across
- // an array that was being chunked and looking malformed. Note,
- // however, that if evacuation fails, we have to scan any objects
- // that were not moved and create any missing entries.
- if (r->in_collection_set()) {
- return;
- }
-
// The result from the hot card cache insert call is either:
// * pointer to the current card
// (implying that the current card is not 'hot'),
@@ -625,8 +610,7 @@
// Check whether the region formerly in the cache should be
// ignored, as discussed earlier for the original card. The
- // region could have been freed while in the cache. The cset is
- // not relevant here, since we're in concurrent phase.
+ // region could have been freed while in the cache.
if (!r->is_old_or_humongous()) {
return;
}
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -45,7 +45,6 @@
class G1RemSetScanState;
class G1ParScanThreadState;
class G1Policy;
-class G1SATBCardTableModRefBS;
class G1ScanObjsDuringScanRSClosure;
class G1ScanObjsDuringUpdateRSClosure;
class HeapRegionClaimer;
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@
_sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
}
-class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
+class RegionTypeCounter {
private:
const char* _name;
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
// A G1RemSetSummary manages statistical information about the G1RemSet
-class G1RemSetSummary VALUE_OBJ_CLASS_SPEC {
+class G1RemSetSummary {
private:
friend class GetRSThreadVTimeClosure;
--- a/src/hotspot/share/gc/g1/g1RootClosures.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -111,11 +111,7 @@
};
G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
- G1EvacuationRootClosures* res = create_root_closures_ext(pss, g1h);
- if (res != NULL) {
- return res;
- }
-
+ G1EvacuationRootClosures* res = NULL;
if (g1h->collector_state()->during_initial_mark_pause()) {
if (ClassUnloadingWithConcurrentMark) {
res = new G1InitialMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
--- a/src/hotspot/share/gc/g1/g1RootClosures.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RootClosures.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -48,7 +48,6 @@
};
class G1EvacuationRootClosures : public G1RootClosures {
- static G1EvacuationRootClosures* create_root_closures_ext(G1ParScanThreadState* pss, G1CollectedHeap* g1h);
public:
// Flush any buffered state and deferred processing
virtual void flush() = 0;
--- a/src/hotspot/share/gc/g1/g1RootClosures_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1RootClosures.hpp"
-
-G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures_ext(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
- return NULL;
-}
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CardTable.inline.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
-#include "logging/log.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
-
-G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
- G1CardTable* card_table,
- const BarrierSet::FakeRtti& fake_rtti) :
- CardTableModRefBS(card_table, fake_rtti.add_tag(BarrierSet::G1SATBCT))
-{ }
-
-void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
- // Nulls should have been already filtered.
- assert(oopDesc::is_oop(pre_val, true), "Error");
-
- if (!JavaThread::satb_mark_queue_set().is_active()) return;
- Thread* thr = Thread::current();
- if (thr->is_Java_thread()) {
- JavaThread* jt = (JavaThread*)thr;
- jt->satb_mark_queue().enqueue(pre_val);
- } else {
- MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag);
- JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
- }
-}
-
-template <class T> void
-G1SATBCardTableModRefBS::write_ref_array_pre_work(T* dst, int count) {
- if (!JavaThread::satb_mark_queue_set().is_active()) return;
- T* elem_ptr = dst;
- for (int i = 0; i < count; i++, elem_ptr++) {
- T heap_oop = oopDesc::load_heap_oop(elem_ptr);
- if (!oopDesc::is_null(heap_oop)) {
- enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
- }
- }
-}
-
-void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
- if (!dest_uninitialized) {
- write_ref_array_pre_work(dst, count);
- }
-}
-
-void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
- if (!dest_uninitialized) {
- write_ref_array_pre_work(dst, count);
- }
-}
-
-G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(G1CardTable* card_table) :
- G1SATBCardTableModRefBS(card_table, BarrierSet::FakeRtti(G1SATBCTLogging)),
- _dcqs(JavaThread::dirty_card_queue_set()) {}
-
-void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
- // In the slow path, we know a card is not young
- assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
- OrderAccess::storeload();
- if (*byte != G1CardTable::dirty_card_val()) {
- *byte = G1CardTable::dirty_card_val();
- Thread* thr = Thread::current();
- if (thr->is_Java_thread()) {
- JavaThread* jt = (JavaThread*)thr;
- jt->dirty_card_queue().enqueue(byte);
- } else {
- MutexLockerEx x(Shared_DirtyCardQ_lock,
- Mutex::_no_safepoint_check_flag);
- _dcqs.shared_dirty_card_queue()->enqueue(byte);
- }
- }
-}
-
-void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
- if (mr.is_empty()) {
- return;
- }
- volatile jbyte* byte = _card_table->byte_for(mr.start());
- jbyte* last_byte = _card_table->byte_for(mr.last());
- Thread* thr = Thread::current();
- // skip all consecutive young cards
- for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
-
- if (byte <= last_byte) {
- OrderAccess::storeload();
- // Enqueue if necessary.
- if (thr->is_Java_thread()) {
- JavaThread* jt = (JavaThread*)thr;
- for (; byte <= last_byte; byte++) {
- if (*byte == G1CardTable::g1_young_card_val()) {
- continue;
- }
- if (*byte != G1CardTable::dirty_card_val()) {
- *byte = G1CardTable::dirty_card_val();
- jt->dirty_card_queue().enqueue(byte);
- }
- }
- } else {
- MutexLockerEx x(Shared_DirtyCardQ_lock,
- Mutex::_no_safepoint_check_flag);
- for (; byte <= last_byte; byte++) {
- if (*byte == G1CardTable::g1_young_card_val()) {
- continue;
- }
- if (*byte != G1CardTable::dirty_card_val()) {
- *byte = G1CardTable::dirty_card_val();
- _dcqs.shared_dirty_card_queue()->enqueue(byte);
- }
- }
- }
- }
-}
-
-void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
- // This method initializes the SATB and dirty card queues before a
- // JavaThread is added to the Java thread list. Right now, we don't
- // have to do anything to the dirty card queue (it should have been
- // activated when the thread was created), but we have to activate
- // the SATB queue if the thread is created while a marking cycle is
- // in progress. The activation / de-activation of the SATB queues at
- // the beginning / end of a marking cycle is done during safepoints
- // so we have to make sure this method is called outside one to be
- // able to safely read the active field of the SATB queue set. Right
- // now, it is called just before the thread is added to the Java
- // thread list in the Threads::add() method. That method is holding
- // the Threads_lock which ensures we are outside a safepoint. We
- // cannot do the obvious and set the active field of the SATB queue
- // when the thread is created given that, in some cases, safepoints
- // might happen between the JavaThread constructor being called and the
- // thread being added to the Java thread list (an example of this is
- // when the structure for the DestroyJavaVM thread is created).
- assert(!SafepointSynchronize::is_at_safepoint(), "We should not be at a safepoint");
- assert(!thread->satb_mark_queue().is_active(), "SATB queue should not be active");
- assert(thread->satb_mark_queue().is_empty(), "SATB queue should be empty");
- assert(thread->dirty_card_queue().is_active(), "Dirty card queue should be active");
-
- // If we are creating the thread during a marking cycle, we should
- // set the active field of the SATB queue to true.
- if (thread->satb_mark_queue_set().is_active()) {
- thread->satb_mark_queue().set_active(true);
- }
-}
-
-void G1SATBCardTableLoggingModRefBS::on_thread_detach(JavaThread* thread) {
- // Flush any deferred card marks, SATB buffers and dirty card queue buffers
- CardTableModRefBS::on_thread_detach(thread);
- thread->satb_mark_queue().flush();
- thread->dirty_card_queue().flush();
-}
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP
-#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP
-
-#include "gc/g1/g1RegionToSpaceMapper.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
-#include "memory/memRegion.hpp"
-#include "oops/oop.hpp"
-#include "utilities/macros.hpp"
-
-class DirtyCardQueueSet;
-class G1SATBCardTableLoggingModRefBS;
-class CardTable;
-class G1CardTable;
-
-// This barrier is specialized to use a logging barrier to support
-// snapshot-at-the-beginning marking.
-
-class G1SATBCardTableModRefBS: public CardTableModRefBS {
- friend class VMStructs;
-protected:
- G1SATBCardTableModRefBS(G1CardTable* table, const BarrierSet::FakeRtti& fake_rtti);
- ~G1SATBCardTableModRefBS() { }
-
-public:
- // Add "pre_val" to a set of objects that may have been disconnected from the
- // pre-marking object graph.
- static void enqueue(oop pre_val);
-
- static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
-
- template <class T> void write_ref_array_pre_work(T* dst, int count);
- virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
- virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
-
- template <DecoratorSet decorators, typename T>
- void write_ref_field_pre(T* field);
-};
-
-template<>
-struct BarrierSet::GetName<G1SATBCardTableModRefBS> {
- static const BarrierSet::Name value = BarrierSet::G1SATBCT;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::G1SATBCT> {
- typedef G1SATBCardTableModRefBS type;
-};
-
-// Adds card-table logging to the post-barrier.
-// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
-class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
- private:
- DirtyCardQueueSet& _dcqs;
-
- public:
- G1SATBCardTableLoggingModRefBS(G1CardTable* card_table);
-
- // NB: if you do a whole-heap invalidation, the "usual invariant" defined
- // above no longer applies.
- void invalidate(MemRegion mr);
-
- void write_region(MemRegion mr) { invalidate(mr); }
- void write_ref_array_work(MemRegion mr) { invalidate(mr); }
-
- template <DecoratorSet decorators, typename T>
- void write_ref_field_post(T* field, oop new_val);
- void write_ref_field_post_slow(volatile jbyte* byte);
-
- virtual void on_thread_attach(JavaThread* thread);
- virtual void on_thread_detach(JavaThread* thread);
-
- // Callbacks for runtime accesses.
- template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
- class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
- typedef ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> ModRef;
- typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
-
- public:
- // Needed for loads on non-heap weak references
- template <typename T>
- static oop oop_load_not_in_heap(T* addr);
-
- // Needed for non-heap stores
- template <typename T>
- static void oop_store_not_in_heap(T* addr, oop new_value);
-
- // Needed for weak references
- static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
-
- // Defensive: will catch weak oops at addresses in heap
- template <typename T>
- static oop oop_load_in_heap(T* addr);
- };
-};
-
-template<>
-struct BarrierSet::GetName<G1SATBCardTableLoggingModRefBS> {
- static const BarrierSet::Name value = BarrierSet::G1SATBCTLogging;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::G1SATBCTLogging> {
- typedef G1SATBCardTableLoggingModRefBS type;
-};
-
-#endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_HPP
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
-#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
-
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#include "gc/shared/accessBarrierSupport.inline.hpp"
-
-template <DecoratorSet decorators, typename T>
-inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
- if (HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value ||
- HasDecorator<decorators, AS_NO_KEEPALIVE>::value) {
- return;
- }
-
- T heap_oop = oopDesc::load_heap_oop(field);
- if (!oopDesc::is_null(heap_oop)) {
- enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
- }
-}
-
-template <DecoratorSet decorators, typename T>
-inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
- volatile jbyte* byte = _card_table->byte_for(field);
- if (*byte != G1CardTable::g1_young_card_val()) {
- // Take a slow path for cards in old
- write_ref_field_post_slow(byte);
- }
-}
-
-inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
- assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
- // Archive roots need to be enqueued since they add subgraphs to the
- // Java heap that were not there at the snapshot when marking started.
- // Weak and phantom references also need enqueueing for similar reasons.
- const bool in_archive_root = (decorators & IN_ARCHIVE_ROOT) != 0;
- const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
- const bool peek = (decorators & AS_NO_KEEPALIVE) != 0;
- const bool needs_enqueue = in_archive_root || (!peek && !on_strong_oop_ref);
-
- if (needs_enqueue && value != NULL) {
- enqueue(value);
- }
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
-oop_load_not_in_heap(T* addr) {
- oop value = ModRef::oop_load_not_in_heap(addr);
- enqueue_if_weak_or_archive(decorators, value);
- return value;
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
-oop_load_in_heap(T* addr) {
- oop value = ModRef::oop_load_in_heap(addr);
- enqueue_if_weak_or_archive(decorators, value);
- return value;
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-inline oop G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
-oop_load_in_heap_at(oop base, ptrdiff_t offset) {
- oop value = ModRef::oop_load_in_heap_at(base, offset);
- enqueue_if_weak_or_archive(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
- return value;
-}
-
-template <DecoratorSet decorators, typename BarrierSetT>
-template <typename T>
-inline void G1SATBCardTableLoggingModRefBS::AccessBarrier<decorators, BarrierSetT>::
-oop_store_not_in_heap(T* addr, oop new_value) {
- if (HasDecorator<decorators, IN_CONCURRENT_ROOT>::value) {
- // For roots not scanned in a safepoint, we have to apply SATB barriers
- // even for roots.
- G1SATBCardTableLoggingModRefBS *bs = barrier_set_cast<G1SATBCardTableLoggingModRefBS>(BarrierSet::barrier_set());
- bs->write_ref_field_pre<decorators>(addr);
- }
- Raw::oop_store(addr, new_value);
-}
-
-#endif // SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1SharedClosures.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1SharedClosures.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,11 @@
class G1ParScanThreadState;
// Simple holder object for a complete set of closures used by the G1 evacuation code.
-template <G1Mark Mark, bool use_ext = false>
-class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
+template <G1Mark Mark>
+class G1SharedClosures {
public:
- G1ParCopyClosure<G1BarrierNone, Mark, use_ext> _oops;
- G1ParCopyClosure<G1BarrierCLD, Mark, use_ext> _oops_in_cld;
+ G1ParCopyClosure<G1BarrierNone, Mark> _oops;
+ G1ParCopyClosure<G1BarrierCLD, Mark> _oops_in_cld;
G1CLDScanClosure _clds;
G1CodeBlobClosure _codeblobs;
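+
+// Usage sketch (assumed, not shown in this hunk): evacuation code now
+// instantiates the holder as e.g. G1SharedClosures<G1MarkNone>, the use_ext
+// template parameter having gone away with the removed _ext closures.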
--- a/src/hotspot/share/gc/g1/g1StringDedupTable.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupTable.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1StringDedupTable.hpp"
#include "gc/shared/gcLocker.hpp"
@@ -383,7 +383,7 @@
if (existing_value != NULL) {
// Enqueue the reference to make sure it is kept alive. Concurrent mark might
// otherwise declare it dead if there are no other strong references to this object.
- G1SATBCardTableModRefBS::enqueue(existing_value);
+ G1BarrierSet::enqueue(existing_value);
// Existing value found, deduplicate string
java_lang_String::set_value(java_string, existing_value);
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,13 @@
#ifndef SHARE_VM_GC_G1_G1SURVIVORREGIONS_HPP
#define SHARE_VM_GC_G1_G1SURVIVORREGIONS_HPP
-#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
template <typename T>
class GrowableArray;
class HeapRegion;
-class G1SurvivorRegions VALUE_OBJ_CLASS_SPEC {
+class G1SurvivorRegions {
private:
GrowableArray<HeapRegion*>* _regions;
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
#define SHARE_VM_GC_G1_G1YOUNGGENSIZER_HPP
-#include "memory/allocation.hpp"
+#include "utilities/globalDefinitions.hpp"
// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
@@ -63,7 +63,7 @@
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer VALUE_OBJ_CLASS_SPEC {
+class G1YoungGenSizer {
private:
enum SizerKind {
SizerDefaults,
--- a/src/hotspot/share/gc/g1/heapRegion.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -113,7 +113,6 @@
assert(!in_collection_set(),
"Should not clear heap region %u in the collection set", hrm_index());
- set_allocation_context(AllocationContext::system());
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
set_free();
@@ -235,7 +234,6 @@
MemRegion mr) :
G1ContiguousSpace(bot),
_hrm_index(hrm_index),
- _allocation_context(AllocationContext::system()),
_humongous_start_region(NULL),
_evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
@@ -266,8 +264,7 @@
get_trace_type(),
to,
(uintptr_t)bottom(),
- used(),
- (uint)allocation_context());
+ used());
}
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
@@ -454,7 +451,6 @@
st->print("| ");
}
st->print("|TS%3u", _gc_time_stamp);
- st->print("|AC%3u", allocation_context());
st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "|",
p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()));
}
--- a/src/hotspot/share/gc/g1/heapRegion.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_G1_HEAPREGION_HPP
#define SHARE_VM_GC_G1_HEAPREGION_HPP
-#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/heapRegionTracer.hpp"
@@ -233,8 +232,6 @@
// The index of this region in the heap region sequence.
uint _hrm_index;
- AllocationContext_t _allocation_context;
-
HeapRegionType _type;
// For a humongous region, region in which it starts.
@@ -473,14 +470,6 @@
inline bool in_collection_set() const;
- void set_allocation_context(AllocationContext_t context) {
- _allocation_context = context;
- }
-
- AllocationContext_t allocation_context() const {
- return _allocation_context;
- }
-
// Methods used by the HeapRegionSetBase class and subclasses.
// Getter and setter for the next and prev fields used to link regions into
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
// is represented. If a deleted PRT is re-used, a thread adding a bit,
// thinking the PRT is for a different region, does no harm.
-class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
+class OtherRegionsTable {
friend class HeapRegionRemSetIterator;
G1CollectedHeap* _g1h;
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
// (e.g., length, region num, used bytes sum) plus any shared
// functionality (e.g., verification).
-class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC {
+class HeapRegionSetBase {
friend class VMStructs;
private:
bool _is_humongous;
--- a/src/hotspot/share/gc/g1/heapRegionTracer.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionTracer.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,8 +30,7 @@
G1HeapRegionTraceType::Type from,
G1HeapRegionTraceType::Type to,
uintptr_t start,
- size_t used,
- uint allocationContext) {
+ size_t used) {
EventG1HeapRegionTypeChange e;
if (e.should_commit()) {
e.set_index(index);
@@ -39,7 +38,6 @@
e.set_to(to);
e.set_start(start);
e.set_used(used);
- e.set_allocationContext(allocationContext);
e.commit();
}
}
--- a/src/hotspot/share/gc/g1/heapRegionTracer.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionTracer.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,8 +34,7 @@
G1HeapRegionTraceType::Type from,
G1HeapRegionTraceType::Type to,
uintptr_t start,
- size_t used,
- uint allocationContext);
+ size_t used);
};
#endif // SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP
--- a/src/hotspot/share/gc/g1/heapRegionType.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/heapRegionType.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,12 +26,11 @@
#define SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP
#include "gc/g1/g1HeapRegionTraceType.hpp"
-#include "memory/allocation.hpp"
#define hrt_assert_is_valid(tag) \
assert(is_valid((tag)), "invalid HR type: %u", (uint) (tag))
-class HeapRegionType VALUE_OBJ_CLASS_SPEC {
+class HeapRegionType {
friend class VMStructs;
private:
--- a/src/hotspot/share/gc/g1/ptrQueue.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/ptrQueue.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
#define SHARE_VM_GC_G1_PTRQUEUE_HPP
-#include "memory/allocation.hpp"
#include "utilities/align.hpp"
#include "utilities/sizes.hpp"
@@ -36,7 +35,7 @@
class BufferNode;
class PtrQueueSet;
-class PtrQueue VALUE_OBJ_CLASS_SPEC {
+class PtrQueue {
friend class VMStructs;
// Noncopyable - not defined.
@@ -257,7 +256,7 @@
// In particular, the individual queues allocate buffers from this shared
// set, and return completed buffers to the set.
// All these variables are protected by the TLOQ_CBL_mon. XXX ???
-class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
+class PtrQueueSet {
private:
// The size of all buffers in the set.
size_t _buffer_size;
--- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/satbMarkQueue.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -40,8 +40,7 @@
// them with their active field set to false. If a thread is
// created during a cycle and its SATB queue needs to be activated
// before the thread starts running, we'll need to set its active
- // field to true. This is done in G1SATBCardTableLoggingModRefBS::
- // on_thread_attach().
+ // field to true. This is done in G1BarrierSet::on_thread_attach().
PtrQueue(qset, permanent, false /* active */)
{ }
--- a/src/hotspot/share/gc/g1/sparsePRT.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/sparsePRT.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -186,8 +186,8 @@
void print();
};
-// ValueObj because will be embedded in HRRS iterator.
-class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
+// This is embedded in HRRS iterator.
+class RSHashTableIter {
// Return value indicating "invalid/no card".
static const int NoCardFound = -1;
@@ -222,7 +222,7 @@
class SparsePRTIter;
class SparsePRTCleanupTask;
-class SparsePRT VALUE_OBJ_CLASS_SPEC {
+class SparsePRT {
friend class SparsePRTCleanupTask;
// Iterations are done on the _cur hash table, since they only need to
@@ -334,7 +334,7 @@
// to be processed at the beginning of the next GC pause. These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
-class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
+class SparsePRTCleanupTask {
private:
SparsePRT* _head;
SparsePRT* _tail;
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,11 +43,9 @@
uint gc_count_before,
GCCause::Cause gc_cause,
bool should_initiate_conc_mark,
- double target_pause_time_ms,
- AllocationContext_t allocation_context)
+ double target_pause_time_ms)
: VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
_pause_succeeded(false),
- _allocation_context(allocation_context),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms),
_should_retry_gc(false),
@@ -82,7 +80,6 @@
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
- _allocation_context,
false /* expect_null_cur_alloc_region */);
if (_result != NULL) {
// If we can successfully allocate before we actually do the
@@ -138,7 +135,7 @@
if (_word_size > 0) {
// An allocation had been requested. Do it, eventually trying a stronger
// kind of GC.
- _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
+ _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
} else {
bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
!g1h->has_regions_left_for_allocation();
@@ -207,6 +204,8 @@
GCTraceCPUTime tcpu;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCTraceTime(Info, gc) t(_printGCMessage, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
+ TraceCollectorStats tcs(g1h->g1mm()->conc_collection_counters());
+ SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
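+ // These two stack objects presumably tie the concurrent phase into the
+ // collector perf counters and the serviceability GC-begin/end notifications
+ // for the duration of this scope.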
IsGCActiveMark x;
_cl->do_void();
}
--- a/src/hotspot/share/gc/g1/vm_operations_g1.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
#define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
-#include "gc/g1/g1AllocationContext.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/vmGCOperations.hpp"
@@ -51,7 +50,6 @@
class VM_G1CollectForAllocation: public VM_CollectForAllocation {
private:
bool _pause_succeeded;
- AllocationContext_t _allocation_context;
bool _should_initiate_conc_mark;
bool _should_retry_gc;
@@ -62,8 +60,7 @@
uint gc_count_before,
GCCause::Cause gc_cause,
bool should_initiate_conc_mark,
- double target_pause_time_ms,
- AllocationContext_t allocation_context);
+ double target_pause_time_ms);
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual bool doit_prologue();
virtual void doit();
--- a/src/hotspot/share/gc/parallel/gcTaskManager.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/gcTaskManager.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -272,7 +272,7 @@
~SynchronizedGCTaskQueue();
};
-class WaitHelper VALUE_OBJ_CLASS_SPEC {
+class WaitHelper {
private:
Monitor* _monitor;
volatile bool _should_wait;
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/parallel/mutableNUMASpace.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/spaceDecorator.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -572,7 +572,7 @@
void ParallelScavengeHeap::print_on(outputStream* st) const {
young_gen()->print_on(st);
old_gen()->print_on(st);
- MetaspaceAux::print_on(st);
+ MetaspaceUtils::print_on(st);
}
void ParallelScavengeHeap::print_on_error(outputStream* st) const {
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -267,7 +267,7 @@
_heap_used(heap->used()),
_young_gen_used(heap->young_gen()->used_in_bytes()),
_old_gen_used(heap->old_gen()->used_in_bytes()),
- _metadata_used(MetaspaceAux::used_bytes()) { };
+ _metadata_used(MetaspaceUtils::used_bytes()) { };
size_t heap_used() const { return _heap_used; }
size_t young_gen_used() const { return _young_gen_used; }
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -51,6 +51,7 @@
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
@@ -185,7 +186,7 @@
BiasedLocking::preserve_marks();
// Capture metadata size before collection for sizing.
- size_t metadata_prev_used = MetaspaceAux::used_bytes();
+ size_t metadata_prev_used = MetaspaceUtils::used_bytes();
size_t old_gen_prev_used = old_gen->used_in_bytes();
size_t young_gen_prev_used = young_gen->used_in_bytes();
@@ -246,7 +247,7 @@
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
- MetaspaceAux::verify_metrics();
+ MetaspaceUtils::verify_metrics();
BiasedLocking::restore_marks();
CodeCache::gc_epilogue();
@@ -351,7 +352,7 @@
young_gen->print_used_change(young_gen_prev_used);
old_gen->print_used_change(old_gen_prev_used);
- MetaspaceAux::print_metaspace_change(metadata_prev_used);
+ MetaspaceUtils::print_metaspace_change(metadata_prev_used);
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -61,6 +61,7 @@
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
@@ -1027,7 +1028,7 @@
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
- MetaspaceAux::verify_metrics();
+ MetaspaceUtils::verify_metrics();
CodeCache::gc_epilogue();
JvmtiExport::gc_epilogue();
@@ -1901,7 +1902,7 @@
young_gen->print_used_change(pre_gc_values.young_gen_used());
old_gen->print_used_change(pre_gc_values.old_gen_used());
- MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
+ MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/taskqueue.hpp"
-#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -50,7 +49,7 @@
class PSOldGen;
class ParCompactionManager;
-class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
+class PSPromotionManager {
friend class PSScavenge;
friend class PSRefProcTaskExecutor;
private:
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -626,7 +626,7 @@
young_gen->print_used_change(pre_gc_values.young_gen_used());
old_gen->print_used_change(pre_gc_values.old_gen_used());
- MetaspaceAux::print_metaspace_change(pre_gc_values.metadata_used());
+ MetaspaceUtils::print_metaspace_change(pre_gc_values.metadata_used());
// Track memory usage and detect low memory
MemoryService::track_memory_usage();
--- a/src/hotspot/share/gc/serial/markSweep.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/serial/markSweep.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -200,7 +200,7 @@
debug_only(virtual bool should_verify_oops() { return false; })
};
-class PreservedMark VALUE_OBJ_CLASS_SPEC {
+class PreservedMark {
private:
oop _obj;
markOop _mark;
--- a/src/hotspot/share/gc/shared/ageTable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/ageTable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,14 +29,14 @@
#include "oops/oop.hpp"
#include "runtime/perfData.hpp"
-/* Copyright (c) 1992, 2016, Oracle and/or its affiliates, and Stanford University.
+/* Copyright (c) 1992, 2018, Oracle and/or its affiliates, and Stanford University.
See the LICENSE file for license information. */
// Age table for adaptive feedback-mediated tenuring (scavenging)
//
// Note: all sizes are in oops
-class AgeTable VALUE_OBJ_CLASS_SPEC {
+class AgeTable {
friend class VMStructs;
public:
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -189,8 +189,8 @@
}
template <typename T>
- static bool arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
- return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+ static void arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+ Raw::arraycopy(src_obj, dst_obj, src, dst, length);
}
// Heap oop accesses. These accessors get resolved when
--- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,6 @@
#define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/barrierSetConfig.inline.hpp"
#include "utilities/align.hpp"
// count is number of array elements being written
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,26 +29,18 @@
#if INCLUDE_ALL_GCS
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
- f(G1SATBCTLogging)
+ f(G1BarrierSet)
#else
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#endif
-#if INCLUDE_ALL_GCS
-#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
- f(G1SATBCT)
-#else
-#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
-#endif
-
// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
f(CardTableModRef) \
FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
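+// For example, in an INCLUDE_ALL_GCS build the concrete macro now expands to
+//   f(CardTableModRef) f(G1BarrierSet)
+// and to f(CardTableModRef) alone otherwise.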
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
- f(ModRef) \
- FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+ f(ModRef)
// Do something for each known barrier set.
#define FOR_EACH_BARRIER_SET_DO(f) \
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -31,7 +31,7 @@
#include "gc/shared/cardTableModRefBS.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" // G1 support
+#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
#endif
#endif // SHARE_VM_GC_SHARED_BARRIERSETCONFIG_INLINE_HPP
--- a/src/hotspot/share/gc/shared/blockOffsetTable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_VM_GC_SHARED_BLOCKOFFSETTABLE_HPP
#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
@@ -77,7 +78,7 @@
//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
-class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
+class BlockOffsetTable {
friend class VMStructs;
protected:
// These members describe the region covered by the table.
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -116,6 +116,7 @@
// that specific collector in mind, and the documentation above suitably
// extended and updated.
void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
+#if defined(COMPILER2) || INCLUDE_JVMCI
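+ // ReduceInitialCardMarks defers the initial card marks for objects
+ // allocated in compiled code, so this fixup presumably only matters when C2
+ // or JVMCI is present; the interpreter always emits card marks directly.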
if (!ReduceInitialCardMarks) {
return;
}
@@ -137,6 +138,7 @@
invalidate(mr);
}
}
+#endif // COMPILER2 || JVMCI
}
void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
--- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
-#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
-
-#include "gc/shared/cardTableModRefBS.hpp"
-
-class CardTableRS;
-class DirtyCardToOopClosure;
-class OopsInGenClosure;
-
-// A specialization for the CardTableRS gen rem set.
-class CardTableModRefBSForCTRS: public CardTableModRefBS {
- friend class CardTableRS;
-
-public:
- CardTableModRefBSForCTRS(MemRegion whole_heap);
- ~CardTableModRefBSForCTRS();
-
- virtual void initialize();
-
- void set_CTRS(CardTableRS* rs) { _rs = rs; }
-
- virtual bool card_mark_must_follow_store() const {
- return UseConcMarkSweepGC;
- }
-
- virtual bool is_in_young(oop obj) const;
-
-private:
- CardTableRS* _rs;
-
- // *** Support for parallel card scanning.
-
- // dirty and precleaned are equivalent wrt younger_refs_iter.
- static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
- return cv == dirty_card || cv == precleaned_card;
- }
-
- // Returns "true" iff the value "cv" will cause the card containing it
- // to be scanned in the current traversal. May be overridden by
- // subtypes.
- bool card_will_be_scanned(jbyte cv);
-
- // Returns "true" iff the value "cv" may have represented a dirty card at
- // some point.
- bool card_may_have_been_dirty(jbyte cv);
-
- // Iterate over the portion of the card-table which covers the given
- // region mr in the given space and apply cl to any dirty sub-regions
- // of mr. Clears the dirty cards as they are processed.
- void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
- OopsInGenClosure* cl, CardTableRS* ct,
- uint n_threads);
-
- // Work method used to implement non_clean_card_iterate_possibly_parallel()
- // above in the parallel case.
- void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
- OopsInGenClosure* cl, CardTableRS* ct,
- uint n_threads);
-
- // This is an array, one element per covered region of the card table.
- // Each entry is itself an array, with one element per chunk in the
- // covered region. Each entry of these arrays is the lowest non-clean
- // card of the corresponding chunk containing part of an object from the
- // previous chunk, or else NULL.
- typedef jbyte* CardPtr;
- typedef CardPtr* CardArr;
- CardArr* _lowest_non_clean;
- size_t* _lowest_non_clean_chunk_size;
- uintptr_t* _lowest_non_clean_base_chunk_index;
- volatile int* _last_LNC_resizing_collection;
-
- // Initializes "lowest_non_clean" to point to the array for the region
- // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
- // index of the corresponding to the first element of that array.
- // Ensures that these arrays are of sufficient size, allocating if necessary.
- // May be called by several threads concurrently.
- void get_LNC_array_for_space(Space* sp,
- jbyte**& lowest_non_clean,
- uintptr_t& lowest_non_clean_base_chunk_index,
- size_t& lowest_non_clean_chunk_size);
-
- // Returns the number of chunks necessary to cover "mr".
- size_t chunks_to_cover(MemRegion mr) {
- return (size_t)(addr_to_chunk_index(mr.last()) -
- addr_to_chunk_index(mr.start()) + 1);
- }
-
- // Returns the index of the chunk in a stride which
- // covers the given address.
- uintptr_t addr_to_chunk_index(const void* addr) {
- uintptr_t card = (uintptr_t) byte_for(addr);
- return card / ParGCCardsPerStrideChunk;
- }
-
- // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
- // to the cards in the stride (of n_strides) within the given space.
- void process_stride(Space* sp,
- MemRegion used,
- jint stride, int n_strides,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- jbyte** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size);
-
- // Makes sure that chunk boundaries are handled appropriately, by
- // adjusting the min_done of dcto_cl, and by using a special card-table
- // value to indicate how min_done should be set.
- void process_chunk_boundaries(Space* sp,
- DirtyCardToOopClosure* dcto_cl,
- MemRegion chunk_mr,
- MemRegion used,
- jbyte** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size);
-
-};
-
-template<>
-struct BarrierSet::GetName<CardTableModRefBSForCTRS> {
- static const BarrierSet::Name value = BarrierSet::CardTableForRS;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::CardTableForRS> {
- typedef CardTableModRefBSForCTRS type;
-};
-
-#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBSFORCTRS_HPP
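Deleting this file also drops its BarrierSet::GetName / BarrierSet::GetType specializations, the paired traits that map a barrier-set class to its Name enum value and back. For readers unfamiliar with the idiom, a minimal sketch (all names invented):

    // Sketch only: a bidirectional type <-> enum mapping via paired
    // template specializations, as the deleted traits above did.
    enum class Tag { CardTable };
    struct CardTableBS {};

    template <typename T> struct GetTag;   // type -> enum tag
    template <Tag t> struct GetType;       // enum tag -> type

    template <> struct GetTag<CardTableBS> {
      static const Tag value = Tag::CardTable;
    };
    template <> struct GetType<Tag::CardTable> {
      typedef CardTableBS type;
    };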
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,6 +39,7 @@
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
@@ -96,22 +97,22 @@
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
const MetaspaceSizes meta_space(
- MetaspaceAux::committed_bytes(),
- MetaspaceAux::used_bytes(),
- MetaspaceAux::reserved_bytes());
+ MetaspaceUtils::committed_bytes(),
+ MetaspaceUtils::used_bytes(),
+ MetaspaceUtils::reserved_bytes());
const MetaspaceSizes data_space(
- MetaspaceAux::committed_bytes(Metaspace::NonClassType),
- MetaspaceAux::used_bytes(Metaspace::NonClassType),
- MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
+ MetaspaceUtils::committed_bytes(Metaspace::NonClassType),
+ MetaspaceUtils::used_bytes(Metaspace::NonClassType),
+ MetaspaceUtils::reserved_bytes(Metaspace::NonClassType));
const MetaspaceSizes class_space(
- MetaspaceAux::committed_bytes(Metaspace::ClassType),
- MetaspaceAux::used_bytes(Metaspace::ClassType),
- MetaspaceAux::reserved_bytes(Metaspace::ClassType));
+ MetaspaceUtils::committed_bytes(Metaspace::ClassType),
+ MetaspaceUtils::used_bytes(Metaspace::ClassType),
+ MetaspaceUtils::reserved_bytes(Metaspace::ClassType));
const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
- MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
+ MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
- MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);
+ MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
ms_chunk_free_list_summary, class_chunk_free_list_summary);
--- a/src/hotspot/share/gc/shared/concurrentGCPhaseManager.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/concurrentGCPhaseManager.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
static const int IDLE_PHASE = 1; // Concurrent processing is idle.
// Stack of phase managers.
- class Stack VALUE_OBJ_CLASS_SPEC {
+ class Stack {
friend class ConcurrentGCPhaseManager;
public:
--- a/src/hotspot/share/gc/shared/gcTrace.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcName.hpp"
#include "gc/shared/gcWhen.hpp"
-#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/referenceType.hpp"
#include "utilities/macros.hpp"
@@ -50,7 +49,7 @@
class TimePartitions;
class BoolObjectClosure;
-class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
+class SharedGCInfo {
private:
GCName _name;
GCCause::Cause _cause;
@@ -88,7 +87,7 @@
const Tickspan longest_pause() const { return _longest_pause; }
};
-class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC {
+class ParallelOldGCInfo {
void* _dense_prefix;
public:
ParallelOldGCInfo() : _dense_prefix(NULL) {}
@@ -100,7 +99,7 @@
#if INCLUDE_ALL_GCS
-class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC {
+class G1YoungGCInfo {
G1YCType _type;
public:
G1YoungGCInfo() : _type(G1YCTypeEndSentinel) {}
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -558,7 +558,7 @@
ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());
- const size_t metadata_prev_used = MetaspaceAux::used_bytes();
+ const size_t metadata_prev_used = MetaspaceUtils::used_bytes();
print_heap_before_gc();
@@ -644,7 +644,7 @@
complete = complete || collected_old;
print_heap_change(young_prev_used, old_prev_used);
- MetaspaceAux::print_metaspace_change(metadata_prev_used);
+ MetaspaceUtils::print_metaspace_change(metadata_prev_used);
// Adjust generation sizes.
if (collected_old) {
@@ -655,7 +655,7 @@
if (complete) {
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
ClassLoaderDataGraph::purge();
- MetaspaceAux::verify_metrics();
+ MetaspaceUtils::verify_metrics();
// Resize the metaspace capacity after full collections
MetaspaceGC::compute_new_size();
update_full_collections_completed();
@@ -1258,7 +1258,7 @@
void GenCollectedHeap::print_on(outputStream* st) const {
_young_gen->print_on(st);
_old_gen->print_on(st);
- MetaspaceAux::print_on(st);
+ MetaspaceUtils::print_on(st);
}
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
+#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.hpp"
--- a/src/hotspot/share/gc/shared/oopStorage.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -283,12 +283,6 @@
return NULL;
}
-#ifdef ASSERT
-void OopStorage::assert_at_safepoint() {
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif // ASSERT
-
//////////////////////////////////////////////////////////////////////////////
// Allocation
//
@@ -728,7 +722,9 @@
}
void OopStorage::BasicParState::ensure_iteration_started() {
- if (!_concurrent) assert_at_safepoint();
+ if (!_concurrent) {
+ assert_at_safepoint();
+ }
assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
// Ensure _next_block is not the not_started_marker, setting it to
// the _active_head to start the iteration if necessary.
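With the class-local helper gone, the call site relies on an assert_at_safepoint() provided by a shared header (note the new runtime/safepoint.hpp include in oopStorage.inline.hpp below). Judging from the deleted body, the shared form is presumably equivalent to this sketch:

    // Sketch only: a debug-build safepoint assertion, mirroring the
    // member function deleted above.
    #ifdef ASSERT
    #define assert_at_safepoint() \
      assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint")
    #else
    #define assert_at_safepoint()
    #endif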
--- a/src/hotspot/share/gc/shared/oopStorage.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -175,7 +175,7 @@
class Block; // Forward decl; defined in .inline.hpp file.
class BlockList; // Forward decl for BlockEntry friend decl.
- class BlockEntry VALUE_OBJ_CLASS_SPEC {
+ class BlockEntry {
friend class BlockList;
// Members are mutable, and we deal exclusively with pointers to
@@ -193,7 +193,7 @@
~BlockEntry();
};
- class BlockList VALUE_OBJ_CLASS_SPEC {
+ class BlockList {
const Block* _head;
const Block* _tail;
const BlockEntry& (*_get_entry)(const Block& block);
@@ -241,8 +241,6 @@
void delete_empty_block(const Block& block);
bool reduce_deferred_updates();
- static void assert_at_safepoint() NOT_DEBUG_RETURN;
-
template<typename F, typename Storage>
static bool iterate_impl(F f, Storage* storage);
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,10 +26,10 @@
#define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
#include "gc/shared/oopStorage.hpp"
-#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isConst.hpp"
#include "oops/oop.hpp"
+#include "runtime/safepoint.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -126,7 +126,7 @@
}
template<typename Closure>
-class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC {
+class OopStorage::OopFn {
public:
explicit OopFn(Closure* cl) : _cl(cl) {}
@@ -146,7 +146,7 @@
}
template<typename IsAlive, typename F>
-class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC {
+class OopStorage::IfAliveFn {
public:
IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}
@@ -174,7 +174,7 @@
}
template<typename F>
-class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC {
+class OopStorage::SkipNullFn {
public:
SkipNullFn(F f) : _f(f) {}
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,6 @@
#define SHARE_GC_SHARED_OOPSTORAGEPARSTATE_HPP
#include "gc/shared/oopStorage.hpp"
-#include "memory/allocation.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
@@ -140,7 +139,7 @@
// If is_alive->do_object_b(*p) is false, then cl will not be
// invoked on p.
-class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
+class OopStorage::BasicParState {
OopStorage* _storage;
void* volatile _next_block;
bool _concurrent;
@@ -164,7 +163,7 @@
};
template<bool concurrent, bool is_const>
-class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
+class OopStorage::ParState {
BasicParState _basic_state;
public:
@@ -178,7 +177,7 @@
};
template<>
-class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
+class OopStorage::ParState<false, false> {
BasicParState _basic_state;
public:
--- a/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/oopStorageParState.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,14 +27,13 @@
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.hpp"
-#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
template<typename F>
-class OopStorage::BasicParState::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
+class OopStorage::BasicParState::AlwaysTrueFn {
F _f;
public:
--- a/src/hotspot/share/gc/shared/preservedMarks.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/preservedMarks.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,9 +33,9 @@
class PreservedMarksSet;
class WorkGang;
-class PreservedMarks VALUE_OBJ_CLASS_SPEC {
+class PreservedMarks {
private:
- class OopAndMarkOop VALUE_OBJ_CLASS_SPEC {
+ class OopAndMarkOop {
private:
oop _o;
markOop _m;
--- a/src/hotspot/share/gc/shared/referencePolicy.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/referencePolicy.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_SHARED_REFERENCEPOLICY_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPOLICY_HPP
+#include "oops/oopsHierarchy.hpp"
+
// referencePolicy is used to determine when soft reference objects
// should be cleared.
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -32,7 +32,7 @@
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "logging/log.hpp"
-#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,10 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
+#include "gc/shared/workerDataArray.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
RefProcWorkerTimeTracker::RefProcWorkerTimeTracker(ReferenceProcessorPhaseTimes::RefProcPhaseNumbers number,
ReferenceProcessorPhaseTimes* phase_times,
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSORPHASETIMES_HPP
#include "gc/shared/referenceProcessorStats.hpp"
-#include "gc/shared/workerDataArray.inline.hpp"
+#include "gc/shared/workerDataArray.hpp"
#include "memory/referenceType.hpp"
#include "utilities/ticks.hpp"
--- a/src/hotspot/share/gc/shared/space.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/space.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -33,7 +33,7 @@
#include "gc/shared/space.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
--- a/src/hotspot/share/gc/shared/vmGCOperations.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/vmGCOperations.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -227,7 +227,7 @@
private:
JvmtiGCMarker _jgcm;
public:
- typedef enum { MINOR, FULL, OTHER } reason_type;
+ typedef enum { MINOR, FULL, CONCURRENT, OTHER } reason_type;
SvcGCMarker(reason_type reason) {
VM_GC_Operation::notify_gc_begin(reason == FULL);
--- a/src/hotspot/share/gc/shared/workgroup.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/gc/shared/workgroup.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -54,7 +54,7 @@
// An abstract task to be worked on by a gang.
// You subclass this to supply your own work() method
-class AbstractGangTask VALUE_OBJ_CLASS_SPEC {
+class AbstractGangTask {
const char* _name;
const uint _gc_id;
--- a/src/hotspot/share/interpreter/bytecode.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/bytecode.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,10 @@
*/
#include "precompiled.hpp"
-#include "interpreter/bytecode.hpp"
+#include "interpreter/bytecode.inline.hpp"
#include "interpreter/linkResolver.hpp"
#include "oops/constantPool.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldType.hpp"
#include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/interpreter/bytecode.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/bytecode.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -129,7 +129,7 @@
// Abstractions for lookupswitch bytecode
-class LookupswitchPair VALUE_OBJ_CLASS_SPEC {
+class LookupswitchPair {
private:
const address _bcp;
@@ -229,7 +229,7 @@
is_invokedynamic() ||
is_invokehandle(); }
- bool has_appendix() { return cpcache_entry()->has_appendix(); }
+ bool has_appendix();
int size_of_parameters() const;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/interpreter/bytecode.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_INTERPRETER_BYTECODE_INLINE_HPP
+#define SHARE_VM_INTERPRETER_BYTECODE_INLINE_HPP
+
+#include "interpreter/bytecode.hpp"
+#include "oops/cpCache.inline.hpp"
+
+inline bool Bytecode_invoke::has_appendix() { return cpcache_entry()->has_appendix(); }
+
+#endif // SHARE_VM_INTERPRETER_BYTECODE_INLINE_HPP
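This new file is the standard HotSpot .hpp / .inline.hpp split: has_appendix() needs cpCache.inline.hpp, and defining it in bytecode.hpp would force that heavy include on every client of the header. Only callers that actually invoke the method include the .inline.hpp. A compilable sketch of the pattern, with invented names:

    // ---- foo.hpp (sketch): declaration only, cheap to include ----
    class Foo {
    public:
      bool has_appendix();   // body intentionally elsewhere
    };

    // ---- foo.inline.hpp (sketch): the body and its heavy dependency ----
    // #include "foo.hpp"
    // #include "heavy.inline.hpp"   // pulled in only here
    inline bool Foo::has_appendix() { return true; }  // stand-in body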
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -33,10 +33,14 @@
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/constantPool.inline.hpp"
+#include "oops/cpCache.inline.hpp"
+#include "oops/method.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
@@ -2588,17 +2592,19 @@
if (ki->interface_klass() == iclass) break;
}
// If the interface isn't found, this class doesn't implement this
- // interface. The link resolver checks this but only for the first
+ // interface. The link resolver checks this but only for the first
// time this interface is called.
if (i == int2->itable_length()) {
- VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
+ CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(THREAD, rcvr->klass(), iclass),
+ handle_exception);
}
int mindex = interface_method->itable_index();
itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
callee = im[mindex].method();
if (callee == NULL) {
- VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
+ CALL_VM(InterpreterRuntime::throw_AbstractMethodErrorVerbose(THREAD, rcvr->klass(), interface_method),
+ handle_exception);
}
// Profile virtual call.
@@ -2821,7 +2827,7 @@
HandleMark __hm(THREAD);
THREAD->clear_pending_exception();
- assert(except_oop(), "No exception to process");
+ assert(except_oop() != NULL, "No exception to process");
intptr_t continuation_bci;
// expression stack is emptied
topOfStack = istate->stack_base() - Interpreter::stackElementWords;
--- a/src/hotspot/share/interpreter/bytecodeTracer.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,6 +30,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/constantPool.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,8 +37,9 @@
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
@@ -486,8 +487,8 @@
ResourceMark rm(thread);
stringStream tempst;
tempst.print("interpreter method <%s>\n"
- " at bci %d for thread " INTPTR_FORMAT,
- h_method->print_value_string(), current_bci, p2i(thread));
+ " at bci %d for thread " INTPTR_FORMAT " (%s)",
+ h_method->print_value_string(), current_bci, p2i(thread), thread->name());
Exceptions::log_exception(h_exception, tempst);
}
// Don't go paging in something which won't be used.
@@ -581,11 +582,45 @@
THROW(vmSymbols::java_lang_AbstractMethodError());
IRT_END
+// This method is called from the "abstract_entry" of the interpreter.
+// At that point, the arguments have already been removed from the stack
+// and therefore we don't have the receiver object at our fingertips. (Though,
+// on some platforms the receiver still resides in a register...). Thus,
+// we have no choice but to print an error message not containing the receiver
+// type.
+IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorWithMethod(JavaThread* thread,
+ Method* missingMethod))
+ ResourceMark rm(thread);
+ assert(missingMethod != NULL, "sanity");
+ methodHandle m(thread, missingMethod);
+ LinkResolver::throw_abstract_method_error(m, THREAD);
+IRT_END
+
+IRT_ENTRY(void, InterpreterRuntime::throw_AbstractMethodErrorVerbose(JavaThread* thread,
+ Klass* recvKlass,
+ Method* missingMethod))
+ ResourceMark rm(thread);
+ methodHandle mh = methodHandle(thread, missingMethod);
+ LinkResolver::throw_abstract_method_error(mh, recvKlass, THREAD);
+IRT_END
+
IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
THROW(vmSymbols::java_lang_IncompatibleClassChangeError());
IRT_END
+IRT_ENTRY(void, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose(JavaThread* thread,
+ Klass* recvKlass,
+ Klass* interfaceKlass))
+ ResourceMark rm(thread);
+ char buf[1000];
+ buf[0] = '\0';
+ jio_snprintf(buf, sizeof(buf),
+ "Class %s does not implement the requested interface %s",
+ recvKlass ? recvKlass->external_name() : "NULL",
+ interfaceKlass ? interfaceKlass->external_name() : "NULL");
+ THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+IRT_END
//------------------------------------------------------------------------------------------------------------------------
// Fields
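The verbose thrower above is deliberately defensive: a fixed-size stack buffer, jio_snprintf (which truncates rather than overruns), and a NULL guard on each Klass before calling external_name(), so the error-reporting path cannot itself crash. A plain-C++ sketch of the same idiom (snprintf stands in for jio_snprintf; names invented):

    #include <cstdio>

    // Sketch only: format the ICCE message without trusting either input.
    static void format_icce(char* buf, size_t buflen,
                            const char* recv, const char* intf) {
      std::snprintf(buf, buflen,
                    "Class %s does not implement the requested interface %s",
                    recv != nullptr ? recv : "NULL",
                    intf != nullptr ? intf : "NULL");
    }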
--- a/src/hotspot/share/interpreter/interpreterRuntime.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -118,7 +118,15 @@
// Exceptions thrown by the interpreter
static void throw_AbstractMethodError(JavaThread* thread);
+ static void throw_AbstractMethodErrorWithMethod(JavaThread* thread, Method* missingMethod);
+ static void throw_AbstractMethodErrorVerbose(JavaThread* thread,
+ Klass* recvKlass,
+ Method* missingMethod);
+
static void throw_IncompatibleClassChangeError(JavaThread* thread);
+ static void throw_IncompatibleClassChangeErrorVerbose(JavaThread* thread,
+ Klass* recvKlass,
+ Klass* interfaceKlass);
static void throw_StackOverflowError(JavaThread* thread);
static void throw_delayed_StackOverflowError(JavaThread* thread);
static void throw_ArrayIndexOutOfBoundsException(JavaThread* thread, char* name, jint index);
--- a/src/hotspot/share/interpreter/invocationCounter.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/invocationCounter.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
// more significant bits. The counter is incremented before a method is activated and an
// action is triggered when count() > limit().
-class InvocationCounter VALUE_OBJ_CLASS_SPEC {
+class InvocationCounter {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class ciReplay;
--- a/src/hotspot/share/interpreter/linkResolver.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/linkResolver.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,8 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
@@ -1343,8 +1344,7 @@
// do lookup based on receiver klass using the vtable index
if (resolved_method->method_holder()->is_interface()) { // default or miranda method
- vtable_index = vtable_index_of_interface_method(resolved_klass,
- resolved_method);
+ vtable_index = vtable_index_of_interface_method(resolved_klass, resolved_method);
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
selected_method = methodHandle(THREAD, recv_klass->method_at_vtable(vtable_index));
@@ -1354,7 +1354,7 @@
assert(!resolved_method->has_itable_index(), "");
vtable_index = resolved_method->vtable_index();
// We could get a negative vtable_index for final methods,
- // because as an optimization they are they are never put in the vtable,
+ // because as an optimization they are never put in the vtable,
// unless they override an existing method.
// If we do get a negative, it means the resolved method is the selected
// method, and it can never be changed by an override.
@@ -1368,20 +1368,13 @@
// check if method exists
if (selected_method.is_null()) {
- ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass,
- resolved_method->name(),
- resolved_method->signature()));
+ throw_abstract_method_error(resolved_method, recv_klass, CHECK);
}
// check if abstract
if (check_null_and_abstract && selected_method->is_abstract()) {
- ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(resolved_klass,
- selected_method->name(),
- selected_method->signature()));
+ // Pass arguments for generating a verbose error message.
+ throw_abstract_method_error(resolved_method, selected_method, recv_klass, CHECK);
}
if (log_develop_is_enabled(Trace, vtables)) {
@@ -1437,53 +1430,46 @@
// do lookup based on receiver klass
// This search must match the linktime preparation search for itable initialization
// to correctly enforce loader constraints for interface method inheritance
- methodHandle sel_method = lookup_instance_method_in_klasses(recv_klass,
+ methodHandle selected_method = lookup_instance_method_in_klasses(recv_klass,
resolved_method->name(),
resolved_method->signature(), CHECK);
- if (sel_method.is_null() && !check_null_and_abstract) {
+ if (selected_method.is_null() && !check_null_and_abstract) {
// In theory this is a harmless placeholder value, but
// in practice leaving it null affects the nsk default method tests.
// This needs further study.
- sel_method = resolved_method;
+ selected_method = resolved_method;
}
// check if method exists
- if (sel_method.is_null()) {
- ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(recv_klass,
- resolved_method->name(),
- resolved_method->signature()));
+ if (selected_method.is_null()) {
+ // Pass arguments for generating a verbose error message.
+ throw_abstract_method_error(resolved_method, recv_klass, CHECK);
}
// check access
- // Throw Illegal Access Error if sel_method is not public.
- if (!sel_method->is_public()) {
+ // Throw Illegal Access Error if selected_method is not public.
+ if (!selected_method->is_public()) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
Method::name_and_sig_as_C_string(recv_klass,
- sel_method->name(),
- sel_method->signature()));
+ selected_method->name(),
+ selected_method->signature()));
}
// check if abstract
- if (check_null_and_abstract && sel_method->is_abstract()) {
- ResourceMark rm(THREAD);
- THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
- Method::name_and_sig_as_C_string(recv_klass,
- sel_method->name(),
- sel_method->signature()));
+ if (check_null_and_abstract && selected_method->is_abstract()) {
+ throw_abstract_method_error(resolved_method, selected_method, recv_klass, CHECK);
}
if (log_develop_is_enabled(Trace, itables)) {
trace_method_resolution("invokeinterface selected method: receiver-class:",
- recv_klass, resolved_klass, sel_method, true);
+ recv_klass, resolved_klass, selected_method, true);
}
// setup result
if (!resolved_method->has_itable_index()) {
int vtable_index = resolved_method->vtable_index();
- assert(vtable_index == sel_method->vtable_index(), "sanity check");
- result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+ assert(vtable_index == selected_method->vtable_index(), "sanity check");
+ result.set_virtual(resolved_klass, recv_klass, resolved_method, selected_method, vtable_index, CHECK);
} else {
int itable_index = resolved_method()->itable_index();
- result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
+ result.set_interface(resolved_klass, recv_klass, resolved_method, selected_method, itable_index, CHECK);
}
}
@@ -1773,3 +1759,38 @@
result.set_handle(resolved_method, resolved_appendix, resolved_method_type, THREAD);
Exceptions::wrap_dynamic_exception(CHECK);
}
+
+// Selected method is abstract.
+void LinkResolver::throw_abstract_method_error(const methodHandle& resolved_method,
+ const methodHandle& selected_method,
+ Klass *recv_klass, TRAPS) {
+ Klass *resolved_klass = resolved_method->method_holder();
+ ResourceMark rm(THREAD);
+ stringStream ss;
+
+ if (recv_klass != NULL) {
+ ss.print("Receiver class %s does not define or inherit an "
+ "implementation of the",
+ recv_klass->external_name());
+ } else {
+ ss.print("Missing implementation of");
+ }
+
+ assert(resolved_method.not_null(), "Sanity");
+ ss.print(" resolved method %s%s%s%s of %s %s.",
+ resolved_method->is_abstract() ? "abstract " : "",
+ resolved_method->is_private() ? "private " : "",
+ resolved_method->name()->as_C_string(),
+ resolved_method->signature()->as_C_string(),
+ resolved_klass->external_kind(),
+ resolved_klass->external_name());
+
+ if (selected_method.not_null() && !(resolved_method == selected_method)) {
+ ss.print(" Selected method is %s%s%s.",
+ selected_method->is_abstract() ? "abstract " : "",
+ selected_method->is_private() ? "private " : "",
+ selected_method->name_and_sig_as_C_string());
+ }
+
+ THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), ss.as_string());
+}
--- a/src/hotspot/share/interpreter/linkResolver.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/linkResolver.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -347,5 +347,19 @@
static void resolve_invoke(CallInfo& result, Handle& recv,
const methodHandle& attached_method,
Bytecodes::Code byte, TRAPS);
+
+ public:
+ // Only resolved method known.
+ static void throw_abstract_method_error(const methodHandle& resolved_method, TRAPS) {
+ throw_abstract_method_error(resolved_method, NULL, NULL, CHECK);
+ }
+ // Resolved method and receiver klass known.
+ static void throw_abstract_method_error(const methodHandle& resolved_method, Klass *recv_klass, TRAPS) {
+ throw_abstract_method_error(resolved_method, NULL, recv_klass, CHECK);
+ }
+ // Selected method is abstract.
+ static void throw_abstract_method_error(const methodHandle& resolved_method,
+ const methodHandle& selected_method,
+ Klass *recv_klass, TRAPS);
};
#endif // SHARE_VM_INTERPRETER_LINKRESOLVER_HPP
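The two inline overloads added above are a funnel: each fills in the arguments it does not know with NULL and forwards to the single three-argument worker, so the message construction in linkResolver.cpp stays in exactly one place. A sketch of the shape (plain C++, names invented):

    #include <cstdio>

    struct Reporter {
      // Single worker: builds the message from whatever is known.
      static void report(const char* resolved, const char* selected,
                         const char* recv) {
        std::printf("missing %s", resolved);
        if (recv != nullptr)     std::printf(", receiver %s", recv);
        if (selected != nullptr) std::printf(", selected %s", selected);
        std::printf("\n");
      }
      // Thin overloads default the pieces the caller cannot supply.
      static void report(const char* resolved) {
        report(resolved, nullptr, nullptr);
      }
      static void report(const char* resolved, const char* recv) {
        report(resolved, nullptr, recv);
      }
    };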
--- a/src/hotspot/share/interpreter/rewriter.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/rewriter.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -111,12 +111,12 @@
if (HAS_PENDING_EXCEPTION) {
MetadataFactory::free_metadata(loader_data, cache);
_pool->set_cache(NULL); // so the verifier isn't confused
+ } else {
+ DEBUG_ONLY(
+ if (DumpSharedSpaces) {
+ cache->verify_just_initialized();
+ })
}
-
- DEBUG_ONLY(
- if (DumpSharedSpaces) {
- cache->verify_just_initialized();
- })
}
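This reshuffle looks like a correctness fix, not just tidying: on the exception branch the cache has just been freed, so the DEBUG_ONLY verification may only run on the success branch. A self-contained sketch of the rule (DEBUG_ONLY redefined locally; names invented):

    #ifdef ASSERT
    #define DEBUG_ONLY(code) code
    #else
    #define DEBUG_ONLY(code)
    #endif

    struct Cache { void verify_just_initialized() {} };

    // Sketch only: once the failure path frees the cache, no later code,
    // debug-only or not, may touch it again.
    void finish(Cache* cache, bool failed) {
      if (failed) {
        delete cache;                                  // freed here
      } else {
        DEBUG_ONLY(cache->verify_just_initialized();)  // success path only
      }
    }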
--- a/src/hotspot/share/interpreter/templateInterpreter.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/templateInterpreter.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
// A little wrapper class to group tosca-specific entry points into a unit.
// (tosca = Top-Of-Stack CAche)
-class EntryPoint VALUE_OBJ_CLASS_SPEC {
+class EntryPoint {
private:
address _entry[number_of_states];
@@ -62,7 +62,7 @@
//------------------------------------------------------------------------------------------------------------------------
// A little wrapper class to group tosca-specific dispatch tables into a unit.
-class DispatchTable VALUE_OBJ_CLASS_SPEC {
+class DispatchTable {
public:
enum { length = 1 << BitsPerByte }; // an entry point for each byte value (also for undefined bytecodes)
--- a/src/hotspot/share/interpreter/templateTable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/interpreter/templateTable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
// A Template describes the properties of a code template for a given bytecode
// and provides a generator to generate the code template.
-class Template VALUE_OBJ_CLASS_SPEC {
+class Template {
private:
enum Flags {
uses_bcp_bit, // set if template needs the bcp pointing to bytecode
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "classfile/symbolTable.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvmci/compilerRuntime.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,6 +34,7 @@
#include "jvmci/jvmciJavaClasses.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciRuntime.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
@@ -133,7 +134,10 @@
if (!reference_map->is_a(HotSpotReferenceMap::klass())) {
JVMCI_ERROR_NULL("unknown reference map: %s", reference_map->klass()->signature_name());
}
- if (HotSpotReferenceMap::maxRegisterSize(reference_map) > 16) {
+ if (!_has_wide_vector && SharedRuntime::is_wide_vector(HotSpotReferenceMap::maxRegisterSize(reference_map))) {
+ if (SharedRuntime::polling_page_vectors_safepoint_handler_blob() == NULL) {
+ JVMCI_ERROR_NULL("JVMCI is producing code using vectors larger than the runtime supports");
+ }
_has_wide_vector = true;
}
OopMap* map = new OopMap(_total_frame_size, _parameter_count);
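The new test replaces the hard-coded 16-byte ceiling with the runtime's own notion of a wide vector (SharedRuntime::is_wide_vector) and rejects the code outright when no vector-capable safepoint handler blob was generated, failing at install time instead of at the first safepoint poll. A sketch of that install-time gate (threshold and names illustrative):

    // Sketch only: refuse code that needs a runtime facility which was
    // never generated, instead of crashing later.
    static bool accept_code(int max_vector_reg_bytes,
                            bool has_wide_vector_handler) {
      const int plain_handler_limit = 16;   // what the ordinary blob saves
      return max_vector_reg_bytes <= plain_handler_limit
          || has_wide_vector_handler;
    }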
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -22,42 +22,22 @@
*/
#include "precompiled.hpp"
-#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.inline.hpp"
-#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
-#include "interpreter/linkResolver.hpp"
#include "memory/oopFactory.hpp"
-#include "memory/resourceArea.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/generateOopMap.hpp"
-#include "oops/fieldStreams.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/method.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
-#include "runtime/fieldDescriptor.hpp"
-#include "runtime/javaCalls.hpp"
-#include "runtime/jniHandles.inline.hpp"
-#include "jvmci/jvmciRuntime.hpp"
-#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
-#include "compiler/compilerOracle.hpp"
#include "compiler/disassembler.hpp"
-#include "compiler/oopMap.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
-#include "jvmci/jvmciCompiler.hpp"
-#include "jvmci/jvmciEnv.hpp"
-#include "jvmci/jvmciJavaClasses.hpp"
#include "jvmci/jvmciCodeInstaller.hpp"
-#include "jvmci/vmStructs_jvmci.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "runtime/javaCalls.hpp"
-#include "runtime/deoptimization.hpp"
+#include "jvmci/jvmciRuntime.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/timerTrace.hpp"
-#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
-#include "runtime/vmStructs.hpp"
-#include "utilities/resourceHash.hpp"
void JNIHandleMark::push_jni_handle_block() {
@@ -119,394 +99,11 @@
}
-int CompilerToVM::Data::Klass_vtable_start_offset;
-int CompilerToVM::Data::Klass_vtable_length_offset;
-
-int CompilerToVM::Data::Method_extra_stack_entries;
-
-address CompilerToVM::Data::SharedRuntime_ic_miss_stub;
-address CompilerToVM::Data::SharedRuntime_handle_wrong_method_stub;
-address CompilerToVM::Data::SharedRuntime_deopt_blob_unpack;
-address CompilerToVM::Data::SharedRuntime_deopt_blob_uncommon_trap;
-
-size_t CompilerToVM::Data::ThreadLocalAllocBuffer_alignment_reserve;
-
-CollectedHeap* CompilerToVM::Data::Universe_collectedHeap;
-int CompilerToVM::Data::Universe_base_vtable_size;
-address CompilerToVM::Data::Universe_narrow_oop_base;
-int CompilerToVM::Data::Universe_narrow_oop_shift;
-address CompilerToVM::Data::Universe_narrow_klass_base;
-int CompilerToVM::Data::Universe_narrow_klass_shift;
-void* CompilerToVM::Data::Universe_non_oop_bits;
-uintptr_t CompilerToVM::Data::Universe_verify_oop_mask;
-uintptr_t CompilerToVM::Data::Universe_verify_oop_bits;
-
-bool CompilerToVM::Data::_supports_inline_contig_alloc;
-HeapWord** CompilerToVM::Data::_heap_end_addr;
-HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
-int CompilerToVM::Data::_max_oop_map_stack_offset;
-
-jbyte* CompilerToVM::Data::cardtable_start_address;
-int CompilerToVM::Data::cardtable_shift;
-
-int CompilerToVM::Data::vm_page_size;
-
-int CompilerToVM::Data::sizeof_vtableEntry = sizeof(vtableEntry);
-int CompilerToVM::Data::sizeof_ExceptionTableElement = sizeof(ExceptionTableElement);
-int CompilerToVM::Data::sizeof_LocalVariableTableElement = sizeof(LocalVariableTableElement);
-int CompilerToVM::Data::sizeof_ConstantPool = sizeof(ConstantPool);
-int CompilerToVM::Data::sizeof_SymbolPointer = sizeof(Symbol*);
-int CompilerToVM::Data::sizeof_narrowKlass = sizeof(narrowKlass);
-int CompilerToVM::Data::sizeof_arrayOopDesc = sizeof(arrayOopDesc);
-int CompilerToVM::Data::sizeof_BasicLock = sizeof(BasicLock);
-
-address CompilerToVM::Data::dsin;
-address CompilerToVM::Data::dcos;
-address CompilerToVM::Data::dtan;
-address CompilerToVM::Data::dexp;
-address CompilerToVM::Data::dlog;
-address CompilerToVM::Data::dlog10;
-address CompilerToVM::Data::dpow;
-
-address CompilerToVM::Data::symbol_init;
-address CompilerToVM::Data::symbol_clinit;
-
-void CompilerToVM::Data::initialize(TRAPS) {
- Klass_vtable_start_offset = in_bytes(Klass::vtable_start_offset());
- Klass_vtable_length_offset = in_bytes(Klass::vtable_length_offset());
-
- Method_extra_stack_entries = Method::extra_stack_entries();
-
- SharedRuntime_ic_miss_stub = SharedRuntime::get_ic_miss_stub();
- SharedRuntime_handle_wrong_method_stub = SharedRuntime::get_handle_wrong_method_stub();
- SharedRuntime_deopt_blob_unpack = SharedRuntime::deopt_blob()->unpack();
- SharedRuntime_deopt_blob_uncommon_trap = SharedRuntime::deopt_blob()->uncommon_trap();
-
- ThreadLocalAllocBuffer_alignment_reserve = ThreadLocalAllocBuffer::alignment_reserve();
-
- Universe_collectedHeap = Universe::heap();
- Universe_base_vtable_size = Universe::base_vtable_size();
- Universe_narrow_oop_base = Universe::narrow_oop_base();
- Universe_narrow_oop_shift = Universe::narrow_oop_shift();
- Universe_narrow_klass_base = Universe::narrow_klass_base();
- Universe_narrow_klass_shift = Universe::narrow_klass_shift();
- Universe_non_oop_bits = Universe::non_oop_word();
- Universe_verify_oop_mask = Universe::verify_oop_mask();
- Universe_verify_oop_bits = Universe::verify_oop_bits();
-
- _supports_inline_contig_alloc = Universe::heap()->supports_inline_contig_alloc();
- _heap_end_addr = _supports_inline_contig_alloc ? Universe::heap()->end_addr() : (HeapWord**) -1;
- _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord* volatile*) -1;
-
- _max_oop_map_stack_offset = (OopMapValue::register_mask - VMRegImpl::stack2reg(0)->value()) * VMRegImpl::stack_slot_size;
- int max_oop_map_stack_index = _max_oop_map_stack_offset / VMRegImpl::stack_slot_size;
- assert(OopMapValue::legal_vm_reg_name(VMRegImpl::stack2reg(max_oop_map_stack_index)), "should be valid");
- assert(!OopMapValue::legal_vm_reg_name(VMRegImpl::stack2reg(max_oop_map_stack_index + 1)), "should be invalid");
-
- symbol_init = (address) vmSymbols::object_initializer_name();
- symbol_clinit = (address) vmSymbols::class_initializer_name();
-
- BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->is_a(BarrierSet::CardTableModRef)) {
- jbyte* base = ci_card_table_address();
- assert(base != NULL, "unexpected byte_map_base");
- cardtable_start_address = base;
- cardtable_shift = CardTable::card_shift;
- } else {
- // No card mark barriers
- cardtable_start_address = 0;
- cardtable_shift = 0;
- }
-
- vm_page_size = os::vm_page_size();
-
-#define SET_TRIGFUNC(name) \
- if (StubRoutines::name() != NULL) { \
- name = StubRoutines::name(); \
- } else { \
- name = CAST_FROM_FN_PTR(address, SharedRuntime::name); \
- }
-
- SET_TRIGFUNC(dsin);
- SET_TRIGFUNC(dcos);
- SET_TRIGFUNC(dtan);
- SET_TRIGFUNC(dexp);
- SET_TRIGFUNC(dlog10);
- SET_TRIGFUNC(dlog);
- SET_TRIGFUNC(dpow);
-
-#undef SET_TRIGFUNC
-}
-
-objArrayHandle CompilerToVM::initialize_intrinsics(TRAPS) {
- objArrayHandle vmIntrinsics = oopFactory::new_objArray_handle(VMIntrinsicMethod::klass(), (vmIntrinsics::ID_LIMIT - 1), CHECK_(objArrayHandle()));
- int index = 0;
- // The intrinsics for a class are usually adjacent to each other.
- // When they are, the string for the class name can be reused.
- vmSymbols::SID kls_sid = vmSymbols::NO_SID;
- Handle kls_str;
-#define SID_ENUM(n) vmSymbols::VM_SYMBOL_ENUM_NAME(n)
-#define VM_SYMBOL_TO_STRING(s) \
- java_lang_String::create_from_symbol(vmSymbols::symbol_at(SID_ENUM(s)), CHECK_(objArrayHandle()))
-#define VM_INTRINSIC_INFO(id, kls, name, sig, ignore_fcode) { \
- instanceHandle vmIntrinsicMethod = InstanceKlass::cast(VMIntrinsicMethod::klass())->allocate_instance_handle(CHECK_(objArrayHandle())); \
- if (kls_sid != SID_ENUM(kls)) { \
- kls_str = VM_SYMBOL_TO_STRING(kls); \
- kls_sid = SID_ENUM(kls); \
- } \
- Handle name_str = VM_SYMBOL_TO_STRING(name); \
- Handle sig_str = VM_SYMBOL_TO_STRING(sig); \
- VMIntrinsicMethod::set_declaringClass(vmIntrinsicMethod, kls_str()); \
- VMIntrinsicMethod::set_name(vmIntrinsicMethod, name_str()); \
- VMIntrinsicMethod::set_descriptor(vmIntrinsicMethod, sig_str()); \
- VMIntrinsicMethod::set_id(vmIntrinsicMethod, vmIntrinsics::id); \
- vmIntrinsics->obj_at_put(index++, vmIntrinsicMethod()); \
- }
-
- VM_INTRINSICS_DO(VM_INTRINSIC_INFO, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE)
-#undef SID_ENUM
-#undef VM_SYMBOL_TO_STRING
-#undef VM_INTRINSIC_INFO
- assert(index == vmIntrinsics::ID_LIMIT - 1, "must be");
-
- return vmIntrinsics;
-}
-
-/**
- * The set of VM flags known to be used.
- */
-#define PREDEFINED_CONFIG_FLAGS(do_bool_flag, do_intx_flag, do_uintx_flag) \
- do_intx_flag(AllocateInstancePrefetchLines) \
- do_intx_flag(AllocatePrefetchDistance) \
- do_intx_flag(AllocatePrefetchInstr) \
- do_intx_flag(AllocatePrefetchLines) \
- do_intx_flag(AllocatePrefetchStepSize) \
- do_intx_flag(AllocatePrefetchStyle) \
- do_intx_flag(BciProfileWidth) \
- do_bool_flag(BootstrapJVMCI) \
- do_bool_flag(CITime) \
- do_bool_flag(CITimeEach) \
- do_uintx_flag(CodeCacheSegmentSize) \
- do_intx_flag(CodeEntryAlignment) \
- do_bool_flag(CompactFields) \
- NOT_PRODUCT(do_intx_flag(CompileTheWorldStartAt)) \
- NOT_PRODUCT(do_intx_flag(CompileTheWorldStopAt)) \
- do_intx_flag(ContendedPaddingWidth) \
- do_bool_flag(DontCompileHugeMethods) \
- do_bool_flag(EnableContended) \
- do_intx_flag(FieldsAllocationStyle) \
- do_bool_flag(FoldStableValues) \
- do_bool_flag(ForceUnreachable) \
- do_intx_flag(HugeMethodLimit) \
- do_bool_flag(Inline) \
- do_intx_flag(JVMCICounterSize) \
- do_bool_flag(JVMCIPrintProperties) \
- do_bool_flag(JVMCIUseFastLocking) \
- do_intx_flag(MethodProfileWidth) \
- do_intx_flag(ObjectAlignmentInBytes) \
- do_bool_flag(PrintInlining) \
- do_bool_flag(ReduceInitialCardMarks) \
- do_bool_flag(RestrictContended) \
- do_intx_flag(StackReservedPages) \
- do_intx_flag(StackShadowPages) \
- do_bool_flag(TLABStats) \
- do_uintx_flag(TLABWasteIncrement) \
- do_intx_flag(TypeProfileWidth) \
- do_bool_flag(UseAESIntrinsics) \
- X86_ONLY(do_intx_flag(UseAVX)) \
- do_bool_flag(UseBiasedLocking) \
- do_bool_flag(UseCRC32Intrinsics) \
- do_bool_flag(UseCompressedClassPointers) \
- do_bool_flag(UseCompressedOops) \
- do_bool_flag(UseConcMarkSweepGC) \
- X86_ONLY(do_bool_flag(UseCountLeadingZerosInstruction)) \
- X86_ONLY(do_bool_flag(UseCountTrailingZerosInstruction)) \
- do_bool_flag(UseG1GC) \
- COMPILER2_PRESENT(do_bool_flag(UseMontgomeryMultiplyIntrinsic)) \
- COMPILER2_PRESENT(do_bool_flag(UseMontgomerySquareIntrinsic)) \
- COMPILER2_PRESENT(do_bool_flag(UseMulAddIntrinsic)) \
- COMPILER2_PRESENT(do_bool_flag(UseMultiplyToLenIntrinsic)) \
- do_bool_flag(UsePopCountInstruction) \
- do_bool_flag(UseSHA1Intrinsics) \
- do_bool_flag(UseSHA256Intrinsics) \
- do_bool_flag(UseSHA512Intrinsics) \
- do_intx_flag(UseSSE) \
- COMPILER2_PRESENT(do_bool_flag(UseSquareToLenIntrinsic)) \
- do_bool_flag(UseStackBanging) \
- do_bool_flag(UseTLAB) \
- do_bool_flag(VerifyOops) \
-
-#define BOXED_BOOLEAN(name, value) oop name = ((jboolean)(value) ? boxedTrue() : boxedFalse())
-#define BOXED_DOUBLE(name, value) oop name; do { jvalue p; p.d = (jdouble) (value); name = java_lang_boxing_object::create(T_DOUBLE, &p, CHECK_NULL);} while(0)
-#define BOXED_LONG(name, value) \
- oop name; \
- do { \
- jvalue p; p.j = (jlong) (value); \
- Handle* e = longs.get(p.j); \
- if (e == NULL) { \
- oop o = java_lang_boxing_object::create(T_LONG, &p, CHECK_NULL); \
- Handle h(THREAD, o); \
- longs.put(p.j, h); \
- name = h(); \
- } else { \
- name = (*e)(); \
- } \
- } while (0)
-
-#define CSTRING_TO_JSTRING(name, value) \
- Handle name; \
- do { \
- if (value != NULL) { \
- Handle* e = strings.get(value); \
- if (e == NULL) { \
- Handle h = java_lang_String::create_from_str(value, CHECK_NULL); \
- strings.put(value, h); \
- name = h; \
- } else { \
- name = (*e); \
- } \
- } \
- } while (0)
+jobjectArray readConfiguration0(JNIEnv *env, TRAPS);
C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
- ResourceMark rm;
- HandleMark hm;
-
- // Used to canonicalize Long and String values.
- ResourceHashtable<jlong, Handle> longs;
- ResourceHashtable<const char*, Handle, &CompilerToVM::cstring_hash, &CompilerToVM::cstring_equals> strings;
-
- jvalue prim;
- prim.z = true; oop boxedTrueOop = java_lang_boxing_object::create(T_BOOLEAN, &prim, CHECK_NULL);
- Handle boxedTrue(THREAD, boxedTrueOop);
- prim.z = false; oop boxedFalseOop = java_lang_boxing_object::create(T_BOOLEAN, &prim, CHECK_NULL);
- Handle boxedFalse(THREAD, boxedFalseOop);
-
- CompilerToVM::Data::initialize(CHECK_NULL);
-
- VMField::klass()->initialize(CHECK_NULL);
- VMFlag::klass()->initialize(CHECK_NULL);
- VMIntrinsicMethod::klass()->initialize(CHECK_NULL);
-
- int len = JVMCIVMStructs::localHotSpotVMStructs_count();
- objArrayHandle vmFields = oopFactory::new_objArray_handle(VMField::klass(), len, CHECK_NULL);
- for (int i = 0; i < len ; i++) {
- VMStructEntry vmField = JVMCIVMStructs::localHotSpotVMStructs[i];
- instanceHandle vmFieldObj = InstanceKlass::cast(VMField::klass())->allocate_instance_handle(CHECK_NULL);
- size_t name_buf_len = strlen(vmField.typeName) + strlen(vmField.fieldName) + 2 /* "::" */;
- char* name_buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, name_buf_len + 1);
- sprintf(name_buf, "%s::%s", vmField.typeName, vmField.fieldName);
- CSTRING_TO_JSTRING(name, name_buf);
- CSTRING_TO_JSTRING(type, vmField.typeString);
- VMField::set_name(vmFieldObj, name());
- VMField::set_type(vmFieldObj, type());
- VMField::set_offset(vmFieldObj, vmField.offset);
- VMField::set_address(vmFieldObj, (jlong) vmField.address);
- if (vmField.isStatic && vmField.typeString != NULL) {
- if (strcmp(vmField.typeString, "bool") == 0) {
- BOXED_BOOLEAN(box, *(jbyte*) vmField.address);
- VMField::set_value(vmFieldObj, box);
- } else if (strcmp(vmField.typeString, "int") == 0 ||
- strcmp(vmField.typeString, "jint") == 0) {
- BOXED_LONG(box, *(jint*) vmField.address);
- VMField::set_value(vmFieldObj, box);
- } else if (strcmp(vmField.typeString, "uint64_t") == 0) {
- BOXED_LONG(box, *(uint64_t*) vmField.address);
- VMField::set_value(vmFieldObj, box);
- } else if (strcmp(vmField.typeString, "address") == 0 ||
- strcmp(vmField.typeString, "intptr_t") == 0 ||
- strcmp(vmField.typeString, "uintptr_t") == 0 ||
- strcmp(vmField.typeString, "OopHandle") == 0 ||
- strcmp(vmField.typeString, "size_t") == 0 ||
- // All foo* types are addresses.
- vmField.typeString[strlen(vmField.typeString) - 1] == '*') {
- BOXED_LONG(box, *((address*) vmField.address));
- VMField::set_value(vmFieldObj, box);
- } else {
- JVMCI_ERROR_NULL("VM field %s has unsupported type %s", name_buf, vmField.typeString);
- }
- }
- vmFields->obj_at_put(i, vmFieldObj());
- }
-
- int ints_len = JVMCIVMStructs::localHotSpotVMIntConstants_count();
- int longs_len = JVMCIVMStructs::localHotSpotVMLongConstants_count();
- len = ints_len + longs_len;
- objArrayHandle vmConstants = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), len * 2, CHECK_NULL);
- int insert = 0;
- for (int i = 0; i < ints_len ; i++) {
- VMIntConstantEntry c = JVMCIVMStructs::localHotSpotVMIntConstants[i];
- CSTRING_TO_JSTRING(name, c.name);
- BOXED_LONG(value, c.value);
- vmConstants->obj_at_put(insert++, name());
- vmConstants->obj_at_put(insert++, value);
- }
- for (int i = 0; i < longs_len ; i++) {
- VMLongConstantEntry c = JVMCIVMStructs::localHotSpotVMLongConstants[i];
- CSTRING_TO_JSTRING(name, c.name);
- BOXED_LONG(value, c.value);
- vmConstants->obj_at_put(insert++, name());
- vmConstants->obj_at_put(insert++, value);
- }
- assert(insert == len * 2, "must be");
-
- len = JVMCIVMStructs::localHotSpotVMAddresses_count();
- objArrayHandle vmAddresses = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), len * 2, CHECK_NULL);
- for (int i = 0; i < len ; i++) {
- VMAddressEntry a = JVMCIVMStructs::localHotSpotVMAddresses[i];
- CSTRING_TO_JSTRING(name, a.name);
- BOXED_LONG(value, a.value);
- vmAddresses->obj_at_put(i * 2, name());
- vmAddresses->obj_at_put(i * 2 + 1, value);
- }
-
-#define COUNT_FLAG(ignore) +1
-#ifdef ASSERT
-#define CHECK_FLAG(type, name) { \
- Flag* flag = Flag::find_flag(#name, strlen(#name), /*allow_locked*/ true, /* return_flag */ true); \
- assert(flag != NULL, "No such flag named " #name); \
- assert(flag->is_##type(), "Flag " #name " is not of type " #type); \
-}
-#else
-#define CHECK_FLAG(type, name)
-#endif
-
-#define ADD_FLAG(type, name, convert) { \
- CHECK_FLAG(type, name) \
- instanceHandle vmFlagObj = InstanceKlass::cast(VMFlag::klass())->allocate_instance_handle(CHECK_NULL); \
- CSTRING_TO_JSTRING(fname, #name); \
- CSTRING_TO_JSTRING(ftype, #type); \
- VMFlag::set_name(vmFlagObj, fname()); \
- VMFlag::set_type(vmFlagObj, ftype()); \
- convert(value, name); \
- VMFlag::set_value(vmFlagObj, value); \
- vmFlags->obj_at_put(i++, vmFlagObj()); \
-}
-#define ADD_BOOL_FLAG(name) ADD_FLAG(bool, name, BOXED_BOOLEAN)
-#define ADD_INTX_FLAG(name) ADD_FLAG(intx, name, BOXED_LONG)
-#define ADD_UINTX_FLAG(name) ADD_FLAG(uintx, name, BOXED_LONG)
-
- len = 0 + PREDEFINED_CONFIG_FLAGS(COUNT_FLAG, COUNT_FLAG, COUNT_FLAG);
- objArrayHandle vmFlags = oopFactory::new_objArray_handle(VMFlag::klass(), len, CHECK_NULL);
- int i = 0;
- PREDEFINED_CONFIG_FLAGS(ADD_BOOL_FLAG, ADD_INTX_FLAG, ADD_UINTX_FLAG)
-
- objArrayHandle vmIntrinsics = CompilerToVM::initialize_intrinsics(CHECK_NULL);
-
- objArrayOop data = oopFactory::new_objArray(SystemDictionary::Object_klass(), 5, CHECK_NULL);
- data->obj_at_put(0, vmFields());
- data->obj_at_put(1, vmConstants());
- data->obj_at_put(2, vmAddresses());
- data->obj_at_put(3, vmFlags());
- data->obj_at_put(4, vmIntrinsics());
-
- return (jobjectArray) JNIHandles::make_local(THREAD, data);
-#undef COUNT_FLAG
-#undef ADD_FLAG
-#undef ADD_BOOL_FLAG
-#undef ADD_INTX_FLAG
-#undef ADD_UINTX_FLAG
-#undef CHECK_FLAG
+ jobjectArray config = readConfiguration0(env, CHECK_NULL);
+ return config;
C2V_END
C2V_VMENTRY(jobject, getFlagValue, (JNIEnv *, jobject c2vm, jobject name_handle))
@@ -547,12 +144,10 @@
} else {
JVMCI_ERROR_NULL("VM flag %s has unsupported type %s", flag->_name, flag->_type);
}
+#undef RETURN_BOXED_LONG
+#undef RETURN_BOXED_DOUBLE
C2V_END
-#undef BOXED_LONG
-#undef BOXED_DOUBLE
-#undef CSTRING_TO_JSTRING
-
C2V_VMENTRY(jbyteArray, getBytecode, (JNIEnv *, jobject, jobject jvmci_method))
methodHandle method = CompilerToVM::asMethod(jvmci_method);
ResourceMark rm;
@@ -1373,53 +968,40 @@
return false;
}
-C2V_VMENTRY(jobject, getNextStackFrame, (JNIEnv*, jobject compilerToVM, jobject hs_frame, jobjectArray methods, jint initialSkip))
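+// Resolves an interface call to the named method and invokes it with the
+// given arguments; used below to call InspectedFrameVisitor.visitFrame.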
+void call_interface(JavaValue* result, Klass* spec_klass, Symbol* name, Symbol* signature, JavaCallArguments* args, TRAPS) {
+ CallInfo callinfo;
+ Handle receiver = args->receiver();
+ Klass* recvrKlass = receiver.is_null() ? (Klass*)NULL : receiver->klass();
+ LinkInfo link_info(spec_klass, name, signature);
+ LinkResolver::resolve_interface_call(
+ callinfo, receiver, recvrKlass, link_info, true, CHECK);
+ methodHandle method = callinfo.selected_method();
+ assert(method.not_null(), "should have thrown exception");
+
+ // Invoke the method
+ JavaCalls::call(result, method, args, CHECK);
+}
+
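+// Walks the Java frames of the current thread and passes each matching frame
+// to visitor.visitFrame. 'initial_methods' filters the first matching frame,
+// 'match_methods' all subsequent ones; iteration stops as soon as the visitor
+// returns a non-null result.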
+C2V_VMENTRY(jobject, iterateFrames, (JNIEnv*, jobject compilerToVM, jobjectArray initial_methods, jobjectArray match_methods, jint initialSkip, jobject visitor_handle))
ResourceMark rm;
- if (!thread->has_last_Java_frame()) return NULL;
- Handle result = HotSpotStackFrameReference::klass()->allocate_instance_handle(CHECK_NULL);
+ if (!thread->has_last_Java_frame()) {
+ return NULL;
+ }
+ Handle visitor(THREAD, JNIHandles::resolve_non_null(visitor_handle));
+ Handle frame_reference = HotSpotStackFrameReference::klass()->allocate_instance_handle(CHECK_NULL);
HotSpotStackFrameReference::klass()->initialize(CHECK_NULL);
StackFrameStream fst(thread);
- if (hs_frame != NULL) {
- // look for the correct stack frame if one is given
- intptr_t* stack_pointer = (intptr_t*) HotSpotStackFrameReference::stackPointer(hs_frame);
- while (fst.current()->sp() != stack_pointer && !fst.is_done()) {
- fst.next();
- }
- if (fst.current()->sp() != stack_pointer) {
- THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "stack frame not found")
- }
- }
+
+ jobjectArray methods = initial_methods;
int frame_number = 0;
vframe* vf = vframe::new_vframe(fst.current(), fst.register_map(), thread);
- if (hs_frame != NULL) {
- // look for the correct vframe within the stack frame if one is given
- int last_frame_number = HotSpotStackFrameReference::frameNumber(hs_frame);
- while (frame_number < last_frame_number) {
- if (vf->is_top()) {
- THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "invalid frame number")
- }
- vf = vf->sender();
- frame_number ++;
- }
- // move one frame forward
- if (vf->is_top()) {
- if (fst.is_done()) {
- return NULL;
- }
- fst.next();
- vf = vframe::new_vframe(fst.current(), fst.register_map(), thread);
- frame_number = 0;
- } else {
- vf = vf->sender();
- frame_number++;
- }
- }
while (true) {
// look for the given method
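+ // Tracks whether scalar-replaced objects of the current physical frame
+ // have already been reallocated by an earlier visit.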
+ bool realloc_called = false;
while (true) {
StackValueCollection* locals = NULL;
if (vf->is_compiled_frame()) {
@@ -1427,13 +1009,28 @@
compiledVFrame* cvf = compiledVFrame::cast(vf);
if (methods == NULL || matches(methods, cvf->method())) {
if (initialSkip > 0) {
- initialSkip --;
+ initialSkip--;
} else {
ScopeDesc* scope = cvf->scope();
// native wrappers do not have a scope
if (scope != NULL && scope->objects() != NULL) {
- bool realloc_failures = Deoptimization::realloc_objects(thread, fst.current(), scope->objects(), CHECK_NULL);
- Deoptimization::reassign_fields(fst.current(), fst.register_map(), scope->objects(), realloc_failures, false);
+ GrowableArray<ScopeValue*>* objects;
+ if (!realloc_called) {
+ objects = scope->objects();
+ } else {
+ // Some objects might already have been reallocated; only reallocate those not yet allocated.
+ objects = new GrowableArray<ScopeValue*>(scope->objects()->length());
+ int ii = 0;
+ for (int i = 0; i < scope->objects()->length(); i++) {
+ ObjectValue* sv = (ObjectValue*) scope->objects()->at(i);
+ if (sv->value().is_null()) {
+ objects->at_put(ii++, sv);
+ }
+ }
+ }
+ bool realloc_failures = Deoptimization::realloc_objects(thread, fst.current(), objects, CHECK_NULL);
+ Deoptimization::reassign_fields(fst.current(), fst.register_map(), objects, realloc_failures, false);
+ realloc_called = true;
GrowableArray<ScopeValue*>* local_values = scope->locals();
assert(local_values != NULL, "NULL locals");
@@ -1445,15 +1042,15 @@
array->bool_at_put(i, true);
}
}
- HotSpotStackFrameReference::set_localIsVirtual(result, array());
+ HotSpotStackFrameReference::set_localIsVirtual(frame_reference, array());
} else {
- HotSpotStackFrameReference::set_localIsVirtual(result, NULL);
+ HotSpotStackFrameReference::set_localIsVirtual(frame_reference, NULL);
}
locals = cvf->locals();
- HotSpotStackFrameReference::set_bci(result, cvf->bci());
+ HotSpotStackFrameReference::set_bci(frame_reference, cvf->bci());
oop method = CompilerToVM::get_jvmci_method(cvf->method(), CHECK_NULL);
- HotSpotStackFrameReference::set_method(result, method);
+ HotSpotStackFrameReference::set_method(frame_reference, method);
}
}
} else if (vf->is_interpreted_frame()) {
@@ -1461,22 +1058,23 @@
interpretedVFrame* ivf = interpretedVFrame::cast(vf);
if (methods == NULL || matches(methods, ivf->method())) {
if (initialSkip > 0) {
- initialSkip --;
+ initialSkip--;
} else {
locals = ivf->locals();
- HotSpotStackFrameReference::set_bci(result, ivf->bci());
+ HotSpotStackFrameReference::set_bci(frame_reference, ivf->bci());
oop method = CompilerToVM::get_jvmci_method(ivf->method(), CHECK_NULL);
- HotSpotStackFrameReference::set_method(result, method);
- HotSpotStackFrameReference::set_localIsVirtual(result, NULL);
+ HotSpotStackFrameReference::set_method(frame_reference, method);
+ HotSpotStackFrameReference::set_localIsVirtual(frame_reference, NULL);
}
}
}
-// locals != NULL means that we found a matching frame and result is already partially initialized
+// locals != NULL means that we found a matching frame and frame_reference is already partially initialized
if (locals != NULL) {
- HotSpotStackFrameReference::set_compilerToVM(result, JNIHandles::resolve(compilerToVM));
- HotSpotStackFrameReference::set_stackPointer(result, (jlong) fst.current()->sp());
- HotSpotStackFrameReference::set_frameNumber(result, frame_number);
+ methods = match_methods;
+ HotSpotStackFrameReference::set_compilerToVM(frame_reference, JNIHandles::resolve(compilerToVM));
+ HotSpotStackFrameReference::set_stackPointer(frame_reference, (jlong) fst.current()->sp());
+ HotSpotStackFrameReference::set_frameNumber(frame_reference, frame_number);
// initialize the locals array
objArrayOop array_oop = oopFactory::new_objectArray(locals->size(), CHECK_NULL);
@@ -1487,9 +1085,41 @@
array->obj_at_put(i, locals->at(i)->get_obj()());
}
}
- HotSpotStackFrameReference::set_locals(result, array());
+ HotSpotStackFrameReference::set_locals(frame_reference, array());
+ HotSpotStackFrameReference::set_objectsMaterialized(frame_reference, JNI_FALSE);
- return JNIHandles::make_local(thread, result());
+ JavaValue result(T_OBJECT);
+ JavaCallArguments args(visitor);
+ args.push_oop(frame_reference);
+ call_interface(&result, SystemDictionary::InspectedFrameVisitor_klass(), vmSymbols::visitFrame_name(), vmSymbols::visitFrame_signature(), &args, CHECK_NULL);
+ if (result.get_jobject() != NULL) {
+ return JNIHandles::make_local(thread, (oop) result.get_jobject());
+ }
+ assert(initialSkip == 0, "There should be no match before initialSkip == 0");
+ if (HotSpotStackFrameReference::objectsMaterialized(frame_reference) == JNI_TRUE) {
+ // The frame has been deoptimized; re-synchronize the frame and vframe.
+ intptr_t* stack_pointer = (intptr_t*) HotSpotStackFrameReference::stackPointer(frame_reference);
+ fst = StackFrameStream(thread);
+ while (fst.current()->sp() != stack_pointer && !fst.is_done()) {
+ fst.next();
+ }
+ if (fst.current()->sp() != stack_pointer) {
+ THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "stack frame not found after deopt")
+ }
+ vf = vframe::new_vframe(fst.current(), fst.register_map(), thread);
+ if (!vf->is_compiled_frame()) {
+ THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "compiled stack frame expected")
+ }
+ for (int i = 0; i < frame_number; i++) {
+ if (vf->is_top()) {
+ THROW_MSG_NULL(vmSymbols::java_lang_IllegalStateException(), "vframe not found after deopt")
+ }
+ vf = vf->sender();
+ assert(vf->is_compiled_frame(), "Wrong frame type");
+ }
+ }
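+ // Allocate a fresh frame reference object for the next frame to be visited.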
+ frame_reference = HotSpotStackFrameReference::klass()->allocate_instance_handle(CHECK_NULL);
+ HotSpotStackFrameReference::klass()->initialize(CHECK_NULL);
}
if (vf->is_top()) {
@@ -1709,6 +1339,7 @@
array->obj_at_put(i, locals->at(i)->get_obj()());
}
}
+ HotSpotStackFrameReference::set_objectsMaterialized(hs_frame, JNI_TRUE);
C2V_END
C2V_VMENTRY(void, writeDebugOutput, (JNIEnv*, jobject, jbyteArray bytes, jint offset, jint length))
@@ -1823,24 +1454,25 @@
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &(c2v_ ## f))
-#define STRING "Ljava/lang/String;"
-#define OBJECT "Ljava/lang/Object;"
-#define CLASS "Ljava/lang/Class;"
-#define EXECUTABLE "Ljava/lang/reflect/Executable;"
-#define STACK_TRACE_ELEMENT "Ljava/lang/StackTraceElement;"
-#define INSTALLED_CODE "Ljdk/vm/ci/code/InstalledCode;"
-#define TARGET_DESCRIPTION "Ljdk/vm/ci/code/TargetDescription;"
-#define BYTECODE_FRAME "Ljdk/vm/ci/code/BytecodeFrame;"
-#define RESOLVED_METHOD "Ljdk/vm/ci/meta/ResolvedJavaMethod;"
-#define HS_RESOLVED_METHOD "Ljdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl;"
-#define HS_RESOLVED_KLASS "Ljdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl;"
-#define HS_CONSTANT_POOL "Ljdk/vm/ci/hotspot/HotSpotConstantPool;"
-#define HS_COMPILED_CODE "Ljdk/vm/ci/hotspot/HotSpotCompiledCode;"
-#define HS_CONFIG "Ljdk/vm/ci/hotspot/HotSpotVMConfig;"
-#define HS_METADATA "Ljdk/vm/ci/hotspot/HotSpotMetaData;"
-#define HS_STACK_FRAME_REF "Ljdk/vm/ci/hotspot/HotSpotStackFrameReference;"
-#define HS_SPECULATION_LOG "Ljdk/vm/ci/hotspot/HotSpotSpeculationLog;"
-#define METASPACE_METHOD_DATA "J"
+#define STRING "Ljava/lang/String;"
+#define OBJECT "Ljava/lang/Object;"
+#define CLASS "Ljava/lang/Class;"
+#define EXECUTABLE "Ljava/lang/reflect/Executable;"
+#define STACK_TRACE_ELEMENT "Ljava/lang/StackTraceElement;"
+#define INSTALLED_CODE "Ljdk/vm/ci/code/InstalledCode;"
+#define TARGET_DESCRIPTION "Ljdk/vm/ci/code/TargetDescription;"
+#define BYTECODE_FRAME "Ljdk/vm/ci/code/BytecodeFrame;"
+#define INSPECTED_FRAME_VISITOR "Ljdk/vm/ci/code/stack/InspectedFrameVisitor;"
+#define RESOLVED_METHOD "Ljdk/vm/ci/meta/ResolvedJavaMethod;"
+#define HS_RESOLVED_METHOD "Ljdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl;"
+#define HS_RESOLVED_KLASS "Ljdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl;"
+#define HS_CONSTANT_POOL "Ljdk/vm/ci/hotspot/HotSpotConstantPool;"
+#define HS_COMPILED_CODE "Ljdk/vm/ci/hotspot/HotSpotCompiledCode;"
+#define HS_CONFIG "Ljdk/vm/ci/hotspot/HotSpotVMConfig;"
+#define HS_METADATA "Ljdk/vm/ci/hotspot/HotSpotMetaData;"
+#define HS_STACK_FRAME_REF "Ljdk/vm/ci/hotspot/HotSpotStackFrameReference;"
+#define HS_SPECULATION_LOG "Ljdk/vm/ci/hotspot/HotSpotSpeculationLog;"
+#define METASPACE_METHOD_DATA "J"
JNINativeMethod CompilerToVM::methods[] = {
{CC "getBytecode", CC "(" HS_RESOLVED_METHOD ")[B", FN_PTR(getBytecode)},
@@ -1896,7 +1528,7 @@
{CC "isMature", CC "(" METASPACE_METHOD_DATA ")Z", FN_PTR(isMature)},
{CC "hasCompiledCodeForOSR", CC "(" HS_RESOLVED_METHOD "II)Z", FN_PTR(hasCompiledCodeForOSR)},
{CC "getSymbol", CC "(J)" STRING, FN_PTR(getSymbol)},
- {CC "getNextStackFrame", CC "(" HS_STACK_FRAME_REF "[" RESOLVED_METHOD "I)" HS_STACK_FRAME_REF, FN_PTR(getNextStackFrame)},
+ {CC "iterateFrames", CC "([" RESOLVED_METHOD "[" RESOLVED_METHOD "I" INSPECTED_FRAME_VISITOR ")" OBJECT, FN_PTR(iterateFrames)},
{CC "materializeVirtualObjects", CC "(" HS_STACK_FRAME_REF "Z)V", FN_PTR(materializeVirtualObjects)},
{CC "shouldDebugNonSafepoints", CC "()Z", FN_PTR(shouldDebugNonSafepoints)},
{CC "writeDebugOutput", CC "([BII)V", FN_PTR(writeDebugOutput)},
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+// no precompiled headers
+#include "ci/ciUtilities.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/objArrayOop.inline.hpp"
+#include "jvmci/jvmciRuntime.hpp"
+#include "jvmci/jvmciCompilerToVM.hpp"
+#include "jvmci/vmStructs_jvmci.hpp"
+#include "utilities/resourceHash.hpp"
+
+int CompilerToVM::Data::Klass_vtable_start_offset;
+int CompilerToVM::Data::Klass_vtable_length_offset;
+
+int CompilerToVM::Data::Method_extra_stack_entries;
+
+address CompilerToVM::Data::SharedRuntime_ic_miss_stub;
+address CompilerToVM::Data::SharedRuntime_handle_wrong_method_stub;
+address CompilerToVM::Data::SharedRuntime_deopt_blob_unpack;
+address CompilerToVM::Data::SharedRuntime_deopt_blob_uncommon_trap;
+
+size_t CompilerToVM::Data::ThreadLocalAllocBuffer_alignment_reserve;
+
+CollectedHeap* CompilerToVM::Data::Universe_collectedHeap;
+int CompilerToVM::Data::Universe_base_vtable_size;
+address CompilerToVM::Data::Universe_narrow_oop_base;
+int CompilerToVM::Data::Universe_narrow_oop_shift;
+address CompilerToVM::Data::Universe_narrow_klass_base;
+int CompilerToVM::Data::Universe_narrow_klass_shift;
+void* CompilerToVM::Data::Universe_non_oop_bits;
+uintptr_t CompilerToVM::Data::Universe_verify_oop_mask;
+uintptr_t CompilerToVM::Data::Universe_verify_oop_bits;
+
+bool CompilerToVM::Data::_supports_inline_contig_alloc;
+HeapWord** CompilerToVM::Data::_heap_end_addr;
+HeapWord* volatile* CompilerToVM::Data::_heap_top_addr;
+int CompilerToVM::Data::_max_oop_map_stack_offset;
+
+jbyte* CompilerToVM::Data::cardtable_start_address;
+int CompilerToVM::Data::cardtable_shift;
+
+int CompilerToVM::Data::vm_page_size;
+
+int CompilerToVM::Data::sizeof_vtableEntry = sizeof(vtableEntry);
+int CompilerToVM::Data::sizeof_ExceptionTableElement = sizeof(ExceptionTableElement);
+int CompilerToVM::Data::sizeof_LocalVariableTableElement = sizeof(LocalVariableTableElement);
+int CompilerToVM::Data::sizeof_ConstantPool = sizeof(ConstantPool);
+int CompilerToVM::Data::sizeof_SymbolPointer = sizeof(Symbol*);
+int CompilerToVM::Data::sizeof_narrowKlass = sizeof(narrowKlass);
+int CompilerToVM::Data::sizeof_arrayOopDesc = sizeof(arrayOopDesc);
+int CompilerToVM::Data::sizeof_BasicLock = sizeof(BasicLock);
+
+address CompilerToVM::Data::dsin;
+address CompilerToVM::Data::dcos;
+address CompilerToVM::Data::dtan;
+address CompilerToVM::Data::dexp;
+address CompilerToVM::Data::dlog;
+address CompilerToVM::Data::dlog10;
+address CompilerToVM::Data::dpow;
+
+address CompilerToVM::Data::symbol_init;
+address CompilerToVM::Data::symbol_clinit;
+
+void CompilerToVM::Data::initialize(TRAPS) {
+ Klass_vtable_start_offset = in_bytes(Klass::vtable_start_offset());
+ Klass_vtable_length_offset = in_bytes(Klass::vtable_length_offset());
+
+ Method_extra_stack_entries = Method::extra_stack_entries();
+
+ SharedRuntime_ic_miss_stub = SharedRuntime::get_ic_miss_stub();
+ SharedRuntime_handle_wrong_method_stub = SharedRuntime::get_handle_wrong_method_stub();
+ SharedRuntime_deopt_blob_unpack = SharedRuntime::deopt_blob()->unpack();
+ SharedRuntime_deopt_blob_uncommon_trap = SharedRuntime::deopt_blob()->uncommon_trap();
+
+ ThreadLocalAllocBuffer_alignment_reserve = ThreadLocalAllocBuffer::alignment_reserve();
+
+ Universe_collectedHeap = Universe::heap();
+ Universe_base_vtable_size = Universe::base_vtable_size();
+ Universe_narrow_oop_base = Universe::narrow_oop_base();
+ Universe_narrow_oop_shift = Universe::narrow_oop_shift();
+ Universe_narrow_klass_base = Universe::narrow_klass_base();
+ Universe_narrow_klass_shift = Universe::narrow_klass_shift();
+ Universe_non_oop_bits = Universe::non_oop_word();
+ Universe_verify_oop_mask = Universe::verify_oop_mask();
+ Universe_verify_oop_bits = Universe::verify_oop_bits();
+
+ _supports_inline_contig_alloc = Universe::heap()->supports_inline_contig_alloc();
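+ // A sentinel value of -1 is stored when inline contiguous allocation
+ // is not supported.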
+ _heap_end_addr = _supports_inline_contig_alloc ? Universe::heap()->end_addr() : (HeapWord**) -1;
+ _heap_top_addr = _supports_inline_contig_alloc ? Universe::heap()->top_addr() : (HeapWord* volatile*) -1;
+
+ _max_oop_map_stack_offset = (OopMapValue::register_mask - VMRegImpl::stack2reg(0)->value()) * VMRegImpl::stack_slot_size;
+ int max_oop_map_stack_index = _max_oop_map_stack_offset / VMRegImpl::stack_slot_size;
+ assert(OopMapValue::legal_vm_reg_name(VMRegImpl::stack2reg(max_oop_map_stack_index)), "should be valid");
+ assert(!OopMapValue::legal_vm_reg_name(VMRegImpl::stack2reg(max_oop_map_stack_index + 1)), "should be invalid");
+
+ symbol_init = (address) vmSymbols::object_initializer_name();
+ symbol_clinit = (address) vmSymbols::class_initializer_name();
+
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ if (bs->is_a(BarrierSet::CardTableModRef)) {
+ jbyte* base = ci_card_table_address();
+ assert(base != NULL, "unexpected byte_map_base");
+ cardtable_start_address = base;
+ cardtable_shift = CardTable::card_shift;
+ } else {
+ // No card mark barriers
+ cardtable_start_address = 0;
+ cardtable_shift = 0;
+ }
+
+ vm_page_size = os::vm_page_size();
+
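+// Use the generated stub routine for a math function if one exists; otherwise
+// fall back to the shared runtime implementation.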
+#define SET_TRIGFUNC(name) \
+ if (StubRoutines::name() != NULL) { \
+ name = StubRoutines::name(); \
+ } else { \
+ name = CAST_FROM_FN_PTR(address, SharedRuntime::name); \
+ }
+
+ SET_TRIGFUNC(dsin);
+ SET_TRIGFUNC(dcos);
+ SET_TRIGFUNC(dtan);
+ SET_TRIGFUNC(dexp);
+ SET_TRIGFUNC(dlog10);
+ SET_TRIGFUNC(dlog);
+ SET_TRIGFUNC(dpow);
+
+#undef SET_TRIGFUNC
+}
+
+objArrayHandle CompilerToVM::initialize_intrinsics(TRAPS) {
+ objArrayHandle vmIntrinsics = oopFactory::new_objArray_handle(VMIntrinsicMethod::klass(), (vmIntrinsics::ID_LIMIT - 1), CHECK_(objArrayHandle()));
+ int index = 0;
+ // The intrinsics for a class are usually adjacent to each other.
+ // When they are, the string for the class name can be reused.
+ vmSymbols::SID kls_sid = vmSymbols::NO_SID;
+ Handle kls_str;
+#define VM_SYMBOL_TO_STRING(s) \
+ java_lang_String::create_from_symbol(vmSymbols::symbol_at(vmSymbols::VM_SYMBOL_ENUM_NAME(s)), CHECK_(objArrayHandle()))
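+// Creates a VMIntrinsicMethod object for one intrinsic and appends it to the
+// vmIntrinsics array.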
+#define VM_INTRINSIC_INFO(id, kls, name, sig, ignore_fcode) { \
+ instanceHandle vmIntrinsicMethod = InstanceKlass::cast(VMIntrinsicMethod::klass())->allocate_instance_handle(CHECK_(objArrayHandle())); \
+ vmSymbols::SID sid = vmSymbols::VM_SYMBOL_ENUM_NAME(kls); \
+ if (kls_sid != sid) { \
+ kls_str = VM_SYMBOL_TO_STRING(kls); \
+ kls_sid = sid; \
+ } \
+ Handle name_str = VM_SYMBOL_TO_STRING(name); \
+ Handle sig_str = VM_SYMBOL_TO_STRING(sig); \
+ VMIntrinsicMethod::set_declaringClass(vmIntrinsicMethod, kls_str()); \
+ VMIntrinsicMethod::set_name(vmIntrinsicMethod, name_str()); \
+ VMIntrinsicMethod::set_descriptor(vmIntrinsicMethod, sig_str()); \
+ VMIntrinsicMethod::set_id(vmIntrinsicMethod, vmIntrinsics::id); \
+ vmIntrinsics->obj_at_put(index++, vmIntrinsicMethod()); \
+ }
+
+ VM_INTRINSICS_DO(VM_INTRINSIC_INFO, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE)
+#undef VM_SYMBOL_TO_STRING
+#undef VM_INTRINSIC_INFO
+ assert(index == vmIntrinsics::ID_LIMIT - 1, "must be");
+
+ return vmIntrinsics;
+}
+
+/**
+ * The set of VM flags known to be used by JVMCI; their values are exported by readConfiguration0.
+ */
+#define PREDEFINED_CONFIG_FLAGS(do_bool_flag, do_intx_flag, do_uintx_flag) \
+ do_intx_flag(AllocateInstancePrefetchLines) \
+ do_intx_flag(AllocatePrefetchDistance) \
+ do_intx_flag(AllocatePrefetchInstr) \
+ do_intx_flag(AllocatePrefetchLines) \
+ do_intx_flag(AllocatePrefetchStepSize) \
+ do_intx_flag(AllocatePrefetchStyle) \
+ do_intx_flag(BciProfileWidth) \
+ do_bool_flag(BootstrapJVMCI) \
+ do_bool_flag(CITime) \
+ do_bool_flag(CITimeEach) \
+ do_uintx_flag(CodeCacheSegmentSize) \
+ do_intx_flag(CodeEntryAlignment) \
+ do_bool_flag(CompactFields) \
+ NOT_PRODUCT(do_intx_flag(CompileTheWorldStartAt)) \
+ NOT_PRODUCT(do_intx_flag(CompileTheWorldStopAt)) \
+ do_intx_flag(ContendedPaddingWidth) \
+ do_bool_flag(DontCompileHugeMethods) \
+ do_bool_flag(EagerJVMCI) \
+ do_bool_flag(EnableContended) \
+ do_intx_flag(FieldsAllocationStyle) \
+ do_bool_flag(FoldStableValues) \
+ do_bool_flag(ForceUnreachable) \
+ do_intx_flag(HugeMethodLimit) \
+ do_bool_flag(Inline) \
+ do_intx_flag(JVMCICounterSize) \
+ do_bool_flag(JVMCIPrintProperties) \
+ do_bool_flag(JVMCIUseFastLocking) \
+ do_intx_flag(MethodProfileWidth) \
+ do_intx_flag(ObjectAlignmentInBytes) \
+ do_bool_flag(PrintInlining) \
+ do_bool_flag(ReduceInitialCardMarks) \
+ do_bool_flag(RestrictContended) \
+ do_intx_flag(StackReservedPages) \
+ do_intx_flag(StackShadowPages) \
+ do_bool_flag(TLABStats) \
+ do_uintx_flag(TLABWasteIncrement) \
+ do_intx_flag(TypeProfileWidth) \
+ do_bool_flag(UseAESIntrinsics) \
+ X86_ONLY(do_intx_flag(UseAVX)) \
+ do_bool_flag(UseBiasedLocking) \
+ do_bool_flag(UseCRC32Intrinsics) \
+ do_bool_flag(UseCompressedClassPointers) \
+ do_bool_flag(UseCompressedOops) \
+ do_bool_flag(UseConcMarkSweepGC) \
+ X86_ONLY(do_bool_flag(UseCountLeadingZerosInstruction)) \
+ X86_ONLY(do_bool_flag(UseCountTrailingZerosInstruction)) \
+ do_bool_flag(UseG1GC) \
+ COMPILER2_PRESENT(do_bool_flag(UseMontgomeryMultiplyIntrinsic)) \
+ COMPILER2_PRESENT(do_bool_flag(UseMontgomerySquareIntrinsic)) \
+ COMPILER2_PRESENT(do_bool_flag(UseMulAddIntrinsic)) \
+ COMPILER2_PRESENT(do_bool_flag(UseMultiplyToLenIntrinsic)) \
+ do_bool_flag(UsePopCountInstruction) \
+ do_bool_flag(UseSHA1Intrinsics) \
+ do_bool_flag(UseSHA256Intrinsics) \
+ do_bool_flag(UseSHA512Intrinsics) \
+ do_intx_flag(UseSSE) \
+ COMPILER2_PRESENT(do_bool_flag(UseSquareToLenIntrinsic)) \
+ do_bool_flag(UseStackBanging) \
+ do_bool_flag(UseTLAB) \
+ do_bool_flag(VerifyOops) \
+
+#define BOXED_BOOLEAN(name, value) oop name = ((jboolean)(value) ? boxedTrue() : boxedFalse())
+#define BOXED_DOUBLE(name, value) oop name; do { jvalue p; p.d = (jdouble) (value); name = java_lang_boxing_object::create(T_DOUBLE, &p, CHECK_NULL);} while(0)
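+// Canonicalizes boxed longs through the 'longs' table so that equal values
+// share a single java.lang.Long instance.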
+#define BOXED_LONG(name, value) \
+ oop name; \
+ do { \
+ jvalue p; p.j = (jlong) (value); \
+ Handle* e = longs.get(p.j); \
+ if (e == NULL) { \
+ oop o = java_lang_boxing_object::create(T_LONG, &p, CHECK_NULL); \
+ Handle h(THREAD, o); \
+ longs.put(p.j, h); \
+ name = h(); \
+ } else { \
+ name = (*e)(); \
+ } \
+ } while (0)
+
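+// Converts a C string to a java.lang.String, reusing a previously created
+// String for identical characters via the 'strings' table.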
+#define CSTRING_TO_JSTRING(name, value) \
+ Handle name; \
+ do { \
+ if (value != NULL) { \
+ Handle* e = strings.get(value); \
+ if (e == NULL) { \
+ Handle h = java_lang_String::create_from_str(value, CHECK_NULL); \
+ strings.put(value, h); \
+ name = h; \
+ } else { \
+ name = (*e); \
+ } \
+ } \
+ } while (0)
+
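+// Gathers the VM fields, constants, addresses, flags and intrinsics that make
+// up the JVMCI configuration and returns them as a 5-element Object[].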
+jobjectArray readConfiguration0(JNIEnv *env, TRAPS) {
+ ResourceMark rm;
+ HandleMark hm;
+
+ // Used to canonicalize Long and String values.
+ ResourceHashtable<jlong, Handle> longs;
+ ResourceHashtable<const char*, Handle, &CompilerToVM::cstring_hash, &CompilerToVM::cstring_equals> strings;
+
+ jvalue prim;
+ prim.z = true; oop boxedTrueOop = java_lang_boxing_object::create(T_BOOLEAN, &prim, CHECK_NULL);
+ Handle boxedTrue(THREAD, boxedTrueOop);
+ prim.z = false; oop boxedFalseOop = java_lang_boxing_object::create(T_BOOLEAN, &prim, CHECK_NULL);
+ Handle boxedFalse(THREAD, boxedFalseOop);
+
+ CompilerToVM::Data::initialize(CHECK_NULL);
+
+ VMField::klass()->initialize(CHECK_NULL);
+ VMFlag::klass()->initialize(CHECK_NULL);
+ VMIntrinsicMethod::klass()->initialize(CHECK_NULL);
+
+ int len = JVMCIVMStructs::localHotSpotVMStructs_count();
+ objArrayHandle vmFields = oopFactory::new_objArray_handle(VMField::klass(), len, CHECK_NULL);
+ for (int i = 0; i < len ; i++) {
+ VMStructEntry vmField = JVMCIVMStructs::localHotSpotVMStructs[i];
+ instanceHandle vmFieldObj = InstanceKlass::cast(VMField::klass())->allocate_instance_handle(CHECK_NULL);
+ size_t name_buf_len = strlen(vmField.typeName) + strlen(vmField.fieldName) + 2 /* "::" */;
+ char* name_buf = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, name_buf_len + 1);
+ sprintf(name_buf, "%s::%s", vmField.typeName, vmField.fieldName);
+ CSTRING_TO_JSTRING(name, name_buf);
+ CSTRING_TO_JSTRING(type, vmField.typeString);
+ VMField::set_name(vmFieldObj, name());
+ VMField::set_type(vmFieldObj, type());
+ VMField::set_offset(vmFieldObj, vmField.offset);
+ VMField::set_address(vmFieldObj, (jlong) vmField.address);
+ if (vmField.isStatic && vmField.typeString != NULL) {
+ if (strcmp(vmField.typeString, "bool") == 0) {
+ BOXED_BOOLEAN(box, *(jbyte*) vmField.address);
+ VMField::set_value(vmFieldObj, box);
+ } else if (strcmp(vmField.typeString, "int") == 0 ||
+ strcmp(vmField.typeString, "jint") == 0) {
+ BOXED_LONG(box, *(jint*) vmField.address);
+ VMField::set_value(vmFieldObj, box);
+ } else if (strcmp(vmField.typeString, "uint64_t") == 0) {
+ BOXED_LONG(box, *(uint64_t*) vmField.address);
+ VMField::set_value(vmFieldObj, box);
+ } else if (strcmp(vmField.typeString, "address") == 0 ||
+ strcmp(vmField.typeString, "intptr_t") == 0 ||
+ strcmp(vmField.typeString, "uintptr_t") == 0 ||
+ strcmp(vmField.typeString, "OopHandle") == 0 ||
+ strcmp(vmField.typeString, "size_t") == 0 ||
+ // All foo* types are addresses.
+ vmField.typeString[strlen(vmField.typeString) - 1] == '*') {
+ BOXED_LONG(box, *((address*) vmField.address));
+ VMField::set_value(vmFieldObj, box);
+ } else {
+ JVMCI_ERROR_NULL("VM field %s has unsupported type %s", name_buf, vmField.typeString);
+ }
+ }
+ vmFields->obj_at_put(i, vmFieldObj());
+ }
+
+ int ints_len = JVMCIVMStructs::localHotSpotVMIntConstants_count();
+ int longs_len = JVMCIVMStructs::localHotSpotVMLongConstants_count();
+ len = ints_len + longs_len;
+ objArrayHandle vmConstants = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), len * 2, CHECK_NULL);
+ int insert = 0;
+ for (int i = 0; i < ints_len ; i++) {
+ VMIntConstantEntry c = JVMCIVMStructs::localHotSpotVMIntConstants[i];
+ CSTRING_TO_JSTRING(name, c.name);
+ BOXED_LONG(value, c.value);
+ vmConstants->obj_at_put(insert++, name());
+ vmConstants->obj_at_put(insert++, value);
+ }
+ for (int i = 0; i < longs_len ; i++) {
+ VMLongConstantEntry c = JVMCIVMStructs::localHotSpotVMLongConstants[i];
+ CSTRING_TO_JSTRING(name, c.name);
+ BOXED_LONG(value, c.value);
+ vmConstants->obj_at_put(insert++, name());
+ vmConstants->obj_at_put(insert++, value);
+ }
+ assert(insert == len * 2, "must be");
+
+ len = JVMCIVMStructs::localHotSpotVMAddresses_count();
+ objArrayHandle vmAddresses = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), len * 2, CHECK_NULL);
+ for (int i = 0; i < len ; i++) {
+ VMAddressEntry a = JVMCIVMStructs::localHotSpotVMAddresses[i];
+ CSTRING_TO_JSTRING(name, a.name);
+ BOXED_LONG(value, a.value);
+ vmAddresses->obj_at_put(i * 2, name());
+ vmAddresses->obj_at_put(i * 2 + 1, value);
+ }
+
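+// Expands to "+1" per flag, so PREDEFINED_CONFIG_FLAGS sums to the total
+// number of predefined flags.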
+#define COUNT_FLAG(ignore) +1
+#ifdef ASSERT
+#define CHECK_FLAG(type, name) { \
+ Flag* flag = Flag::find_flag(#name, strlen(#name), /*allow_locked*/ true, /* return_flag */ true); \
+ assert(flag != NULL, "No such flag named " #name); \
+ assert(flag->is_##type(), "Flag " #name " is not of type " #type); \
+}
+#else
+#define CHECK_FLAG(type, name)
+#endif
+
+#define ADD_FLAG(type, name, convert) { \
+ CHECK_FLAG(type, name) \
+ instanceHandle vmFlagObj = InstanceKlass::cast(VMFlag::klass())->allocate_instance_handle(CHECK_NULL); \
+ CSTRING_TO_JSTRING(fname, #name); \
+ CSTRING_TO_JSTRING(ftype, #type); \
+ VMFlag::set_name(vmFlagObj, fname()); \
+ VMFlag::set_type(vmFlagObj, ftype()); \
+ convert(value, name); \
+ VMFlag::set_value(vmFlagObj, value); \
+ vmFlags->obj_at_put(i++, vmFlagObj()); \
+}
+#define ADD_BOOL_FLAG(name) ADD_FLAG(bool, name, BOXED_BOOLEAN)
+#define ADD_INTX_FLAG(name) ADD_FLAG(intx, name, BOXED_LONG)
+#define ADD_UINTX_FLAG(name) ADD_FLAG(uintx, name, BOXED_LONG)
+
+ len = 0 + PREDEFINED_CONFIG_FLAGS(COUNT_FLAG, COUNT_FLAG, COUNT_FLAG);
+ objArrayHandle vmFlags = oopFactory::new_objArray_handle(VMFlag::klass(), len, CHECK_NULL);
+ int i = 0;
+ PREDEFINED_CONFIG_FLAGS(ADD_BOOL_FLAG, ADD_INTX_FLAG, ADD_UINTX_FLAG)
+
+ objArrayHandle vmIntrinsics = CompilerToVM::initialize_intrinsics(CHECK_NULL);
+
+ objArrayOop data = oopFactory::new_objArray(SystemDictionary::Object_klass(), 5, CHECK_NULL);
+ data->obj_at_put(0, vmFields());
+ data->obj_at_put(1, vmConstants());
+ data->obj_at_put(2, vmAddresses());
+ data->obj_at_put(3, vmFlags());
+ data->obj_at_put(4, vmIntrinsics());
+
+ return (jobjectArray) JNIHandles::make_local(THREAD, data);
+#undef COUNT_FLAG
+#undef ADD_FLAG
+#undef ADD_BOOL_FLAG
+#undef ADD_INTX_FLAG
+#undef ADD_UINTX_FLAG
+#undef CHECK_FLAG
+}
+
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,10 @@
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
+#include "oops/constantPool.inline.hpp"
+#include "oops/cpCache.inline.hpp"
+#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -288,6 +288,7 @@
end_class \
start_class(HotSpotStackFrameReference) \
oop_field(HotSpotStackFrameReference, compilerToVM, "Ljdk/vm/ci/hotspot/CompilerToVM;") \
+ boolean_field(HotSpotStackFrameReference, objectsMaterialized) \
long_field(HotSpotStackFrameReference, stackPointer) \
int_field(HotSpotStackFrameReference, frameNumber) \
int_field(HotSpotStackFrameReference, bci) \
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,6 +34,7 @@
#include "jvmci/jvmciJavaClasses.hpp"
#include "jvmci/jvmciEnv.hpp"
#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/jvmci/systemDictionary_jvmci.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/systemDictionary_jvmci.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -80,6 +80,7 @@
do_klass(site_Infopoint_klass, jdk_vm_ci_code_site_Infopoint, Jvmci) \
do_klass(site_Site_klass, jdk_vm_ci_code_site_Site, Jvmci) \
do_klass(site_InfopointReason_klass, jdk_vm_ci_code_site_InfopointReason, Jvmci) \
+ do_klass(InspectedFrameVisitor_klass, jdk_vm_ci_code_stack_InspectedFrameVisitor, Jvmci) \
do_klass(JavaConstant_klass, jdk_vm_ci_meta_JavaConstant, Jvmci) \
do_klass(PrimitiveConstant_klass, jdk_vm_ci_meta_PrimitiveConstant, Jvmci) \
do_klass(RawConstant_klass, jdk_vm_ci_meta_RawConstant, Jvmci) \
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -41,7 +41,8 @@
#include "runtime/vm_version.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/heapRegion.hpp"
#endif
--- a/src/hotspot/share/jvmci/vmSymbols_jvmci.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/jvmci/vmSymbols_jvmci.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -88,9 +88,12 @@
template(jdk_vm_ci_code_site_ExceptionHandler, "jdk/vm/ci/code/site/ExceptionHandler") \
template(jdk_vm_ci_code_site_Mark, "jdk/vm/ci/code/site/Mark") \
template(jdk_vm_ci_code_site_Infopoint, "jdk/vm/ci/code/site/Infopoint") \
+ template(jdk_vm_ci_code_stack_InspectedFrameVisitor, "jdk/vm/ci/code/stack/InspectedFrameVisitor") \
template(jdk_vm_ci_code_site_Site, "jdk/vm/ci/code/site/Site") \
template(jdk_vm_ci_code_site_InfopointReason, "jdk/vm/ci/code/site/InfopointReason") \
template(jdk_vm_ci_common_JVMCIError, "jdk/vm/ci/common/JVMCIError") \
+ template(visitFrame_name, "visitFrame") \
+ template(visitFrame_signature, "(Ljdk/vm/ci/code/stack/InspectedFrame;)Ljava/lang/Object;") \
template(adjustCompilationLevel_name, "adjustCompilationLevel") \
template(adjustCompilationLevel_signature, "(Ljava/lang/Class;Ljava/lang/String;Ljava/lang/String;ZI)I") \
template(compileMethod_name, "compileMethod") \
--- a/src/hotspot/share/libadt/dict.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/libadt/dict.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
// Dictionaries - An Abstract Data Type
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/thread.hpp"
--- a/src/hotspot/share/logging/log.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/log.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@
template <LogTagType T0, LogTagType T1 = LogTag::__NO_TAG, LogTagType T2 = LogTag::__NO_TAG, LogTagType T3 = LogTag::__NO_TAG,
LogTagType T4 = LogTag::__NO_TAG, LogTagType GuardTag = LogTag::__NO_TAG>
-class LogImpl VALUE_OBJ_CLASS_SPEC {
+class LogImpl {
private:
static const size_t LogBufferSize = 512;
public:
--- a/src/hotspot/share/logging/logDecorations.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/logDecorations.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "memory/allocation.hpp"
// Temporary object containing the necessary data for a log call's decorations (timestamps, etc).
-class LogDecorations VALUE_OBJ_CLASS_SPEC {
+class LogDecorations {
public:
static const int DecorationsBufferSize = 256;
private:
--- a/src/hotspot/share/logging/logDecorators.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/logDecorators.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -57,7 +57,7 @@
// each log message for a given output. Decorators are always prepended in the order
// declared above. For example, logging with 'uptime, level, tags' decorators results in:
// [0,943s][info ][logging] message.
-class LogDecorators VALUE_OBJ_CLASS_SPEC {
+class LogDecorators {
public:
enum Decorator {
#define DECORATOR(name, abbr) name##_decorator,
--- a/src/hotspot/share/logging/logMessageBuffer.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/logMessageBuffer.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
class LogMessageBuffer : public StackObj {
friend class LogMessageTest;
protected:
- struct LogLine VALUE_OBJ_CLASS_SPEC {
+ struct LogLine {
LogLevelType level;
size_t message_offset;
};
--- a/src/hotspot/share/logging/logOutputList.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/logOutputList.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
// To remove a node from the list the node must first be unlinked,
// and the memory for that node can be freed whenever the removing
// thread observes an active reader count of 0 (after unlinking it).
-class LogOutputList VALUE_OBJ_CLASS_SPEC {
+class LogOutputList {
private:
struct LogOutputNode : public CHeapObj<mtLogging> {
LogOutput* _value;
@@ -88,7 +88,7 @@
// Set (add/update/remove) the output to the specified level.
void set_output_level(LogOutput* output, LogLevelType level);
- class Iterator VALUE_OBJ_CLASS_SPEC {
+ class Iterator {
friend class LogOutputList;
private:
LogOutputNode* _current;
--- a/src/hotspot/share/logging/logTag.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/logTag.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -148,6 +148,7 @@
LOG_TAG(update) \
LOG_TAG(unload) /* Trace unloading of classes */ \
LOG_TAG(unshareable) \
+ LOG_TAG(mirror) \
LOG_TAG(verification) \
LOG_TAG(verify) \
LOG_TAG(vmoperation) \
--- a/src/hotspot/share/logging/logTagSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/logging/logTagSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
// The tagset represents a combination of tags that occur in a log call somewhere.
// Tagsets are created automatically by the LogTagSetMappings and should never be
// instantiated directly somewhere else.
-class LogTagSet VALUE_OBJ_CLASS_SPEC {
+class LogTagSet {
private:
static LogTagSet* _list;
static size_t _ntagsets;
--- a/src/hotspot/share/memory/allocation.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/allocation.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -45,11 +45,6 @@
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
-void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
-void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
-void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
-void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
-
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
size_t word_size,
MetaspaceObj::Type type, TRAPS) throw() {
--- a/src/hotspot/share/memory/allocation.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/allocation.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -37,21 +37,24 @@
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;
-// All classes in the virtual machine must be subclassed
-// by one of the following allocation classes:
+// The virtual machine must never call one of the implicitly declared
+// global allocation or deletion functions. (Such calls may result in
+// link-time or run-time errors.) For convenience and documentation of
+// intended use, classes in the virtual machine may be derived from one
+// of the following allocation classes, some of which define allocation
+// and deletion functions.
+// Note: std::malloc and std::free should never be called directly.
+
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
-// For objects allocated in the C-heap (managed by: free & malloc).
+// For objects allocated in the C-heap (managed by free & malloc, tracked with NMT).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
-// For embedded objects.
-// - ValueObj
-//
// For classes used as name spaces.
// - AllStatic
//
@@ -84,15 +87,10 @@
// char* AllocateHeap(size_t size, const char* name);
// void FreeHeap(void* p);
//
-// C-heap allocation can be traced using +PrintHeapAllocation.
-// malloc and free should therefore never called directly.
-
-// Base class for objects allocated in the C-heap.
// In non product mode we introduce a super class for all allocation classes
// that supports printing.
-// We avoid the superclass in product mode since some C++ compilers add
-// a word overhead for empty super classes.
+// We avoid the superclass in product mode to save space.
#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
@@ -188,33 +186,6 @@
void operator delete [](void* p);
};
-// Base class for objects used as value objects.
-// Calling new or delete will result in fatal error.
-//
-// Portability note: Certain compilers (e.g. gcc) will
-// always make classes bigger if it has a superclass, even
-// if the superclass does not have any virtual methods or
-// instance fields. The HotSpot implementation relies on this
-// not to happen. So never make a ValueObj class a direct subclass
-// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
-// like this:
-//
-// class A VALUE_OBJ_CLASS_SPEC {
-// ...
-// }
-//
-// With gcc and possible other compilers the VALUE_OBJ_CLASS_SPEC can
-// be defined as a an empty string "".
-//
-class _ValueObj {
- private:
- void* operator new(size_t size) throw();
- void operator delete(void* p);
- void* operator new [](size_t size) throw();
- void operator delete [](void* p);
-};
-
-
// Base class for objects stored in Metaspace.
// Calling delete will result in fatal error.
//
--- a/src/hotspot/share/memory/filemap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/filemap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "classfile/classLoader.hpp"
+#include "classfile/classLoader.inline.hpp"
#include "classfile/compactHashtable.inline.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/stringTable.hpp"
--- a/src/hotspot/share/memory/filemap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/filemap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -42,7 +42,7 @@
static const int JVM_IDENT_MAX = 256;
-class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
+class SharedClassPathEntry {
protected:
bool _is_dir;
time_t _timestamp; // jar/jimage timestamp, 0 if is directory or other
@@ -125,14 +125,13 @@
size_t _cds_i2i_entry_code_buffers_size;
size_t _core_spaces_size; // number of bytes allocated by the core spaces
// (mc, md, ro, rw and od).
-
struct space_info {
int _crc; // crc checksum of the current space
size_t _file_offset; // sizeof(this) rounded to vm page size
union {
char* _base; // copy-on-write base address
intx _offset; // offset from the compressed oop encoding base, only used
- // by string space
+ // by archive heap space
} _addr;
size_t _used; // for setting space top on read
bool _read_only; // read only space?
--- a/src/hotspot/share/memory/freeList.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/freeList.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
class Mutex;
template <class Chunk_t>
-class FreeList VALUE_OBJ_CLASS_SPEC {
+class FreeList {
friend class CompactibleFreeListSpace;
friend class VMStructs;
--- a/src/hotspot/share/memory/heap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/heap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
// Blocks
-class HeapBlock VALUE_OBJ_CLASS_SPEC {
+class HeapBlock {
friend class VMStructs;
public:
--- a/src/hotspot/share/memory/iterator.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/iterator.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -344,6 +344,9 @@
// correct length.
virtual void do_tag(int tag) = 0;
+ // Read/write the oop
+ virtual void do_oop(oop* o) = 0;
+
bool writing() {
return !reading();
}
--- a/src/hotspot/share/memory/memRegion.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/memRegion.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,13 +34,13 @@
// Note that MemRegions are passed by value, not by reference.
// The intent is that they remain very small and contain no
-// objects. _ValueObj should never be allocated in heap but we do
+// objects. These should never be allocated in heap but we do
// create MemRegions (in CardTableModRefBS) in heap so operator
// new and operator new [] added for this special case.
class MetaWord;
-class MemRegion VALUE_OBJ_CLASS_SPEC {
+class MemRegion {
friend class VMStructs;
private:
HeapWord* _start;
--- a/src/hotspot/share/memory/metachunk.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metachunk.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -48,9 +48,14 @@
// Metachunk methods
-Metachunk::Metachunk(size_t word_size,
+Metachunk::Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size,
VirtualSpaceNode* container)
: Metabase<Metachunk>(word_size),
+ _chunk_type(chunktype),
+ _is_class(is_class),
+ _sentinel(CHUNK_SENTINEL),
+ _origin(origin_normal),
+ _use_count(0),
_top(NULL),
_container(container)
{
@@ -58,6 +63,7 @@
set_is_tagged_free(false);
#ifdef ASSERT
mangle(uninitMetaWordVal);
+ verify();
#endif
}
@@ -83,15 +89,16 @@
void Metachunk::print_on(outputStream* st) const {
st->print_cr("Metachunk:"
" bottom " PTR_FORMAT " top " PTR_FORMAT
- " end " PTR_FORMAT " size " SIZE_FORMAT,
- p2i(bottom()), p2i(_top), p2i(end()), word_size());
+ " end " PTR_FORMAT " size " SIZE_FORMAT " (%s)",
+ p2i(bottom()), p2i(_top), p2i(end()), word_size(),
+ chunk_size_name(get_chunk_type()));
if (Verbose) {
st->print_cr(" used " SIZE_FORMAT " free " SIZE_FORMAT,
used_word_size(), free_word_size());
}
}
-#ifndef PRODUCT
+#ifdef ASSERT
void Metachunk::mangle(juint word_value) {
// Overwrite the payload of the chunk and not the links that
// maintain list of chunks.
@@ -99,16 +106,44 @@
size_t size = word_size() - overhead();
Copy::fill_to_words(start, size, word_value);
}
-#endif // PRODUCT
void Metachunk::verify() {
-#ifdef ASSERT
- // Cannot walk through the blocks unless the blocks have
- // headers with sizes.
- assert(bottom() <= _top &&
- _top <= (MetaWord*)end(),
- "Chunk has been smashed");
-#endif
- return;
+ assert(is_valid_sentinel(), "Chunk " PTR_FORMAT ": sentinel invalid", p2i(this));
+ const ChunkIndex chunk_type = get_chunk_type();
+ assert(is_valid_chunktype(chunk_type), "Chunk " PTR_FORMAT ": Invalid chunk type.", p2i(this));
+ if (chunk_type != HumongousIndex) {
+ assert(word_size() == get_size_for_nonhumongous_chunktype(chunk_type, is_class()),
+ "Chunk " PTR_FORMAT ": wordsize " SIZE_FORMAT " does not fit chunk type %s.",
+ p2i(this), word_size(), chunk_size_name(chunk_type));
+ }
+ assert(is_valid_chunkorigin(get_origin()), "Chunk " PTR_FORMAT ": Invalid chunk origin.", p2i(this));
+ assert(bottom() <= _top && _top <= (MetaWord*)end(),
+ "Chunk " PTR_FORMAT ": Chunk top out of chunk bounds.", p2i(this));
+
+ // For non-humongous chunks, the starting address shall be aligned
+ // to the chunk size. A humongous chunk's start address is aligned
+ // to the specialized chunk size.
+ const size_t required_alignment =
+ (chunk_type != HumongousIndex ? word_size() : get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class())) * sizeof(MetaWord);
+ assert(is_aligned((address)this, required_alignment),
+ "Chunk " PTR_FORMAT ": (size " SIZE_FORMAT ") not aligned to " SIZE_FORMAT ".",
+ p2i(this), word_size() * sizeof(MetaWord), required_alignment);
}
+#endif // ASSERT
+
+// Helper, returns a descriptive name for the given index.
+const char* chunk_size_name(ChunkIndex index) {
+ switch (index) {
+ case SpecializedIndex:
+ return "specialized";
+ case SmallIndex:
+ return "small";
+ case MediumIndex:
+ return "medium";
+ case HumongousIndex:
+ return "humongous";
+ default:
+ return "Invalid index";
+ }
+}
--- a/src/hotspot/share/memory/metachunk.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metachunk.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
// Super class of Metablock and Metachunk to allow them to
// be put on the FreeList and in the BinaryTreeDictionary.
template <class T>
-class Metabase VALUE_OBJ_CLASS_SPEC {
+class Metabase {
size_t _word_size;
T* _next;
T* _prev;
@@ -94,16 +94,84 @@
// | | | |
// +--------------+ <- bottom --+ --+
+// ChunkIndex defines the type of chunk.
+// Chunk types differ by size: specialized < small < medium, chunks
+// larger than medium are humongous chunks of varying size.
+enum ChunkIndex {
+ ZeroIndex = 0,
+ SpecializedIndex = ZeroIndex,
+ SmallIndex = SpecializedIndex + 1,
+ MediumIndex = SmallIndex + 1,
+ HumongousIndex = MediumIndex + 1,
+ NumberOfFreeLists = 3,
+ NumberOfInUseLists = 4
+};
+
+// Utility functions.
+size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunk_type, bool is_class);
+ChunkIndex get_chunk_type_by_size(size_t size, bool is_class);
+
+// Returns a descriptive name for a chunk type.
+const char* chunk_size_name(ChunkIndex index);
+
+// Verify chunk type.
+inline bool is_valid_chunktype(ChunkIndex index) {
+ return index == SpecializedIndex || index == SmallIndex ||
+ index == MediumIndex || index == HumongousIndex;
+}
+
+inline bool is_valid_nonhumongous_chunktype(ChunkIndex index) {
+ return is_valid_chunktype(index) && index != HumongousIndex;
+}
+
+enum ChunkOrigin {
+ // Chunk normally born (via take_from_committed)
+ origin_normal = 1,
+ // Chunk was born as a padding chunk
+ origin_pad = 2,
+ // Chunk was born as a leftover chunk in VirtualSpaceNode::retire
+ origin_leftover = 3,
+ // Chunk was born as result of a merge of smaller chunks
+ origin_merge = 4,
+ // Chunk was born as result of a split of a larger chunk
+ origin_split = 5,
+
+ origin_minimum = origin_normal,
+ origin_maximum = origin_split,
+ origins_count = origin_maximum + 1
+};
+
+inline bool is_valid_chunkorigin(ChunkOrigin origin) {
+ return origin == origin_normal ||
+ origin == origin_pad ||
+ origin == origin_leftover ||
+ origin == origin_merge ||
+ origin == origin_split;
+}
+
class Metachunk : public Metabase<Metachunk> {
friend class MetachunkTest;
// The VirtualSpaceNode containing this chunk.
- VirtualSpaceNode* _container;
+ VirtualSpaceNode* const _container;
// Current allocation top.
MetaWord* _top;
+ // A 32-bit sentinel for debugging purposes.
+ enum { CHUNK_SENTINEL = 0x4d4554EF, // "MET"
+ CHUNK_SENTINEL_INVALID = 0xFEEEEEEF
+ };
+
+ uint32_t _sentinel;
+
+ const ChunkIndex _chunk_type;
+ const bool _is_class;
+ // Whether the chunk is free (in freelist) or in use by some class loader.
bool _is_tagged_free;
+ ChunkOrigin _origin;
+ int _use_count;
+
MetaWord* initial_top() const { return (MetaWord*)this + overhead(); }
MetaWord* top() const { return _top; }
@@ -120,7 +188,7 @@
// Size of the Metachunk header, including alignment.
static size_t overhead();
- Metachunk(size_t word_size , VirtualSpaceNode* container);
+ Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size, VirtualSpaceNode* container);
MetaWord* allocate(size_t word_size);
@@ -143,12 +211,23 @@
bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }
-#ifndef PRODUCT
- void mangle(juint word_value);
-#endif
+ void print_on(outputStream* st) const;
+
+ bool is_valid_sentinel() const { return _sentinel == CHUNK_SENTINEL; }
+ void remove_sentinel() { _sentinel = CHUNK_SENTINEL_INVALID; }
+
+ int get_use_count() const { return _use_count; }
+ void inc_use_count() { _use_count ++; }
- void print_on(outputStream* st) const;
- void verify();
+ ChunkOrigin get_origin() const { return _origin; }
+ void set_origin(ChunkOrigin orig) { _origin = orig; }
+
+ ChunkIndex get_chunk_type() const { return _chunk_type; }
+ bool is_class() const { return _is_class; }
+
+ DEBUG_ONLY(void mangle(juint word_value);)
+ DEBUG_ONLY(void verify();)
+
};
// Metablock is the unit of allocation from a Chunk.
--- a/src/hotspot/share/memory/metaspace.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metaspace.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -55,8 +55,12 @@
typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
-// Set this constant to enable slow integrity checking of the free chunk lists
-const bool metaspace_slow_verify = false;
+// Helper function that does a bunch of checks for a chunk.
+DEBUG_ONLY(static void do_verify_chunk(Metachunk* chunk);)
+
+// Given a Metachunk, update its in-use information (both in the
+// chunk and the occupancy map).
+static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse);
size_t const allocation_from_dictionary_limit = 4 * K;
@@ -67,33 +71,6 @@
DEBUG_ONLY(bool Metaspace::_frozen = false;)
-// Used in declarations in SpaceManager and ChunkManager
-enum ChunkIndex {
- ZeroIndex = 0,
- SpecializedIndex = ZeroIndex,
- SmallIndex = SpecializedIndex + 1,
- MediumIndex = SmallIndex + 1,
- HumongousIndex = MediumIndex + 1,
- NumberOfFreeLists = 3,
- NumberOfInUseLists = 4
-};
-
-// Helper, returns a descriptive name for the given index.
-static const char* chunk_size_name(ChunkIndex index) {
- switch (index) {
- case SpecializedIndex:
- return "specialized";
- case SmallIndex:
- return "small";
- case MediumIndex:
- return "medium";
- case HumongousIndex:
- return "humongous";
- default:
- return "Invalid index";
- }
-}
-
enum ChunkSizes { // in words.
ClassSpecializedChunk = 128,
SpecializedChunk = 128,
@@ -103,11 +80,69 @@
MediumChunk = 8 * K
};
+// Returns size of this chunk type.
+size_t get_size_for_nonhumongous_chunktype(ChunkIndex chunktype, bool is_class) {
+ assert(is_valid_nonhumongous_chunktype(chunktype), "invalid chunk type.");
+ size_t size = 0;
+ if (is_class) {
+ switch(chunktype) {
+ case SpecializedIndex: size = ClassSpecializedChunk; break;
+ case SmallIndex: size = ClassSmallChunk; break;
+ case MediumIndex: size = ClassMediumChunk; break;
+ default:
+ ShouldNotReachHere();
+ }
+ } else {
+ switch(chunktype) {
+ case SpecializedIndex: size = SpecializedChunk; break;
+ case SmallIndex: size = SmallChunk; break;
+ case MediumIndex: size = MediumChunk; break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ return size;
+}
+
+ChunkIndex get_chunk_type_by_size(size_t size, bool is_class) {
+ if (is_class) {
+ if (size == ClassSpecializedChunk) {
+ return SpecializedIndex;
+ } else if (size == ClassSmallChunk) {
+ return SmallIndex;
+ } else if (size == ClassMediumChunk) {
+ return MediumIndex;
+ } else if (size > ClassMediumChunk) {
+ assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
+ return HumongousIndex;
+ }
+ } else {
+ if (size == SpecializedChunk) {
+ return SpecializedIndex;
+ } else if (size == SmallChunk) {
+ return SmallIndex;
+ } else if (size == MediumChunk) {
+ return MediumIndex;
+ } else if (size > MediumChunk) {
+ assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
+ return HumongousIndex;
+ }
+ }
+ ShouldNotReachHere();
+ return (ChunkIndex)-1;
+}
+
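
For illustration, the size-to-type mapping can be exercised outside the VM. Below is a minimal standalone model of the non-class case (all names local to the example), assuming the word sizes from the ChunkSizes enum above; any size above MediumChunk that is a multiple of SpecializedChunk counts as humongous.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Standalone model of the size -> chunk type mapping (non-class case).
    enum ChunkType { Specialized, Small, Medium, Humongous };

    static const size_t SpecializedChunk = 128;
    static const size_t SmallChunk = 512;
    static const size_t MediumChunk = 8 * 1024;

    static ChunkType type_by_size(size_t words) {
      if (words == SpecializedChunk) return Specialized;
      if (words == SmallChunk)       return Small;
      if (words == MediumChunk)      return Medium;
      // Larger sizes are humongous and must be a multiple of the
      // smallest chunk size.
      assert(words > MediumChunk && words % SpecializedChunk == 0);
      return Humongous;
    }

    int main() {
      printf("%d %d %d %d\n", type_by_size(128), type_by_size(512),
             type_by_size(8 * 1024), type_by_size(16 * 1024)); // 0 1 2 3
      return 0;
    }
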
+
static ChunkIndex next_chunk_index(ChunkIndex i) {
assert(i < NumberOfInUseLists, "Out of bound");
return (ChunkIndex) (i+1);
}
+static ChunkIndex prev_chunk_index(ChunkIndex i) {
+ assert(i > ZeroIndex, "Out of bound");
+ return (ChunkIndex) (i-1);
+}
+
static const char* scale_unit(size_t scale) {
switch(scale) {
case 1: return "BYTES";
@@ -136,6 +171,9 @@
// MediumChunk
ChunkList _free_chunks[NumberOfFreeLists];
+ // Whether or not this is the class ChunkManager.
+ const bool _is_class;
+
// Return non-humongous chunk list by its index.
ChunkList* free_chunks(ChunkIndex index);
@@ -166,18 +204,41 @@
void locked_verify_free_chunks_total();
void slow_locked_verify_free_chunks_total() {
- if (metaspace_slow_verify) {
+ if (VerifyMetaspace) {
locked_verify_free_chunks_total();
}
}
void locked_verify_free_chunks_count();
void slow_locked_verify_free_chunks_count() {
- if (metaspace_slow_verify) {
+ if (VerifyMetaspace) {
locked_verify_free_chunks_count();
}
}
void verify_free_chunks_count();
+ // Given a pointer to a chunk, attempts to merge it with neighboring
+ // free chunks to form a bigger chunk. Returns true if successful.
+ bool attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type);
+
+ // Helper for chunk merging:
+ // Given an address range with 1-n chunks which are all supposed to be
+ // free and hence currently managed by this ChunkManager, remove them
+ // from this ChunkManager and mark them as invalid.
+ // - This does not correct the occupancy map.
+ // - This does not adjust the counters in ChunkManager.
+ // - This does not adjust the container count in the containing VirtualSpaceNode.
+ // Returns number of chunks removed.
+ int remove_chunks_in_area(MetaWord* p, size_t word_size);
+
+ // Helper for chunk splitting: given a target chunk size and a larger free chunk,
+ // split up the larger chunk into n smaller chunks, at least one of which should be
+ // the target chunk of target chunk size. The smaller chunks, including the target
+ // chunk, are returned to the freelist. The pointer to the target chunk is returned.
+ // Note that this chunk is supposed to be removed from the freelist right away.
+ Metachunk* split_chunk(size_t target_chunk_word_size, Metachunk* chunk);
+
+ public:
+
struct ChunkManagerStatistics {
size_t num_by_type[NumberOfFreeLists];
size_t single_size_by_type[NumberOfFreeLists];
@@ -190,16 +251,15 @@
void get_statistics(ChunkManagerStatistics* stat) const;
static void print_statistics(const ChunkManagerStatistics* stat, outputStream* out, size_t scale);
- public:
-
- ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
- : _free_chunks_total(0), _free_chunks_count(0) {
- _free_chunks[SpecializedIndex].set_size(specialized_size);
- _free_chunks[SmallIndex].set_size(small_size);
- _free_chunks[MediumIndex].set_size(medium_size);
- }
-
- // add or delete (return) a chunk to the global freelist.
+
+ ChunkManager(bool is_class)
+ : _is_class(is_class), _free_chunks_total(0), _free_chunks_count(0) {
+ _free_chunks[SpecializedIndex].set_size(get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class));
+ _free_chunks[SmallIndex].set_size(get_size_for_nonhumongous_chunktype(SmallIndex, is_class));
+ _free_chunks[MediumIndex].set_size(get_size_for_nonhumongous_chunktype(MediumIndex, is_class));
+ }
+
+ // Add or delete (return) a chunk to the global freelist.
Metachunk* chunk_freelist_allocate(size_t word_size);
// Map a size to a list index assuming that there are lists
@@ -209,6 +269,13 @@
// Map a given index to the chunk size.
size_t size_by_index(ChunkIndex index) const;
+ bool is_class() const { return _is_class; }
+
+ // Convenience accessors.
+ size_t medium_chunk_word_size() const { return size_by_index(MediumIndex); }
+ size_t small_chunk_word_size() const { return size_by_index(SmallIndex); }
+ size_t specialized_chunk_word_size() const { return size_by_index(SpecializedIndex); }
+
// Take a chunk from the ChunkManager. The chunk is expected to be in
// the chunk manager (the freelist if non-humongous, the dictionary if
// humongous).
@@ -276,13 +343,13 @@
// Debug support
void verify();
void slow_verify() {
- if (metaspace_slow_verify) {
+ if (VerifyMetaspace) {
verify();
}
}
void locked_verify();
void slow_locked_verify() {
- if (metaspace_slow_verify) {
+ if (VerifyMetaspace) {
locked_verify();
}
}
@@ -391,6 +458,294 @@
void print_on(outputStream* st) const;
};
+// Helper for the occupancy bitmap: a type trait yielding an all-bits-set constant for an unsigned type.
+template <typename T> struct all_ones { static const T value; };
+template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
+template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
+
+// The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
+// keeps information about
+// - where a chunk starts
+// - whether a chunk is in-use or free
+// Each bit in this bitmap represents one area of memory the size of the
+// smallest chunk (SpecializedChunk or ClassSpecializedChunk).
+class OccupancyMap : public CHeapObj<mtInternal> {
+
+ // The address range this map covers.
+ const MetaWord* const _reference_address;
+ const size_t _word_size;
+
+ // The word size of a specialized chunk, aka the number of words one
+ // bit in this map represents.
+ const size_t _smallest_chunk_word_size;
+
+ // Map data.
+ // The data is organized in two bit layers:
+ // The first layer is the chunk-start map. Here, a set bit marks the
+ // corresponding region as the head of a chunk.
+ // The second layer is the in-use map. Here, a set bit indicates that
+ // the corresponding region belongs to a chunk which is in use.
+ uint8_t* _map[2];
+
+ enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };
+
+ // Length, in bytes, of the bitmap data of one layer.
+ size_t _map_size;
+
+ // Returns true if bit at position pos at bit-layer layer is set.
+ bool get_bit_at_position(unsigned pos, unsigned layer) const {
+ assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
+ const unsigned byteoffset = pos / 8;
+ assert(byteoffset < _map_size,
+ "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+ const unsigned mask = 1 << (pos % 8);
+ return (_map[layer][byteoffset] & mask) > 0;
+ }
+
+ // Changes bit at position pos at bit-layer layer to value v.
+ void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
+ assert(layer == 0 || layer == 1, "Invalid layer %d", layer);
+ const unsigned byteoffset = pos / 8;
+ assert(byteoffset < _map_size,
+ "invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+ const unsigned mask = 1 << (pos % 8);
+ if (v) {
+ _map[layer][byteoffset] |= mask;
+ } else {
+ _map[layer][byteoffset] &= ~mask;
+ }
+ }
+
+ // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
+ // pos is 32/64 aligned and num_bits is 32/64.
+ // This is the typical case when coalescing to medium chunks, whose size is
+ // 32 or 64 times the specialized chunk size (depending on the class or
+ // non-class case), so they occupy 32 or 64 bits, which are suitably
+ // aligned because chunks are chunk-size aligned.
+ template <typename T>
+ bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
+ assert(_map_size > 0, "not initialized");
+ assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
+ assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
+ assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
+ const size_t byteoffset = pos / 8;
+ assert(byteoffset <= (_map_size - sizeof(T)),
+ "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+ const T w = *(T*)(_map[layer] + byteoffset);
+ return w != 0;
+ }
+
+ // Returns true if any bit in region [pos1, pos1 + num_bits) is set in bit-layer layer.
+ bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
+ if (pos % 32 == 0 && num_bits == 32) {
+ return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
+ } else if (pos % 64 == 0 && num_bits == 64) {
+ return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
+ } else {
+ for (unsigned n = 0; n < num_bits; n ++) {
+ if (get_bit_at_position(pos + n, layer)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
+ bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
+ assert(word_size % _smallest_chunk_word_size == 0,
+ "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
+ const unsigned pos = get_bitpos_for_address(p);
+ const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
+ return is_any_bit_set_in_region(pos, num_bits, layer);
+ }
+
+ // Optimized case of set_bits_of_region for 32/64bit aligned access:
+ // pos is 32/64 aligned and num_bits is 32/64.
+ // This is the typical case when coalescing to medium chunks, whose size
+ // is 32 or 64 times the specialized chunk size (depending on the class or
+ // non-class case), so they occupy 32 or 64 bits, which are suitably
+ // aligned because chunks are chunk-size aligned.
+ template <typename T>
+ void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
+ assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
+ (unsigned)(sizeof(T) * 8), pos);
+ assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
+ num_bits, (unsigned)(sizeof(T) * 8));
+ const size_t byteoffset = pos / 8;
+ assert(byteoffset <= (_map_size - sizeof(T)),
+ "invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
+ T* const pw = (T*)(_map[layer] + byteoffset);
+ *pw = v ? all_ones<T>::value : (T) 0;
+ }
+
+ // Sets all bits in the region [pos, pos + num_bits) at bit-layer layer to value v.
+ void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
+ assert(_map_size > 0, "not initialized");
+ assert(layer == 0 || layer == 1, "Invalid layer %d.", layer);
+ if (pos % 32 == 0 && num_bits == 32) {
+ set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
+ } else if (pos % 64 == 0 && num_bits == 64) {
+ set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
+ } else {
+ for (unsigned n = 0; n < num_bits; n ++) {
+ set_bit_at_position(pos + n, layer, v);
+ }
+ }
+ }
+
+ // Helper: sets all bits in a region [p, p+word_size).
+ void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
+ assert(word_size % _smallest_chunk_word_size == 0,
+ "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
+ const unsigned pos = get_bitpos_for_address(p);
+ const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
+ set_bits_of_region(pos, num_bits, layer, v);
+ }
+
+ // Helper: given an address, return the bit position representing that address.
+ unsigned get_bitpos_for_address(const MetaWord* p) const {
+ assert(_reference_address != NULL, "not initialized");
+ assert(p >= _reference_address && p < _reference_address + _word_size,
+ "Address %p out of range for occupancy map [%p..%p).",
+ p, _reference_address, _reference_address + _word_size);
+ assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
+ "Address not aligned (%p).", p);
+ const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
+ assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
+ return (unsigned) d;
+ }
+
+ public:
+
+ OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size) :
+ _reference_address(reference_address), _word_size(word_size),
+ _smallest_chunk_word_size(smallest_chunk_word_size) {
+ assert(reference_address != NULL, "invalid reference address");
+ assert(is_aligned(reference_address, smallest_chunk_word_size),
+ "Reference address not aligned to smallest chunk size.");
+ assert(is_aligned(word_size, smallest_chunk_word_size),
+ "Word_size shall be a multiple of the smallest chunk size.");
+ // Calculate bitmap size: one bit per smallest_chunk_word_size'd area.
+ size_t num_bits = word_size / smallest_chunk_word_size;
+ _map_size = (num_bits + 7) / 8;
+ assert(_map_size * 8 >= num_bits, "sanity");
+ _map[0] = (uint8_t*) os::malloc(_map_size, mtInternal);
+ _map[1] = (uint8_t*) os::malloc(_map_size, mtInternal);
+ assert(_map[0] != NULL && _map[1] != NULL, "Occupancy Map: allocation failed.");
+ memset(_map[1], 0, _map_size);
+ memset(_map[0], 0, _map_size);
+ // Sanity test: the first and last possible chunk start addresses in the
+ // covered range shall map to the first and last bit in the bitmap.
+ assert(get_bitpos_for_address(reference_address) == 0,
+ "First chunk address in range must map to first bit in bitmap.");
+ assert(get_bitpos_for_address(reference_address + word_size - smallest_chunk_word_size) == num_bits - 1,
+ "Last chunk address in range must map to last bit in bitmap.");
+ }
+
+ ~OccupancyMap() {
+ os::free(_map[0]);
+ os::free(_map[1]);
+ }
+
+ // Returns true if a chunk starts at address p.
+ bool chunk_starts_at_address(MetaWord* p) const {
+ const unsigned pos = get_bitpos_for_address(p);
+ return get_bit_at_position(pos, layer_chunk_start_map);
+ }
+
+ void set_chunk_starts_at_address(MetaWord* p, bool v) {
+ const unsigned pos = get_bitpos_for_address(p);
+ set_bit_at_position(pos, layer_chunk_start_map, v);
+ }
+
+ // Removes all chunk-start-bits inside a region, typically as a
+ // result of a chunk merge.
+ void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
+ set_bits_of_region(p, word_size, layer_chunk_start_map, false);
+ }
+
+ // Returns true if there are live (in-use) chunks in the region limited
+ // by [p, p+word_size).
+ bool is_region_in_use(MetaWord* p, size_t word_size) const {
+ return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
+ }
+
+ // Marks the region starting at p with the size word_size as in use
+ // or free, depending on v.
+ void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
+ set_bits_of_region(p, word_size, layer_in_use_map, v);
+ }
+
+#ifdef ASSERT
+ // Verify occupancy map for the address range [from, to).
+ // We need to tell it the address range, because the memory the
+ // occupancy map is covering may not be fully committed yet.
+ void verify(MetaWord* from, MetaWord* to) {
+ Metachunk* chunk = NULL;
+ int nth_bit_for_chunk = 0;
+ MetaWord* chunk_end = NULL;
+ for (MetaWord* p = from; p < to; p += _smallest_chunk_word_size) {
+ const unsigned pos = get_bitpos_for_address(p);
+ // Check the chunk-starts-info:
+ if (get_bit_at_position(pos, layer_chunk_start_map)) {
+ // Chunk start marked in bitmap.
+ chunk = (Metachunk*) p;
+ if (chunk_end != NULL) {
+ assert(chunk_end == p, "Unexpected chunk start found at %p (expected "
+ "the next chunk to start at %p).", p, chunk_end);
+ }
+ assert(chunk->is_valid_sentinel(), "Invalid chunk at address %p.", p);
+ if (chunk->get_chunk_type() != HumongousIndex) {
+ guarantee(is_aligned(p, chunk->word_size()), "Chunk %p not aligned.", p);
+ }
+ chunk_end = p + chunk->word_size();
+ nth_bit_for_chunk = 0;
+ assert(chunk_end <= to, "Chunk end overlaps test address range.");
+ } else {
+ // No chunk start marked in bitmap.
+ assert(chunk != NULL, "Chunk should start at start of address range.");
+ assert(p < chunk_end, "Did not find expected chunk start at %p.", p);
+ nth_bit_for_chunk ++;
+ }
+ // Check the in-use-info:
+ const bool in_use_bit = get_bit_at_position(pos, layer_in_use_map);
+ if (in_use_bit) {
+ assert(!chunk->is_tagged_free(), "Chunk %p: marked in-use in map but is free (bit %u).",
+ chunk, nth_bit_for_chunk);
+ } else {
+ assert(chunk->is_tagged_free(), "Chunk %p: marked free in map but is in use (bit %u).",
+ chunk, nth_bit_for_chunk);
+ }
+ }
+ }
+
+ // Verify that a given chunk is correctly accounted for in the bitmap.
+ void verify_for_chunk(Metachunk* chunk) {
+ assert(chunk_starts_at_address((MetaWord*) chunk),
+ "No chunk start marked in map for chunk %p.", chunk);
+ // For chunks larger than the minimal chunk size, no other chunk
+ // may start within its area.
+ if (chunk->word_size() > _smallest_chunk_word_size) {
+ assert(!is_any_bit_set_in_region(((MetaWord*) chunk) + _smallest_chunk_word_size,
+ chunk->word_size() - _smallest_chunk_word_size, layer_chunk_start_map),
+ "No chunk must start within another chunk.");
+ }
+ if (!chunk->is_tagged_free()) {
+ assert(is_region_in_use((MetaWord*)chunk, chunk->word_size()),
+ "Chunk %p is in use but marked as free in map (%d %d).",
+ chunk, chunk->get_chunk_type(), chunk->get_origin());
+ } else {
+ assert(!is_region_in_use((MetaWord*)chunk, chunk->word_size()),
+ "Chunk %p is free but marked as in-use in map (%d %d).",
+ chunk, chunk->get_chunk_type(), chunk->get_origin());
+ }
+ }
+
+#endif // ASSERT
+
+};
+
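
The two-layer bookkeeping can be tried out in isolation. Below is a hedged standalone sketch of the same idea (one bit per smallest-chunk-sized area; layer 0 marks chunk starts, layer 1 marks in-use areas); the type and all names are local to the example, not part of the patch.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Minimal two-layer bitmap: layer 0 marks chunk starts, layer 1 marks
    // in-use areas. One bit covers one smallest-chunk-sized area.
    struct TwoLayerMap {
      enum { layer_chunk_start = 0, layer_in_use = 1 };
      uint8_t* map[2];
      size_t map_size; // bytes per layer

      explicit TwoLayerMap(size_t num_bits) {
        map_size = (num_bits + 7) / 8;
        for (int l = 0; l < 2; l++) {
          map[l] = new uint8_t[map_size];
          memset(map[l], 0, map_size);
        }
      }
      ~TwoLayerMap() { delete[] map[0]; delete[] map[1]; }

      bool get(unsigned pos, int layer) const {
        return (map[layer][pos / 8] & (1u << (pos % 8))) != 0;
      }
      void set(unsigned pos, int layer, bool v) {
        const uint8_t mask = (uint8_t)(1u << (pos % 8));
        if (v) map[layer][pos / 8] |= mask;
        else   map[layer][pos / 8] &= (uint8_t)~mask;
      }
    };

    int main() {
      TwoLayerMap m(64);
      m.set(0, TwoLayerMap::layer_chunk_start, true); // chunk starts at area 0
      m.set(0, TwoLayerMap::layer_in_use, true);      // ...and is in use
      assert(m.get(0, TwoLayerMap::layer_chunk_start));
      assert(!m.get(1, TwoLayerMap::layer_chunk_start));
      printf("ok\n");
      return 0;
    }
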
// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
friend class VirtualSpaceList;
@@ -398,6 +753,9 @@
// Link to next VirtualSpaceNode
VirtualSpaceNode* _next;
+ // Whether this node belongs to class metaspace or to non-class metaspace.
+ const bool _is_class;
+
// total in the VirtualSpace
MemRegion _reserved;
ReservedSpace _rs;
@@ -406,6 +764,8 @@
// count of chunks contained in this VirtualSpace
uintx _container_count;
+ OccupancyMap* _occupancy_map;
+
// Convenience functions to access the _virtual_space
char* low() const { return virtual_space()->low(); }
char* high() const { return virtual_space()->high(); }
@@ -416,16 +776,28 @@
// Committed but unused space in the virtual space
size_t free_words_in_vs() const;
+
+ // True if this node belongs to class metaspace.
+ bool is_class() const { return _is_class; }
+
+ // Helper function for take_from_committed: allocate padding chunks
+ // until top is at the given address.
+ void allocate_padding_chunks_until_top_is_at(MetaWord* target_top);
+
public:
- VirtualSpaceNode(size_t byte_size);
- VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
+ VirtualSpaceNode(bool is_class, size_t byte_size);
+ VirtualSpaceNode(bool is_class, ReservedSpace rs) :
+ _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
~VirtualSpaceNode();
// Convenience functions for logical bottom and end
MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
+ const OccupancyMap* occupancy_map() const { return _occupancy_map; }
+ OccupancyMap* occupancy_map() { return _occupancy_map; }
+
bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
@@ -486,13 +858,18 @@
// the smallest chunk size.
void retire(ChunkManager* chunk_manager);
-#ifdef ASSERT
- // Debug support
- void mangle();
-#endif
void print_on(outputStream* st) const;
void print_map(outputStream* st, bool is_class) const;
+
+ // Debug support
+ DEBUG_ONLY(void mangle();)
+ // Verify counters, all chunks in this list node and the occupancy map.
+ DEBUG_ONLY(void verify();)
+ // Verify that all free chunks in this node are ideally merged
+ // (there should not be multiple small chunks where a larger chunk could exist).
+ DEBUG_ONLY(void verify_free_chunks_are_ideally_merged();)
+
};
#define assert_is_aligned(value, alignment) \
@@ -515,7 +892,8 @@
}
// byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
+VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
+ _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
assert_is_aligned(bytes, Metaspace::reserve_alignment());
bool large_pages = should_commit_large_pages_when_reserving(bytes);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
@@ -531,12 +909,14 @@
}
void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
+ DEBUG_ONLY(this->verify();)
Metachunk* chunk = first_chunk();
Metachunk* invalid_chunk = (Metachunk*) top();
while (chunk < invalid_chunk ) {
assert(chunk->is_tagged_free(), "Should be tagged free");
MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
chunk_manager->remove_chunk(chunk);
+ chunk->remove_sentinel();
assert(chunk->next() == NULL &&
chunk->prev() == NULL,
"Was not removed from its list");
@@ -546,23 +926,10 @@
void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const {
- // Format:
- // <ptr>
- // <ptr> . .. . . ..
- // SSxSSMMMMMMMMMMMMMMMMsssXX
- // 112114444444444444444
- // <ptr> . .. . . ..
- // SSxSSMMMMMMMMMMMMMMMMsssXX
- // 112114444444444444444
-
if (bottom() == top()) {
return;
}
- // First line: dividers for every med-chunk-sized interval
- // Second line: a dot for the start of a chunk
- // Third line: a letter per chunk type (x,s,m,h), uppercase if in use.
-
const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk;
const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk;
@@ -571,9 +938,12 @@
const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size);
line_len = (int)(section_len / spec_chunk_size);
- char* line1 = (char*)os::malloc(line_len, mtInternal);
- char* line2 = (char*)os::malloc(line_len, mtInternal);
- char* line3 = (char*)os::malloc(line_len, mtInternal);
+ static const int NUM_LINES = 4;
+
+ char* lines[NUM_LINES];
+ for (int i = 0; i < NUM_LINES; i ++) {
+ lines[i] = (char*)os::malloc(line_len, mtInternal);
+ }
int pos = 0;
const MetaWord* p = bottom();
const Metachunk* chunk = (const Metachunk*)p;
@@ -581,12 +951,11 @@
while (p < top()) {
if (pos == line_len) {
pos = 0;
- st->fill_to(22);
- st->print_raw(line1, line_len);
- st->cr();
- st->fill_to(22);
- st->print_raw(line2, line_len);
- st->cr();
+ for (int i = 0; i < NUM_LINES; i ++) {
+ st->fill_to(22);
+ st->print_raw(lines[i], line_len);
+ st->cr();
+ }
}
if (pos == 0) {
st->print(PTR_FORMAT ":", p2i(p));
@@ -595,40 +964,45 @@
chunk = (Metachunk*)p;
chunk_end = p + chunk->word_size();
}
- if (p == (const MetaWord*)chunk) {
- // chunk starts.
- line1[pos] = '.';
- } else {
- line1[pos] = ' ';
- }
+ // Line 1: chunk starting points (a dot if that area is a chunk start).
+ lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' ';
+
// Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if
// chunk is in use.
const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free();
if (chunk->word_size() == spec_chunk_size) {
- line2[pos] = chunk_is_free ? 'x' : 'X';
+ lines[1][pos] = chunk_is_free ? 'x' : 'X';
} else if (chunk->word_size() == small_chunk_size) {
- line2[pos] = chunk_is_free ? 's' : 'S';
+ lines[1][pos] = chunk_is_free ? 's' : 'S';
} else if (chunk->word_size() == med_chunk_size) {
- line2[pos] = chunk_is_free ? 'm' : 'M';
- } else if (chunk->word_size() > med_chunk_size) {
- line2[pos] = chunk_is_free ? 'h' : 'H';
+ lines[1][pos] = chunk_is_free ? 'm' : 'M';
+ } else if (chunk->word_size() > med_chunk_size) {
+ lines[1][pos] = chunk_is_free ? 'h' : 'H';
} else {
ShouldNotReachHere();
}
+
+ // Line 3: chunk origin
+ const ChunkOrigin origin = chunk->get_origin();
+ lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin;
+
+ // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct
+ // of padding or splitting, but which were never used.
+ lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v';
+
p += spec_chunk_size;
pos ++;
}
if (pos > 0) {
- st->fill_to(22);
- st->print_raw(line1, pos);
- st->cr();
- st->fill_to(22);
- st->print_raw(line2, pos);
- st->cr();
- }
- os::free(line1);
- os::free(line2);
- os::free(line3);
+ for (int i = 0; i < NUM_LINES; i ++) {
+ st->fill_to(22);
+ st->print_raw(lines[i], pos); // Only the first 'pos' characters are valid here.
+ st->cr();
+ }
+ }
+ for (int i = 0; i < NUM_LINES; i ++) {
+ os::free(lines[i]);
+ }
}
@@ -639,6 +1013,7 @@
Metachunk* invalid_chunk = (Metachunk*) top();
while (chunk < invalid_chunk ) {
MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+ do_verify_chunk(chunk);
// Don't count the chunks on the free lists. Those are
// still part of the VirtualSpaceNode but not currently
// counted.
@@ -651,6 +1026,77 @@
}
#endif
+#ifdef ASSERT
+// Verify counters, all chunks in this list node and the occupancy map.
+void VirtualSpaceNode::verify() {
+ uintx num_in_use_chunks = 0;
+ Metachunk* chunk = first_chunk();
+ Metachunk* invalid_chunk = (Metachunk*) top();
+
+ // Iterate the chunks in this node and verify each chunk.
+ while (chunk < invalid_chunk ) {
+ DEBUG_ONLY(do_verify_chunk(chunk);)
+ if (!chunk->is_tagged_free()) {
+ num_in_use_chunks ++;
+ }
+ MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+ chunk = (Metachunk*) next;
+ }
+ assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
+ ", counter: " UINTX_FORMAT ".", num_in_use_chunks, _container_count);
+ // Also verify the occupancy map.
+ occupancy_map()->verify(this->bottom(), this->top());
+}
+#endif // ASSERT
+
+#ifdef ASSERT
+// Verify that all free chunks in this node are ideally merged
+// (there should not be multiple small chunks where a larger chunk could exist).
+void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() {
+ Metachunk* chunk = first_chunk();
+ Metachunk* invalid_chunk = (Metachunk*) top();
+ // Shorthands.
+ const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord;
+ const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord;
+ int num_free_chunks_since_last_med_boundary = -1;
+ int num_free_chunks_since_last_small_boundary = -1;
+ while (chunk < invalid_chunk ) {
+ // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary.
+ // Reset the counter when encountering a non-free chunk.
+ if (chunk->get_chunk_type() != HumongousIndex) {
+ if (chunk->is_tagged_free()) {
+ // Count successive free, non-humongous chunks.
+ if (is_aligned(chunk, size_small)) {
+ assert(num_free_chunks_since_last_small_boundary <= 1,
+ "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_small, size_small);
+ num_free_chunks_since_last_small_boundary = 0;
+ } else if (num_free_chunks_since_last_small_boundary != -1) {
+ num_free_chunks_since_last_small_boundary ++;
+ }
+ if (is_aligned(chunk, size_med)) {
+ assert(num_free_chunks_since_last_med_boundary <= 1,
+ "Missed chunk merge opportunity at " PTR_FORMAT " for chunk size " SIZE_FORMAT_HEX ".", p2i(chunk) - size_med, size_med);
+ num_free_chunks_since_last_med_boundary = 0;
+ } else if (num_free_chunks_since_last_med_boundary != -1) {
+ num_free_chunks_since_last_med_boundary ++;
+ }
+ } else {
+ // Encountering a non-free chunk, reset counters.
+ num_free_chunks_since_last_med_boundary = -1;
+ num_free_chunks_since_last_small_boundary = -1;
+ }
+ } else {
+ // One cannot merge areas with a humongous chunk in the middle. Reset counters.
+ num_free_chunks_since_last_med_boundary = -1;
+ num_free_chunks_since_last_small_boundary = -1;
+ }
+
+ MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+ chunk = (Metachunk*) next;
+ }
+}
+#endif // ASSERT
+
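
The essence of this check is a counter of consecutive free chunks since the last alignment boundary: if it exceeds one when the next boundary is reached, a merge opportunity was missed. A toy standalone model with illustrative data and non-class word sizes (everything here is local to the example):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy model: chunks as (start, size, free), sizes in words. Four free
    // specialized chunks filling a small-chunk-aligned window should have
    // been merged into one small chunk.
    struct Chunk { size_t start, size; bool free; };

    int main() {
      const size_t small = 512;
      std::vector<Chunk> chunks = {
        {0, 128, true}, {128, 128, true}, {256, 128, true}, {384, 128, true},
        {512, 512, true}
      };
      int free_since_boundary = -1;
      for (const Chunk& c : chunks) {
        if (!c.free) { free_since_boundary = -1; continue; }
        if (c.start % small == 0) {
          if (free_since_boundary > 1)
            printf("missed merge opportunity before offset %zu\n", c.start);
          free_since_boundary = 0;
        } else if (free_since_boundary != -1) {
          free_since_boundary++;
        }
      }
      return 0;
    }
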
// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
friend class VirtualSpaceNode;
@@ -776,7 +1222,7 @@
// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
- friend class Metaspace;
+ friend class ClassLoaderMetaspace;
friend class Metadebug;
private:
@@ -799,9 +1245,9 @@
// Maximum number of small chunks to allocate to a SpaceManager
static uint const _small_chunk_limit;
- // Maximum number of specialize chunks to allocate for anonymous
+ // Maximum number of specialize chunks to allocate for anonymous and delegating
// metadata space to a SpaceManager
- static uint const _anon_metadata_specialize_chunk_limit;
+ static uint const _anon_and_delegating_metadata_specialize_chunk_limit;
// Sum of all space in allocated chunks
size_t _allocated_blocks_words;
@@ -922,8 +1368,6 @@
// Block allocation and deallocation.
// Allocates a block from the current chunk
MetaWord* allocate(size_t word_size);
- // Allocates a block from a small chunk
- MetaWord* get_small_chunk_and_allocate(size_t word_size);
// Helper for allocations
MetaWord* allocate_work(size_t word_size);
@@ -971,7 +1415,7 @@
};
uint const SpaceManager::_small_chunk_limit = 4;
-uint const SpaceManager::_anon_metadata_specialize_chunk_limit = 4;
+uint const SpaceManager::_anon_and_delegating_metadata_specialize_chunk_limit = 4;
const char* SpaceManager::_expand_lock_name =
"SpaceManager chunk allocation lock";
@@ -1078,6 +1522,9 @@
VirtualSpaceNode::~VirtualSpaceNode() {
_rs.release();
+ if (_occupancy_map != NULL) {
+ delete _occupancy_map;
+ }
#ifdef ASSERT
size_t word_size = sizeof(*this) / BytesPerWord;
Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
@@ -1097,10 +1544,120 @@
return pointer_delta(end(), top(), sizeof(MetaWord));
}
+// Given an address larger than top(), allocate padding chunks until top is at the given address.
+void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) {
+
+ assert(target_top > top(), "Sanity");
+
+ // Padding chunks are added to the freelist.
+ ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
+
+ // shorthands
+ const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
+ const size_t small_word_size = chunk_manager->small_chunk_word_size();
+ const size_t med_word_size = chunk_manager->medium_chunk_word_size();
+
+ while (top() < target_top) {
+
+ // We could make this code more generic, but right now we only deal with two
+ // possible padding chunk sizes, so it is not worth it.
+ size_t padding_chunk_word_size = small_word_size;
+ if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
+ assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true.
+ padding_chunk_word_size = spec_word_size;
+ }
+ MetaWord* here = top();
+ assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord));
+ inc_top(padding_chunk_word_size);
+
+ // Create new padding chunk.
+ ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class());
+ assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
+
+ Metachunk* const padding_chunk =
+ ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this);
+ assert(padding_chunk == (Metachunk*)here, "Sanity");
+ DEBUG_ONLY(padding_chunk->set_origin(origin_pad);)
+ log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
+ PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
+ (is_class() ? "class space " : "metaspace"),
+ p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
+
+ // Mark chunk start in occupancy map.
+ occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true);
+
+ // Chunks are born as in-use (see the Metachunk ctor). So, before returning
+ // the padding chunk to its chunk manager, mark it as in use (ChunkManager
+ // will assert that).
+ do_update_in_use_info_for_chunk(padding_chunk, true);
+
+ // Return Chunk to freelist.
+ inc_container_count();
+ chunk_manager->return_single_chunk(padding_chunk_type, padding_chunk);
+ // Please note: at this point, ChunkManager::return_single_chunk()
+ // may already have merged the padding chunk with neighboring chunks, so
+ // it may have vanished at this point. Do not reference the padding
+ // chunk beyond this point.
+ }
+
+ assert(top() == target_top, "Sanity");
+
+} // allocate_padding_chunks_until_top_is_at()
+
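
The size selection in the loop above reduces to plain address arithmetic. A hedged standalone model using word offsets and non-class sizes (all values illustrative): pad with specialized chunks until small alignment is reached, then with small chunks.

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Model of the padding loop: advance 'top' (in words) to 'target_top'
    // using only specialized (128-word) and small (512-word) padding chunks.
    int main() {
      const size_t spec = 128, small = 512;
      size_t top = 128;               // current, spec-aligned top
      const size_t target_top = 1024; // aligned target
      while (top < target_top) {
        size_t pad = small;
        if (top % small != 0) {       // not small-aligned: must pad with spec
          assert(top % spec == 0);    // top is always at least spec-aligned
          pad = spec;
        }
        printf("padding chunk of %zu words at %zu\n", pad, top);
        top += pad;
      }
      assert(top == target_top);
      return 0;
    }
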
// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging. Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
+ // Non-humongous chunks are to be allocated aligned to their chunk
+ // size. So, start addresses of medium chunks are aligned to medium
+ // chunk size, those of small chunks to small chunk size and so
+ // forth. This facilitates merging of free chunks and reduces
+ // fragmentation. Chunk sizes are spec < small < medium, with each
+ // larger chunk size being a multiple of the next smaller chunk
+ // size.
+ // Because of this alignment, we may need to create a number of padding
+ // chunks. These chunks are created and added to the freelist.
+
+ // The chunk manager to which we will give our padding chunks.
+ ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(this->is_class());
+
+ // shorthands
+ const size_t spec_word_size = chunk_manager->specialized_chunk_word_size();
+ const size_t small_word_size = chunk_manager->small_chunk_word_size();
+ const size_t med_word_size = chunk_manager->medium_chunk_word_size();
+
+ assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
+ chunk_word_size >= med_word_size, "Invalid chunk size requested.");
+
+ // Chunk alignment (in bytes) == chunk size unless humongous.
+ // Humongous chunks are aligned to the smallest chunk size (spec).
+ const size_t required_chunk_alignment = (chunk_word_size > med_word_size ?
+ spec_word_size : chunk_word_size) * sizeof(MetaWord);
+
+ // Do we have enough space to create the requested chunk plus
+ // any padding chunks needed?
+ MetaWord* const next_aligned =
+ static_cast<MetaWord*>(align_up(top(), required_chunk_alignment));
+ if (!is_available((next_aligned - top()) + chunk_word_size)) {
+ return NULL;
+ }
+
+ // Before allocating the requested chunk, allocate padding chunks if necessary.
+ // We only need to do this for small or medium chunks: specialized chunks are the
+ // smallest size, hence always aligned. Humongous chunks need no alignment beyond
+ // the smallest chunk size, which top always satisfies.
+ if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top()) {
+ log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
+ (is_class() ? "class space " : "metaspace"),
+ top(), next_aligned);
+ allocate_padding_chunks_until_top_is_at(next_aligned);
+ }
+
+ // Now, top should be aligned correctly.
+ assert_is_aligned(top(), required_chunk_alignment);
+
// Bottom of the new chunk
MetaWord* chunk_limit = top();
assert(chunk_limit != NULL, "Not safe to call this method");
@@ -1126,7 +1683,23 @@
inc_top(chunk_word_size);
// Initialize the chunk
- Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
+ ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class());
+ Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this);
+ assert(result == (Metachunk*)chunk_limit, "Sanity");
+ occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true);
+ do_update_in_use_info_for_chunk(result, true);
+
+ inc_container_count();
+
+ if (VerifyMetaspace) {
+ DEBUG_ONLY(chunk_manager->locked_verify());
+ DEBUG_ONLY(this->verify());
+ }
+
+ DEBUG_ONLY(do_verify_chunk(result));
+
+ result->inc_use_count();
+
return result;
}
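
The availability test near the top of take_from_committed is simple interval arithmetic: the node needs room for the distance from top to the next aligned address plus the requested chunk. A minimal sketch with illustrative word counts (names local to the example):

    #include <cstddef>
    #include <cstdio>

    static size_t align_up(size_t p, size_t a) { return (p + a - 1) & ~(a - 1); }

    // Space check: room for padding plus the requested chunk?
    int main() {
      size_t top = 1280, committed_end = 16384; // words, illustrative
      size_t chunk = 512, alignment = 512;      // small chunk, small alignment
      size_t next_aligned = align_up(top, alignment);
      size_t needed = (next_aligned - top) + chunk;
      printf("padding %zu + chunk %zu = %zu words, available %zu -> %s\n",
             next_aligned - top, chunk, needed, committed_end - top,
             needed <= committed_end - top ? "fits" : "does not fit");
      return 0;
    }
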
@@ -1145,6 +1718,14 @@
size_t commit = MIN2(preferred_bytes, uncommitted);
bool result = virtual_space()->expand_by(commit, false);
+ if (result) {
+ log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
+ (is_class() ? "class" : "non-class"), commit);
+ } else {
+ log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
+ (is_class() ? "class" : "non-class"), commit);
+ }
+
assert(result, "Failed to commit memory");
return result;
@@ -1153,9 +1734,6 @@
Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
assert_lock_strong(SpaceManager::expand_lock());
Metachunk* result = take_from_committed(chunk_word_size);
- if (result != NULL) {
- inc_container_count();
- }
return result;
}
@@ -1195,6 +1773,10 @@
_rs.size() / BytesPerWord);
}
+ // Initialize Occupancy Map.
+ const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk;
+ _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size);
+
return result;
}
@@ -1239,10 +1821,10 @@
}
#define assert_committed_below_limit() \
- assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize, \
+ assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
"Too much committed memory. Committed: " SIZE_FORMAT \
" limit (MaxMetaspaceSize): " SIZE_FORMAT, \
- MetaspaceAux::committed_bytes(), MaxMetaspaceSize);
+ MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);
void VirtualSpaceList::inc_committed_words(size_t v) {
assert_lock_strong(SpaceManager::expand_lock());
@@ -1279,6 +1861,135 @@
account_for_removed_chunk(chunk);
}
+bool ChunkManager::attempt_to_coalesce_around_chunk(Metachunk* chunk, ChunkIndex target_chunk_type) {
+ assert_lock_strong(SpaceManager::expand_lock());
+ assert(chunk != NULL, "invalid chunk pointer");
+ // Check for valid merge combinations.
+ assert((chunk->get_chunk_type() == SpecializedIndex &&
+ (target_chunk_type == SmallIndex || target_chunk_type == MediumIndex)) ||
+ (chunk->get_chunk_type() == SmallIndex && target_chunk_type == MediumIndex),
+ "Invalid chunk merge combination.");
+
+ const size_t target_chunk_word_size =
+ get_size_for_nonhumongous_chunktype(target_chunk_type, this->is_class());
+
+ // [ prospective merge region )
+ MetaWord* const p_merge_region_start =
+ (MetaWord*) align_down(chunk, target_chunk_word_size * sizeof(MetaWord));
+ MetaWord* const p_merge_region_end =
+ p_merge_region_start + target_chunk_word_size;
+
+ // We need the VirtualSpaceNode containing this chunk and its occupancy map.
+ VirtualSpaceNode* const vsn = chunk->container();
+ OccupancyMap* const ocmap = vsn->occupancy_map();
+
+ // The prospective chunk merge range must be completely contained by the
+ // committed range of the virtual space node.
+ if (p_merge_region_start < vsn->bottom() || p_merge_region_end > vsn->top()) {
+ return false;
+ }
+
+ // Only attempt to merge this range if a chunk starts at its start and a chunk
+ // ends at its end. If a chunk (which could only be a humongous chunk) straddles
+ // either boundary of the range, we cannot merge.
+ if (!ocmap->chunk_starts_at_address(p_merge_region_start)) {
+ return false;
+ }
+ if (p_merge_region_end < vsn->top() &&
+ !ocmap->chunk_starts_at_address(p_merge_region_end)) {
+ return false;
+ }
+
+ // Now check if the prospective merge area contains live chunks. If it does, we cannot merge.
+ if (ocmap->is_region_in_use(p_merge_region_start, target_chunk_word_size)) {
+ return false;
+ }
+
+ // Success! Remove all chunks in this region...
+ log_trace(gc, metaspace, freelist)("%s: coalescing chunks in area [%p-%p)...",
+ (is_class() ? "class space" : "metaspace"),
+ p_merge_region_start, p_merge_region_end);
+
+ const int num_chunks_removed =
+ remove_chunks_in_area(p_merge_region_start, target_chunk_word_size);
+
+ // ... and create a single new bigger chunk.
+ Metachunk* const p_new_chunk =
+ ::new (p_merge_region_start) Metachunk(target_chunk_type, is_class(), target_chunk_word_size, vsn);
+ assert(p_new_chunk == (Metachunk*)p_merge_region_start, "Sanity");
+ p_new_chunk->set_origin(origin_merge);
+
+ log_trace(gc, metaspace, freelist)("%s: created coalesced chunk at %p, size " SIZE_FORMAT_HEX ".",
+ (is_class() ? "class space" : "metaspace"),
+ p_new_chunk, p_new_chunk->word_size() * sizeof(MetaWord));
+
+ // Fix occupancy map: remove old start bits of the small chunks and set new start bit.
+ ocmap->wipe_chunk_start_bits_in_region(p_merge_region_start, target_chunk_word_size);
+ ocmap->set_chunk_starts_at_address(p_merge_region_start, true);
+
+ // Mark chunk as free. Note: it is not necessary to update the occupancy
+ // map in-use map, because the old chunks were also free, so nothing
+ // should have changed.
+ p_new_chunk->set_is_tagged_free(true);
+
+ // Add new chunk to its freelist.
+ ChunkList* const list = free_chunks(target_chunk_type);
+ list->return_chunk_at_head(p_new_chunk);
+
+ // And adjust ChunkManager::_free_chunks_count (_free_chunks_total
+ // should not have changed, because the size of the space is the same).
+ _free_chunks_count -= num_chunks_removed;
+ _free_chunks_count ++;
+
+ // VirtualSpaceNode::container_count does not have to be modified:
+ // it means "number of active (non-free) chunks", so merging free chunks
+ // should not affect that count.
+
+ // At the end of a chunk merge, run verification tests.
+ if (VerifyMetaspace) {
+ DEBUG_ONLY(this->locked_verify());
+ DEBUG_ONLY(vsn->verify());
+ }
+
+ return true;
+}
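
The prospective merge region is found with a single align-down of the chunk address to the target chunk size. A hedged sketch of just this address arithmetic (addresses and sizes illustrative, names local to the example):

    #include <cstdint>
    #include <cstdio>

    // Align a chunk address down to the target chunk size (a power of two)
    // to obtain the prospective merge region [start, start + size).
    static uintptr_t align_down(uintptr_t p, uintptr_t alignment) {
      return p & ~(alignment - 1);
    }

    int main() {
      const uintptr_t target_bytes = 512 * 8;   // small chunk, 8-byte words
      uintptr_t chunk = 0x7f0000012400;         // some free chunk's address
      uintptr_t start = align_down(chunk, target_bytes);
      printf("merge region: [0x%llx, 0x%llx)\n",
             (unsigned long long)start,
             (unsigned long long)(start + target_bytes));
      return 0;
    }
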
+
+// Remove all chunks in the given area - the chunks are supposed to be free -
+// from their corresponding freelists. Mark them as invalid.
+// - This does not correct the occupancy map.
+// - This does not adjust the counters in ChunkManager.
+// - This does not adjust the container count in the containing VirtualSpaceNode.
+// Returns number of chunks removed.
+int ChunkManager::remove_chunks_in_area(MetaWord* p, size_t word_size) {
+ assert(p != NULL && word_size > 0, "Invalid range.");
+ const size_t smallest_chunk_size = get_size_for_nonhumongous_chunktype(SpecializedIndex, is_class());
+ assert_is_aligned(word_size, smallest_chunk_size);
+
+ Metachunk* const start = (Metachunk*) p;
+ const Metachunk* const end = (Metachunk*)(p + word_size);
+ Metachunk* cur = start;
+ int num_removed = 0;
+ while (cur < end) {
+ Metachunk* next = (Metachunk*)(((MetaWord*)cur) + cur->word_size());
+ DEBUG_ONLY(do_verify_chunk(cur));
+ assert(cur->get_chunk_type() != HumongousIndex, "Unexpected humongous chunk found at %p.", cur);
+ assert(cur->is_tagged_free(), "Chunk expected to be free (%p)", cur);
+ log_trace(gc, metaspace, freelist)("%s: removing chunk %p, size " SIZE_FORMAT_HEX ".",
+ (is_class() ? "class space" : "metaspace"),
+ cur, cur->word_size() * sizeof(MetaWord));
+ cur->remove_sentinel();
+ // Note: cannot call ChunkManager::remove_chunk, because that
+ // modifies the counters in ChunkManager, which we do not want. So
+ // we call remove_chunk on the freelist directly (see also the
+ // splitting function which does the same).
+ ChunkList* const list = free_chunks(list_index(cur->word_size()));
+ list->remove_chunk(cur);
+ num_removed ++;
+ cur = next;
+ }
+ return num_removed;
+}
+
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
@@ -1297,6 +2008,8 @@
// Don't free the current virtual space since it will likely
// be needed soon.
if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
+ log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
+ ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
// Unlink it from the list
if (prev_vsl == vsl) {
// This is the case of the current node being the first node.
@@ -1358,14 +2071,22 @@
void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
DEBUG_ONLY(verify_container_count();)
+ assert(this->is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
ChunkIndex index = (ChunkIndex)i;
size_t chunk_size = chunk_manager->size_by_index(index);
while (free_words_in_vs() >= chunk_size) {
Metachunk* chunk = get_chunk_vs(chunk_size);
- assert(chunk != NULL, "allocation should have been successful");
-
+ // The chunk will be allocated aligned, so the allocation may require
+ // additional padding chunks, which may cause the allocation above to
+ // fail. Just ignore the failed allocation and continue with the
+ // next smaller chunk size. As the committed size of the
+ // VirtualSpaceNode should be a multiple of the smallest chunk
+ // size, we should always be able to fill the VirtualSpace completely.
+ if (chunk == NULL) {
+ break;
+ }
chunk_manager->return_single_chunk(index, chunk);
}
DEBUG_ONLY(verify_container_count();)
@@ -1394,7 +2115,7 @@
_virtual_space_count(0) {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
- VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
+ VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
bool succeeded = class_entry->initialize();
if (succeeded) {
link_vs(class_entry);
@@ -1426,7 +2147,7 @@
assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());
// Allocate the meta virtual space and initialize it.
- VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
+ VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
if (!new_entry->initialize()) {
delete new_entry;
return false;
@@ -1483,12 +2204,18 @@
assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
assert(min_words <= preferred_words, "Invalid arguments");
+ const char* const class_or_not = (is_class() ? "class" : "non-class");
+
if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
+ log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
+ class_or_not);
return false;
}
size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
if (allowed_expansion_words < min_words) {
+ log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
+ class_or_not);
return false;
}
@@ -1499,8 +2226,12 @@
min_words,
max_expansion_words);
if (vs_expanded) {
- return true;
- }
+ log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
+ class_or_not);
+ return true;
+ }
+ log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
+ class_or_not);
retire_current_virtual_space();
// Get another virtual space.
@@ -1524,6 +2255,24 @@
return false;
}
+// Given a chunk size, calculate the largest possible padding space which
+// could be required when allocating a chunk of that size.
+static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
+ const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
+ if (chunk_type != HumongousIndex) {
+ // Normal, non-humongous chunks are allocated at chunk-size
+ // boundaries, so the largest padding space required is the chunk
+ // size minus the smallest chunk size.
+ const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
+ return chunk_word_size - smallest_chunk_size;
+ } else {
+ // Humongous chunks are allocated at smallest-chunk-size
+ // boundaries, so there is no padding required.
+ return 0;
+ }
+}
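
Numerically, with the non-class sizes: a medium chunk of 8K words may need up to 8K - 128 = 8064 words of padding, while a humongous chunk needs none. A trivial standalone check (names local to the example):

    #include <cstddef>
    #include <cstdio>

    // Worst-case padding: chunk size minus the smallest chunk size for
    // non-humongous chunks, zero for humongous ones (illustrative sizes).
    static size_t max_padding(size_t chunk_words, bool humongous) {
      const size_t spec = 128;
      return humongous ? 0 : chunk_words - spec;
    }

    int main() {
      printf("medium:    %zu words\n", max_padding(8 * 1024, false)); // 8064
      printf("humongous: %zu words\n", max_padding(32 * 1024, true)); // 0
      return 0;
    }
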
+
+
Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {
// Allocate a chunk out of the current virtual space.
@@ -1536,7 +2285,11 @@
// The expand amount is currently only determined by the requested sizes
// and not how much committed memory is left in the current virtual space.
- size_t min_word_size = align_up(chunk_word_size, Metaspace::commit_alignment_words());
+ // We must have enough space for the requested size and any
+ // additional required padding chunks.
+ const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());
+
+ size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
if (min_word_size >= preferred_word_size) {
// Can happen when humongous chunks are allocated.
@@ -1668,21 +2421,25 @@
void MetaspaceGC::post_initialize() {
// Reset the high-water mark once the VM initialization is done.
- _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
+ _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}
bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
// Check if the compressed class space is full.
if (is_class && Metaspace::using_class_space()) {
- size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
+ log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
+ (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
return false;
}
}
// Check if the user has imposed a limit on the metaspace memory.
- size_t committed_bytes = MetaspaceAux::committed_bytes();
+ size_t committed_bytes = MetaspaceUtils::committed_bytes();
if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
+ log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
+ (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
return false;
}
@@ -1690,7 +2447,7 @@
}
size_t MetaspaceGC::allowed_expansion() {
- size_t committed_bytes = MetaspaceAux::committed_bytes();
+ size_t committed_bytes = MetaspaceUtils::committed_bytes();
size_t capacity_until_gc = capacity_until_GC();
assert(capacity_until_gc >= committed_bytes,
@@ -1700,6 +2457,9 @@
size_t left_until_max = MaxMetaspaceSize - committed_bytes;
size_t left_until_GC = capacity_until_gc - committed_bytes;
size_t left_to_commit = MIN2(left_until_GC, left_until_max);
+ log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
+ " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ".",
+ left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);
return left_to_commit / BytesPerWord;
}
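
The returned headroom is the smaller of two distances measured from the committed size, converted to words. A hedged numeric sketch with illustrative values (names local to the example):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // allowed_expansion() as plain arithmetic; all numbers illustrative.
    int main() {
      const size_t BytesPerWord = 8;
      size_t committed         = 40u * 1024 * 1024; // committed_bytes()
      size_t capacity_until_gc = 56u * 1024 * 1024; // capacity_until_GC()
      size_t max_metaspace     = 64u * 1024 * 1024; // MaxMetaspaceSize
      size_t left_until_gc  = capacity_until_gc - committed;
      size_t left_until_max = max_metaspace - committed;
      size_t left_to_commit = std::min(left_until_gc, left_until_max);
      printf("allowed expansion: %zu words\n", left_to_commit / BytesPerWord);
      return 0;
    }
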
@@ -1717,7 +2477,7 @@
// Including the chunk free lists in the definition of "in use" is therefore
// necessary. Not including the chunk free lists can cause capacity_until_GC to
// shrink below committed_bytes() and this has caused serious bugs in the past.
- const size_t used_after_gc = MetaspaceAux::committed_bytes();
+ const size_t used_after_gc = MetaspaceUtils::committed_bytes();
const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
@@ -1948,6 +2708,17 @@
void ChunkManager::locked_verify() {
locked_verify_free_chunks_count();
locked_verify_free_chunks_total();
+ for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) {
+ ChunkList* list = free_chunks(i);
+ if (list != NULL) {
+ Metachunk* chunk = list->head();
+ while (chunk) {
+ DEBUG_ONLY(do_verify_chunk(chunk);)
+ assert(chunk->is_tagged_free(), "Chunk should be tagged as free.");
+ chunk = chunk->next();
+ }
+ }
+ }
}
void ChunkManager::locked_print_free_chunks(outputStream* st) {
@@ -2007,27 +2778,173 @@
return free_chunks(index);
}
+// Helper for chunk splitting: given a target chunk size and a larger free chunk,
+// split the larger chunk into n smaller chunks, exactly one of which has the target
+// chunk size. All resulting chunks, including the target chunk, are returned to the
+// freelist. A pointer to the target chunk is returned; the caller is expected to
+// remove it from the freelist right away.
+Metachunk* ChunkManager::split_chunk(size_t target_chunk_word_size, Metachunk* larger_chunk) {
+ assert(larger_chunk->word_size() > target_chunk_word_size, "Sanity");
+
+ const ChunkIndex larger_chunk_index = larger_chunk->get_chunk_type();
+ const ChunkIndex target_chunk_index = get_chunk_type_by_size(target_chunk_word_size, is_class());
+
+ MetaWord* const region_start = (MetaWord*)larger_chunk;
+ const size_t region_word_len = larger_chunk->word_size();
+ MetaWord* const region_end = region_start + region_word_len;
+ VirtualSpaceNode* const vsn = larger_chunk->container();
+ OccupancyMap* const ocmap = vsn->occupancy_map();
+
+ // Any larger non-humongous chunk size is a multiple of any smaller chunk size.
+ // Since non-humongous chunks are aligned to their chunk size, the larger chunk should start
+ // at an address suitable to place the smaller target chunk.
+ assert_is_aligned(region_start, target_chunk_word_size);
+
+ // Remove old chunk.
+ free_chunks(larger_chunk_index)->remove_chunk(larger_chunk);
+ larger_chunk->remove_sentinel();
+
+ // Prevent access to the old chunk from here on.
+ larger_chunk = NULL;
+ // ... and wipe it.
+ DEBUG_ONLY(memset(region_start, 0xfe, region_word_len * BytesPerWord));
+
+ // In its place create first the target chunk...
+ MetaWord* p = region_start;
+ Metachunk* target_chunk = ::new (p) Metachunk(target_chunk_index, is_class(), target_chunk_word_size, vsn);
+ assert(target_chunk == (Metachunk*)p, "Sanity");
+ target_chunk->set_origin(origin_split);
+
+ // Note: we do not need to mark its start in the occupancy map
+ // because it coincides with the old chunk start.
+
+ // Mark chunk as free and return to the freelist.
+ do_update_in_use_info_for_chunk(target_chunk, false);
+ free_chunks(target_chunk_index)->return_chunk_at_head(target_chunk);
+
+ // This chunk should now be valid and can be verified.
+ DEBUG_ONLY(do_verify_chunk(target_chunk));
+
+ // In the remaining space create the remainder chunks.
+ p += target_chunk->word_size();
+ assert(p < region_end, "Sanity");
+
+ while (p < region_end) {
+
+ // Find the largest chunk size which fits the alignment requirements at address p.
+ ChunkIndex this_chunk_index = prev_chunk_index(larger_chunk_index);
+ size_t this_chunk_word_size = 0;
+ for (;;) {
+ this_chunk_word_size = get_size_for_nonhumongous_chunktype(this_chunk_index, is_class());
+ if (is_aligned(p, this_chunk_word_size * BytesPerWord)) {
+ break;
+ } else {
+ this_chunk_index = prev_chunk_index(this_chunk_index);
+ assert(this_chunk_index >= target_chunk_index, "Sanity");
+ }
+ }
+
+ assert(this_chunk_word_size >= target_chunk_word_size, "Sanity");
+ assert(is_aligned(p, this_chunk_word_size * BytesPerWord), "Sanity");
+ assert(p + this_chunk_word_size <= region_end, "Sanity");
+
+ // Create splitting chunk.
+ Metachunk* this_chunk = ::new (p) Metachunk(this_chunk_index, is_class(), this_chunk_word_size, vsn);
+ assert(this_chunk == (Metachunk*)p, "Sanity");
+ this_chunk->set_origin(origin_split);
+ ocmap->set_chunk_starts_at_address(p, true);
+ do_update_in_use_info_for_chunk(this_chunk, false);
+
+ // This chunk should be valid and can be verified.
+ DEBUG_ONLY(do_verify_chunk(this_chunk));
+
+ // Return this chunk to freelist and correct counter.
+ free_chunks(this_chunk_index)->return_chunk_at_head(this_chunk);
+ _free_chunks_count++;
+
+ log_trace(gc, metaspace, freelist)("Created chunk at " PTR_FORMAT ", word size "
+ SIZE_FORMAT_HEX " (%s), in split region [" PTR_FORMAT "..." PTR_FORMAT ").",
+ p2i(this_chunk), this_chunk->word_size(), chunk_size_name(this_chunk_index),
+ p2i(region_start), p2i(region_end));
+
+ p += this_chunk_word_size;
+
+ }
+
+ return target_chunk;
+}
+
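
The loop above is a greedy decomposition: the target chunk is carved off the front of the region, then the remainder is filled with the largest chunk size whose natural alignment holds at the current offset. A standalone toy model of just that walk (made-up chunk sizes standing in for specialized/small/medium, each dividing the next; not the HotSpot code):

#include <cstddef>
#include <cstdio>

int main() {
  // Toy chunk sizes in words; each size divides the next larger one, as the
  // real specialized/small/medium sizes do.
  const size_t sizes[] = { 128, 512, 8192 };
  const size_t region  = 8192;  // splitting one "medium" chunk
  const size_t target  = 128;   // caller asked for a "specialized" chunk

  printf("chunk @0 size %zu (the target chunk)\n", target);
  size_t p = target;            // the target chunk occupies [0..target)
  while (p < region) {
    // Pick the largest size that is naturally aligned at offset p and fits.
    int i = 2;
    while (i > 0 && (p % sizes[i] != 0 || p + sizes[i] > region)) {
      i--;
    }
    printf("chunk @%zu size %zu\n", p, sizes[i]);
    p += sizes[i];
  }
  return 0;
}
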
Metachunk* ChunkManager::free_chunks_get(size_t word_size) {
assert_lock_strong(SpaceManager::expand_lock());
slow_locked_verify();
Metachunk* chunk = NULL;
+ bool we_did_split_a_chunk = false;
+
if (list_index(word_size) != HumongousIndex) {
+
ChunkList* free_list = find_free_chunks_list(word_size);
assert(free_list != NULL, "Sanity check");
chunk = free_list->head();
if (chunk == NULL) {
+ // There is no free chunk of the requested size; split a larger free chunk
+ // instead. This is the counterpart of the coalescing-upon-chunk-return.
+
+ ChunkIndex target_chunk_index = get_chunk_type_by_size(word_size, is_class());
+
+ // Is there a larger chunk we could split?
+ Metachunk* larger_chunk = NULL;
+ ChunkIndex larger_chunk_index = next_chunk_index(target_chunk_index);
+ while (larger_chunk == NULL && larger_chunk_index < NumberOfFreeLists) {
+ larger_chunk = free_chunks(larger_chunk_index)->head();
+ if (larger_chunk == NULL) {
+ larger_chunk_index = next_chunk_index(larger_chunk_index);
+ }
+ }
+
+ if (larger_chunk != NULL) {
+ assert(larger_chunk->word_size() > word_size, "Sanity");
+ assert(larger_chunk->get_chunk_type() == larger_chunk_index, "Sanity");
+
+ // We found a larger chunk. Let's split it up:
+ // - remove old chunk
+ // - in its place, create new smaller chunks, with at least one chunk
+ // being of target size, the others sized as large as possible. This
+ // is to make sure the resulting chunks are "as coalesced as possible"
+ // (similar to VirtualSpaceNode::retire()).
+ // Note: during this operation both ChunkManager and VirtualSpaceNode
+ // are temporarily invalid, so be careful with asserts.
+
+ log_trace(gc, metaspace, freelist)("%s: splitting chunk " PTR_FORMAT
+ ", word size " SIZE_FORMAT_HEX " (%s), to get a chunk of word size " SIZE_FORMAT_HEX " (%s)...",
+ (is_class() ? "class space" : "metaspace"), p2i(larger_chunk), larger_chunk->word_size(),
+ chunk_size_name(larger_chunk_index), word_size, chunk_size_name(target_chunk_index));
+
+ chunk = split_chunk(word_size, larger_chunk);
+
+ // This should have worked.
+ assert(chunk != NULL, "Sanity");
+ assert(chunk->word_size() == word_size, "Sanity");
+ assert(chunk->is_tagged_free(), "Sanity");
+
+ we_did_split_a_chunk = true;
+
+ }
+ }
+
+ if (chunk == NULL) {
return NULL;
}
// Remove the chunk as the head of the list.
free_list->remove_chunk(chunk);
- log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list " PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
- p2i(free_list), p2i(chunk), chunk->word_size());
+ log_trace(gc, metaspace, freelist)("ChunkManager::free_chunks_get: free_list: " PTR_FORMAT " chunks left: " SSIZE_FORMAT ".",
+ p2i(free_list), free_list->count());
+
} else {
chunk = humongous_dictionary()->get_chunk(word_size);
@@ -2041,17 +2958,26 @@
// Chunk has been removed from the chunk manager; update counters.
account_for_removed_chunk(chunk);
+ do_update_in_use_info_for_chunk(chunk, true);
+ chunk->container()->inc_container_count();
+ chunk->inc_use_count();
// Remove it from the links to this freelist
chunk->set_next(NULL);
chunk->set_prev(NULL);
- // Chunk is no longer on any freelist. Setting to false make container_count_slow()
- // work.
- chunk->set_is_tagged_free(false);
- chunk->container()->inc_container_count();
-
- slow_locked_verify();
+ // Run some verifications (some more if we did a chunk split)
+#ifdef ASSERT
+ if (VerifyMetaspace) {
+ locked_verify();
+ VirtualSpaceNode* const vsn = chunk->container();
+ vsn->verify();
+ if (we_did_split_a_chunk) {
+ vsn->verify_free_chunks_are_ideally_merged();
+ }
+ }
+#endif
+
return chunk;
}
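
The search for a splittable chunk simply walks the free lists upward from the requested index. A minimal sketch of that walk with simplified types (not the HotSpot classes):

struct Chunk;                    // opaque here
struct FreeList { Chunk* head; };

// Starting just above the requested index, return the head of the first
// non-empty larger free list; the caller then splits it down to target size.
Chunk* find_splittable(FreeList lists[], int target_index, int num_lists) {
  for (int i = target_index + 1; i < num_lists; i++) {
    if (lists[i].head != nullptr) {
      return lists[i].head;
    }
  }
  return nullptr;                // no larger free chunk either; give up
}
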
@@ -2089,6 +3015,8 @@
void ChunkManager::return_single_chunk(ChunkIndex index, Metachunk* chunk) {
assert_lock_strong(SpaceManager::expand_lock());
assert(chunk != NULL, "Expected chunk.");
+ DEBUG_ONLY(do_verify_chunk(chunk);)
+ assert(chunk->get_chunk_type() == index, "Chunk does not match expected index.");
assert(chunk->container() != NULL, "Container should have been set.");
assert(chunk->is_tagged_free() == false, "Chunk should be in use.");
@@ -2097,7 +3025,7 @@
// Note: mangle *before* returning the chunk to the freelist or dictionary. It does not
// matter for the freelist (non-humongous chunks), but the humongous chunk dictionary
// keeps tree node pointers in the chunk payload area which mangle will overwrite.
- NOT_PRODUCT(chunk->mangle(badMetaWordVal);)
+ DEBUG_ONLY(chunk->mangle(badMetaWordVal);)
if (index != HumongousIndex) {
// Return non-humongous chunk to freelist.
@@ -2116,11 +3044,24 @@
chunk_size_name(index), p2i(chunk), chunk->word_size());
}
chunk->container()->dec_container_count();
- chunk->set_is_tagged_free(true);
+ do_update_in_use_info_for_chunk(chunk, false);
// Chunk has been added; update counters.
account_for_added_chunk(chunk);
+ // Attempt to coalesce the returned chunk with its neighboring chunks:
+ // if this chunk is small or specialized, attempt to coalesce it into a medium chunk.
+ if (index == SmallIndex || index == SpecializedIndex) {
+ if (!attempt_to_coalesce_around_chunk(chunk, MediumIndex)) {
+ // This did not work. But if this chunk is specialized, we may still be able to form a small chunk.
+ if (index == SpecializedIndex) {
+ if (!attempt_to_coalesce_around_chunk(chunk, SmallIndex)) {
+ // Give up.
+ }
+ }
+ }
+ }
+
}
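
The nested fallback above reads more clearly in isolation. A runnable sketch of the cascade, with a stub standing in for attempt_to_coalesce_around_chunk():

#include <cstdio>

enum ChunkIndex { SPECIALIZED, SMALL, MEDIUM };

// Stub standing in for ChunkManager::attempt_to_coalesce_around_chunk();
// here it only reports what a real implementation would try.
static bool attempt_merge(ChunkIndex target) {
  printf("trying to merge neighbors into a %s chunk\n",
         target == MEDIUM ? "medium" : "small");
  return false;  // pretend the neighborhood is not fully free
}

// The cascade used when a chunk is returned: small/specialized chunks first
// try to form a medium chunk; a specialized chunk falls back to a small one.
static void on_chunk_returned(ChunkIndex index) {
  if (index == SMALL || index == SPECIALIZED) {
    if (!attempt_merge(MEDIUM) && index == SPECIALIZED) {
      attempt_merge(SMALL);  // last resort; give up silently if this fails too
    }
  }
}

int main() { on_chunk_returned(SPECIALIZED); return 0; }
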
void ChunkManager::return_chunk_list(ChunkIndex index, Metachunk* chunks) {
@@ -2414,11 +3355,11 @@
// Anonymous metadata space is usually small, with majority within 1K - 2K range and
// rarely about 4K (64-bits JVM).
// Instead of jumping to SmallChunk after initial chunk exhausted, keeping allocation
- // from SpecializeChunk up to _anon_metadata_specialize_chunk_limit (4) reduces space waste
- // from 60+% to around 30%.
- if (_space_type == Metaspace::AnonymousMetaspaceType &&
+ // from SpecializedChunk up to _anon_and_delegating_metadata_specialize_chunk_limit (4)
+ // reduces space waste from 60+% to around 30%.
+ if ((_space_type == Metaspace::AnonymousMetaspaceType || _space_type == Metaspace::ReflectionMetaspaceType) &&
_mdtype == Metaspace::NonClassType &&
- sum_count_in_chunks_in_use(SpecializedIndex) < _anon_metadata_specialize_chunk_limit &&
+ sum_count_in_chunks_in_use(SpecializedIndex) < _anon_and_delegating_metadata_specialize_chunk_limit &&
word_size + Metachunk::overhead() <= SpecializedChunk) {
return SpecializedChunk;
}
@@ -2547,27 +3488,27 @@
_allocated_chunks_words = _allocated_chunks_words + words;
_allocated_chunks_count++;
// Global total of capacity in allocated Metachunks
- MetaspaceAux::inc_capacity(mdtype(), words);
+ MetaspaceUtils::inc_capacity(mdtype(), words);
// Global total of allocated Metablocks.
// used_words_slow() includes the overhead in each
// Metachunk so include it in the used when the
// Metachunk is first added (so only added once per
// Metachunk).
- MetaspaceAux::inc_used(mdtype(), Metachunk::overhead());
+ MetaspaceUtils::inc_used(mdtype(), Metachunk::overhead());
}
void SpaceManager::inc_used_metrics(size_t words) {
// Add to the per SpaceManager total
Atomic::add(words, &_allocated_blocks_words);
// Add to the global total
- MetaspaceAux::inc_used(mdtype(), words);
+ MetaspaceUtils::inc_used(mdtype(), words);
}
void SpaceManager::dec_total_from_size_metrics() {
- MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words());
- MetaspaceAux::dec_used(mdtype(), allocated_blocks_words());
+ MetaspaceUtils::dec_capacity(mdtype(), allocated_chunks_words());
+ MetaspaceUtils::dec_used(mdtype(), allocated_blocks_words());
// Also deduct the overhead per Metachunk
- MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
+ MetaspaceUtils::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead());
}
void SpaceManager::initialize() {
@@ -2589,6 +3530,11 @@
MutexLockerEx fcl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
+ assert(sum_count_in_chunks_in_use() == allocated_chunks_count(),
+ "sum_count_in_chunks_in_use() " SIZE_FORMAT
+ " allocated_chunks_count() " SIZE_FORMAT,
+ sum_count_in_chunks_in_use(), allocated_chunks_count());
+
chunk_manager()->slow_locked_verify();
dec_total_from_size_metrics();
@@ -2712,45 +3658,6 @@
return next;
}
-/*
- * The policy is to allocate up to _small_chunk_limit small chunks
- * after which only medium chunks are allocated. This is done to
- * reduce fragmentation. In some cases, this can result in a lot
- * of small chunks being allocated to the point where it's not
- * possible to expand. If this happens, there may be no medium chunks
- * available and OOME would be thrown. Instead of doing that,
- * if the allocation request size fits in a small chunk, an attempt
- * will be made to allocate a small chunk.
- */
-MetaWord* SpaceManager::get_small_chunk_and_allocate(size_t word_size) {
- size_t raw_word_size = get_allocation_word_size(word_size);
-
- if (raw_word_size + Metachunk::overhead() > small_chunk_size()) {
- return NULL;
- }
-
- MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
- MutexLockerEx cl1(expand_lock(), Mutex::_no_safepoint_check_flag);
-
- Metachunk* chunk = chunk_manager()->chunk_freelist_allocate(small_chunk_size());
-
- MetaWord* mem = NULL;
-
- if (chunk != NULL) {
- // Add chunk to the in-use chunk list and do an allocation from it.
- // Add to this manager's list of chunks in use.
- add_chunk(chunk, false);
- mem = chunk->allocate(raw_word_size);
-
- inc_used_metrics(raw_word_size);
-
- // Track metaspace memory usage statistic.
- track_metaspace_memory_usage();
- }
-
- return mem;
-}
-
MetaWord* SpaceManager::allocate(size_t word_size) {
MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
size_t raw_word_size = get_allocation_word_size(word_size);
@@ -2801,17 +3708,12 @@
}
void SpaceManager::verify() {
- // If there are blocks in the dictionary, then
- // verification of chunks does not work since
- // being in the dictionary alters a chunk.
- if (block_freelists() != NULL && block_freelists()->total_size() == 0) {
- for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
- Metachunk* curr = chunks_in_use(i);
- while (curr != NULL) {
- curr->verify();
- verify_chunk_size(curr);
- curr = curr->next();
- }
+ for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+ Metachunk* curr = chunks_in_use(i);
+ while (curr != NULL) {
+ DEBUG_ONLY(do_verify_chunk(curr);)
+ assert(curr->is_tagged_free() == false, "Chunk should be tagged as in use.");
+ curr = curr->next();
}
}
}
@@ -2874,22 +3776,22 @@
" waste " SIZE_FORMAT, curr_total, used, free, capacity, waste);
}
-// MetaspaceAux
-
-
-size_t MetaspaceAux::_capacity_words[] = {0, 0};
-volatile size_t MetaspaceAux::_used_words[] = {0, 0};
-
-size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
+// MetaspaceUtils
+
+
+size_t MetaspaceUtils::_capacity_words[] = {0, 0};
+volatile size_t MetaspaceUtils::_used_words[] = {0, 0};
+
+size_t MetaspaceUtils::free_bytes(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
return list == NULL ? 0 : list->free_bytes();
}
-size_t MetaspaceAux::free_bytes() {
+size_t MetaspaceUtils::free_bytes() {
return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType);
}
-void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
+void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
assert(words <= capacity_words(mdtype),
"About to decrement below 0: words " SIZE_FORMAT
@@ -2898,13 +3800,13 @@
_capacity_words[mdtype] -= words;
}
-void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
+void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
// Needs to be atomic
_capacity_words[mdtype] += words;
}
-void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
+void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
assert(words <= used_words(mdtype),
"About to decrement below 0: words " SIZE_FORMAT
" is greater than _used_words[%u] " SIZE_FORMAT,
@@ -2916,7 +3818,7 @@
Atomic::sub(words, &_used_words[mdtype]);
}
-void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
+void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
// _used_words tracks allocations for
// each piece of metadata. Those allocations are
// generally done concurrently by different application
@@ -2924,11 +3826,11 @@
Atomic::add(words, &_used_words[mdtype]);
}
-size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::used_bytes_slow(Metaspace::MetadataType mdtype) {
size_t used = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
- Metaspace* msp = iter.get_next();
+ ClassLoaderMetaspace* msp = iter.get_next();
// Sum allocated_blocks_words for each metaspace
if (msp != NULL) {
used += msp->used_words_slow(mdtype);
@@ -2937,11 +3839,11 @@
return used * BytesPerWord;
}
-size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::free_bytes_slow(Metaspace::MetadataType mdtype) {
size_t free = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
- Metaspace* msp = iter.get_next();
+ ClassLoaderMetaspace* msp = iter.get_next();
if (msp != NULL) {
free += msp->free_words_slow(mdtype);
}
@@ -2949,7 +3851,7 @@
return free * BytesPerWord;
}
-size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
return 0;
}
@@ -2958,7 +3860,7 @@
size_t capacity = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
- Metaspace* msp = iter.get_next();
+ ClassLoaderMetaspace* msp = iter.get_next();
if (msp != NULL) {
capacity += msp->capacity_words_slow(mdtype);
}
@@ -2966,7 +3868,7 @@
return capacity * BytesPerWord;
}
-size_t MetaspaceAux::capacity_bytes_slow() {
+size_t MetaspaceUtils::capacity_bytes_slow() {
#ifdef PRODUCT
// Use capacity_bytes() in PRODUCT instead of this function.
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
@@ -2983,19 +3885,19 @@
return class_capacity + non_class_capacity;
}
-size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
return list == NULL ? 0 : list->reserved_bytes();
}
-size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
return list == NULL ? 0 : list->committed_bytes();
}
-size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
-
-size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
+
+size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
if (chunk_manager == NULL) {
return 0;
@@ -3004,24 +3906,24 @@
return chunk_manager->free_chunks_total_words();
}
-size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
return free_chunks_total_words(mdtype) * BytesPerWord;
}
-size_t MetaspaceAux::free_chunks_total_words() {
+size_t MetaspaceUtils::free_chunks_total_words() {
return free_chunks_total_words(Metaspace::ClassType) +
free_chunks_total_words(Metaspace::NonClassType);
}
-size_t MetaspaceAux::free_chunks_total_bytes() {
+size_t MetaspaceUtils::free_chunks_total_bytes() {
return free_chunks_total_words() * BytesPerWord;
}
-bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) {
+bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
return Metaspace::get_chunk_manager(mdtype) != NULL;
}
-MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
+MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
if (!has_chunk_free_list(mdtype)) {
return MetaspaceChunkFreeListSummary();
}
@@ -3030,12 +3932,12 @@
return cm->chunk_free_list_summary();
}
-void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
+void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
}
-void MetaspaceAux::print_on(outputStream* out) {
+void MetaspaceUtils::print_on(outputStream* out) {
Metaspace::MetadataType nct = Metaspace::NonClassType;
out->print_cr(" Metaspace "
@@ -3064,7 +3966,7 @@
// Print information for class space and data space separately.
// This is almost the same as above.
-void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
+void MetaspaceUtils::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
size_t capacity_bytes = capacity_bytes_slow(mdtype);
size_t used_bytes = used_bytes_slow(mdtype);
@@ -3085,13 +3987,13 @@
}
// Print total fragmentation for class metaspaces
-void MetaspaceAux::print_class_waste(outputStream* out) {
+void MetaspaceUtils::print_class_waste(outputStream* out) {
assert(Metaspace::using_class_space(), "class metaspace not used");
size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
- Metaspace* msp = iter.get_next();
+ ClassLoaderMetaspace* msp = iter.get_next();
if (msp != NULL) {
cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
@@ -3112,13 +4014,13 @@
}
// Print total fragmentation for data and class metaspaces separately
-void MetaspaceAux::print_waste(outputStream* out) {
+void MetaspaceUtils::print_waste(outputStream* out) {
size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
- Metaspace* msp = iter.get_next();
+ ClassLoaderMetaspace* msp = iter.get_next();
if (msp != NULL) {
specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
@@ -3141,7 +4043,7 @@
}
}
-class MetadataStats VALUE_OBJ_CLASS_SPEC {
+class MetadataStats {
private:
size_t _capacity;
size_t _used;
@@ -3203,7 +4105,7 @@
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
if (cld->is_unloading()) return;
- Metaspace* msp = cld->metaspace_or_null();
+ ClassLoaderMetaspace* msp = cld->metaspace_or_null();
if (msp == NULL) {
return;
}
@@ -3222,11 +4124,11 @@
}
private:
- void print_metaspace(Metaspace* msp, bool anonymous);
+ void print_metaspace(ClassLoaderMetaspace* msp, bool anonymous);
void print_summary() const;
};
-void PrintCLDMetaspaceInfoClosure::print_metaspace(Metaspace* msp, bool anonymous){
+void PrintCLDMetaspaceInfoClosure::print_metaspace(ClassLoaderMetaspace* msp, bool anonymous){
assert(msp != NULL, "Sanity");
SpaceManager* vsm = msp->vsm();
const char* unit = scale_unit(_scale);
@@ -3305,7 +4207,7 @@
}
}
-void MetaspaceAux::print_metadata_for_nmt(outputStream* out, size_t scale) {
+void MetaspaceUtils::print_metadata_for_nmt(outputStream* out, size_t scale) {
const char* unit = scale_unit(scale);
out->print_cr("Metaspaces:");
out->print_cr(" Metadata space: reserved=" SIZE_FORMAT_W(10) "%s committed=" SIZE_FORMAT_W(10) "%s",
@@ -3330,7 +4232,7 @@
// Dump global metaspace things from the end of ClassLoaderDataGraph
-void MetaspaceAux::dump(outputStream* out) {
+void MetaspaceUtils::dump(outputStream* out) {
out->print_cr("All Metaspace:");
out->print("data space: "); print_on(out, Metaspace::NonClassType);
out->print("class space: "); print_on(out, Metaspace::ClassType);
@@ -3338,7 +4240,7 @@
}
// Prints an ASCII representation of the given space.
-void MetaspaceAux::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
+void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
const bool for_class = mdtype == Metaspace::ClassType ? true : false;
VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
@@ -3362,14 +4264,14 @@
}
}
-void MetaspaceAux::verify_free_chunks() {
+void MetaspaceUtils::verify_free_chunks() {
Metaspace::chunk_manager_metadata()->verify();
if (Metaspace::using_class_space()) {
Metaspace::chunk_manager_class()->verify();
}
}
-void MetaspaceAux::verify_capacity() {
+void MetaspaceUtils::verify_capacity() {
#ifdef ASSERT
size_t running_sum_capacity_bytes = capacity_bytes();
// For purposes of the running sum of capacity, verify against capacity
@@ -3390,7 +4292,7 @@
#endif
}
-void MetaspaceAux::verify_used() {
+void MetaspaceUtils::verify_used() {
#ifdef ASSERT
size_t running_sum_used_bytes = used_bytes();
// For purposes of the running sum of used, verify against used
@@ -3411,7 +4313,7 @@
#endif
}
-void MetaspaceAux::verify_metrics() {
+void MetaspaceUtils::verify_metrics() {
verify_capacity();
verify_used();
}
@@ -3425,17 +4327,6 @@
size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;
-Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
- initialize(lock, type);
-}
-
-Metaspace::~Metaspace() {
- delete _vsm;
- if (using_class_space()) {
- delete _class_vsm;
- }
-}
-
VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;
@@ -3650,7 +4541,7 @@
SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
assert(using_class_space(), "Must be using class space");
_class_space_list = new VirtualSpaceList(rs);
- _chunk_manager_class = new ChunkManager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+ _chunk_manager_class = new ChunkManager(true/*is_class*/);
if (!_class_space_list->initialization_succeeded()) {
vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
@@ -3757,7 +4648,7 @@
// Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size);
- _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+ _chunk_manager_metadata = new ChunkManager(false/*is_class*/);
if (!_space_list->initialization_succeeded()) {
vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
@@ -3770,28 +4661,6 @@
MetaspaceGC::post_initialize();
}
-void Metaspace::initialize_first_chunk(MetaspaceType type, MetadataType mdtype) {
- Metachunk* chunk = get_initialization_chunk(type, mdtype);
- if (chunk != NULL) {
- // Add to this manager's list of chunks in use and current_chunk().
- get_space_manager(mdtype)->add_chunk(chunk, true);
- }
-}
-
-Metachunk* Metaspace::get_initialization_chunk(MetaspaceType type, MetadataType mdtype) {
- size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
-
- // Get a chunk from the chunk freelist
- Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
-
- if (chunk == NULL) {
- chunk = get_space_list(mdtype)->get_new_chunk(chunk_word_size,
- get_space_manager(mdtype)->medium_chunk_bunch());
- }
-
- return chunk;
-}
-
void Metaspace::verify_global_initialization() {
assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");
@@ -3802,132 +4671,11 @@
}
}
-void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
- verify_global_initialization();
-
- // Allocate SpaceManager for metadata objects.
- _vsm = new SpaceManager(NonClassType, type, lock);
-
- if (using_class_space()) {
- // Allocate SpaceManager for classes.
- _class_vsm = new SpaceManager(ClassType, type, lock);
- }
-
- MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
-
- // Allocate chunk for metadata objects
- initialize_first_chunk(type, NonClassType);
-
- // Allocate chunk for class metadata objects
- if (using_class_space()) {
- initialize_first_chunk(type, ClassType);
- }
-}
-
size_t Metaspace::align_word_size_up(size_t word_size) {
size_t byte_size = word_size * wordSize;
return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}
-MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
- assert(!_frozen, "sanity");
- // Don't use class_vsm() unless UseCompressedClassPointers is true.
- if (is_class_space_allocation(mdtype)) {
- return class_vsm()->allocate(word_size);
- } else {
- return vsm()->allocate(word_size);
- }
-}
-
-MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
- assert(!_frozen, "sanity");
- size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
- assert(delta_bytes > 0, "Must be");
-
- size_t before = 0;
- size_t after = 0;
- MetaWord* res;
- bool incremented;
-
- // Each thread increments the HWM at most once. Even if the thread fails to increment
- // the HWM, an allocation is still attempted. This is because another thread must then
- // have incremented the HWM and therefore the allocation might still succeed.
- do {
- incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
- res = allocate(word_size, mdtype);
- } while (!incremented && res == NULL);
-
- if (incremented) {
- tracer()->report_gc_threshold(before, after,
- MetaspaceGCThresholdUpdater::ExpandAndAllocate);
- log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
- }
-
- return res;
-}
-
-size_t Metaspace::used_words_slow(MetadataType mdtype) const {
- if (mdtype == ClassType) {
- return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
- } else {
- return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
- }
-}
-
-size_t Metaspace::free_words_slow(MetadataType mdtype) const {
- assert(!_frozen, "sanity");
- if (mdtype == ClassType) {
- return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
- } else {
- return vsm()->sum_free_in_chunks_in_use();
- }
-}
-
-// Space capacity in the Metaspace. It includes
-// space in the list of chunks from which allocations
-// have been made. Don't include space in the global freelist and
-// in the space available in the dictionary which
-// is already counted in some chunk.
-size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
- if (mdtype == ClassType) {
- return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
- } else {
- return vsm()->sum_capacity_in_chunks_in_use();
- }
-}
-
-size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
- return used_words_slow(mdtype) * BytesPerWord;
-}
-
-size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
- return capacity_words_slow(mdtype) * BytesPerWord;
-}
-
-size_t Metaspace::allocated_blocks_bytes() const {
- return vsm()->allocated_blocks_bytes() +
- (using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
-}
-
-size_t Metaspace::allocated_chunks_bytes() const {
- return vsm()->allocated_chunks_bytes() +
- (using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
-}
-
-void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
- assert(!_frozen, "sanity");
- assert(!SafepointSynchronize::is_at_safepoint()
- || Thread::current()->is_VM_thread(), "should be the VM thread");
-
- MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-
- if (is_class && using_class_space()) {
- class_vsm()->deallocate(ptr, word_size);
- } else {
- vsm()->deallocate(ptr, word_size);
- }
-}
-
MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
MetaspaceObj::Type type, TRAPS) {
assert(!_frozen, "sanity");
@@ -3945,6 +4693,12 @@
MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
if (result == NULL) {
+ if (DumpSharedSpaces && THREAD->is_VM_thread()) {
+ tty->print_cr("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
+ MetaspaceObj::type_name(type), word_size * BytesPerWord);
+ vm_exit(1);
+ }
+
tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);
// Allocation failed.
@@ -3957,18 +4711,7 @@
}
if (result == NULL) {
- SpaceManager* sm;
- if (is_class_space_allocation(mdtype)) {
- sm = loader_data->metaspace_non_null()->class_vsm();
- } else {
- sm = loader_data->metaspace_non_null()->vsm();
- }
-
- result = sm->get_small_chunk_and_allocate(word_size);
-
- if (result == NULL) {
- report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
- }
+ report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL);
}
// Zero initialize.
@@ -3977,11 +4720,6 @@
return result;
}
-size_t Metaspace::class_chunk_size(size_t word_size) {
- assert(using_class_space(), "Has to use class space");
- return class_vsm()->calc_chunk_size(word_size);
-}
-
void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);
@@ -3994,20 +4732,20 @@
if (log.is_debug()) {
if (loader_data->metaspace_or_null() != NULL) {
LogStream ls(log.debug());
- loader_data->dump(&ls);
+ loader_data->print_value_on(&ls);
}
}
LogStream ls(log.info());
- MetaspaceAux::dump(&ls);
- MetaspaceAux::print_metaspace_map(&ls, mdtype);
+ MetaspaceUtils::dump(&ls);
+ MetaspaceUtils::print_metaspace_map(&ls, mdtype);
ChunkManager::print_all_chunkmanagers(&ls);
}
bool out_of_compressed_class_space = false;
if (is_class_space_allocation(mdtype)) {
- Metaspace* metaspace = loader_data->metaspace_non_null();
+ ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
out_of_compressed_class_space =
- MetaspaceAux::committed_bytes(Metaspace::ClassType) +
+ MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
(metaspace->class_chunk_size(word_size) * BytesPerWord) >
CompressedClassSpaceSize;
}
@@ -4058,16 +4796,6 @@
}
}
-void Metaspace::print_on(outputStream* out) const {
- // Print both class virtual space counts and metaspace.
- if (Verbose) {
- vsm()->print_on(out);
- if (using_class_space()) {
- class_vsm()->print_on(out);
- }
- }
-}
-
bool Metaspace::contains(const void* ptr) {
if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
return true;
@@ -4083,61 +4811,251 @@
return get_space_list(NonClassType)->contains(ptr);
}
-void Metaspace::verify() {
+// ClassLoaderMetaspace
+
+ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type) {
+ initialize(lock, type);
+}
+
+ClassLoaderMetaspace::~ClassLoaderMetaspace() {
+ delete _vsm;
+ if (Metaspace::using_class_space()) {
+ delete _class_vsm;
+ }
+}
+
+void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
+ Metachunk* chunk = get_initialization_chunk(type, mdtype);
+ if (chunk != NULL) {
+ // Add to this manager's list of chunks in use and current_chunk().
+ get_space_manager(mdtype)->add_chunk(chunk, true);
+ }
+}
+
+Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
+ size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);
+
+ // Get a chunk from the chunk freelist
+ Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
+
+ if (chunk == NULL) {
+ chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
+ get_space_manager(mdtype)->medium_chunk_bunch());
+ }
+
+ return chunk;
+}
+
+void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
+ Metaspace::verify_global_initialization();
+
+ // Allocate SpaceManager for metadata objects.
+ _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);
+
+ if (Metaspace::using_class_space()) {
+ // Allocate SpaceManager for classes.
+ _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
+ }
+
+ MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+
+ // Allocate chunk for metadata objects
+ initialize_first_chunk(type, Metaspace::NonClassType);
+
+ // Allocate chunk for class metadata objects
+ if (Metaspace::using_class_space()) {
+ initialize_first_chunk(type, Metaspace::ClassType);
+ }
+}
+
+MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
+ Metaspace::assert_not_frozen();
+ // Don't use class_vsm() unless UseCompressedClassPointers is true.
+ if (Metaspace::is_class_space_allocation(mdtype)) {
+ return class_vsm()->allocate(word_size);
+ } else {
+ return vsm()->allocate(word_size);
+ }
+}
+
+MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
+ Metaspace::assert_not_frozen();
+ size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
+ assert(delta_bytes > 0, "Must be");
+
+ size_t before = 0;
+ size_t after = 0;
+ MetaWord* res;
+ bool incremented;
+
+ // Each thread increments the HWM at most once. Even if the thread fails to increment
+ // the HWM, an allocation is still attempted. This is because another thread must then
+ // have incremented the HWM and therefore the allocation might still succeed.
+ do {
+ incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before);
+ res = allocate(word_size, mdtype);
+ } while (!incremented && res == NULL);
+
+ if (incremented) {
+ Metaspace::tracer()->report_gc_threshold(before, after,
+ MetaspaceGCThresholdUpdater::ExpandAndAllocate);
+ log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
+ }
+
+ return res;
+}
+
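
The retry protocol above is worth seeing in miniature: each thread raises the high-water mark at most once, but an allocation is attempted even after a failed raise, because a failed raise implies another thread raised it. A standalone sketch with hypothetical names; the stub allocator always fails, so a single-threaded run terminates after one raise.

#include <atomic>
#include <cstddef>

std::atomic<size_t> g_hwm{0};            // stand-in for the capacity HWM

// At most one of several racing threads wins the CAS for a given old value.
bool raise_hwm_once(size_t delta) {
  size_t old = g_hwm.load();
  return g_hwm.compare_exchange_strong(old, old + delta);
}

void* allocate_stub(size_t) { return nullptr; }  // stand-in allocator

void* expand_and_allocate_sketch(size_t bytes) {
  bool raised;
  void* p;
  do {
    raised = raise_hwm_once(bytes);
    p = allocate_stub(bytes);            // attempted even if the raise failed,
  } while (!raised && p == nullptr);     // because someone else raised the mark
  return p;
}

int main() { expand_and_allocate_sketch(1024); return 0; }
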
+size_t ClassLoaderMetaspace::used_words_slow(Metaspace::MetadataType mdtype) const {
+ if (mdtype == Metaspace::ClassType) {
+ return Metaspace::using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
+ } else {
+ return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
+ }
+}
+
+size_t ClassLoaderMetaspace::free_words_slow(Metaspace::MetadataType mdtype) const {
+ Metaspace::assert_not_frozen();
+ if (mdtype == Metaspace::ClassType) {
+ return Metaspace::using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
+ } else {
+ return vsm()->sum_free_in_chunks_in_use();
+ }
+}
+
+// Space capacity in the Metaspace. It includes
+// space in the list of chunks from which allocations
+// have been made. Don't include space in the global freelist and
+// in the space available in the dictionary which
+// is already counted in some chunk.
+size_t ClassLoaderMetaspace::capacity_words_slow(Metaspace::MetadataType mdtype) const {
+ if (mdtype == Metaspace::ClassType) {
+ return Metaspace::using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
+ } else {
+ return vsm()->sum_capacity_in_chunks_in_use();
+ }
+}
+
+size_t ClassLoaderMetaspace::used_bytes_slow(Metaspace::MetadataType mdtype) const {
+ return used_words_slow(mdtype) * BytesPerWord;
+}
+
+size_t ClassLoaderMetaspace::capacity_bytes_slow(Metaspace::MetadataType mdtype) const {
+ return capacity_words_slow(mdtype) * BytesPerWord;
+}
+
+size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
+ return vsm()->allocated_blocks_bytes() +
+ (Metaspace::using_class_space() ? class_vsm()->allocated_blocks_bytes() : 0);
+}
+
+size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
+ return vsm()->allocated_chunks_bytes() +
+ (Metaspace::using_class_space() ? class_vsm()->allocated_chunks_bytes() : 0);
+}
+
+void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
+ Metaspace::assert_not_frozen();
+ assert(!SafepointSynchronize::is_at_safepoint()
+ || Thread::current()->is_VM_thread(), "should be the VM thread");
+
+ MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
+
+ if (is_class && Metaspace::using_class_space()) {
+ class_vsm()->deallocate(ptr, word_size);
+ } else {
+ vsm()->deallocate(ptr, word_size);
+ }
+}
+
+size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
+ assert(Metaspace::using_class_space(), "Has to use class space");
+ return class_vsm()->calc_chunk_size(word_size);
+}
+
+void ClassLoaderMetaspace::print_on(outputStream* out) const {
+ // Print both class virtual space counts and metaspace.
+ if (Verbose) {
+ vsm()->print_on(out);
+ if (Metaspace::using_class_space()) {
+ class_vsm()->print_on(out);
+ }
+ }
+}
+
+void ClassLoaderMetaspace::verify() {
vsm()->verify();
- if (using_class_space()) {
+ if (Metaspace::using_class_space()) {
class_vsm()->verify();
}
}
-void Metaspace::dump(outputStream* const out) const {
+void ClassLoaderMetaspace::dump(outputStream* const out) const {
out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, p2i(vsm()));
vsm()->dump(out);
- if (using_class_space()) {
+ if (Metaspace::using_class_space()) {
out->print_cr("\nClass space manager: " INTPTR_FORMAT, p2i(class_vsm()));
class_vsm()->dump(out);
}
}
+
+
+#ifdef ASSERT
+static void do_verify_chunk(Metachunk* chunk) {
+ guarantee(chunk != NULL, "Sanity");
+ // Verify chunk itself; then verify that it is consistent with the
+ // occupancy map of its containing node.
+ chunk->verify();
+ VirtualSpaceNode* const vsn = chunk->container();
+ OccupancyMap* const ocmap = vsn->occupancy_map();
+ ocmap->verify_for_chunk(chunk);
+}
+#endif
+
+static void do_update_in_use_info_for_chunk(Metachunk* chunk, bool inuse) {
+ chunk->set_is_tagged_free(!inuse);
+ OccupancyMap* const ocmap = chunk->container()->occupancy_map();
+ ocmap->set_region_in_use((MetaWord*)chunk, chunk->word_size(), inuse);
+}
+
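
The OccupancyMap class itself is added elsewhere in this patch and is not shown in this excerpt; as a mental model only, it can be pictured as two bitmaps over the smallest chunk granule, one marking chunk starts and one marking in-use ranges:

#include <cstddef>
#include <vector>

struct ToyOccupancyMap {                 // illustrative only, not the real class
  size_t granule;                        // words covered by one bit
  std::vector<bool> chunk_starts;        // bit set where a chunk header lives
  std::vector<bool> in_use;              // bit set where memory is handed out

  ToyOccupancyMap(size_t region_words, size_t granule_words)
    : granule(granule_words),
      chunk_starts(region_words / granule_words, false),
      in_use(region_words / granule_words, false) {}

  void set_chunk_starts_at(size_t offset_words, bool b) {
    chunk_starts[offset_words / granule] = b;
  }
  void set_region_in_use(size_t offset_words, size_t len_words, bool b) {
    for (size_t i = offset_words / granule;
         i < (offset_words + len_words) / granule; i++) {
      in_use[i] = b;
    }
  }
};

int main() {
  ToyOccupancyMap m(8192, 128);
  m.set_chunk_starts_at(0, true);        // a chunk begins at word 0 ...
  m.set_region_in_use(0, 512, true);     // ... and its 512 words are in use
  return 0;
}
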
/////////////// Unit tests ///////////////
#ifndef PRODUCT
-class TestMetaspaceAuxTest : AllStatic {
+class TestMetaspaceUtilsTest : AllStatic {
public:
static void test_reserved() {
- size_t reserved = MetaspaceAux::reserved_bytes();
+ size_t reserved = MetaspaceUtils::reserved_bytes();
assert(reserved > 0, "assert");
- size_t committed = MetaspaceAux::committed_bytes();
+ size_t committed = MetaspaceUtils::committed_bytes();
assert(committed <= reserved, "assert");
- size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+ size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
assert(reserved_metadata > 0, "assert");
assert(reserved_metadata <= reserved, "assert");
if (UseCompressedClassPointers) {
- size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+ size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
assert(reserved_class > 0, "assert");
assert(reserved_class < reserved, "assert");
}
}
static void test_committed() {
- size_t committed = MetaspaceAux::committed_bytes();
+ size_t committed = MetaspaceUtils::committed_bytes();
assert(committed > 0, "assert");
- size_t reserved = MetaspaceAux::reserved_bytes();
+ size_t reserved = MetaspaceUtils::reserved_bytes();
assert(committed <= reserved, "assert");
- size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
+ size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
assert(committed_metadata > 0, "assert");
assert(committed_metadata <= committed, "assert");
if (UseCompressedClassPointers) {
- size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
assert(committed_class > 0, "assert");
assert(committed_class < committed, "assert");
}
@@ -4160,8 +5078,8 @@
}
};
-void TestMetaspaceAux_test() {
- TestMetaspaceAuxTest::test();
+void TestMetaspaceUtils_test() {
+ TestMetaspaceUtilsTest::test();
}
class TestVirtualSpaceNodeTest {
@@ -4189,16 +5107,16 @@
STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
{ // No committed memory in VSN
- ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
- VirtualSpaceNode vsn(vsn_test_size_bytes);
+ ChunkManager cm(false);
+ VirtualSpaceNode vsn(false, vsn_test_size_bytes);
vsn.initialize();
vsn.retire(&cm);
assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
}
{ // All of VSN is committed, half is used by chunks
- ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
- VirtualSpaceNode vsn(vsn_test_size_bytes);
+ ChunkManager cm(false);
+ VirtualSpaceNode vsn(false, vsn_test_size_bytes);
vsn.initialize();
vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
vsn.get_chunk_vs(MediumChunk);
@@ -4212,8 +5130,8 @@
// This doesn't work for systems with vm_page_size >= 16K.
if (page_chunks < MediumChunk) {
// 4 pages of VSN is committed, some is used by chunks
- ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
- VirtualSpaceNode vsn(vsn_test_size_bytes);
+ ChunkManager cm(false);
+ VirtualSpaceNode vsn(false, vsn_test_size_bytes);
vsn.initialize();
vsn.expand_by(page_chunks, page_chunks);
@@ -4233,8 +5151,8 @@
}
{ // Half of VSN is committed, a humongous chunk is used
- ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
- VirtualSpaceNode vsn(vsn_test_size_bytes);
+ ChunkManager cm(false);
+ VirtualSpaceNode vsn(false, vsn_test_size_bytes);
vsn.initialize();
vsn.expand_by(MediumChunk * 2, MediumChunk * 2);
vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk
@@ -4265,7 +5183,7 @@
static void test_is_available_positive() {
// Reserve some memory.
- VirtualSpaceNode vsn(os::vm_allocation_granularity());
+ VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
@@ -4283,7 +5201,7 @@
static void test_is_available_negative() {
// Reserve some memory.
- VirtualSpaceNode vsn(os::vm_allocation_granularity());
+ VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
@@ -4298,7 +5216,7 @@
static void test_is_available_overflow() {
// Reserve some memory.
- VirtualSpaceNode vsn(os::vm_allocation_granularity());
+ VirtualSpaceNode vsn(false, os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
@@ -4323,15 +5241,10 @@
}
};
-void TestVirtualSpaceNode_test() {
- TestVirtualSpaceNodeTest::test();
- TestVirtualSpaceNodeTest::test_is_available();
-}
-
// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
void ChunkManager_test_list_index() {
- ChunkManager manager(ClassSpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+ ChunkManager manager(true);
// Test previous bug where a query for a humongous class metachunk,
// incorrectly matched the non-class medium metachunk size.
@@ -4368,266 +5281,6 @@
#ifdef ASSERT
-// ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and
-// returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager
-// content.
-class ChunkManagerReturnTestImpl : public CHeapObj<mtClass> {
-
- VirtualSpaceNode _vsn;
- ChunkManager _cm;
-
- // The expected content of the chunk manager.
- unsigned _chunks_in_chunkmanager;
- size_t _words_in_chunkmanager;
-
- // A fixed size pool of chunks. Chunks may be in the chunk manager (free) or not (in use).
- static const int num_chunks = 256;
- Metachunk* _pool[num_chunks];
-
- // Helper, return a random position into the chunk pool.
- static int get_random_position() {
- return os::random() % num_chunks;
- }
-
- // Asserts that ChunkManager counters match expectations.
- void assert_counters() {
- assert(_vsn.container_count() == num_chunks - _chunks_in_chunkmanager, "vsn counter mismatch.");
- assert(_cm.free_chunks_count() == _chunks_in_chunkmanager, "cm counter mismatch.");
- assert(_cm.free_chunks_total_words() == _words_in_chunkmanager, "cm counter mismatch.");
- }
-
- // Get a random chunk size. Equal chance to get spec/med/small chunk size or
- // a humongous chunk size. The latter itself is random in the range of [med+spec..4*med).
- size_t get_random_chunk_size() {
- const size_t sizes [] = { SpecializedChunk, SmallChunk, MediumChunk };
- const int rand = os::random() % 4;
- if (rand < 3) {
- return sizes[rand];
- } else {
- // Note: this affects the max. size of space (see _vsn initialization in ctor).
- return align_up(MediumChunk + 1 + (os::random() % (MediumChunk * 4)), SpecializedChunk);
- }
- }
-
- // Starting at pool index <start>+1, find the next chunk tagged as either free or in use, depending
- // on <is_free>. Search wraps. Returns its position, or -1 if no matching chunk was found.
- int next_matching_chunk(int start, bool is_free) const {
- assert(start >= 0 && start < num_chunks, "invalid parameter");
- int pos = start;
- do {
- if (++pos == num_chunks) {
- pos = 0;
- }
- if (_pool[pos]->is_tagged_free() == is_free) {
- return pos;
- }
- } while (pos != start);
- return -1;
- }
-
- // A structure to keep information about a chunk list including which
- // chunks are part of this list. This is needed to keep information about a chunk list
- // we will to return to the ChunkManager, because the original list will be destroyed.
- struct AChunkList {
- Metachunk* head;
- Metachunk* all[num_chunks];
- size_t size;
- int num;
- ChunkIndex index;
- };
-
- // Assemble, from the in-use chunks (not in the chunk manager) in the pool,
- // a random chunk list of max. length <list_size> of chunks with the same
- // ChunkIndex (chunk size).
- // Returns false if list cannot be assembled. List is returned in the <out>
- // structure. Returned list may be smaller than <list_size>.
- bool assemble_random_chunklist(AChunkList* out, int list_size) {
- // Choose a random in-use chunk from the pool...
- const int headpos = next_matching_chunk(get_random_position(), false);
- if (headpos == -1) {
- return false;
- }
- Metachunk* const head = _pool[headpos];
- out->all[0] = head;
- assert(head->is_tagged_free() == false, "Chunk state mismatch");
- // ..then go from there, chain it up with up to list_size - 1 number of other
- // in-use chunks of the same index.
- const ChunkIndex index = _cm.list_index(head->word_size());
- int num_added = 1;
- size_t size_added = head->word_size();
- int pos = headpos;
- Metachunk* tail = head;
- do {
- pos = next_matching_chunk(pos, false);
- if (pos != headpos) {
- Metachunk* c = _pool[pos];
- assert(c->is_tagged_free() == false, "Chunk state mismatch");
- if (index == _cm.list_index(c->word_size())) {
- tail->set_next(c);
- c->set_prev(tail);
- tail = c;
- out->all[num_added] = c;
- num_added ++;
- size_added += c->word_size();
- }
- }
- } while (num_added < list_size && pos != headpos);
- out->head = head;
- out->index = index;
- out->size = size_added;
- out->num = num_added;
- return true;
- }
-
- // Take a single random chunk from the ChunkManager.
- bool take_single_random_chunk_from_chunkmanager() {
- assert_counters();
- _cm.locked_verify();
- int pos = next_matching_chunk(get_random_position(), true);
- if (pos == -1) {
- return false;
- }
- Metachunk* c = _pool[pos];
- assert(c->is_tagged_free(), "Chunk state mismatch");
- // Note: instead of using ChunkManager::remove_chunk on this one chunk, we call
- // ChunkManager::free_chunks_get() with this chunk's word size. We really want
- // to exercise ChunkManager::free_chunks_get() because that one gets called for
- // normal chunk allocation.
- Metachunk* c2 = _cm.free_chunks_get(c->word_size());
- assert(c2 != NULL, "Unexpected.");
- assert(!c2->is_tagged_free(), "Chunk state mismatch");
- assert(c2->next() == NULL && c2->prev() == NULL, "Chunk should be outside of a list.");
- _chunks_in_chunkmanager --;
- _words_in_chunkmanager -= c->word_size();
- assert_counters();
- _cm.locked_verify();
- return true;
- }
-
- // Returns a single random chunk to the chunk manager. Returns false if that
- // was not possible (all chunks are already in the chunk manager).
- bool return_single_random_chunk_to_chunkmanager() {
- assert_counters();
- _cm.locked_verify();
- int pos = next_matching_chunk(get_random_position(), false);
- if (pos == -1) {
- return false;
- }
- Metachunk* c = _pool[pos];
- assert(c->is_tagged_free() == false, "wrong chunk information");
- _cm.return_single_chunk(_cm.list_index(c->word_size()), c);
- _chunks_in_chunkmanager ++;
- _words_in_chunkmanager += c->word_size();
- assert(c->is_tagged_free() == true, "wrong chunk information");
- assert_counters();
- _cm.locked_verify();
- return true;
- }
-
- // Return a random chunk list to the chunk manager. Returns the length of the
- // returned list.
- int return_random_chunk_list_to_chunkmanager(int list_size) {
- assert_counters();
- _cm.locked_verify();
- AChunkList aChunkList;
- if (!assemble_random_chunklist(&aChunkList, list_size)) {
- return 0;
- }
- // Before returning chunks are returned, they should be tagged in use.
- for (int i = 0; i < aChunkList.num; i ++) {
- assert(!aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
- }
- _cm.return_chunk_list(aChunkList.index, aChunkList.head);
- _chunks_in_chunkmanager += aChunkList.num;
- _words_in_chunkmanager += aChunkList.size;
- // After all chunks are returned, check that they are now tagged free.
- for (int i = 0; i < aChunkList.num; i ++) {
- assert(aChunkList.all[i]->is_tagged_free(), "chunk state mismatch.");
- }
- assert_counters();
- _cm.locked_verify();
- return aChunkList.num;
- }
-
-public:
-
- ChunkManagerReturnTestImpl()
- : _vsn(align_up(MediumChunk * num_chunks * 5 * sizeof(MetaWord), Metaspace::reserve_alignment()))
- , _cm(SpecializedChunk, SmallChunk, MediumChunk)
- , _chunks_in_chunkmanager(0)
- , _words_in_chunkmanager(0)
- {
- MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
- // Allocate virtual space and allocate random chunks. Keep these chunks in the _pool. These chunks are
- // "in use", because not yet added to any chunk manager.
- _vsn.initialize();
- _vsn.expand_by(_vsn.reserved_words(), _vsn.reserved_words());
- for (int i = 0; i < num_chunks; i ++) {
- const size_t size = get_random_chunk_size();
- _pool[i] = _vsn.get_chunk_vs(size);
- assert(_pool[i] != NULL, "allocation failed");
- }
- assert_counters();
- _cm.locked_verify();
- }
-
- // Test entry point.
- // Return some chunks to the chunk manager (return phase). Take some chunks out (take phase). Repeat.
- // Chunks are choosen randomly. Number of chunks to return or taken are choosen randomly, but affected
- // by the <phase_length_factor> argument: a factor of 0.0 will cause the test to quickly alternate between
- // returning and taking, whereas a factor of 1.0 will take/return all chunks from/to the
- // chunks manager, thereby emptying or filling it completely.
- void do_test(float phase_length_factor) {
- MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
- assert_counters();
- // Execute n operations, and operation being the move of a single chunk to/from the chunk manager.
- const int num_max_ops = num_chunks * 100;
- int num_ops = num_max_ops;
- const int average_phase_length = (int)(phase_length_factor * num_chunks);
- int num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
- bool return_phase = true;
- while (num_ops > 0) {
- int chunks_moved = 0;
- if (return_phase) {
- // Randomly switch between returning a single chunk or a random length chunk list.
- if (os::random() % 2 == 0) {
- if (return_single_random_chunk_to_chunkmanager()) {
- chunks_moved = 1;
- }
- } else {
- const int list_length = MAX2(1, (os::random() % num_ops_until_switch));
- chunks_moved = return_random_chunk_list_to_chunkmanager(list_length);
- }
- } else {
- // Breath out.
- if (take_single_random_chunk_from_chunkmanager()) {
- chunks_moved = 1;
- }
- }
- num_ops -= chunks_moved;
- num_ops_until_switch -= chunks_moved;
- if (chunks_moved == 0 || num_ops_until_switch <= 0) {
- return_phase = !return_phase;
- num_ops_until_switch = MAX2(1, (average_phase_length + os::random() % 8 - 4));
- }
- }
- }
-};
-
-void* setup_chunkmanager_returntests() {
- ChunkManagerReturnTestImpl* p = new ChunkManagerReturnTestImpl();
- return p;
-}
-
-void teardown_chunkmanager_returntests(void* p) {
- delete (ChunkManagerReturnTestImpl*) p;
-}
-
-void run_chunkmanager_returntests(void* p, float phase_length) {
- ChunkManagerReturnTestImpl* test = (ChunkManagerReturnTestImpl*) p;
- test->do_test(phase_length);
-}
-
// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
class SpaceManagerTest : AllStatic {
@@ -4678,3 +5331,39 @@
}
#endif // ASSERT
+
+struct chunkmanager_statistics_t {
+ int num_specialized_chunks;
+ int num_small_chunks;
+ int num_medium_chunks;
+ int num_humongous_chunks;
+};
+
+extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
+ ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
+ ChunkManager::ChunkManagerStatistics stat;
+ chunk_manager->get_statistics(&stat);
+ out->num_specialized_chunks = (int)stat.num_by_type[SpecializedIndex];
+ out->num_small_chunks = (int)stat.num_by_type[SmallIndex];
+ out->num_medium_chunks = (int)stat.num_by_type[MediumIndex];
+ out->num_humongous_chunks = (int)stat.num_humongous_chunks;
+}
+
+struct chunk_geometry_t {
+ size_t specialized_chunk_word_size;
+ size_t small_chunk_word_size;
+ size_t medium_chunk_word_size;
+};
+
+extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
+ if (mdType == Metaspace::NonClassType) {
+ out->specialized_chunk_word_size = SpecializedChunk;
+ out->small_chunk_word_size = SmallChunk;
+ out->medium_chunk_word_size = MediumChunk;
+ } else {
+ out->specialized_chunk_word_size = ClassSpecializedChunk;
+ out->small_chunk_word_size = ClassSmallChunk;
+ out->medium_chunk_word_size = ClassMediumChunk;
+ }
+}
+
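These two extern functions are plain test hooks: they expose ChunkManager counts and chunk geometry to tests without widening the class interfaces. A minimal sketch of a gtest consumer, assuming the struct and extern declaration are mirrored on the test side (the test name and the NonClassType choice are illustrative, not part of this patch):

  #include "memory/metaspace.hpp"
  #include "unittest.hpp"

  // Assumed to be duplicated in the test, since the struct lives in a .cpp.
  struct chunkmanager_statistics_t {
    int num_specialized_chunks;
    int num_small_chunks;
    int num_medium_chunks;
    int num_humongous_chunks;
  };
  extern void test_metaspace_retrieve_chunkmanager_statistics(
      Metaspace::MetadataType mdType, chunkmanager_statistics_t* out);

  TEST_VM(metaspace, chunkmanager_statistics_sane) {
    chunkmanager_statistics_t stat;
    test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &stat);
    ASSERT_GE(stat.num_specialized_chunks, 0);  // counts can be zero, never negative
    ASSERT_GE(stat.num_humongous_chunks, 0);
  }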
--- a/src/hotspot/share/memory/metaspace.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metaspace.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -80,15 +80,11 @@
// allocate() method returns a block for use as a
// quantum of metadata.
-class Metaspace : public CHeapObj<mtClass> {
- friend class VMStructs;
- friend class SpaceManager;
- friend class VM_CollectForMetadataAllocation;
- friend class MetaspaceGC;
- friend class MetaspaceAux;
+// Namespace for important central static functions
+// (auxiliary stuff goes into MetaspaceUtils)
+class Metaspace : public AllStatic {
+
friend class MetaspaceShared;
- friend class CollectedHeap;
- friend class PrintCLDMetaspaceInfoClosure;
public:
enum MetadataType {
@@ -104,15 +100,6 @@
};
private:
- static void verify_global_initialization();
-
- void initialize(Mutex* lock, MetaspaceType type);
-
- // Initialize the first chunk for a Metaspace. Used for
- // special cases such as the boot class loader, reflection
- // class loader and anonymous class loader.
- void initialize_first_chunk(MetaspaceType type, MetadataType mdtype);
- Metachunk* get_initialization_chunk(MetaspaceType type, MetadataType mdtype);
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);
@@ -135,23 +122,6 @@
static size_t _reserve_alignment;
DEBUG_ONLY(static bool _frozen;)
- SpaceManager* _vsm;
- SpaceManager* vsm() const { return _vsm; }
-
- SpaceManager* _class_vsm;
- SpaceManager* class_vsm() const { return _class_vsm; }
- SpaceManager* get_space_manager(MetadataType mdtype) {
- assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
- return mdtype == ClassType ? class_vsm() : vsm();
- }
-
- // Allocate space for metadata of type mdtype. This is space
- // within a Metachunk and is used by
- // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
- MetaWord* allocate(size_t word_size, MetadataType mdtype);
-
- MetaWord* expand_and_allocate(size_t size, MetadataType mdtype);
-
// Virtual Space lists for both classes and other metadata
static VirtualSpaceList* _space_list;
static VirtualSpaceList* _class_space_list;
@@ -176,11 +146,19 @@
return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
}
+ // convenience function
+ static ChunkManager* get_chunk_manager(bool is_class) {
+ return is_class ? chunk_manager_class() : chunk_manager_metadata();
+ }
+
static const MetaspaceTracer* tracer() { return _tracer; }
static void freeze() {
assert(DumpSharedSpaces, "sanity");
DEBUG_ONLY(_frozen = true;)
}
+ static void assert_not_frozen() {
+ assert(!_frozen, "sanity");
+ }
#ifdef _LP64
static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
#endif
@@ -195,17 +173,15 @@
static void initialize_class_space(ReservedSpace rs);
#endif
- size_t class_chunk_size(size_t word_size);
public:
- Metaspace(Mutex* lock, MetaspaceType type);
- ~Metaspace();
-
static void ergo_initialize();
static void global_initialize();
static void post_initialize();
+ static void verify_global_initialization();
+
static size_t first_chunk_word_size() { return _first_chunk_word_size; }
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
@@ -214,16 +190,6 @@
static size_t commit_alignment() { return _commit_alignment; }
static size_t commit_alignment_words() { return _commit_alignment / BytesPerWord; }
- size_t used_words_slow(MetadataType mdtype) const;
- size_t free_words_slow(MetadataType mdtype) const;
- size_t capacity_words_slow(MetadataType mdtype) const;
-
- size_t used_bytes_slow(MetadataType mdtype) const;
- size_t capacity_bytes_slow(MetadataType mdtype) const;
-
- size_t allocated_blocks_bytes() const;
- size_t allocated_chunks_bytes() const;
-
static MetaWord* allocate(ClassLoaderData* loader_data, size_t word_size,
MetaspaceObj::Type type, TRAPS);
void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
@@ -231,8 +197,6 @@
static bool contains(const void* ptr);
static bool contains_non_shared(const void* ptr);
- void dump(outputStream* const out) const;
-
// Free empty virtualspaces
static void purge(MetadataType mdtype);
static void purge();
@@ -242,10 +206,6 @@
static const char* metadata_type_name(Metaspace::MetadataType mdtype);
- void print_on(outputStream* st) const;
- // Debugging support
- void verify();
-
static void print_compressed_class_space(outputStream* st, const char* requested_addr = 0) NOT_LP64({});
// Return TRUE only if UseCompressedClassPointers is True.
@@ -259,7 +219,70 @@
};
-class MetaspaceAux : AllStatic {
+// Manages the metaspace portion belonging to a class loader
+class ClassLoaderMetaspace : public CHeapObj<mtClass> {
+ friend class CollectedHeap; // For expand_and_allocate()
+ friend class Metaspace;
+ friend class MetaspaceUtils;
+ friend class PrintCLDMetaspaceInfoClosure;
+ friend class VM_CollectForMetadataAllocation; // For expand_and_allocate()
+
+ private:
+
+ void initialize(Mutex* lock, Metaspace::MetaspaceType type);
+
+ // Initialize the first chunk for a Metaspace. Used for
+ // special cases such as the boot class loader, reflection
+ // class loader and anonymous class loader.
+ void initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype);
+ Metachunk* get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype);
+
+ SpaceManager* _vsm;
+ SpaceManager* vsm() const { return _vsm; }
+
+ SpaceManager* _class_vsm;
+ SpaceManager* class_vsm() const { return _class_vsm; }
+ SpaceManager* get_space_manager(Metaspace::MetadataType mdtype) {
+ assert(mdtype != Metaspace::MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
+ return mdtype == Metaspace::ClassType ? class_vsm() : vsm();
+ }
+
+ MetaWord* expand_and_allocate(size_t size, Metaspace::MetadataType mdtype);
+
+ size_t class_chunk_size(size_t word_size);
+
+ public:
+
+ ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type);
+ ~ClassLoaderMetaspace();
+
+ // Allocate space for metadata of type mdtype. This is space
+ // within a Metachunk and is used by
+ // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
+ MetaWord* allocate(size_t word_size, Metaspace::MetadataType mdtype);
+
+ size_t used_words_slow(Metaspace::MetadataType mdtype) const;
+ size_t free_words_slow(Metaspace::MetadataType mdtype) const;
+ size_t capacity_words_slow(Metaspace::MetadataType mdtype) const;
+
+ size_t used_bytes_slow(Metaspace::MetadataType mdtype) const;
+ size_t capacity_bytes_slow(Metaspace::MetadataType mdtype) const;
+
+ size_t allocated_blocks_bytes() const;
+ size_t allocated_chunks_bytes() const;
+
+ void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
+
+ void dump(outputStream* const out) const;
+
+ void print_on(outputStream* st) const;
+ // Debugging support
+ void verify();
+
+}; // ClassLoaderMetaspace
+
+
+class MetaspaceUtils : AllStatic {
static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
// These methods iterate over the classloader data graph
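The net effect of the header change above is a clean split: Metaspace becomes an AllStatic namespace for VM-global state and the central allocate(), while everything tied to a single class loader (the two SpaceManagers, deallocate(), the slow statistics) moves to the new ClassLoaderMetaspace. A hedged sketch of the resulting ownership pattern; the field and accessor names on the ClassLoaderData side are assumptions, not part of this patch:

  // Illustrative only: each class loader owns one ClassLoaderMetaspace.
  class ClassLoaderDataSketch {
    Mutex* _metaspace_lock;
    ClassLoaderMetaspace* _metaspace;  // one per loader, created lazily
   public:
    ClassLoaderMetaspace* metaspace_non_null() {
      if (_metaspace == NULL) {
        _metaspace = new ClassLoaderMetaspace(_metaspace_lock,
                                              Metaspace::StandardMetaspaceType);
      }
      return _metaspace;
    }
  };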
--- a/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metaspaceChunkFreeListSummary.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "memory/allocation.hpp"
-class MetaspaceChunkFreeListSummary VALUE_OBJ_CLASS_SPEC {
+class MetaspaceChunkFreeListSummary {
size_t _num_specialized_chunks;
size_t _num_small_chunks;
size_t _num_medium_chunks;
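This is the first of many hunks here that drop VALUE_OBJ_CLASS_SPEC. From memory (hedged): the macro expanded to inheritance from _ValueObj, whose allocation operators aborted at runtime so that value classes stayed off the C heap; its removal leaves plain class declarations. A rough sketch of the retired mechanism:

  // Sketch only; the real definitions lived in memory/allocation.hpp.
  class _ValueObjSketch {
   public:
    void* operator new(size_t size) throw() {
      // The real operator called ShouldNotCallThis(); value objects are
      // stack-allocated or embedded in other objects, never heap-allocated.
      return NULL;
    }
    void operator delete(void* p) { /* ShouldNotCallThis() */ }
  };
  // #define VALUE_OBJ_CLASS_SPEC : public _ValueObjSketch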
--- a/src/hotspot/share/memory/metaspaceCounters.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metaspaceCounters.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -66,15 +66,15 @@
MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
size_t MetaspaceCounters::used() {
- return MetaspaceAux::used_bytes();
+ return MetaspaceUtils::used_bytes();
}
size_t MetaspaceCounters::capacity() {
- return MetaspaceAux::committed_bytes();
+ return MetaspaceUtils::committed_bytes();
}
size_t MetaspaceCounters::max_capacity() {
- return MetaspaceAux::reserved_bytes();
+ return MetaspaceUtils::reserved_bytes();
}
void MetaspaceCounters::initialize_performance_counters() {
@@ -98,15 +98,15 @@
MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
size_t CompressedClassSpaceCounters::used() {
- return MetaspaceAux::used_bytes(Metaspace::ClassType);
+ return MetaspaceUtils::used_bytes(Metaspace::ClassType);
}
size_t CompressedClassSpaceCounters::capacity() {
- return MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ return MetaspaceUtils::committed_bytes(Metaspace::ClassType);
}
size_t CompressedClassSpaceCounters::max_capacity() {
- return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+ return MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
}
void CompressedClassSpaceCounters::update_performance_counters() {
--- a/src/hotspot/share/memory/metaspaceShared.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metaspaceShared.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,7 +38,6 @@
#if INCLUDE_ALL_GCS
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecodeStream.hpp"
@@ -375,9 +374,45 @@
StringTable::serialize(soc);
soc->do_tag(--tag);
+ serialize_well_known_classes(soc);
+ soc->do_tag(--tag);
+
soc->do_tag(666);
}
+void MetaspaceShared::serialize_well_known_classes(SerializeClosure* soc) {
+ java_lang_Class::serialize(soc);
+ java_lang_String::serialize(soc);
+ java_lang_System::serialize(soc);
+ java_lang_ClassLoader::serialize(soc);
+ java_lang_Throwable::serialize(soc);
+ java_lang_Thread::serialize(soc);
+ java_lang_ThreadGroup::serialize(soc);
+ java_lang_AssertionStatusDirectives::serialize(soc);
+ java_lang_ref_SoftReference::serialize(soc);
+ java_lang_invoke_MethodHandle::serialize(soc);
+ java_lang_invoke_DirectMethodHandle::serialize(soc);
+ java_lang_invoke_MemberName::serialize(soc);
+ java_lang_invoke_ResolvedMethodName::serialize(soc);
+ java_lang_invoke_LambdaForm::serialize(soc);
+ java_lang_invoke_MethodType::serialize(soc);
+ java_lang_invoke_CallSite::serialize(soc);
+ java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(soc);
+ java_security_AccessControlContext::serialize(soc);
+ java_lang_reflect_AccessibleObject::serialize(soc);
+ java_lang_reflect_Method::serialize(soc);
+ java_lang_reflect_Constructor::serialize(soc);
+ java_lang_reflect_Field::serialize(soc);
+ java_nio_Buffer::serialize(soc);
+ reflect_ConstantPool::serialize(soc);
+ reflect_UnsafeStaticFieldAccessorImpl::serialize(soc);
+ java_lang_reflect_Parameter::serialize(soc);
+ java_lang_Module::serialize(soc);
+ java_lang_StackTraceElement::serialize(soc);
+ java_lang_StackFrameInfo::serialize(soc);
+ java_lang_LiveStackFrameInfo::serialize(soc);
+}
+
address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
if (DumpSharedSpaces) {
if (_cds_i2i_entry_code_buffers == NULL) {
@@ -414,8 +449,18 @@
class CollectClassesClosure : public KlassClosure {
void do_klass(Klass* k) {
+ if (!UseAppCDS && !k->class_loader_data()->is_the_null_class_loader_data()) {
+ // AppCDS is not enabled. Let's omit non-boot classes.
+ return;
+ }
+
if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
- _global_klass_objects->append_if_missing(k);
+ if (k->is_instance_klass() && InstanceKlass::cast(k)->signers() != NULL) {
+ // Mark any class with signers and don't add it to _global_klass_objects
+ k->set_has_signer_and_not_archived();
+ } else {
+ _global_klass_objects->append_if_missing(k);
+ }
}
if (k->is_array_klass()) {
// Add in the array classes too
@@ -452,6 +497,19 @@
}
}
+static void clear_basic_type_mirrors() {
+ assert(!MetaspaceShared::is_heap_object_archiving_allowed(), "Sanity");
+ Universe::set_int_mirror(NULL);
+ Universe::set_float_mirror(NULL);
+ Universe::set_double_mirror(NULL);
+ Universe::set_byte_mirror(NULL);
+ Universe::set_bool_mirror(NULL);
+ Universe::set_char_mirror(NULL);
+ Universe::set_long_mirror(NULL);
+ Universe::set_short_mirror(NULL);
+ Universe::set_void_mirror(NULL);
+}
+
static void rewrite_nofast_bytecode(Method* method) {
BytecodeStream bcs(method);
while (!bcs.is_last_bytecode()) {
@@ -775,6 +833,17 @@
_dump_region->append_intptr_t((intptr_t)tag);
}
+ void do_oop(oop* o) {
+ if (*o == NULL) {
+ _dump_region->append_intptr_t(0);
+ } else {
+ assert(MetaspaceShared::is_heap_object_archiving_allowed(),
+ "Archiving heap object is not allowed");
+ _dump_region->append_intptr_t(
+ (intptr_t)oopDesc::encode_heap_oop_not_null(*o));
+ }
+ }
+
void do_region(u_char* start, size_t size) {
assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
assert(size % sizeof(intptr_t) == 0, "bad size");
@@ -935,7 +1004,7 @@
class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
- GrowableArray<MemRegion> *_string_regions;
+ GrowableArray<MemRegion> *_closed_archive_heap_regions;
GrowableArray<MemRegion> *_open_archive_heap_regions;
void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
@@ -949,6 +1018,7 @@
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
void doit(); // outline because gdb sucks
static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
+ bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
class SortedSymbolClosure: public SymbolClosure {
@@ -1193,6 +1263,7 @@
}
static Klass* get_relocated_klass(Klass* orig_klass) {
+ assert(DumpSharedSpaces, "dump time only");
address* pp = _new_loc_table->get((address)orig_klass);
assert(pp != NULL, "must be");
Klass* klass = (Klass*)(*pp);
@@ -1222,7 +1293,11 @@
// Reorder the system dictionary. Moving the symbols affects
// how the hash table indices are calculated.
SystemDictionary::reorder_dictionary_for_sharing();
+
tty->print("Removing java_mirror ... ");
+ if (!MetaspaceShared::is_heap_object_archiving_allowed()) {
+ clear_basic_type_mirrors();
+ }
remove_java_mirror_in_classes();
tty->print_cr("done. ");
NOT_PRODUCT(SystemDictionary::verify();)
@@ -1312,7 +1387,7 @@
dump_symbols();
// Dump supported java heap objects
- _string_regions = NULL;
+ _closed_archive_heap_regions = NULL;
_open_archive_heap_regions = NULL;
dump_java_heap_objects();
@@ -1375,7 +1450,7 @@
write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
_total_string_region_size = mapinfo->write_archive_heap_regions(
- _string_regions,
+ _closed_archive_heap_regions,
MetaspaceShared::first_string,
MetaspaceShared::max_strings);
_total_open_archive_region_size = mapinfo->write_archive_heap_regions(
@@ -1424,7 +1499,7 @@
_ro_region.print(total_reserved);
_md_region.print(total_reserved);
_od_region.print(total_reserved);
- print_heap_region_stats(_string_regions, "st", total_reserved);
+ print_heap_region_stats(_closed_archive_heap_regions, "st", total_reserved);
print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
@@ -1452,6 +1527,11 @@
o->set_klass(k);
}
+Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
+ assert(DumpSharedSpaces, "sanity");
+ return ArchiveCompactor::get_relocated_klass(k);
+}
+
class LinkSharedClassesClosure : public KlassClosure {
Thread* THREAD;
bool _made_progress;
@@ -1693,11 +1773,11 @@
// Cache for recording where the archived objects are copied to
MetaspaceShared::create_archive_object_cache();
- tty->print_cr("Dumping String objects to closed archive heap region ...");
+ tty->print_cr("Dumping objects to closed archive heap region ...");
NOT_PRODUCT(StringTable::verify());
- // The string space has a maximum of two regions. See FileMapInfo::write_archive_heap_regions() for details.
- _string_regions = new GrowableArray<MemRegion>(2);
- StringTable::write_to_archive(_string_regions);
+ // The closed space has a maximum of two regions. See FileMapInfo::write_archive_heap_regions() for details.
+ _closed_archive_heap_regions = new GrowableArray<MemRegion>(2);
+ MetaspaceShared::dump_closed_archive_heap_objects(_closed_archive_heap_regions);
tty->print_cr("Dumping objects to open archive heap region ...");
_open_archive_heap_regions = new GrowableArray<MemRegion>(2);
@@ -1709,6 +1789,20 @@
G1HeapVerifier::verify_archive_regions();
}
+void MetaspaceShared::dump_closed_archive_heap_objects(
+ GrowableArray<MemRegion> * closed_archive) {
+ assert(is_heap_object_archiving_allowed(), "Cannot dump java heap objects");
+
+ Thread* THREAD = Thread::current();
+ G1CollectedHeap::heap()->begin_archive_alloc_range();
+
+ // Archive interned string objects
+ StringTable::write_to_archive(closed_archive);
+
+ G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
+ os::vm_allocation_granularity());
+}
+
void MetaspaceShared::dump_open_archive_heap_objects(
GrowableArray<MemRegion> * open_archive) {
assert(UseG1GC, "Only support G1 GC");
@@ -1718,21 +1812,33 @@
Thread* THREAD = Thread::current();
G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
- MetaspaceShared::archive_resolved_constants(THREAD);
+ java_lang_Class::archive_basic_type_mirrors(THREAD);
+
+ MetaspaceShared::archive_klass_objects(THREAD);
G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
os::vm_allocation_granularity());
}
MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
+oop MetaspaceShared::find_archived_heap_object(oop obj) {
+ assert(DumpSharedSpaces, "dump-time only");
+ ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
+ oop* p = cache->get(obj);
+ if (p != NULL) {
+ return *p;
+ } else {
+ return NULL;
+ }
+}
+
oop MetaspaceShared::archive_heap_object(oop obj, Thread* THREAD) {
assert(DumpSharedSpaces, "dump-time only");
- ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
- oop* p = cache->get(obj);
- if (p != NULL) {
+ oop ao = find_archived_heap_object(obj);
+ if (ao != NULL) {
// already archived
- return *p;
+ return ao;
}
int len = obj->size();
@@ -1745,15 +1851,23 @@
if (archived_oop != NULL) {
Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
relocate_klass_ptr(archived_oop);
+ ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache();
cache->put(obj, archived_oop);
}
+ log_debug(cds)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
+ p2i(obj), p2i(archived_oop));
return archived_oop;
}
-void MetaspaceShared::archive_resolved_constants(Thread* THREAD) {
+void MetaspaceShared::archive_klass_objects(Thread* THREAD) {
int i;
for (i = 0; i < _global_klass_objects->length(); i++) {
Klass* k = _global_klass_objects->at(i);
+
+ // archive mirror object
+ java_lang_Class::archive_mirror(k, CHECK);
+
+ // archive the resolved_references array
if (k->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(k);
ik->constants()->archive_resolved_references(THREAD);
@@ -1802,6 +1916,19 @@
FileMapInfo::assert_mark(tag == old_tag);
}
+ void do_oop(oop *p) {
+ narrowOop o = (narrowOop)nextPtr();
+ if (o == 0 || !MetaspaceShared::open_archive_heap_region_mapped()) {
+ *p = NULL;
+ } else {
+ assert(MetaspaceShared::is_heap_object_archiving_allowed(),
+ "Archived heap object is not allowed");
+ assert(MetaspaceShared::open_archive_heap_region_mapped(),
+ "Open archive heap region is not mapped");
+ RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, oopDesc::decode_heap_oop_not_null(o));
+ }
+ }
+
void do_region(u_char* start, size_t size) {
assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
assert(size % sizeof(intptr_t) == 0, "bad size");
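The refactoring above separates cache lookup (find_archived_heap_object) from archiving (archive_heap_object), so dump-time code can probe for an existing copy without forcing one. A sketch of the intended call pattern; the wrapper function itself is hypothetical:

  oop archive_or_reuse(oop obj, Thread* THREAD) {
    oop archived = MetaspaceShared::find_archived_heap_object(obj);
    if (archived == NULL) {
      // Copies obj into the archive allocation range, relocates its klass
      // pointer, and records the obj -> copy mapping in ArchivedObjectCache.
      archived = MetaspaceShared::archive_heap_object(obj, THREAD);
    }
    return archived;  // may still be NULL if archive allocation failed
  }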
--- a/src/hotspot/share/memory/metaspaceShared.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/metaspaceShared.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,7 +38,7 @@
class FileMapInfo;
-class MetaspaceSharedStats VALUE_OBJ_CLASS_SPEC {
+class MetaspaceSharedStats {
public:
MetaspaceSharedStats() {
memset(this, 0, sizeof(*this));
@@ -113,8 +113,9 @@
static ArchivedObjectCache* archive_object_cache() {
return _archive_object_cache;
}
+ static oop find_archived_heap_object(oop obj);
static oop archive_heap_object(oop obj, Thread* THREAD);
- static void archive_resolved_constants(Thread* THREAD);
+ static void archive_klass_objects(Thread* THREAD);
#endif
static bool is_heap_object_archiving_allowed() {
CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedOops && UseCompressedClassPointers);)
@@ -128,6 +129,8 @@
}
static void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
+ static void dump_closed_archive_heap_objects(GrowableArray<MemRegion> * closed_archive) NOT_CDS_JAVA_HEAP_RETURN;
+
static void dump_open_archive_heap_objects(GrowableArray<MemRegion> * open_archive) NOT_CDS_JAVA_HEAP_RETURN;
static void set_open_archive_heap_region_mapped() {
CDS_JAVA_HEAP_ONLY(_open_archive_heap_region_mapped = true);
@@ -199,7 +202,8 @@
static void zero_cpp_vtable_clones_for_writing();
static void patch_cpp_vtable_pointers();
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
- static void serialize(SerializeClosure* sc);
+ static void serialize(SerializeClosure* sc) NOT_CDS_RETURN;
+ static void serialize_well_known_classes(SerializeClosure* soc) NOT_CDS_RETURN;
static MetaspaceSharedStats* stats() {
return &_stats;
@@ -248,5 +252,7 @@
return _cds_i2i_entry_code_buffers_size;
}
static void relocate_klass_ptr(oop o);
+
+ static Klass* get_relocated_klass(Klass *k);
};
#endif // SHARE_VM_MEMORY_METASPACESHARED_HPP
--- a/src/hotspot/share/memory/oopFactory.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/oopFactory.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,12 +30,13 @@
#include "gc/shared/collectedHeap.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
+#include "runtime/handles.inline.hpp"
typeArrayOop oopFactory::new_charArray(const char* utf8_str, TRAPS) {
--- a/src/hotspot/share/memory/universe.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/universe.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -49,7 +49,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
@@ -247,7 +247,7 @@
_do_stack_walk_cache->metaspace_pointers_do(it);
}
-// Serialize metadata in and out of CDS archive, not oops.
+// Serialize metadata and pointers to primitive type mirrors in and out of CDS archive
void Universe::serialize(SerializeClosure* f, bool do_all) {
f->do_ptr((void**)&_boolArrayKlassObj);
@@ -271,6 +271,20 @@
}
}
+#if INCLUDE_CDS_JAVA_HEAP
+ // The mirrors are NULL if MetaspaceShared::is_heap_object_archiving_allowed
+ // is false.
+ f->do_oop(&_int_mirror);
+ f->do_oop(&_float_mirror);
+ f->do_oop(&_double_mirror);
+ f->do_oop(&_byte_mirror);
+ f->do_oop(&_bool_mirror);
+ f->do_oop(&_char_mirror);
+ f->do_oop(&_long_mirror);
+ f->do_oop(&_short_mirror);
+ f->do_oop(&_void_mirror);
+#endif
+
f->do_ptr((void**)&_the_array_interfaces_array);
f->do_ptr((void**)&_the_empty_int_array);
f->do_ptr((void**)&_the_empty_short_array);
@@ -446,32 +460,41 @@
assert(i == _fullgc_alot_dummy_array->length(), "just checking");
}
#endif
-
- // Initialize dependency array for null class loader
- ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
-
}
void Universe::initialize_basic_type_mirrors(TRAPS) {
- assert(_int_mirror==NULL, "basic type mirrors already initialized");
- _int_mirror =
- java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
- _float_mirror =
- java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
- _double_mirror =
- java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
- _byte_mirror =
- java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
- _bool_mirror =
- java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
- _char_mirror =
- java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
- _long_mirror =
- java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
- _short_mirror =
- java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
- _void_mirror =
- java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);
+#if INCLUDE_CDS_JAVA_HEAP
+ if (UseSharedSpaces &&
+ MetaspaceShared::open_archive_heap_region_mapped() &&
+ _int_mirror != NULL) {
+ assert(MetaspaceShared::is_heap_object_archiving_allowed(), "Sanity");
+ assert(_float_mirror != NULL && _double_mirror != NULL &&
+ _byte_mirror != NULL &&
+ _bool_mirror != NULL && _char_mirror != NULL &&
+ _long_mirror != NULL && _short_mirror != NULL &&
+ _void_mirror != NULL, "Sanity");
+ } else
+#endif
+ {
+ _int_mirror =
+ java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
+ _float_mirror =
+ java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
+ _double_mirror =
+ java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
+ _byte_mirror =
+ java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
+ _bool_mirror =
+ java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
+ _char_mirror =
+ java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
+ _long_mirror =
+ java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
+ _short_mirror =
+ java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
+ _void_mirror =
+ java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);
+ }
_mirrors[T_INT] = _int_mirror;
_mirrors[T_FLOAT] = _float_mirror;
@@ -1139,7 +1162,7 @@
} else if (strcmp(token, "classloader_data_graph") == 0) {
verify_flags |= Verify_ClassLoaderDataGraph;
} else if (strcmp(token, "metaspace") == 0) {
- verify_flags |= Verify_MetaspaceAux;
+ verify_flags |= Verify_MetaspaceUtils;
} else if (strcmp(token, "jni_handles") == 0) {
verify_flags |= Verify_JNIHandles;
} else if (strcmp(token, "codecache_oops") == 0) {
@@ -1211,9 +1234,9 @@
ClassLoaderDataGraph::verify();
}
#endif
- if (should_verify_subset(Verify_MetaspaceAux)) {
- log_debug(gc, verify)("MetaspaceAux");
- MetaspaceAux::verify_free_chunks();
+ if (should_verify_subset(Verify_MetaspaceUtils)) {
+ log_debug(gc, verify)("MetaspaceUtils");
+ MetaspaceUtils::verify_free_chunks();
}
if (should_verify_subset(Verify_JNIHandles)) {
log_debug(gc, verify)("JNIHandles");
--- a/src/hotspot/share/memory/universe.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/universe.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -294,6 +294,16 @@
static oop short_mirror() { return check_mirror(_short_mirror); }
static oop void_mirror() { return check_mirror(_void_mirror); }
+ static void set_int_mirror(oop m) { _int_mirror = m; }
+ static void set_float_mirror(oop m) { _float_mirror = m; }
+ static void set_double_mirror(oop m) { _double_mirror = m; }
+ static void set_byte_mirror(oop m) { _byte_mirror = m; }
+ static void set_bool_mirror(oop m) { _bool_mirror = m; }
+ static void set_char_mirror(oop m) { _char_mirror = m; }
+ static void set_long_mirror(oop m) { _long_mirror = m; }
+ static void set_short_mirror(oop m) { _short_mirror = m; }
+ static void set_void_mirror(oop m) { _void_mirror = m; }
+
// table of same
static oop _mirrors[T_VOID+1];
@@ -452,8 +462,6 @@
static bool is_module_initialized() { return _module_initialized; }
static bool is_fully_initialized() { return _fully_initialized; }
- static inline bool element_type_should_be_aligned(BasicType type);
- static inline bool field_type_should_be_aligned(BasicType type);
static bool on_page_boundary(void* addr);
static bool should_fill_in_stack_trace(Handle throwable);
static void check_alignment(uintx size, uintx alignment, const char* name);
@@ -481,7 +489,7 @@
Verify_CodeCache = 16,
Verify_SystemDictionary = 32,
Verify_ClassLoaderDataGraph = 64,
- Verify_MetaspaceAux = 128,
+ Verify_MetaspaceUtils = 128,
Verify_JNIHandles = 256,
Verify_CodeCacheOops = 512,
Verify_All = -1
--- a/src/hotspot/share/memory/universe.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
-#define SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
-
-#include "memory/universe.hpp"
-
-// Check whether an element of a typeArrayOop with the given type must be
-// aligned 0 mod 8. The typeArrayOop itself must be aligned at least this
-// strongly.
-
-inline bool Universe::element_type_should_be_aligned(BasicType type) {
- return type == T_DOUBLE || type == T_LONG;
-}
-
-// Check whether an object field (static/non-static) of the given type must be aligned 0 mod 8.
-
-inline bool Universe::field_type_should_be_aligned(BasicType type) {
- return type == T_DOUBLE || type == T_LONG;
-}
-
-#endif // SHARE_VM_MEMORY_UNIVERSE_INLINE_HPP
--- a/src/hotspot/share/memory/virtualspace.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/memory/virtualspace.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
// ReservedSpace is a data structure for reserving a contiguous address range.
-class ReservedSpace VALUE_OBJ_CLASS_SPEC {
+class ReservedSpace {
friend class VMStructs;
protected:
char* _base;
@@ -133,7 +133,7 @@
// VirtualSpace is data structure for committing a previously reserved address range in smaller chunks.
-class VirtualSpace VALUE_OBJ_CLASS_SPEC {
+class VirtualSpace {
friend class VMStructs;
private:
// Reserved area
--- a/src/hotspot/share/metaprogramming/integralConstant.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/metaprogramming/integralConstant.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
// T is an integral type, and is the value_type.
// v is an integral constant, and is the value.
template<typename T, T v>
-struct IntegralConstant VALUE_OBJ_CLASS_SPEC {
+struct IntegralConstant {
typedef T value_type;
static const value_type value = v;
typedef IntegralConstant<T, v> type;
--- a/src/hotspot/share/metaprogramming/primitiveConversions.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/metaprogramming/primitiveConversions.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -104,7 +104,7 @@
// Give an informative error if the sizes differ.
template<typename T, typename U>
-struct PrimitiveConversions::Cast<T, U, false> VALUE_OBJ_CLASS_SPEC {
+struct PrimitiveConversions::Cast<T, U, false> {
STATIC_ASSERT(sizeof(T) == sizeof(U));
};
@@ -113,7 +113,6 @@
struct PrimitiveConversions::Cast<
T, U, true,
typename EnableIf<IsIntegral<T>::value && IsIntegral<U>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(U x) const { return cast_using_union<T>(x); }
};
@@ -125,7 +124,6 @@
typename EnableIf<IsIntegral<T>::value &&
(IsRegisteredEnum<U>::value ||
IsFloatingPoint<U>::value)>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(U x) const { return cast_using_union<T>(x); }
};
@@ -137,7 +135,6 @@
typename EnableIf<IsIntegral<U>::value &&
(IsRegisteredEnum<T>::value ||
IsFloatingPoint<T>::value)>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(U x) const { return cast_using_union<T>(x); }
};
@@ -147,7 +144,6 @@
struct PrimitiveConversions::Cast<
T, U*, true,
typename EnableIf<IsIntegral<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(U* x) const { return reinterpret_cast<T>(x); }
};
@@ -157,7 +153,6 @@
struct PrimitiveConversions::Cast<
T*, U, true,
typename EnableIf<IsIntegral<U>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T* operator()(U x) const { return reinterpret_cast<T*>(x); }
};
--- a/src/hotspot/share/oops/access.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/access.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -402,10 +402,10 @@
}
template <typename T>
- static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
+ static inline void arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP |
AS_DECORATOR_MASK>();
- return AccessInternal::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
+ AccessInternal::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
}
// Oop heap accesses
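After this change the public arraycopy entry point consistently carries both array oops (so GC barriers can see the containing objects) and returns void. An illustrative typed call through the API; the decorator choice is an example, not mandated by the patch:

  // Example only: copy disjoint jint elements with heap barriers applied.
  void copy_int_elements(arrayOop src_obj, arrayOop dst_obj,
                         jint* src, jint* dst, size_t length) {
    HeapAccess<ARRAYCOPY_DISJOINT>::arraycopy(src_obj, dst_obj, src, dst, length);
  }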
--- a/src/hotspot/share/oops/access.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/access.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
-#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/barrierSetConfig.inline.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isFloatingPoint.hpp"
#include "metaprogramming/isIntegral.hpp"
@@ -139,7 +139,8 @@
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ARRAYCOPY, decorators>: public AllStatic {
template <typename T>
static bool access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
- return GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
+ GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
+ return true;
}
template <typename T>
@@ -763,7 +764,7 @@
HasDecorator<decorators, AS_RAW>::value, bool>::type
arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
- return Raw::arraycopy(src, dst, length);
+ return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
}
template <DecoratorSet decorators, typename T>
@@ -1077,7 +1078,9 @@
template <DecoratorSet decorators, typename T>
inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
- verify_types<decorators, T>();
+ STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
+ (IsSame<T, void>::value || IsIntegral<T>::value) ||
+ IsFloatingPoint<T>::value)); // arraycopy allows type-erased void elements
typedef typename Decay<T>::type DecayedT;
const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP |
(HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
--- a/src/hotspot/share/oops/accessBackend.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/accessBackend.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -153,6 +153,13 @@
}
template<>
+ void arraycopy_conjoint<void>(void* src, void* dst, size_t length) {
+ Copy::conjoint_jbytes(reinterpret_cast<jbyte*>(src),
+ reinterpret_cast<jbyte*>(dst),
+ length);
+ }
+
+ template<>
void arraycopy_conjoint_atomic<jbyte>(jbyte* src, jbyte* dst, size_t length) {
Copy::conjoint_jbytes_atomic(src, dst, length);
}
@@ -171,4 +178,9 @@
void arraycopy_conjoint_atomic<jlong>(jlong* src, jlong* dst, size_t length) {
Copy::conjoint_jlongs_atomic(src, dst, length);
}
+
+ template<>
+ void arraycopy_conjoint_atomic<void>(void* src, void* dst, size_t length) {
+ Copy::conjoint_memory_atomic(src, dst, length);
+ }
}
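The new <void> specializations let type-erased copies, where the element type is unknown at the call site, flow through the same entry points. A hedged usage sketch; treating the length as a byte count for void elements is an assumption here:

  // Hypothetical caller holding only erased pointers.
  void copy_erased(arrayOop src_obj, arrayOop dst_obj,
                   void* src, void* dst, size_t length) {
    // With ARRAYCOPY_ATOMIC this reaches Copy::conjoint_memory_atomic via
    // the specialization above; without it, Copy::conjoint_jbytes.
    HeapAccess<ARRAYCOPY_ATOMIC>::arraycopy(src_obj, dst_obj, src, dst, length);
  }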
--- a/src/hotspot/share/oops/accessBackend.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/accessBackend.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -104,6 +104,11 @@
typedef oop (*resolve_func_t)(oop obj);
};
+ template <DecoratorSet decorators>
+ struct AccessFunctionTypes<decorators, void> {
+ typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length);
+ };
+
template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func) \
@@ -130,7 +135,7 @@
template <DecoratorSet decorators, typename T, BarrierType barrier_type>
typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();
- class AccessLocker VALUE_OBJ_CLASS_SPEC {
+ class AccessLocker {
public:
AccessLocker();
~AccessLocker();
@@ -335,7 +340,7 @@
}
template <typename T>
- static bool arraycopy(T* src, T* dst, size_t length);
+ static bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
template <typename T>
static void oop_store(void* addr, T value);
--- a/src/hotspot/share/oops/accessBackend.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -118,7 +118,7 @@
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
- return arraycopy(src, dst, length);
+ return arraycopy(src_obj, dst_obj, src, dst, length);
}
template <DecoratorSet decorators>
@@ -257,7 +257,7 @@
template <DecoratorSet decorators, typename T>
static inline typename EnableIf<
HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
- arraycopy(T* src, T* dst, size_t length) {
+ arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
// We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
AccessInternal::arraycopy_arrayof_conjoint_oops(src, dst, length);
@@ -271,7 +271,7 @@
template <DecoratorSet decorators, typename T>
static inline typename EnableIf<
!HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
- arraycopy(T* src, T* dst, size_t length) {
+ arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
AccessInternal::arraycopy_arrayof_conjoint(src, dst, length);
} else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
@@ -289,12 +289,23 @@
}
}
}
+
+ template <DecoratorSet decorators>
+ static inline typename EnableIf<
+ !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
+ arraycopy(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length) {
+ if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
+ AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
+ } else {
+ AccessInternal::arraycopy_conjoint(src, dst, length);
+ }
+ }
};
template <DecoratorSet decorators>
template <typename T>
-inline bool RawAccessBarrier<decorators>::arraycopy(T* src, T* dst, size_t length) {
- RawAccessBarrierArrayCopy::arraycopy<decorators>(src, dst, length);
+inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+ RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
return true;
}
--- a/src/hotspot/share/oops/array.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/array.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,7 +27,6 @@
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
-#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
// Array for metadata allocation
@@ -122,8 +121,8 @@
T* adr_at(const int i) { assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &_data[i]; }
int find(const T& x) { return index_of(x); }
- T at_acquire(const int which) { return OrderAccess::load_acquire(adr_at(which)); }
- void release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); }
+ T at_acquire(const int which);
+ void release_at_put(int which, T contents);
static int size(int length) {
size_t bytes = align_up(byte_sizeof(length), BytesPerWord);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/array.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_ARRAY_INLINE_HPP
+#define SHARE_VM_OOPS_ARRAY_INLINE_HPP
+
+#include "oops/array.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+template <typename T>
+inline T Array<T>::at_acquire(const int which) { return OrderAccess::load_acquire(adr_at(which)); }
+
+template <typename T>
+inline void Array<T>::release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); }
+
+#endif // SHARE_VM_OOPS_ARRAY_INLINE_HPP
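Hoisting at_acquire/release_at_put into the new array.inline.hpp removes the orderAccess dependency from the widely included array.hpp. The two accessors implement the usual publication pattern; a minimal sketch of matching producer and consumer sides (illustrative helpers, not patch code):

  #include "oops/array.inline.hpp"

  // Producer: fully initialize *k first, then publish with release semantics.
  void publish(Array<Klass*>* arr, int i, Klass* k) {
    arr->release_at_put(i, k);
  }

  // Consumer: the acquire load pairs with the release store above, so all
  // writes to *k that happened before publication are visible here.
  Klass* consume(Array<Klass*>* arr, int i) {
    return arr->at_acquire(i);
  }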
--- a/src/hotspot/share/oops/arrayKlass.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/arrayKlass.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,12 +31,13 @@
#include "jvmtifiles/jvmti.h"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
int ArrayKlass::static_size(int header_size) {
// size of an array klass object
--- a/src/hotspot/share/oops/arrayOop.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/arrayOop.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_OOPS_ARRAYOOP_HPP
#define SHARE_VM_OOPS_ARRAYOOP_HPP
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.hpp"
#include "utilities/align.hpp"
@@ -62,6 +62,13 @@
return (int)hs;
}
+ // Check whether an element of a typeArrayOop with the given type must be
+ // aligned 0 mod 8. The typeArrayOop itself must be aligned at least this
+ // strongly.
+ static bool element_type_should_be_aligned(BasicType type) {
+ return type == T_DOUBLE || type == T_LONG;
+ }
+
public:
// The _length field is not declared in C++. It is allocated after the
// declared nonstatic fields in arrayOopDesc if not compressed, otherwise
@@ -99,7 +106,7 @@
// array object type.
static int header_size(BasicType type) {
size_t typesize_in_bytes = header_size_in_bytes();
- return (int)(Universe::element_type_should_be_aligned(type)
+ return (int)(element_type_should_be_aligned(type)
? align_object_offset(typesize_in_bytes/HeapWordSize)
: typesize_in_bytes/HeapWordSize);
}
--- a/src/hotspot/share/oops/constMethod.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/constMethod.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -88,14 +88,14 @@
// Utility class describing elements in checked exceptions table inlined in Method*.
-class CheckedExceptionElement VALUE_OBJ_CLASS_SPEC {
+class CheckedExceptionElement {
public:
u2 class_cp_index;
};
// Utility class describing elements in local variable table inlined in Method*.
-class LocalVariableTableElement VALUE_OBJ_CLASS_SPEC {
+class LocalVariableTableElement {
public:
u2 start_bci;
u2 length;
@@ -106,7 +106,7 @@
};
// Utility class describing elements in exception table
-class ExceptionTableElement VALUE_OBJ_CLASS_SPEC {
+class ExceptionTableElement {
public:
u2 start_pc;
u2 end_pc;
@@ -115,7 +115,7 @@
};
// Utility class describing elements in method parameters
-class MethodParametersElement VALUE_OBJ_CLASS_SPEC {
+class MethodParametersElement {
public:
u2 name_cp_index;
u2 flags;
--- a/src/hotspot/share/oops/constantPool.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/constantPool.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,7 +38,9 @@
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/constantPool.hpp"
+#include "oops/array.inline.hpp"
+#include "oops/constantPool.inline.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
@@ -51,6 +53,10 @@
#include "runtime/vframe.hpp"
#include "utilities/copy.hpp"
+constantTag ConstantPool::tag_at(int which) const { return (constantTag)tags()->at_acquire(which); }
+
+void ConstantPool::release_tag_at_put(int which, jbyte t) { tags()->release_at_put(which, t); }
+
ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
int size = ConstantPool::size(length);
@@ -493,7 +499,7 @@
// Make this class loader depend upon the class loader owning the class reference
ClassLoaderData* this_key = this_cp->pool_holder()->class_loader_data();
- this_key->record_dependency(k, CHECK_NULL); // Can throw OOM
+ this_key->record_dependency(k);
// logging for class+resolve.
if (log_is_enabled(Debug, class, resolve)){
@@ -2527,6 +2533,17 @@
}
+SymbolHashMap::~SymbolHashMap() {
+ SymbolHashMapEntry* next;
+ for (int i = 0; i < _table_size; i++) {
+ for (SymbolHashMapEntry* cur = bucket(i); cur != NULL; cur = next) {
+ next = cur->next();
+ delete(cur);
+ }
+ }
+ FREE_C_HEAP_ARRAY(SymbolHashMapBucket, _buckets);
+}
+
void SymbolHashMap::add_entry(Symbol* sym, u2 value) {
char *str = sym->as_utf8();
unsigned int hash = compute_hash(str, sym->utf8_length());
--- a/src/hotspot/share/oops/constantPool.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/constantPool.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_OOPS_CONSTANTPOOLOOP_HPP
#define SHARE_VM_OOPS_CONSTANTPOOLOOP_HPP
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
#include "oops/arrayOop.hpp"
#include "oops/cpCache.hpp"
#include "oops/objArrayOop.hpp"
@@ -48,7 +48,7 @@
class SymbolHashMap;
-class CPSlot VALUE_OBJ_CLASS_SPEC {
+class CPSlot {
friend class ConstantPool;
intptr_t _ptr;
enum TagBits {_pseudo_bit = 1};
@@ -67,7 +67,7 @@
// This represents a JVM_CONSTANT_Class, JVM_CONSTANT_UnresolvedClass, or
// JVM_CONSTANT_UnresolvedClassInError slot in the constant pool.
-class CPKlassSlot VALUE_OBJ_CLASS_SPEC {
+class CPKlassSlot {
// cp->symbol_at(_name_index) gives the name of the class.
int _name_index;
@@ -131,7 +131,7 @@
void set_tags(Array<u1>* tags) { _tags = tags; }
void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); }
- void release_tag_at_put(int which, jbyte t) { tags()->release_at_put(which, t); }
+ void release_tag_at_put(int which, jbyte t);
u1* tag_addr_at(int which) const { return tags()->adr_at(which); }
@@ -143,14 +143,7 @@
private:
intptr_t* base() const { return (intptr_t*) (((char*) this) + sizeof(ConstantPool)); }
- CPSlot slot_at(int which) const {
- assert(is_within_bounds(which), "index out of bounds");
- assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
- // Uses volatile because the klass slot changes without a lock.
- intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which));
- assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
- return CPSlot(adr);
- }
+ CPSlot slot_at(int which) const;
void slot_at_put(int which, CPSlot s) const {
assert(is_within_bounds(which), "index out of bounds");
@@ -380,7 +373,7 @@
// Tag query
- constantTag tag_at(int which) const { return (constantTag)tags()->at_acquire(which); }
+ constantTag tag_at(int which) const;
// Fetching constants
@@ -409,16 +402,7 @@
return klass_slot_at(which).name_index();
}
- Klass* resolved_klass_at(int which) const { // Used by Compiler
- guarantee(tag_at(which).is_klass(), "Corrupted constant pool");
- // Must do an acquire here in case another thread resolved the klass
- // behind our back, lest we later load stale values thru the oop.
- CPKlassSlot kslot = klass_slot_at(which);
- assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
-
- Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
- return OrderAccess::load_acquire(adr);
- }
+ Klass* resolved_klass_at(int which) const; // Used by Compiler
// RedefineClasses() API support:
Symbol* klass_at_noresolve(int which) { return klass_name_at(which); }
@@ -475,23 +459,11 @@
// Method oops internally created for method handles may also
// use pseudo-strings to link themselves to related metaobjects.
- bool is_pseudo_string_at(int which) {
- assert(tag_at(which).is_string(), "Corrupted constant pool");
- return slot_at(which).is_pseudo_string();
- }
+ bool is_pseudo_string_at(int which);
- oop pseudo_string_at(int which, int obj_index) {
- assert(is_pseudo_string_at(which), "must be a pseudo-string");
- oop s = resolved_references()->obj_at(obj_index);
- return s;
- }
+ oop pseudo_string_at(int which, int obj_index);
- oop pseudo_string_at(int which) {
- assert(is_pseudo_string_at(which), "must be a pseudo-string");
- int obj_index = cp_to_object_index(which);
- oop s = resolved_references()->obj_at(obj_index);
- return s;
- }
+ oop pseudo_string_at(int which);
void pseudo_string_at_put(int which, int obj_index, oop x) {
assert(tag_at(which).is_string(), "Corrupted constant pool");
@@ -1051,16 +1023,7 @@
return (entry == NULL) ? 0 : entry->value();
}
- ~SymbolHashMap() {
- SymbolHashMapEntry* next;
- for (int i = 0; i < _table_size; i++) {
- for (SymbolHashMapEntry* cur = bucket(i); cur != NULL; cur = next) {
- next = cur->next();
- delete(cur);
- }
- }
- FREE_C_HEAP_ARRAY(SymbolHashMapBucket, _buckets);
- }
+ ~SymbolHashMap();
}; // End SymbolHashMap class
#endif // SHARE_VM_OOPS_CONSTANTPOOLOOP_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/constantPool.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_CONSTANTPOOL_INLINE_HPP
+#define SHARE_VM_OOPS_CONSTANTPOOL_INLINE_HPP
+
+#include "oops/constantPool.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+inline CPSlot ConstantPool::slot_at(int which) const {
+ assert(is_within_bounds(which), "index out of bounds");
+ assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
+ // Uses volatile because the klass slot changes without a lock.
+ intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which));
+ assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
+ return CPSlot(adr);
+}
+
+inline Klass* ConstantPool::resolved_klass_at(int which) const { // Used by Compiler
+ guarantee(tag_at(which).is_klass(), "Corrupted constant pool");
+ // Must do an acquire here in case another thread resolved the klass
+  // behind our back, lest we later load stale values through the oop.
+ CPKlassSlot kslot = klass_slot_at(which);
+ assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
+
+ Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
+ return OrderAccess::load_acquire(adr);
+}
+
+inline bool ConstantPool::is_pseudo_string_at(int which) {
+ assert(tag_at(which).is_string(), "Corrupted constant pool");
+ return slot_at(which).is_pseudo_string();
+}
+
+inline oop ConstantPool::pseudo_string_at(int which, int obj_index) {
+ assert(is_pseudo_string_at(which), "must be a pseudo-string");
+ oop s = resolved_references()->obj_at(obj_index);
+ return s;
+}
+
+inline oop ConstantPool::pseudo_string_at(int which) {
+ assert(is_pseudo_string_at(which), "must be a pseudo-string");
+ int obj_index = cp_to_object_index(which);
+ oop s = resolved_references()->obj_at(obj_index);
+ return s;
+}
+
+#endif // SHARE_VM_OOPS_CONSTANTPOOL_INLINE_HPP
--- a/src/hotspot/share/oops/cpCache.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/cpCache.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,10 @@
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
-#include "oops/cpCache.hpp"
+#include "oops/constantPool.inline.hpp"
+#include "oops/cpCache.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
--- a/src/hotspot/share/oops/cpCache.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/cpCache.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "memory/allocation.hpp"
#include "oops/array.hpp"
#include "oops/oopHandle.hpp"
-#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/constantTag.hpp"
@@ -130,7 +129,7 @@
class CallInfo;
-class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
+class ConstantPoolCacheEntry {
friend class VMStructs;
friend class constantPoolCacheKlass;
friend class ConstantPool;
@@ -328,42 +327,36 @@
}
// Has this bytecode been resolved? Only valid for invokes and get/put field/static.
- bool is_resolved(Bytecodes::Code code) const {
- switch (bytecode_number(code)) {
- case 1: return (bytecode_1() == code);
- case 2: return (bytecode_2() == code);
- }
- return false; // default: not resolved
- }
+ bool is_resolved(Bytecodes::Code code) const;
// Accessors
int indices() const { return _indices; }
- int indices_ord() const { return OrderAccess::load_acquire(&_indices); }
+ int indices_ord() const;
int constant_pool_index() const { return (indices() & cp_index_mask); }
- Bytecodes::Code bytecode_1() const { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
- Bytecodes::Code bytecode_2() const { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
- Metadata* f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); }
- Method* f1_as_method() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
- Klass* f1_as_klass() const { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
+ Bytecodes::Code bytecode_1() const;
+ Bytecodes::Code bytecode_2() const;
+ Metadata* f1_ord() const;
+ Method* f1_as_method() const;
+ Klass* f1_as_klass() const;
// Use the accessor f1() to acquire _f1's value. This is needed for
// example in BytecodeInterpreter::run(), where is_f1_null() is
// called to check if an invokedynamic call is resolved. This load
// of _f1 must be ordered with the loads performed by
// cache->main_entry_index().
- bool is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == NULL; } // classifies a CPC entry as unbound
+ bool is_f1_null() const; // classifies a CPC entry as unbound
int f2_as_index() const { assert(!is_vfinal(), ""); return (int) _f2; }
Method* f2_as_vfinal_method() const { assert(is_vfinal(), ""); return (Method*)_f2; }
- Method* f2_as_interface_method() const { assert(bytecode_1() == Bytecodes::_invokeinterface, ""); return (Method*)_f2; }
- intx flags_ord() const { return (intx)OrderAccess::load_acquire(&_flags); }
+ Method* f2_as_interface_method() const;
+ intx flags_ord() const;
int field_index() const { assert(is_field_entry(), ""); return (_flags & field_index_mask); }
int parameter_size() const { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
bool is_volatile() const { return (_flags & (1 << is_volatile_shift)) != 0; }
bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; }
bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; }
- bool indy_resolution_failed() const { intx flags = flags_ord(); return (flags & (1 << indy_resolution_failed_shift)) != 0; }
- bool has_appendix() const { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0; }
- bool has_method_type() const { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift)) != 0; }
+ bool indy_resolution_failed() const;
+ bool has_appendix() const;
+ bool has_method_type() const;
bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; }
bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; }
bool is_long() const { return flag_state() == ltos; }
@@ -440,16 +433,7 @@
ConstantPoolCache(int length,
const intStack& inverse_index_map,
const intStack& invokedynamic_inverse_index_map,
- const intStack& invokedynamic_references_map) :
- _length(length),
- _constant_pool(NULL) {
- CDS_JAVA_HEAP_ONLY(_archived_references = 0;)
- initialize(inverse_index_map, invokedynamic_inverse_index_map,
- invokedynamic_references_map);
- for (int i = 0; i < length; i++) {
- assert(entry_at(i)->is_f1_null(), "Failed to clear?");
- }
- }
+ const intStack& invokedynamic_references_map);
// Initialization
void initialize(const intArray& inverse_index_map,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/cpCache.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_CPCACHEOOP_INLINE_HPP
+#define SHARE_VM_OOPS_CPCACHEOOP_INLINE_HPP
+
+#include "oops/cpCache.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+inline int ConstantPoolCacheEntry::indices_ord() const { return OrderAccess::load_acquire(&_indices); }
+
+inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_1() const {
+ return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask);
+}
+
+inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_2() const {
+ return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask);
+}
+
+// Has this bytecode been resolved? Only valid for invokes and get/put field/static.
+inline bool ConstantPoolCacheEntry::is_resolved(Bytecodes::Code code) const {
+ switch (bytecode_number(code)) {
+ case 1: return (bytecode_1() == code);
+ case 2: return (bytecode_2() == code);
+ }
+ return false; // default: not resolved
+}
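+// (bytecode_number() maps a resolvable bytecode to slot 1 or 2; any other
+// code takes the default path and reports unresolved.)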
+
+inline Method* ConstantPoolCacheEntry::f2_as_interface_method() const {
+ assert(bytecode_1() == Bytecodes::_invokeinterface, "");
+ return (Method*)_f2;
+}
+
+inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); }
+
+inline Method* ConstantPoolCacheEntry::f1_as_method() const {
+  Metadata* f1 = f1_ord();
+  assert(f1 == NULL || f1->is_method(), "");
+ return (Method*)f1;
+}
+
+inline Klass* ConstantPoolCacheEntry::f1_as_klass() const {
+  Metadata* f1 = f1_ord();
+  assert(f1 == NULL || f1->is_klass(), "");
+ return (Klass*)f1;
+}
+
+inline bool ConstantPoolCacheEntry::is_f1_null() const { Metadata* f1 = f1_ord(); return f1 == NULL; }
+
+inline bool ConstantPoolCacheEntry::has_appendix() const {
+ return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0;
+}
+
+inline bool ConstantPoolCacheEntry::has_method_type() const {
+ return (!is_f1_null()) && (_flags & (1 << has_method_type_shift)) != 0;
+}
+
+inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)OrderAccess::load_acquire(&_flags); }
+
+inline bool ConstantPoolCacheEntry::indy_resolution_failed() const {
+ intx flags = flags_ord();
+ return (flags & (1 << indy_resolution_failed_shift)) != 0;
+}
+
+// Constructor
+inline ConstantPoolCache::ConstantPoolCache(int length,
+ const intStack& inverse_index_map,
+ const intStack& invokedynamic_inverse_index_map,
+ const intStack& invokedynamic_references_map) :
+ _length(length),
+ _constant_pool(NULL) {
+ CDS_JAVA_HEAP_ONLY(_archived_references = 0;)
+ initialize(inverse_index_map, invokedynamic_inverse_index_map,
+ invokedynamic_references_map);
+ for (int i = 0; i < length; i++) {
+ assert(entry_at(i)->is_f1_null(), "Failed to clear?");
+ }
+}
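+// (The is_f1_null() check above assumes the entry array was zero-initialized
+// by the metaspace allocation backing this cache.)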
+
+#endif // SHARE_VM_OOPS_CPCACHEOOP_INLINE_HPP
--- a/src/hotspot/share/oops/fieldInfo.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/fieldInfo.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
// array type. FieldInfo generally shouldn't be used directly.
// Fields should be queried either through InstanceKlass or through
// the various FieldStreams.
-class FieldInfo VALUE_OBJ_CLASS_SPEC {
+class FieldInfo {
friend class fieldDescriptor;
friend class JavaFieldStream;
friend class ClassFileParser;
--- a/src/hotspot/share/oops/generateOopMap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/generateOopMap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "memory/allocation.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/signature.hpp"
@@ -71,7 +71,7 @@
};
-class RetTable VALUE_OBJ_CLASS_SPEC {
+class RetTable {
private:
RetTableEntry *_first;
static int _init_nof_entries;
@@ -87,7 +87,7 @@
//
// CellTypeState
//
-class CellTypeState VALUE_OBJ_CLASS_SPEC {
+class CellTypeState {
private:
unsigned int _state;
@@ -288,7 +288,7 @@
//
// Main class used to compute the pointer-maps in a Method
//
-class GenerateOopMap VALUE_OBJ_CLASS_SPEC {
+class GenerateOopMap {
protected:
// _monitor_top is set to this constant to indicate that a monitor matching
--- a/src/hotspot/share/oops/instanceKlass.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/instanceKlass.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -44,6 +44,7 @@
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/heapInspection.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
@@ -184,13 +185,6 @@
return NULL;
}
- assert(ik != NULL, "invariant");
-
- const bool publicize = !parser.is_internal();
-
- // Add all classes to our internal class loader list here,
- // including classes in the bootstrap (NULL) class loader.
- loader_data->add_class(ik, publicize);
return ik;
}
@@ -1020,6 +1014,10 @@
return i;
}
+instanceHandle InstanceKlass::allocate_instance_handle(TRAPS) {
+ return instanceHandle(THREAD, allocate_instance(THREAD));
+}
+
void InstanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
if (is_interface() || is_abstract()) {
ResourceMark rm(THREAD);
--- a/src/hotspot/share/oops/instanceKlass.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/instanceKlass.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -87,9 +87,8 @@
};
#endif // !PRODUCT
-// ValueObjs embedded in klass. Describes where oops are located in instances of
-// this klass.
-class OopMapBlock VALUE_OBJ_CLASS_SPEC {
+// Describes where oops are located in instances of this klass.
+class OopMapBlock {
public:
// Byte offset of the first oop mapped by this block.
int offset() const { return _offset; }
@@ -912,7 +911,7 @@
instanceOop allocate_instance(TRAPS);
// additional member function to return a handle
- instanceHandle allocate_instance_handle(TRAPS) { return instanceHandle(THREAD, allocate_instance(THREAD)); }
+ instanceHandle allocate_instance_handle(TRAPS);
objArrayOop allocate_objArray(int n, int length, TRAPS);
// Helper function
--- a/src/hotspot/share/oops/klass.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/klass.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,6 +39,7 @@
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/macros.hpp"
@@ -183,7 +184,8 @@
Klass::Klass() : _prototype_header(markOopDesc::prototype()),
_shared_class_path_index(-1),
_java_mirror(NULL) {
-
+ CDS_ONLY(_shared_class_flags = 0;)
+ CDS_JAVA_HEAP_ONLY(_archived_mirror = 0;)
_primary_supers[0] = this;
set_super_check_offset(in_bytes(primary_supers_offset()));
}
@@ -519,29 +521,71 @@
loader_data->add_class(this);
}
- // Recreate the class mirror.
+ Handle loader(THREAD, loader_data->class_loader());
+ ModuleEntry* module_entry = NULL;
+ Klass* k = this;
+ if (k->is_objArray_klass()) {
+ k = ObjArrayKlass::cast(k)->bottom_klass();
+ }
+ // Obtain klass' module.
+ if (k->is_instance_klass()) {
+ InstanceKlass* ik = (InstanceKlass*) k;
+ module_entry = ik->module();
+ } else {
+ module_entry = ModuleEntryTable::javabase_moduleEntry();
+ }
+ // Obtain java.lang.Module, if available
+ Handle module_handle(THREAD, ((module_entry != NULL) ? module_entry->module() : (oop)NULL));
+
+ if (this->has_raw_archived_mirror()) {
+ log_debug(cds, mirror)("%s has raw archived mirror", external_name());
+ if (MetaspaceShared::open_archive_heap_region_mapped()) {
+ oop m = archived_java_mirror();
+ log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
+ if (m != NULL) {
+ // mirror is archived, restore
+ assert(oopDesc::is_archive_object(m), "must be archived mirror object");
+ Handle m_h(THREAD, m);
+ java_lang_Class::restore_archived_mirror(this, m_h, loader, module_handle, protection_domain, CHECK);
+ return;
+ }
+ }
+
+ // No archived mirror data
+ _java_mirror = NULL;
+ this->clear_has_raw_archived_mirror();
+ }
+
// Only recreate it if not present. A previous attempt to restore may have
// gotten an OOM later but keep the mirror if it was created.
if (java_mirror() == NULL) {
- Handle loader(THREAD, loader_data->class_loader());
- ModuleEntry* module_entry = NULL;
- Klass* k = this;
- if (k->is_objArray_klass()) {
- k = ObjArrayKlass::cast(k)->bottom_klass();
- }
- // Obtain klass' module.
- if (k->is_instance_klass()) {
- InstanceKlass* ik = (InstanceKlass*) k;
- module_entry = ik->module();
- } else {
- module_entry = ModuleEntryTable::javabase_moduleEntry();
- }
- // Obtain java.lang.Module, if available
- Handle module_handle(THREAD, ((module_entry != NULL) ? module_entry->module() : (oop)NULL));
+ log_trace(cds, mirror)("Recreate mirror for %s", external_name());
java_lang_Class::create_mirror(this, loader, module_handle, protection_domain, CHECK);
}
}
+#if INCLUDE_CDS_JAVA_HEAP
+// Used at CDS dump time to access the archived mirror. No GC barrier.
+oop Klass::archived_java_mirror_raw() {
+  assert(DumpSharedSpaces, "called only during dump time");
+ assert(has_raw_archived_mirror(), "must have raw archived mirror");
+ return oopDesc::decode_heap_oop(_archived_mirror);
+}
+
+// Used at CDS runtime to get the archived mirror from shared class. Uses GC barrier.
+oop Klass::archived_java_mirror() {
+ assert(UseSharedSpaces, "UseSharedSpaces expected.");
+ assert(has_raw_archived_mirror(), "must have raw archived mirror");
+ return RootAccess<IN_ARCHIVE_ROOT>::oop_load(&_archived_mirror);
+}
+
+// No GC barrier
+void Klass::set_archived_java_mirror_raw(oop m) {
+  assert(DumpSharedSpaces, "called only during dump time");
+ _archived_mirror = oopDesc::encode_heap_oop(m);
+}
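+
+// Note: the _raw variants above are the dump-time path, where runtime GC
+// barriers are not applicable; archived_java_mirror() is the runtime path
+// and therefore loads through the Access API barrier.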
+#endif // INCLUDE_CDS_JAVA_HEAP
+
Klass* Klass::array_klass_or_null(int rank) {
EXCEPTION_MARK;
// No exception can be thrown by array_klass_impl when called with or_null == true.
@@ -593,12 +637,17 @@
return name()->as_klass_external_name();
}
-
const char* Klass::signature_name() const {
if (name() == NULL) return "<unknown>";
return name()->as_C_string();
}
+const char* Klass::external_kind() const {
+ if (is_interface()) return "interface";
+ if (is_abstract()) return "abstract class";
+ return "class";
+}
+
// Unless overridden, modifier_flags is 0.
jint Klass::compute_modifier_flags(TRAPS) const {
return 0;
--- a/src/hotspot/share/oops/klass.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/klass.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -156,6 +156,18 @@
// -1.
jshort _shared_class_path_index;
+#if INCLUDE_CDS
+ // Flags of the current shared class.
+ u2 _shared_class_flags;
+ enum {
+ _has_raw_archived_mirror = 1,
+ _has_signer_and_not_archived = 1 << 2
+ };
+#endif
+  // The _archived_mirror is set at CDS dump time to point to the cached mirror
+  // in the open archive heap region, when archiving of java objects is supported.
+ CDS_JAVA_HEAP_ONLY(narrowOop _archived_mirror);
+
friend class SharedClassUtil;
protected:
@@ -229,11 +241,17 @@
oop java_mirror() const;
void set_java_mirror(Handle m);
+ oop archived_java_mirror_raw() NOT_CDS_JAVA_HEAP_RETURN_(NULL); // no GC barrier
+ oop archived_java_mirror() NOT_CDS_JAVA_HEAP_RETURN_(NULL); // accessor with GC barrier
+ void set_archived_java_mirror_raw(oop m) NOT_CDS_JAVA_HEAP_RETURN; // no GC barrier
+
// Temporary mirror switch used by RedefineClasses
// Both mirrors are on the ClassLoaderData::_handles list already so no
// barriers are needed.
void set_java_mirror_handle(OopHandle mirror) { _java_mirror = mirror; }
- OopHandle java_mirror_handle() const { return _java_mirror; }
+ OopHandle java_mirror_handle() const {
+ return _java_mirror;
+ }
// modifier flags
jint modifier_flags() const { return _modifier_flags; }
@@ -267,6 +285,26 @@
_shared_class_path_index = index;
};
+ void set_has_raw_archived_mirror() {
+ CDS_ONLY(_shared_class_flags |= _has_raw_archived_mirror;)
+ }
+ void clear_has_raw_archived_mirror() {
+ CDS_ONLY(_shared_class_flags &= ~_has_raw_archived_mirror;)
+ }
+ bool has_raw_archived_mirror() const {
+ CDS_ONLY(return (_shared_class_flags & _has_raw_archived_mirror) != 0;)
+ NOT_CDS(return false;)
+ }
+#if INCLUDE_CDS
+ void set_has_signer_and_not_archived() {
+ _shared_class_flags |= _has_signer_and_not_archived;
+ }
+ bool has_signer_and_not_archived() const {
+ assert(DumpSharedSpaces, "dump time only");
+ return (_shared_class_flags & _has_signer_and_not_archived) != 0;
+ }
+#endif // INCLUDE_CDS
+
// Obtain the module or package for this class
virtual ModuleEntry* module() const = 0;
virtual PackageEntry* package() const = 0;
@@ -508,6 +546,9 @@
const char* class_loader_and_module_name() const;
+ // Returns "interface", "abstract class" or "class".
+ const char* external_kind() const;
+
// type testing operations
#ifdef ASSERT
protected:
--- a/src/hotspot/share/oops/klassVtable.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/klassVtable.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -31,7 +31,7 @@
#include "logging/logStream.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/method.hpp"
--- a/src/hotspot/share/oops/klassVtable.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/klassVtable.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
class vtableEntry;
-class klassVtable VALUE_OBJ_CLASS_SPEC {
+class klassVtable {
Klass* _klass; // my klass
int _tableOffset; // offset of start of vtable data within klass
int _length; // length of vtable (number of entries)
@@ -188,7 +188,7 @@
// destination is compiled:
// from_compiled_code_entry_point -> nmethod entry point
// from_interpreter_entry_point -> i2cadapter
-class vtableEntry VALUE_OBJ_CLASS_SPEC {
+class vtableEntry {
friend class VMStructs;
friend class JVMCIVMStructs;
@@ -234,7 +234,7 @@
class klassItable;
class itableMethodEntry;
-class itableOffsetEntry VALUE_OBJ_CLASS_SPEC {
+class itableOffsetEntry {
private:
Klass* _interface;
int _offset;
@@ -257,7 +257,7 @@
};
-class itableMethodEntry VALUE_OBJ_CLASS_SPEC {
+class itableMethodEntry {
private:
Method* _method;
@@ -294,7 +294,7 @@
// -- vtable for interface 2 ---
// ...
//
-class klassItable VALUE_OBJ_CLASS_SPEC {
+class klassItable {
private:
InstanceKlass* _klass; // my klass
int _table_offset; // offset of start of itable data within klass (in words)
--- a/src/hotspot/share/oops/method.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/method.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -35,6 +35,7 @@
#include "interpreter/bytecodes.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
@@ -42,7 +43,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/constMethod.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
@@ -2190,8 +2191,8 @@
}
#ifndef PRODUCT
-void Method::print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) {
- out->print_cr("jni_method_id count = %d", loader_data->jmethod_ids()->count_methods());
+void Method::print_jmethod_ids(const ClassLoaderData* loader_data, outputStream* out) {
+ out->print(" jni_method_id count = %d", loader_data->jmethod_ids()->count_methods());
}
#endif // PRODUCT
--- a/src/hotspot/share/oops/method.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/method.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,9 +136,9 @@
static address make_adapters(const methodHandle& mh, TRAPS);
- address from_compiled_entry() const { return OrderAccess::load_acquire(&_from_compiled_entry); }
+ address from_compiled_entry() const;
address from_compiled_entry_no_trampoline() const;
- address from_interpreted_entry() const{ return OrderAccess::load_acquire(&_from_interpreted_entry); }
+ address from_interpreted_entry() const;
// access flag
AccessFlags access_flags() const { return _access_flags; }
@@ -333,12 +333,7 @@
return _method_data;
}
- void set_method_data(MethodData* data) {
- // The store into method must be released. On platforms without
- // total store order (TSO) the reference may become visible before
- // the initialization of data otherwise.
- OrderAccess::release_store(&_method_data, data);
- }
+ void set_method_data(MethodData* data);
MethodCounters* method_counters() const {
return _method_counters;
@@ -449,7 +444,7 @@
// nmethod/verified compiler entry
address verified_code_entry();
bool check_code() const; // Not inline to avoid circular ref
- CompiledMethod* volatile code() const { assert( check_code(), "" ); return OrderAccess::load_acquire(&_code); }
+ CompiledMethod* volatile code() const;
void clear_code(bool acquire_lock = true); // Clear out any compiled code
static void set_code(const methodHandle& mh, CompiledMethod* code);
void set_adapter_entry(AdapterHandlerEntry* adapter) {
@@ -662,7 +657,7 @@
// compiled code support
// NOTE: code() is inherently racy as deopt can be clearing code
// simultaneously. Use with caution.
- bool has_compiled_code() const { return code() != NULL; }
+ bool has_compiled_code() const;
#ifdef TIERED
bool has_aot_code() const { return aot_code() != NULL; }
@@ -814,7 +809,7 @@
// Clear methods
static void clear_jmethod_ids(ClassLoaderData* loader_data);
- static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
+ static void print_jmethod_ids(const ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
// Get this method's jmethodID -- allocate if it doesn't exist
jmethodID jmethod_id() { return method_holder()->get_jmethod_id(this); }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/method.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_METHOD_INLINE_HPP
+#define SHARE_VM_OOPS_METHOD_INLINE_HPP
+
+#include "oops/method.hpp"
+#include "runtime/orderAccess.hpp"
+
+inline address Method::from_compiled_entry() const {
+ return OrderAccess::load_acquire(&_from_compiled_entry);
+}
+
+inline address Method::from_interpreted_entry() const {
+ return OrderAccess::load_acquire(&_from_interpreted_entry);
+}
+
+inline void Method::set_method_data(MethodData* data) {
+ // The store into method must be released. On platforms without
+ // total store order (TSO) the reference may become visible before
+ // the initialization of data otherwise.
+ OrderAccess::release_store(&_method_data, data);
+}
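+// (The reader, method_data() in method.hpp, uses a plain load and presumably
+// relies on the data dependence through the returned pointer; the release
+// above ensures the MethodData contents are published before the pointer.)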
+
+inline CompiledMethod* volatile Method::code() const {
+  assert(check_code(), "");
+ return OrderAccess::load_acquire(&_code);
+}
+
+inline bool Method::has_compiled_code() const { return code() != NULL; }
+
+#endif // SHARE_VM_OOPS_METHOD_INLINE_HPP
--- a/src/hotspot/share/oops/methodData.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/methodData.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
#include "memory/heapInspection.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/methodData.hpp"
+#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
--- a/src/hotspot/share/oops/methodData.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/methodData.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
#include "memory/universe.hpp"
#include "oops/method.hpp"
#include "oops/oop.hpp"
-#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
@@ -75,7 +74,7 @@
// DataLayout
//
// Overlay for generic profiling data.
-class DataLayout VALUE_OBJ_CLASS_SPEC {
+class DataLayout {
friend class VMStructs;
friend class JVMCIVMStructs;
@@ -201,9 +200,7 @@
void set_cell_at(int index, intptr_t value) {
_cells[index] = value;
}
- void release_set_cell_at(int index, intptr_t value) {
- OrderAccess::release_store(&_cells[index], value);
- }
+ void release_set_cell_at(int index, intptr_t value);
intptr_t cell_at(int index) const {
return _cells[index];
}
@@ -325,10 +322,7 @@
assert(0 <= index && index < cell_count(), "oob");
data()->set_cell_at(index, value);
}
- void release_set_intptr_at(int index, intptr_t value) {
- assert(0 <= index && index < cell_count(), "oob");
- data()->release_set_cell_at(index, value);
- }
+ void release_set_intptr_at(int index, intptr_t value);
intptr_t intptr_at(int index) const {
assert(0 <= index && index < cell_count(), "oob");
return data()->cell_at(index);
@@ -336,18 +330,14 @@
void set_uint_at(int index, uint value) {
set_intptr_at(index, (intptr_t) value);
}
- void release_set_uint_at(int index, uint value) {
- release_set_intptr_at(index, (intptr_t) value);
- }
+ void release_set_uint_at(int index, uint value);
uint uint_at(int index) const {
return (uint)intptr_at(index);
}
void set_int_at(int index, int value) {
set_intptr_at(index, (intptr_t) value);
}
- void release_set_int_at(int index, int value) {
- release_set_intptr_at(index, (intptr_t) value);
- }
+ void release_set_int_at(int index, int value);
int int_at(int index) const {
return (int)intptr_at(index);
}
@@ -1603,12 +1593,7 @@
assert((uint)row < row_limit(), "oob");
set_int_at(bci0_offset + row * ret_row_cell_count, bci);
}
- void release_set_bci(uint row, int bci) {
- assert((uint)row < row_limit(), "oob");
- // 'release' when setting the bci acts as a valid flag for other
- // threads wrt bci_count and bci_displacement.
- release_set_int_at(bci0_offset + row * ret_row_cell_count, bci);
- }
+ void release_set_bci(uint row, int bci);
void set_bci_count(uint row, uint count) {
assert((uint)row < row_limit(), "oob");
set_uint_at(count0_offset + row * ret_row_cell_count, count);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/methodData.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_METHODDATA_INLINE_HPP
+#define SHARE_VM_OOPS_METHODDATA_INLINE_HPP
+
+#include "oops/methodData.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+inline void DataLayout::release_set_cell_at(int index, intptr_t value) {
+ OrderAccess::release_store(&_cells[index], value);
+}
+
+inline void ProfileData::release_set_intptr_at(int index, intptr_t value) {
+ assert(0 <= index && index < cell_count(), "oob");
+ data()->release_set_cell_at(index, value);
+}
+
+inline void ProfileData::release_set_uint_at(int index, uint value) {
+ release_set_intptr_at(index, (intptr_t) value);
+}
+
+inline void ProfileData::release_set_int_at(int index, int value) {
+ release_set_intptr_at(index, (intptr_t) value);
+}
+
+inline void RetData::release_set_bci(uint row, int bci) {
+ assert((uint)row < row_limit(), "oob");
+ // 'release' when setting the bci acts as a valid flag for other
+ // threads wrt bci_count and bci_displacement.
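+  // A reader that observes the new bci can therefore also trust the row's
+  // bci_count and bci_displacement stores that preceded it.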
+ release_set_int_at(bci0_offset + row * ret_row_cell_count, bci);
+}
+
+#endif // SHARE_VM_OOPS_METHODDATA_INLINE_HPP
--- a/src/hotspot/share/oops/objArrayKlass.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/objArrayKlass.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/arrayKlass.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
--- a/src/hotspot/share/oops/objArrayKlass.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/objArrayKlass.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -37,7 +37,7 @@
template <bool nv, typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure) {
- T* p = (T*)a->base();
+ T* p = (T*)a->base_raw();
T* const end = p + a->length();
for (;p < end; p++) {
@@ -52,7 +52,7 @@
T* const l = (T*)low;
T* const h = (T*)high;
- T* p = (T*)a->base();
+ T* p = (T*)a->base_raw();
T* end = p + a->length();
if (p < l) {
@@ -112,7 +112,7 @@
template <bool nv, typename T, class OopClosureType>
void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) {
T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr_raw<T>(start);
- T* high = (T*)a->base() + end;
+ T* high = (T*)a->base_raw() + end;
oop_oop_iterate_elements_specialized_bounded<nv, T>(a, closure, low, high);
}
--- a/src/hotspot/share/oops/typeArrayKlass.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/oops/typeArrayKlass.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
-#include "memory/universe.inline.hpp"
#include "oops/arrayKlass.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
@@ -152,9 +152,9 @@
// This is an attempt to make the copy_array fast.
int l2es = log2_element_size();
int ihs = array_header_in_bytes() / wordSize;
- char* src = (char*) ((oop*)s + ihs) + ((size_t)src_pos << l2es);
- char* dst = (char*) ((oop*)d + ihs) + ((size_t)dst_pos << l2es);
- Copy::conjoint_memory_atomic(src, dst, (size_t)length << l2es);
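+  // Address elements via the typed array base and copy through the Access
+  // API so the active GC can apply whatever barriers the arraycopy requires.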
+ void* src = (char*) (s->base(element_type())) + ((size_t)src_pos << l2es);
+ void* dst = (char*) (d->base(element_type())) + ((size_t)dst_pos << l2es);
+ HeapAccess<ARRAYCOPY_ATOMIC>::arraycopy(s, d, src, dst, (size_t)length << l2es);
}
--- a/src/hotspot/share/opto/block.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/block.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -661,7 +661,7 @@
//----------------------------BlockProbPair---------------------------
// Ordered pair of Node*.
-class BlockProbPair VALUE_OBJ_CLASS_SPEC {
+class BlockProbPair {
protected:
Block* _target; // block target
double _prob; // probability of edge to block
--- a/src/hotspot/share/opto/chaitin.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/chaitin.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -281,7 +281,7 @@
// The LiveRangeMap class is responsible for storing node to live range id mapping.
// Each node is mapped to a live range id (a virtual register). Nodes that are
// not considered for register allocation are given live range id 0.
-class LiveRangeMap VALUE_OBJ_CLASS_SPEC {
+class LiveRangeMap {
private:
--- a/src/hotspot/share/opto/classes.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/classes.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -241,6 +241,7 @@
macro(Phi)
macro(PopCountI)
macro(PopCountL)
+macro(PopCountVI)
macro(PrefetchAllocation)
macro(Proj)
macro(RShiftI)
--- a/src/hotspot/share/opto/graphKit.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/graphKit.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compileLog.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
@@ -1561,7 +1561,7 @@
BarrierSet* bs = Universe::heap()->barrier_set();
set_control(ctl);
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;
@@ -1577,7 +1577,7 @@
bool GraphKit::can_move_pre_barrier() const {
BarrierSet* bs = Universe::heap()->barrier_set();
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
return true; // Can move it if no safepoint
case BarrierSet::CardTableModRef:
@@ -1600,7 +1600,7 @@
BarrierSet* bs = Universe::heap()->barrier_set();
set_control(ctl);
switch (bs->kind()) {
- case BarrierSet::G1SATBCTLogging:
+ case BarrierSet::G1BarrierSet:
g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
break;
@@ -4349,7 +4349,7 @@
// The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
// We don't need a barrier here if the destination is a newly allocated object
// in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
- // are set to 'g1_young_gen' (see G1SATBCardTableModRefBS::verify_g1_young_region()).
+ // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
__ if_then(card_val, BoolTest::ne, young_card); {
--- a/src/hotspot/share/opto/indexSet.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/indexSet.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -393,7 +393,7 @@
//-------------------------------- class IndexSetIterator --------------------
// An iterator for IndexSets.
-class IndexSetIterator VALUE_OBJ_CLASS_SPEC {
+class IndexSetIterator {
friend class IndexSet;
public:
--- a/src/hotspot/share/opto/library_call.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/library_call.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -2578,7 +2578,8 @@
// the one that guards them: pin the Load node
LoadNode::ControlDependency dep = LoadNode::Pinned;
Node* ctrl = control();
- if (adr_type->isa_instptr()) {
+    // Non-volatile loads may be able to float.
+ if (!need_mem_bar && adr_type->isa_instptr()) {
assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
intptr_t offset = Type::OffsetBot;
AddPNode::Ideal_base_and_offset(adr, &_gvn, offset);
--- a/src/hotspot/share/opto/node.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/node.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1194,7 +1194,7 @@
#if OPTO_DU_ITERATOR_ASSERT
// Common code for assertion checking on DU iterators.
-class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
+class DUIterator_Common {
#ifdef ASSERT
protected:
bool _vdui; // cached value of VerifyDUIterators
@@ -1618,7 +1618,7 @@
//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes. See Compile::node_notes_at for the accessor.
-class Node_Notes VALUE_OBJ_CLASS_SPEC {
+class Node_Notes {
friend class VMStructs;
JVMState* _jvms;
--- a/src/hotspot/share/opto/optoreg.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/optoreg.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
// if the value is outside the enumeration's valid range. OptoReg::Name is
// typedef'ed as int, because it needs to be able to represent spill-slots.
//
-class OptoReg VALUE_OBJ_CLASS_SPEC {
+class OptoReg {
friend class C2Compiler;
public:
--- a/src/hotspot/share/opto/parse2.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/parse2.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,7 +29,7 @@
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
--- a/src/hotspot/share/opto/parse3.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/parse3.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
--- a/src/hotspot/share/opto/regmask.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/regmask.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
// However, it means the ADLC can redefine the unroll macro and all loops
// over register masks will be unrolled by the correct amount.
-class RegMask VALUE_OBJ_CLASS_SPEC {
+class RegMask {
union {
double _dummy_force_double_alignment[RM_SIZE>>1];
// Array of Register Mask bits. This array is large enough to cover
--- a/src/hotspot/share/opto/replacednodes.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/replacednodes.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,9 +41,9 @@
// replaced nodes are also merged. When parsing exits a method to
// return to a caller, the replaced nodes on the exit path are used to
// update the caller's map.
-class ReplacedNodes VALUE_OBJ_CLASS_SPEC {
+class ReplacedNodes {
private:
- class ReplacedNode VALUE_OBJ_CLASS_SPEC {
+ class ReplacedNode {
private:
Node* _initial;
Node* _improved;
--- a/src/hotspot/share/opto/runtime.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/runtime.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -34,7 +34,6 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
--- a/src/hotspot/share/opto/subnode.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/subnode.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -279,7 +279,7 @@
// Convert condition codes to a boolean test value (0 or -1).
// We pick the values as 3 bits; the low order 2 bits we compare against the
// condition codes, the high bit flips the sense of the result.
-struct BoolTest VALUE_OBJ_CLASS_SPEC {
+struct BoolTest {
enum mask { eq = 0, ne = 4, le = 5, ge = 7, lt = 3, gt = 1, overflow = 2, no_overflow = 6, illegal = 8 };
mask _test;
BoolTest( mask btm ) : _test(btm) {}
--- a/src/hotspot/share/opto/superword.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/superword.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -2325,8 +2325,11 @@
vn = VectorNode::make(opc, in1, in2, vlen, velt_basic_type(n));
vlen_in_bytes = vn->as_Vector()->length_in_bytes();
}
- } else if (opc == Op_SqrtF || opc == Op_SqrtD || opc == Op_AbsF || opc == Op_AbsD || opc == Op_NegF || opc == Op_NegD) {
- // Promote operand to vector (Sqrt/Abs/Neg are 2 address instructions)
+ } else if (opc == Op_SqrtF || opc == Op_SqrtD ||
+ opc == Op_AbsF || opc == Op_AbsD ||
+ opc == Op_NegF || opc == Op_NegD ||
+ opc == Op_PopCountI) {
+ assert(n->req() == 2, "only one input expected");
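+      // (req() counts the control edge in slot 0, so a one-input node has req() == 2)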
Node* in = vector_opd(p, 1);
vn = VectorNode::make(opc, in, NULL, vlen, velt_basic_type(n));
vlen_in_bytes = vn->as_Vector()->length_in_bytes();
--- a/src/hotspot/share/opto/superword.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/superword.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -114,7 +114,7 @@
};
//------------------------------DepGraph---------------------------
-class DepGraph VALUE_OBJ_CLASS_SPEC {
+class DepGraph {
protected:
Arena* _arena;
GrowableArray<DepMem*> _map;
@@ -190,7 +190,7 @@
// -----------------------------SWNodeInfo---------------------------------
// Per node info needed by SuperWord
-class SWNodeInfo VALUE_OBJ_CLASS_SPEC {
+class SWNodeInfo {
public:
int _alignment; // memory alignment for a node
int _depth; // Max expression (DAG) depth from block start
@@ -222,7 +222,7 @@
// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
//------------------------------OrderedPair---------------------------
// Ordered pair of Node*.
-class OrderedPair VALUE_OBJ_CLASS_SPEC {
+class OrderedPair {
protected:
Node* _p1;
Node* _p2;
@@ -544,7 +544,7 @@
//------------------------------SWPointer---------------------------
// Information about an address for dependence checking and vector alignment
-class SWPointer VALUE_OBJ_CLASS_SPEC {
+class SWPointer {
protected:
MemNode* _mem; // My memory reference node
SuperWord* _slp; // SuperWord class
--- a/src/hotspot/share/opto/vectornode.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/vectornode.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -122,6 +122,13 @@
case Op_SqrtD:
assert(bt == T_DOUBLE, "must be");
return Op_SqrtVD;
+ case Op_PopCountI:
+ if (bt == T_INT) {
+ return Op_PopCountVI;
+ }
+ // Unimplemented for subword types since bit count changes
+ // depending on size of lane (and sign bit).
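+      // For example, an all-ones byte lane has popcount 8 but would report 32
+      // if naively widened to an int lane.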
+ return 0;
case Op_LShiftI:
switch (bt) {
case T_BOOLEAN:
@@ -325,6 +332,8 @@
case Op_SqrtVF: return new SqrtVFNode(n1, vt);
case Op_SqrtVD: return new SqrtVDNode(n1, vt);
+ case Op_PopCountVI: return new PopCountVINode(n1, vt);
+
case Op_LShiftVB: return new LShiftVBNode(n1, n2, vt);
case Op_LShiftVS: return new LShiftVSNode(n1, n2, vt);
case Op_LShiftVI: return new LShiftVINode(n1, n2, vt);
--- a/src/hotspot/share/opto/vectornode.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/opto/vectornode.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -381,6 +381,14 @@
virtual int Opcode() const;
};
+//------------------------------PopCountVINode---------------------------------
+// Vector popcount integer bits
+class PopCountVINode : public VectorNode {
+ public:
+ PopCountVINode(Node* in, const TypeVect* vt) : VectorNode(in,vt) {}
+ virtual int Opcode() const;
+};
+
//------------------------------SqrtVFNode--------------------------------------
// Vector Sqrt float
class SqrtVFNode : public VectorNode {
--- a/src/hotspot/share/precompiled/precompiled.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/precompiled/precompiled.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -138,7 +138,7 @@
# include "memory/oopFactory.hpp"
# include "memory/resourceArea.hpp"
# include "memory/universe.hpp"
-# include "memory/universe.inline.hpp"
# include "memory/virtualspace.hpp"
# include "oops/array.hpp"
# include "oops/arrayKlass.hpp"
--- a/src/hotspot/share/prims/cdsoffsets.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/cdsoffsets.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,13 @@
#include "memory/allocation.inline.hpp"
#include "prims/cdsoffsets.hpp"
+CDSOffsets::CDSOffsets(const char* name, int offset, CDSOffsets* next) {
+ _name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal);
+ strcpy(_name, name);
+ _offset = offset;
+ _next = next;
+}
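+// (The name is copied into the C heap; CDSOffsets entries live on a static
+// list for the lifetime of the VM, so no matching free is expected.)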
+
CDSOffsets* CDSOffsets::_all = NULL;
#define ADD_NEXT(list, name, value) \
list->add_end(new CDSOffsets(name, value, NULL))
--- a/src/hotspot/share/prims/cdsoffsets.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/cdsoffsets.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#ifndef SHARE_PRIMS_CDSOFFSETS_HPP
#define SHARE_PRIMS_CDSOFFSETS_HPP
+
class CDSOffsets: public CHeapObj<mtInternal> {
private:
char* _name;
@@ -31,12 +32,7 @@
CDSOffsets* _next;
static CDSOffsets* _all; // sole list for cds
public:
- CDSOffsets(const char* name, int offset, CDSOffsets* next) {
- _name = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal);
- strcpy(_name, name);
- _offset = offset;
- _next = next;
- }
+ CDSOffsets(const char* name, int offset, CDSOffsets* next);
char* get_name() const { return _name; }
int get_offset() const { return _offset; }
@@ -45,4 +41,5 @@
static int find_offset(const char* name);
};
+
#endif // SHARE_PRIMS_CDSOFFSETS_HPP
--- a/src/hotspot/share/prims/forte.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/forte.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,7 +27,7 @@
#include "code/pcDesc.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/space.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/javaCalls.hpp"
--- a/src/hotspot/share/prims/jni.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jni.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -42,7 +42,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayOop.inline.hpp"
#include "oops/instanceKlass.hpp"
@@ -4215,25 +4215,26 @@
jint JNICALL jni_DetachCurrentThread(JavaVM *vm) {
HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(vm);
- VM_Exit::block_if_vm_exited();
JNIWrapper("DetachCurrentThread");
// If the thread has already been detached the operation is a no-op
if (Thread::current_or_null() == NULL) {
- HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
+ HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
return JNI_OK;
}
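+ // Checking for VM exit only after the no-op case above lets an
+ // already-detached thread return JNI_OK even while the VM is exiting
+ // (rationale inferred from the reordering in this change).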
+ VM_Exit::block_if_vm_exited();
+
JavaThread* thread = JavaThread::current();
if (thread->has_last_Java_frame()) {
- HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR);
+ HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR);
// Can't detach a thread that's running java, that can't work.
return JNI_ERR;
}
// Safepoint support. Have to do call-back to safepoint code, if in the
- // middel of a safepoint operation
+ // middle of a safepoint operation
ThreadStateTransition::transition_from_native(thread, _thread_in_vm);
// XXX: Note that JavaThread::exit() call below removes the guards on the
--- a/src/hotspot/share/prims/jniCheck.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jniCheck.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -28,6 +28,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/guardedMemory.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/jvm.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvm.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,7 +39,7 @@
#include "interpreter/bytecode.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceKlass.hpp"
--- a/src/hotspot/share/prims/jvmti.xml Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmti.xml Tue Mar 20 04:36:44 2018 +0100
@@ -693,7 +693,7 @@
mechanism causes the unload (an unload mechanism is not specified in this document)
or the library is (in effect) unloaded by the termination of the VM whether through
normal termination or VM failure, including start-up failure.
- Uncontrolled shutdown is, of couse, an exception to this rule.
+ Uncontrolled shutdown is, of course, an exception to this rule.
Note the distinction between this function and the
<eventlink id="VMDeath">VM Death event</eventlink>: for the VM Death event
to be sent, the VM must have run at least to the point of initialization and a valid
@@ -9405,7 +9405,7 @@
the event <paramlink id="event_type"></paramlink> will be disabled
</constant>
</constants>
- If <code>thread</code> is <code>NULL</code>,
+ If <code>event_thread</code> is <code>NULL</code>,
the event is enabled or disabled globally; otherwise, it is
enabled or disabled for a particular thread.
An event is generated for
--- a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiCodeBlobEvents.hpp"
--- a/src/hotspot/share/prims/jvmtiEnv.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -35,7 +35,7 @@
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiEnvThreadState.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiEnvThreadState.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,12 +42,12 @@
//
// Wrapper class for FramePop, used in the JvmtiFramePops class.
//
-// Two problems: 1) this isn't being used as a ValueObj class, in
+// Two problems: 1) this isn't being used as a value class, in
// several places there are constructors for it. 2) It seems like
-// overkill as a means to get an assert and name the geater than
+// overkill as a means to get an assert and name the greater than
// operator. I'm trying to rewrite everything.
-class JvmtiFramePop VALUE_OBJ_CLASS_SPEC {
+class JvmtiFramePop {
private:
// Frame number counting from BOTTOM (oldest) frame;
// bottom frame == #0
--- a/src/hotspot/share/prims/jvmtiEventController.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiEventController.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
// for inlines see jvmtiEventController_inline.hpp
//
-class JvmtiEventEnabled VALUE_OBJ_CLASS_SPEC {
+class JvmtiEventEnabled {
private:
friend class JvmtiEventControllerPrivate;
jlong _enabled_bits;
@@ -104,7 +104,7 @@
// for inlines see jvmtiEventController_inline.hpp
//
-class JvmtiEnvThreadEventEnable VALUE_OBJ_CLASS_SPEC {
+class JvmtiEnvThreadEventEnable {
private:
friend class JvmtiEventControllerPrivate;
JvmtiEventEnabled _event_user_enabled;
@@ -127,7 +127,7 @@
// for inlines see jvmtiEventController_inline.hpp
//
-class JvmtiThreadEventEnable VALUE_OBJ_CLASS_SPEC {
+class JvmtiThreadEventEnable {
private:
friend class JvmtiEventControllerPrivate;
JvmtiEventEnabled _event_enabled;
@@ -148,7 +148,7 @@
// for inlines see jvmtiEventController_inline.hpp
//
-class JvmtiEnvEventEnable VALUE_OBJ_CLASS_SPEC {
+class JvmtiEnvEventEnable {
private:
friend class JvmtiEventControllerPrivate;
--- a/src/hotspot/share/prims/jvmtiExport.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiExport.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -31,6 +31,7 @@
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
--- a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,13 +25,13 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "prims/jvmtiGetLoadedClasses.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif
@@ -51,7 +51,7 @@
// might not find the object.
#if INCLUDE_ALL_GCS
if (UseG1GC && o != NULL) {
- G1SATBCardTableModRefBS::enqueue(o);
+ G1BarrierSet::enqueue(o);
}
#endif
}
--- a/src/hotspot/share/prims/jvmtiImpl.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "jvmtifiles/jvmtiEnv.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiImpl.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiImpl.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -74,7 +74,7 @@
virtual void metadata_do(void f(Metadata*)) =0;
};
-class GrowableCache VALUE_OBJ_CLASS_SPEC {
+class GrowableCache {
private:
// Object pointer passed into cache & listener functions.
@@ -451,7 +451,7 @@
* This is currently only used for posting compiled-method-load and unload
* events, which we don't want posted from the compiler thread.
*/
-class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
+class JvmtiDeferredEvent {
friend class JvmtiDeferredEventQueue;
private:
typedef enum {
--- a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -312,7 +312,10 @@
}
#endif // ZERO
- if (avail.can_generate_breakpoint_events) {
+ if (avail.can_generate_breakpoint_events
+ || avail.can_generate_field_access_events
+ || avail.can_generate_field_modification_events)
+ {
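+ // Presumably rewritten fast bytecode pairs could bypass the posting of
+ // field access/modification events (assumption; the patch does not
+ // state its rationale).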
RewriteFrequentPairs = false;
}
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
@@ -421,4 +422,3 @@
SimpleNotify (THREAD, true) ;
return OM_OK;
}
-
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -37,7 +37,7 @@
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/klassVtable.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,9 +29,11 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "jvmtifiles/jvmtiEnv.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayOop.inline.hpp"
+#include "oops/constantPool.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
@@ -1760,7 +1762,7 @@
// Base class for all heap walk contexts. The base class maintains a flag
// to indicate if the context is valid or not.
-class HeapWalkContext VALUE_OBJ_CLASS_SPEC {
+class HeapWalkContext {
private:
bool _valid;
public:
--- a/src/hotspot/share/prims/methodComparator.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/methodComparator.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "oops/constantPool.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/methodComparator.hpp"
--- a/src/hotspot/share/prims/nativeLookup.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/nativeLookup.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "classfile/vmSymbols.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/prims/privilegedStack.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/privilegedStack.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
#include "runtime/vframe.hpp"
#include "utilities/growableArray.hpp"
-class PrivilegedElement VALUE_OBJ_CLASS_SPEC {
+class PrivilegedElement {
private:
Klass* _klass; // klass for method
oop _privileged_context; // context for operation
--- a/src/hotspot/share/prims/whitebox.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/prims/whitebox.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -40,7 +40,8 @@
#include "memory/universe.hpp"
#include "memory/oopFactory.hpp"
#include "oops/array.hpp"
-#include "oops/constantPool.hpp"
+#include "oops/constantPool.inline.hpp"
+#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
@@ -191,7 +192,7 @@
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
-void TestMetaspaceAux_test();
+void TestMetaspaceUtils_test();
#endif
WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o))
@@ -199,7 +200,7 @@
TestReservedSpace_test();
TestReserveMemorySpecial_test();
TestVirtualSpace_test();
- TestMetaspaceAux_test();
+ TestMetaspaceUtils_test();
#endif
WB_END
@@ -2162,7 +2163,6 @@
Handle loader(THREAD, ik->class_loader());
if (loader.is_null()) {
WhiteBox::register_methods(env, wbclass, thread, methods, sizeof(methods) / sizeof(methods[0]));
- WhiteBox::register_extended(env, wbclass, thread);
WhiteBox::set_used();
}
}
--- a/src/hotspot/share/prims/whitebox_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "prims/whitebox.hpp"
-
-void WhiteBox::register_extended(JNIEnv* env, jclass wbclass, JavaThread* thread) { }
--- a/src/hotspot/share/runtime/arguments.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/arguments.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,7 +38,7 @@
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
@@ -1856,11 +1856,6 @@
return JNI_OK;
}
-void Arguments::set_gc_specific_flags() {
- // Set GC flags
- GCArguments::arguments()->initialize_flags();
-}
-
julong Arguments::limit_by_allocatable_memory(julong limit) {
julong max_allocatable;
julong result = limit;
@@ -4286,7 +4281,7 @@
// Set heap size based on available physical memory
set_heap_size();
- ArgumentsExt::set_gc_specific_flags();
+ GCArguments::arguments()->initialize_flags();
// Initialize Metaspace flags and alignments
Metaspace::ergo_initialize();
--- a/src/hotspot/share/runtime/arguments.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/arguments.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -167,7 +167,7 @@
};
// maintain an order of entry list of AgentLibrary
-class AgentLibraryList VALUE_OBJ_CLASS_SPEC {
+class AgentLibraryList {
private:
AgentLibrary* _first;
AgentLibrary* _last;
@@ -539,7 +539,6 @@
// Adjusts the arguments after the OS have adjusted the arguments
static jint adjust_after_os();
- static void set_gc_specific_flags();
#if INCLUDE_JVMCI
// Check consistency of jvmci vm argument settings.
static bool check_jvmci_args_consistency();
--- a/src/hotspot/share/runtime/arguments_ext.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/arguments_ext.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,7 +30,6 @@
class ArgumentsExt: AllStatic {
public:
- static inline void set_gc_specific_flags();
// The argument processing extension. Returns true if there is
// no additional parsing needed in Arguments::parse() for the option.
// Otherwise returns false.
@@ -38,8 +37,4 @@
static inline void report_unsupported_options() { }
};
-void ArgumentsExt::set_gc_specific_flags() {
- Arguments::set_gc_specific_flags();
-}
-
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
--- a/src/hotspot/share/runtime/atomic.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/atomic.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -367,7 +367,6 @@
T,
PlatformOp,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(T const volatile* dest) const {
// Forward to the platform handler for the size of T.
@@ -387,7 +386,6 @@
T,
PlatformOp,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(T const volatile* dest) const {
typedef PrimitiveConversions::Translate<T> Translator;
@@ -405,7 +403,7 @@
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
-struct Atomic::PlatformLoad VALUE_OBJ_CLASS_SPEC {
+struct Atomic::PlatformLoad {
template<typename T>
T operator()(T const volatile* dest) const {
STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
@@ -421,7 +419,6 @@
T, T,
PlatformOp,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
void operator()(T new_value, T volatile* dest) const {
// Forward to the platform handler for the size of T.
@@ -439,7 +436,6 @@
T*, D*,
PlatformOp,
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
void operator()(T* new_value, D* volatile* dest) const {
// Allow derived to base conversion, and adding cv-qualifiers.
@@ -459,7 +455,6 @@
T, T,
PlatformOp,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
void operator()(T new_value, T volatile* dest) const {
typedef PrimitiveConversions::Translate<T> Translator;
@@ -477,7 +472,7 @@
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
-struct Atomic::PlatformStore VALUE_OBJ_CLASS_SPEC {
+struct Atomic::PlatformStore {
template<typename T>
void operator()(T new_value,
T volatile* dest) const {
@@ -491,13 +486,13 @@
// be complete.
template<typename Derived>
-struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
+struct Atomic::FetchAndAdd {
template<typename I, typename D>
D operator()(I add_value, D volatile* dest) const;
};
template<typename Derived>
-struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
+struct Atomic::AddAndFetch {
template<typename I, typename D>
D operator()(I add_value, D volatile* dest) const;
};
@@ -541,7 +536,7 @@
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
-struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
+struct Atomic::PlatformCmpxchg {
template<typename T>
T operator()(T exchange_value,
T volatile* dest,
@@ -552,7 +547,7 @@
// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
-struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
+struct Atomic::CmpxchgByteUsingInt {
template<typename T>
T operator()(T exchange_value,
T volatile* dest,
@@ -566,7 +561,7 @@
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
-struct Atomic::PlatformXchg VALUE_OBJ_CLASS_SPEC {
+struct Atomic::PlatformXchg {
template<typename T>
T operator()(T exchange_value,
T volatile* dest) const;
@@ -605,7 +600,6 @@
IsIntegral<D>::value &&
(sizeof(I) <= sizeof(D)) &&
(IsSigned<I>::value == IsSigned<D>::value)>::type>
- VALUE_OBJ_CLASS_SPEC
{
D operator()(I add_value, D volatile* dest) const {
D addend = add_value;
@@ -617,7 +611,6 @@
struct Atomic::AddImpl<
I, P*,
typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
- VALUE_OBJ_CLASS_SPEC
{
P* operator()(I add_value, P* volatile* dest) const {
STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
@@ -640,7 +633,7 @@
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
-struct Atomic::AddImpl<short, short> VALUE_OBJ_CLASS_SPEC {
+struct Atomic::AddImpl<short, short> {
short operator()(short add_value, short volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
@@ -707,7 +700,6 @@
struct Atomic::CmpxchgImpl<
T, T, T,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(T exchange_value, T volatile* dest, T compare_value,
cmpxchg_memory_order order) const {
@@ -734,7 +726,6 @@
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
IsSame<typename RemoveCV<D>::type,
typename RemoveCV<U>::type>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
cmpxchg_memory_order order) const {
@@ -758,7 +749,6 @@
struct Atomic::CmpxchgImpl<
T, T, T,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(T exchange_value, T volatile* dest, T compare_value,
cmpxchg_memory_order order) const {
@@ -830,7 +820,6 @@
struct Atomic::XchgImpl<
T, T,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(T exchange_value, T volatile* dest) const {
// Forward to the platform handler for the size of T.
@@ -847,7 +836,6 @@
struct Atomic::XchgImpl<
T*, D*,
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
D* operator()(T* exchange_value, D* volatile* dest) const {
// Allow derived to base conversion, and adding cv-qualifiers.
@@ -867,7 +855,6 @@
struct Atomic::XchgImpl<
T, T,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
- VALUE_OBJ_CLASS_SPEC
{
T operator()(T exchange_value, T volatile* dest) const {
typedef PrimitiveConversions::Translate<T> Translator;
--- a/src/hotspot/share/runtime/basicLock.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/basicLock.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "oops/markOop.hpp"
#include "runtime/handles.hpp"
-class BasicLock VALUE_OBJ_CLASS_SPEC {
+class BasicLock {
friend class VMStructs;
friend class JVMCIVMStructs;
private:
@@ -54,7 +54,7 @@
// alignment of the embedded BasicLock objects on such machines, we
// put the embedded BasicLock at the beginning of the struct.
-class BasicObjectLock VALUE_OBJ_CLASS_SPEC {
+class BasicObjectLock {
friend class VMStructs;
private:
BasicLock _lock; // the lock, must be double word aligned
--- a/src/hotspot/share/runtime/biasedLocking.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/biasedLocking.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@
// again, a bulk heap sweep.
// Biased locking counters
-class BiasedLockingCounters VALUE_OBJ_CLASS_SPEC {
+class BiasedLockingCounters {
private:
int _total_entry_count;
int _biased_lock_entry_count;
--- a/src/hotspot/share/runtime/compilationPolicy.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/compilationPolicy.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,7 +30,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
-#include "oops/method.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
--- a/src/hotspot/share/runtime/deoptimization.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/deoptimization.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -56,6 +56,7 @@
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
+#include "utilities/preserveException.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JVMCI
@@ -648,6 +649,8 @@
#ifndef PRODUCT
if (VerifyStack) {
ResourceMark res_mark;
+ // Clear pending exception to not break verification code (restored afterwards)
+ PRESERVE_EXCEPTION_MARK;
thread->validate_frame_layout();
--- a/src/hotspot/share/runtime/extendedPC.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/extendedPC.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
// An ExtendedPC contains the _pc from a signal handler in a platform
// independent way.
-class ExtendedPC VALUE_OBJ_CLASS_SPEC {
+class ExtendedPC {
private:
address _pc;
--- a/src/hotspot/share/runtime/fieldDescriptor.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/fieldDescriptor.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,7 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/annotations.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/runtime/fieldDescriptor.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/fieldDescriptor.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
// It needs the class constant pool to work (because it only holds indices into the pool
// rather than the actual info).
-class fieldDescriptor VALUE_OBJ_CLASS_SPEC {
+class fieldDescriptor {
private:
AccessFlags _access_flags;
int _index; // the field index
--- a/src/hotspot/share/runtime/fieldType.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/fieldType.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,14 +31,6 @@
#include "runtime/fieldType.hpp"
#include "runtime/signature.hpp"
-void FieldType::skip_optional_size(Symbol* signature, int* index) {
- jchar c = signature->byte_at(*index);
- while (c >= '0' && c <= '9') {
- *index = *index + 1;
- c = signature->byte_at(*index);
- }
-}
-
BasicType FieldType::basic_type(Symbol* signature) {
return char2type(signature->byte_at(0));
}
@@ -78,11 +70,9 @@
assert(basic_type(signature) == T_ARRAY, "must be array");
int index = 1;
int dim = 1;
- skip_optional_size(signature, &index);
while (signature->byte_at(index) == '[') {
index++;
dim++;
- skip_optional_size(signature, &index);
}
ResourceMark rm;
char *element = signature->as_C_string() + index;
--- a/src/hotspot/share/runtime/fieldType.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/fieldType.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,6 @@
class FieldType: public AllStatic {
private:
- static void skip_optional_size(Symbol* signature, int* index);
static bool is_valid_array_signature(Symbol* signature);
public:
--- a/src/hotspot/share/runtime/frame.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/frame.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
--- a/src/hotspot/share/runtime/frame.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/frame.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,7 @@
// so that one physical frame can correspond to multiple source level
// frames because of inlining.
-class frame VALUE_OBJ_CLASS_SPEC {
+class frame {
private:
// Instance variables:
intptr_t* _sp; // stack pointer (from Thread::last_Java_sp)
@@ -337,33 +337,6 @@
// tells whether there is another chunk of Delta stack above
bool entry_frame_is_first() const;
- // Compiled frames:
-
- public:
- // Given the index of a local, and the number of argument words
- // in this stack frame, tell which word of the stack frame to find
- // the local in. Arguments are stored above the ofp/rpc pair,
- // while other locals are stored below it.
- // Since monitors (BasicLock blocks) are also assigned indexes,
- // but may have different storage requirements, their presence
- // can also affect the calculation of offsets.
- static int local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors);
-
- // Given the index of a monitor, etc., tell which word of the
- // stack frame contains the start of the BasicLock block.
- // Note that the local index by convention is the __higher__
- // of the two indexes allocated to the block.
- static int monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors);
-
- // Tell the smallest value that local_offset_for_compiler will attain.
- // This is used to help determine how much stack frame to allocate.
- static int min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors);
-
- // Tells if this register must be spilled during a call.
- // On Intel, all registers are smashed by calls.
- static bool volatile_across_calls(Register reg);
-
-
// Safepoints
public:
@@ -416,8 +389,6 @@
// Usage:
// assert(frame::verify_return_pc(return_address), "must be a return pc");
- int pd_oop_map_offset_adjustment() const;
-
NOT_PRODUCT(void pd_ps();) // platform dependent frame printing
#include CPU_HEADER(frame)
@@ -426,7 +397,7 @@
#ifndef PRODUCT
// A simple class to describe a location on the stack
-class FrameValue VALUE_OBJ_CLASS_SPEC {
+class FrameValue {
public:
intptr_t* location;
char* description;
--- a/src/hotspot/share/runtime/globals.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/globals.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1430,9 +1430,10 @@
"Use semaphore synchronization for the GC Threads, " \
"instead of synchronization based on mutexes") \
\
- product(bool, UseDynamicNumberOfGCThreads, false, \
- "Dynamically choose the number of parallel threads " \
- "parallel gc will use") \
+ product(bool, UseDynamicNumberOfGCThreads, true, \
+ "Dynamically choose the number of threads up to a maximum of " \
+ "ParallelGCThreads parallel collectors will use for garbage " \
+ "collection work") \
\
diagnostic(bool, InjectGCWorkerCreationFailure, false, \
"Inject thread creation failures for " \
@@ -4056,7 +4057,12 @@
\
product(ccstr, AllocateHeapAt, NULL, \
"Path to the directoy where a temporary file will be created " \
- "to use as the backing store for Java Heap.")
+ "to use as the backing store for Java Heap.") \
+ \
+ develop(bool, VerifyMetaspace, false, \
+ "Verify metaspace on chunk movements.") \
+ \
+
/*
--- a/src/hotspot/share/runtime/handles.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/handles.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
// allocated and passed around via Handles within the VM. A handle is
// simply an extra indirection allocated in a thread local handle area.
//
-// A handle is a ValueObj, so it can be passed around as a value, can
+// A handle is a value object, so it can be passed around as a value, can
// be used as a parameter w/o using &-passing, and can be returned as a
// return value.
//
@@ -61,7 +61,7 @@
// Base class for all handles. Provides overloading of frequently
// used operators for ease of use.
-class Handle VALUE_OBJ_CLASS_SPEC {
+class Handle {
private:
oop* _handle;
@@ -72,7 +72,7 @@
public:
// Constructors
Handle() { _handle = NULL; }
- Handle(Thread* thread, oop obj);
+ inline Handle(Thread* thread, oop obj);
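+ // Defined in handles.inline.hpp.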
// General access
oop operator () () const { return obj(); }
@@ -108,9 +108,7 @@
public: \
/* Constructors */ \
type##Handle () : Handle() {} \
- type##Handle (Thread* thread, type##Oop obj) : Handle(thread, (oop)obj) { \
- assert(is_null() || ((oop)obj)->is_a(), "illegal type"); \
- } \
+ inline type##Handle (Thread* thread, type##Oop obj); \
\
/* Operators for ease of use */ \
type##Oop operator () () const { return obj(); } \
--- a/src/hotspot/share/runtime/handles.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/handles.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,17 @@
}
}
+// Inline constructors for Specific Handles for different oop types
+#define DEF_HANDLE_CONSTR(type, is_a) \
+inline type##Handle::type##Handle (Thread* thread, type##Oop obj) : Handle(thread, (oop)obj) { \
+ assert(is_null() || ((oop)obj)->is_a(), "illegal type"); \
+}
+
+DEF_HANDLE_CONSTR(instance , is_instance_noinline )
+DEF_HANDLE_CONSTR(array , is_array_noinline )
+DEF_HANDLE_CONSTR(objArray , is_objArray_noinline )
+DEF_HANDLE_CONSTR(typeArray, is_typeArray_noinline)
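+
+// For example, DEF_HANDLE_CONSTR(instance, is_instance_noinline) expands
+// (roughly) to:
+//   inline instanceHandle::instanceHandle(Thread* t, instanceOop obj)
+//     : Handle(t, (oop)obj) { assert(is_null() || ((oop)obj)->is_instance_noinline(), "illegal type"); }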
+
// Constructor for metadata handles
#define DEF_METADATA_HANDLE_FN(name, type) \
inline name##Handle::name##Handle(type* obj) : _value(obj), _thread(NULL) { \
--- a/src/hotspot/share/runtime/handshake.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/handshake.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
// VM thread and JavaThread are serialized with the semaphore making sure
// the operation is only done by either VM thread on behalf of the JavaThread
// or the JavaThread itself.
-class HandshakeState VALUE_OBJ_CLASS_SPEC {
+class HandshakeState {
HandshakeOperation* volatile _operation;
Semaphore _semaphore;
--- a/src/hotspot/share/runtime/java.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/java.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -341,7 +341,9 @@
}
if (PrintSystemDictionaryAtExit) {
+ ResourceMark rm;
SystemDictionary::print();
+ ClassLoaderDataGraph::print();
}
if (LogTouchedMethods && PrintTouchedMethodsAtExit) {
@@ -483,7 +485,7 @@
Universe::print_on(&ls_info);
if (log.is_trace()) {
LogStream ls_trace(log.trace());
- ClassLoaderDataGraph::dump_on(&ls_trace);
+ ClassLoaderDataGraph::print_on(&ls_trace);
}
}
--- a/src/hotspot/share/runtime/java.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/java.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
* as defined by JEP-223, most of the code related to handle the version
* string prior to JDK 1.6 was removed (partial initialization)
*/
-class JDK_Version VALUE_OBJ_CLASS_SPEC {
+class JDK_Version {
friend class VMStructs;
friend class Universe;
friend void JDK_Version_init();
--- a/src/hotspot/share/runtime/javaCalls.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/javaCalls.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,7 +29,8 @@
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jniCheck.hpp"
#include "runtime/compilationPolicy.hpp"
--- a/src/hotspot/share/runtime/javaFrameAnchor.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/javaFrameAnchor.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
//
class JavaThread;
-class JavaFrameAnchor VALUE_OBJ_CLASS_SPEC {
+class JavaFrameAnchor {
// Too many friends...
friend class CallNativeDirectNode;
friend class OptoRuntime;
--- a/src/hotspot/share/runtime/jniHandles.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/jniHandles.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,6 +27,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
@@ -34,7 +35,7 @@
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
#endif
OopStorage* JNIHandles::_global_handles = NULL;
@@ -153,7 +154,7 @@
oop result = jweak_ref(handle);
#if INCLUDE_ALL_GCS
if (result != NULL && UseG1GC) {
- G1SATBCardTableModRefBS::enqueue(result);
+ G1BarrierSet::enqueue(result);
}
#endif // INCLUDE_ALL_GCS
return result;
--- a/src/hotspot/share/runtime/orderAccess.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/orderAccess.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -296,7 +296,7 @@
// generalized variant is used.
template<size_t byte_size, ScopedFenceType type>
-struct OrderAccess::PlatformOrderedStore VALUE_OBJ_CLASS_SPEC {
+struct OrderAccess::PlatformOrderedStore {
template <typename T>
void operator()(T v, volatile T* p) const {
ordered_store<T, type>(p, v);
@@ -304,7 +304,7 @@
};
template<size_t byte_size, ScopedFenceType type>
-struct OrderAccess::PlatformOrderedLoad VALUE_OBJ_CLASS_SPEC {
+struct OrderAccess::PlatformOrderedLoad {
template <typename T>
T operator()(const volatile T* p) const {
return ordered_load<T, type>(p);
--- a/src/hotspot/share/runtime/os.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/os.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -245,13 +245,6 @@
return OS_OK;
}
-
-#if !defined(LINUX) && !defined(_WINDOWS)
-size_t os::committed_stack_size(address bottom, size_t size) {
- return size;
-}
-#endif
-
bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX);
return (n != -1);
--- a/src/hotspot/share/runtime/os.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/os.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -271,10 +271,6 @@
static void map_stack_shadow_pages(address sp);
static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp);
- // Return size of stack that is actually committed. For Java thread, the bottom should be above
- // guard pages (stack grows downward)
- static size_t committed_stack_size(address bottom, size_t size);
-
// OS interface to Virtual Memory
// Return the default page size.
--- a/src/hotspot/share/runtime/osThread.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/osThread.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,11 +53,6 @@
ZOMBIE // All done, but not reclaimed yet
};
-// I'd make OSThread a ValueObj embedded in Thread to avoid an indirection, but
-// the assembler test in java.cpp expects that it can install the OSThread of
-// the main thread into its own Thread at will.
-
-
class OSThread: public CHeapObj<mtThread> {
friend class VMStructs;
friend class JVMCIVMStructs;
--- a/src/hotspot/share/runtime/perfData.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/perfData.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "jvm.h"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
--- a/src/hotspot/share/runtime/reflection.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/reflection.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
#include "interpreter/linkResolver.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
--- a/src/hotspot/share/runtime/reflectionUtils.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/reflectionUtils.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "runtime/reflectionUtils.hpp"
KlassStream::KlassStream(InstanceKlass* klass, bool local_only,
--- a/src/hotspot/share/runtime/reflectionUtils.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/reflectionUtils.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,11 @@
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oopsHierarchy.hpp"
-#include "runtime/handles.inline.hpp"
+#include "runtime/handles.hpp"
#include "runtime/reflection.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/growableArray.hpp"
// A KlassStream is an abstract stream for streaming over self, superclasses
// and (super)interfaces. Streaming is done in reverse order (subclasses first,
@@ -43,7 +44,7 @@
// ...
// }
-class KlassStream VALUE_OBJ_CLASS_SPEC {
+class KlassStream {
protected:
InstanceKlass* _klass; // current klass/interface iterated over
InstanceKlass* _base_klass; // initial klass/interface to iterate over
--- a/src/hotspot/share/runtime/relocator.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/relocator.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "interpreter/bytecodes.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/relocator.hpp"
--- a/src/hotspot/share/runtime/rframe.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/rframe.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "interpreter/interpreter.hpp"
+#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/frame.inline.hpp"
--- a/src/hotspot/share/runtime/rtmLocking.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/rtmLocking.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
// supported for stack locks just like inflated locks.
// RTM locking counters
-class RTMLockingCounters VALUE_OBJ_CLASS_SPEC {
+class RTMLockingCounters {
private:
uintx _total_count; // Total RTM locks count
uintx _abort_count; // Total aborts count
--- a/src/hotspot/share/runtime/safepoint.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/safepoint.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -40,7 +40,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.hpp"
--- a/src/hotspot/share/runtime/safepoint.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/safepoint.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -209,6 +209,20 @@
}
};
+// Some helper assert macros for safepoint checks.
+
+#define assert_at_safepoint() \
+ assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint")
+
+#define assert_at_safepoint_msg(...) \
+ assert(SafepointSynchronize::is_at_safepoint(), __VA_ARGS__)
+
+#define assert_not_at_safepoint() \
+ assert(!SafepointSynchronize::is_at_safepoint(), "should not be at a safepoint")
+
+#define assert_not_at_safepoint_msg(...) \
+ assert(!SafepointSynchronize::is_at_safepoint(), __VA_ARGS__)
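+
+// Hypothetical usage (illustration only, not part of this change):
+//   assert_at_safepoint_msg("CLDG must not be modified outside a safepoint");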
+
// State class for a thread suspended at a safepoint
class ThreadSafepointState: public CHeapObj<mtInternal> {
public:
--- a/src/hotspot/share/runtime/sharedRuntime.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -41,8 +41,9 @@
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/klass.hpp"
+#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
@@ -838,9 +839,15 @@
if (vt_stub->is_abstract_method_error(pc)) {
assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
Events::log_exception(thread, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
- return StubRoutines::throw_AbstractMethodError_entry();
+ // Instead of throwing the abstract method error here directly, we re-resolve
+ // and will throw the AbstractMethodError during resolve. As a result, we'll
+ // get a more detailed error message.
+ return SharedRuntime::get_handle_wrong_method_stub();
} else {
Events::log_exception(thread, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
+ // Assert that the signal comes from the expected location in stub code.
+ assert(vt_stub->is_null_pointer_exception(pc),
+ "obtained signal from unexpected location in stub code");
return StubRoutines::throw_NullPointerException_at_call_entry();
}
} else {
@@ -1453,7 +1460,33 @@
// Handle abstract method call
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread))
- return StubRoutines::throw_AbstractMethodError_entry();
+ // Verbose error message for AbstractMethodError.
+ // Get the called method from the invoke bytecode.
+ vframeStream vfst(thread, true);
+ assert(!vfst.at_end(), "Java frame must exist");
+ methodHandle caller(vfst.method());
+ Bytecode_invoke invoke(caller, vfst.bci());
+ DEBUG_ONLY( invoke.verify(); )
+
+ // Find the compiled caller frame.
+ RegisterMap reg_map(thread);
+ frame stubFrame = thread->last_frame();
+ assert(stubFrame.is_runtime_frame(), "must be");
+ frame callerFrame = stubFrame.sender(&reg_map);
+ assert(callerFrame.is_compiled_frame(), "must be");
+
+ // Install exception and return forward entry.
+ address res = StubRoutines::throw_AbstractMethodError_entry();
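+ // 'res' stays at the AbstractMethodError stub if the callee below
+ // cannot be resolved, preserving the previous behavior.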
+ JRT_BLOCK
+ methodHandle callee = invoke.static_target(thread);
+ if (!callee.is_null()) {
+ oop recv = callerFrame.retrieve_receiver(&reg_map);
+ Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
+ LinkResolver::throw_abstract_method_error(callee, recv_klass, thread);
+ res = StubRoutines::forward_exception_entry();
+ }
+ JRT_BLOCK_END
+ return res;
JRT_END
--- a/src/hotspot/share/runtime/signature.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/signature.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,14 +54,6 @@
_index++;
}
-
-void SignatureIterator::skip_optional_size() {
- Symbol* sig = _signature;
- char c = sig->byte_at(_index);
- while ('0' <= c && c <= '9') c = sig->byte_at(++_index);
-}
-
-
int SignatureIterator::parse_type() {
// Note: This function could be simplified by using "return T_XXX_size;"
// instead of the assignment and the break statements. However, it
@@ -99,11 +91,9 @@
break;
case '[':
{ int begin = ++_index;
- skip_optional_size();
Symbol* sig = _signature;
while (sig->byte_at(_index) == '[') {
_index++;
- skip_optional_size();
}
if (sig->byte_at(_index) == 'L') {
while (sig->byte_at(_index++) != ';') ;
@@ -250,10 +240,8 @@
case '[':
{
int begin = ++_index;
- skip_optional_size();
while (sig->byte_at(_index) == '[') {
_index++;
- skip_optional_size();
}
if (sig->byte_at(_index) == 'L') {
while (sig->byte_at(_index++) != ';') ;
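
The removed skip_optional_size() tolerated decimal digits embedded in array
descriptors, a relic that no valid class file uses: JVMS 4.3 field descriptors
carry no array sizes. After this change parse_type() accepts exactly the
standard forms, for example:

    // [I                        int[]
    // [[Ljava/lang/String;      java.lang.String[][]
    // ([IJLjava/lang/Object;)V  method(int[], long, Object) returning void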
--- a/src/hotspot/share/runtime/signature.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/signature.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,6 @@
BasicType _return_type;
void expect(char c);
- void skip_optional_size();
int parse_type(); // returns the parameter size in words (0 for void)
void check_signature_end();
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/simpleThresholdPolicy.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,13 +82,7 @@
template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
// Get a compilation level for a given method.
- static CompLevel comp_level(Method* method) {
- CompiledMethod *nm = method->code();
- if (nm != NULL && nm->is_in_use()) {
- return (CompLevel)nm->comp_level();
- }
- return CompLevel_none;
- }
+ static CompLevel comp_level(Method* method);
virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
CompLevel level, CompiledMethod* nm, JavaThread* thread);
virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/simpleThresholdPolicy.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
#include "compiler/compilerOracle.hpp"
+#include "oops/method.inline.hpp"
#ifdef TIERED
@@ -94,6 +95,14 @@
return false;
}
+inline CompLevel SimpleThresholdPolicy::comp_level(Method* method) {
+ CompiledMethod *nm = method->code();
+ if (nm != NULL && nm->is_in_use()) {
+ return (CompLevel)nm->comp_level();
+ }
+ return CompLevel_none;
+}
+
#endif // TIERED
#endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
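
Moving the body out of simpleThresholdPolicy.hpp follows the usual HotSpot
header discipline: the definition calls Method::code(), which lives in
method.inline.hpp, and keeping it in the class header would force every
includer of simpleThresholdPolicy.hpp to pull that inline header in as well.
A minimal sketch of the idiom, using a hypothetical class Foo:

    // foo.hpp -- declaration only; includers never see method.inline.hpp
    class Foo : AllStatic {
      static CompLevel comp_level(Method* method);  // defined in foo.inline.hpp
    };

    // foo.inline.hpp -- definition next to the inline header it depends on
    #include "foo.hpp"
    #include "oops/method.inline.hpp"
    inline CompLevel Foo::comp_level(Method* method) {
      CompiledMethod* nm = method->code();          // needs method.inline.hpp
      return (nm != NULL && nm->is_in_use()) ? (CompLevel)nm->comp_level()
                                             : CompLevel_none;
    }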
--- a/src/hotspot/share/runtime/sweeper.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/sweeper.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
#include "compiler/compileBroker.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
--- a/src/hotspot/share/runtime/synchronizer.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/synchronizer.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
--- a/src/hotspot/share/runtime/thread.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/thread.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -43,10 +43,11 @@
#include "logging/log.hpp"
#include "logging/logConfiguration.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
@@ -858,7 +859,6 @@
st->print("os_prio=%d ", os_prio);
}
st->print("tid=" INTPTR_FORMAT " ", p2i(this));
- ext().print_on(st);
osthread()->print_on(st);
}
if (_threads_hazard_ptr != NULL) {
@@ -3126,8 +3126,6 @@
// Push the Java priority down to the native thread; needs Threads_lock
Thread::set_priority(this, prio);
- prepare_ext();
-
// Add the new thread to the Threads list and set it in motion.
// We must have threads lock in order to call Threads::add.
// It is crucial that we do not block before the thread is
--- a/src/hotspot/share/runtime/thread.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/thread.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -41,7 +41,6 @@
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadLocalStorage.hpp"
-#include "runtime/thread_ext.hpp"
#include "runtime/unhandledOops.hpp"
#include "trace/traceBackend.hpp"
#include "trace/traceMacros.hpp"
@@ -326,8 +325,6 @@
mutable TRACE_DATA _trace_data; // Thread-local data for tracing
- ThreadExt _ext;
-
int _vm_operation_started_count; // VM_Operation support
int _vm_operation_completed_count; // VM_Operation support
@@ -508,9 +505,6 @@
TRACE_DATA* trace_data() const { return &_trace_data; }
bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }
- const ThreadExt& ext() const { return _ext; }
- ThreadExt& ext() { return _ext; }
-
// VM operation support
int vm_operation_ticket() { return ++_vm_operation_started_count; }
int vm_operation_completed_count() { return _vm_operation_completed_count; }
@@ -1137,7 +1131,6 @@
// not specified, use the priority of the thread object. Threads_lock
// must be held while this function is called.
void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);
- void prepare_ext();
void set_saved_exception_pc(address pc) { _saved_exception_pc = pc; }
address saved_exception_pc() { return _saved_exception_pc; }
--- a/src/hotspot/share/runtime/thread_ext.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/thread_ext.hpp"
-
-void JavaThread::prepare_ext() {
-}
-
--- a/src/hotspot/share/runtime/thread_ext.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_THREAD_EXT_HPP
-#define SHARE_VM_RUNTIME_THREAD_EXT_HPP
-
-#include "memory/allocation.hpp"
-
-class ThreadExt VALUE_OBJ_CLASS_SPEC {
-public:
- void print_on(outputStream* st) const {};
-};
-
-#endif // SHARE_VM_RUNTIME_THREAD_EXT_HPP
--- a/src/hotspot/share/runtime/timer.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/timer.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
// Timers for simple measurement.
-class elapsedTimer VALUE_OBJ_CLASS_SPEC {
+class elapsedTimer {
friend class VMStructs;
private:
jlong _counter;
@@ -50,7 +50,7 @@
};
// TimeStamp is used for recording when an event took place.
-class TimeStamp VALUE_OBJ_CLASS_SPEC {
+class TimeStamp {
private:
jlong _counter;
public:
--- a/src/hotspot/share/runtime/vframeArray.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/vframeArray.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
+#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
--- a/src/hotspot/share/runtime/vframeArray.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/vframeArray.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_RUNTIME_VFRAMEARRAY_HPP
#define SHARE_VM_RUNTIME_VFRAMEARRAY_HPP
+#include "memory/allocation.hpp"
#include "oops/arrayOop.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
@@ -46,7 +47,7 @@
// A vframeArrayElement is an element of a vframeArray. Each element
// represent an interpreter frame which will eventually be created.
-class vframeArrayElement : public _ValueObj {
+class vframeArrayElement {
friend class VMStructs;
private:
--- a/src/hotspot/share/runtime/vmStructs.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/vmStructs.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -138,8 +138,6 @@
#include "runtime/vmStructs_trace.hpp"
#endif
-#include "runtime/vmStructs_ext.hpp"
-
#ifdef COMPILER2
#include "opto/addnode.hpp"
#include "opto/block.hpp"
@@ -1998,6 +1996,7 @@
declare_c2_type(MulReductionVDNode, ReductionNode) \
declare_c2_type(DivVFNode, VectorNode) \
declare_c2_type(DivVDNode, VectorNode) \
+ declare_c2_type(PopCountVINode, VectorNode) \
declare_c2_type(LShiftVBNode, VectorNode) \
declare_c2_type(LShiftVSNode, VectorNode) \
declare_c2_type(LShiftVINode, VectorNode) \
@@ -2239,8 +2238,7 @@
\
declare_constant(BarrierSet::ModRef) \
declare_constant(BarrierSet::CardTableModRef) \
- declare_constant(BarrierSet::G1SATBCT) \
- declare_constant(BarrierSet::G1SATBCTLogging) \
+ declare_constant(BarrierSet::G1BarrierSet) \
\
declare_constant(BOTConstants::LogN) \
declare_constant(BOTConstants::LogN_words) \
@@ -3032,9 +3030,6 @@
GENERATE_STATIC_VM_STRUCT_ENTRY)
#endif
- VM_STRUCTS_EXT(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
- GENERATE_STATIC_VM_STRUCT_ENTRY)
-
VM_STRUCTS_OS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY,
GENERATE_STATIC_VM_STRUCT_ENTRY,
GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY,
@@ -3099,9 +3094,6 @@
GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
#endif
- VM_TYPES_EXT(GENERATE_VM_TYPE_ENTRY,
- GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
-
VM_TYPES_OS(GENERATE_VM_TYPE_ENTRY,
GENERATE_TOPLEVEL_VM_TYPE_ENTRY,
GENERATE_OOP_VM_TYPE_ENTRY,
@@ -3249,9 +3241,6 @@
CHECK_STATIC_VM_STRUCT_ENTRY);
#endif
- VM_STRUCTS_EXT(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
- CHECK_STATIC_VM_STRUCT_ENTRY);
-
VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_VM_STRUCT_ENTRY,
CHECK_NO_OP,
@@ -3299,9 +3288,6 @@
CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
#endif
- VM_TYPES_EXT(CHECK_VM_TYPE_ENTRY,
- CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
-
VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
CHECK_SINGLE_ARG_VM_TYPE_NO_OP,
@@ -3373,9 +3359,6 @@
ENSURE_FIELD_TYPE_PRESENT));
#endif
- debug_only(VM_STRUCTS_EXT(ENSURE_FIELD_TYPE_PRESENT,
- ENSURE_FIELD_TYPE_PRESENT));
-
debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT,
ENSURE_FIELD_TYPE_PRESENT,
CHECK_NO_OP,
--- a/src/hotspot/share/runtime/vmStructs_ext.hpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_VMSTRUCTS_EXT_HPP
-#define SHARE_VM_RUNTIME_VMSTRUCTS_EXT_HPP
-
-#define VM_STRUCTS_EXT(a, b)
-
-#define VM_TYPES_EXT(a, b)
-
-
-#endif // SHARE_VM_RUNTIME_VMSTRUCTS_EXT_HPP
--- a/src/hotspot/share/runtime/vm_operations.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/vm_operations.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -234,7 +234,7 @@
}
void VM_PrintMetadata::doit() {
- MetaspaceAux::print_metadata_for_nmt(_out, _scale);
+ MetaspaceUtils::print_metadata_for_nmt(_out, _scale);
}
VM_FindDeadlocks::~VM_FindDeadlocks() {
--- a/src/hotspot/share/runtime/vm_operations.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/runtime/vm_operations.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -72,7 +72,6 @@
template(HandshakeOneThread) \
template(HandshakeAllThreads) \
template(HandshakeFallback) \
- template(DestroyAllocationContext) \
template(EnableBiasedLocking) \
template(RevokeBias) \
template(BulkRevokeBias) \
--- a/src/hotspot/share/services/allocationSite.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/allocationSite.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
// Allocation site represents a code path that makes a memory
// allocation
-template <class E> class AllocationSite VALUE_OBJ_CLASS_SPEC {
+template <class E> class AllocationSite {
private:
NativeCallStack _call_stack;
E e;
--- a/src/hotspot/share/services/attachListener.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/attachListener.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -32,6 +32,7 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/share/services/attachListener.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/attachListener.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,9 +27,9 @@
#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
-#include "utilities/ostream.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
// The AttachListener thread services a queue of operations that are enqueued
// by client tools. Each operation is identified by a name and has up to 3
@@ -122,9 +122,10 @@
// set the operation name
void set_name(char* name) {
- size_t len = strlen(name);
- assert(len <= name_length_max, "exceeds maximum name length");
- memcpy(_name, name, MIN2(len + 1, (size_t)name_length_max));
+ assert(strlen(name) <= name_length_max, "exceeds maximum name length");
+ size_t len = MIN2(strlen(name), (size_t)name_length_max);
+ memcpy(_name, name, len);
+ _name[len] = '\0';
}
// get an argument value
@@ -139,9 +140,10 @@
if (arg == NULL) {
_arg[i][0] = '\0';
} else {
- size_t len = strlen(arg);
- assert(len <= arg_length_max, "exceeds maximum argument length");
- memcpy(_arg[i], arg, MIN2(len + 1, (size_t)arg_length_max));
+ assert(strlen(arg) <= arg_length_max, "exceeds maximum argument length");
+ size_t len = MIN2(strlen(arg), (size_t)arg_length_max);
+ memcpy(_arg[i], arg, len);
+ _arg[i][len] = '\0';
}
}
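
Both setters now share one discipline: assert on oversized input in debug
builds, truncate in product builds, and always write the terminating NUL. The
old code copied len + 1 bytes capped at the maximum, which on truncation left
the buffer unterminated. A self-contained sketch of the pattern (the helper
name is hypothetical; the destination must reserve cap + 1 bytes, as the
AttachOperation buffers do):

    #include <assert.h>
    #include <string.h>

    // Copy at most 'cap' characters and always NUL-terminate, so oversized
    // input is truncated instead of leaving 'dst' unterminated.
    static void copy_bounded(char* dst, const char* src, size_t cap) {
      size_t len = strlen(src);
      assert(len <= cap && "exceeds maximum length");  // debug-build check
      if (len > cap) len = cap;                        // product-build truncation
      memcpy(dst, src, len);
      dst[len] = '\0';
    }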
--- a/src/hotspot/share/services/diagnosticFramework.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/diagnosticFramework.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/diagnosticArgument.hpp"
--- a/src/hotspot/share/services/heapDumper.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/heapDumper.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -30,6 +30,7 @@
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.hpp"
@@ -650,7 +651,7 @@
// dump a jdouble
static void dump_double(DumpWriter* writer, jdouble d);
// dumps the raw value of the given field
- static void dump_field_value(DumpWriter* writer, char type, address addr);
+ static void dump_field_value(DumpWriter* writer, char type, oop obj, int offset);
// dumps static fields of the given class
static void dump_static_fields(DumpWriter* writer, Klass* k);
// dump the raw values of the instance fields of the given object
@@ -754,64 +755,59 @@
}
// dumps the raw value of the given field
-void DumperSupport::dump_field_value(DumpWriter* writer, char type, address addr) {
+void DumperSupport::dump_field_value(DumpWriter* writer, char type, oop obj, int offset) {
switch (type) {
case JVM_SIGNATURE_CLASS :
case JVM_SIGNATURE_ARRAY : {
- oop o;
- if (UseCompressedOops) {
- o = oopDesc::load_decode_heap_oop((narrowOop*)addr);
- } else {
- o = oopDesc::load_decode_heap_oop((oop*)addr);
- }
-
- // reflection and Unsafe classes may have a reference to a
- // Klass* so filter it out.
+ oop o = obj->obj_field_access<ON_UNKNOWN_OOP_REF>(offset);
assert(oopDesc::is_oop_or_null(o), "Expected an oop or NULL at " PTR_FORMAT, p2i(o));
writer->write_objectID(o);
break;
}
- case JVM_SIGNATURE_BYTE : {
- jbyte* b = (jbyte*)addr;
- writer->write_u1((u1)*b);
+ case JVM_SIGNATURE_BYTE : {
+ jbyte b = obj->byte_field(offset);
+ writer->write_u1((u1)b);
break;
}
- case JVM_SIGNATURE_CHAR : {
- jchar* c = (jchar*)addr;
- writer->write_u2((u2)*c);
+ case JVM_SIGNATURE_CHAR : {
+ jchar c = obj->char_field(offset);
+ writer->write_u2((u2)c);
break;
}
case JVM_SIGNATURE_SHORT : {
- jshort* s = (jshort*)addr;
- writer->write_u2((u2)*s);
+ jshort s = obj->short_field(offset);
+ writer->write_u2((u2)s);
break;
}
case JVM_SIGNATURE_FLOAT : {
- jfloat* f = (jfloat*)addr;
- dump_float(writer, *f);
+ jfloat f = obj->float_field(offset);
+ dump_float(writer, f);
break;
}
case JVM_SIGNATURE_DOUBLE : {
- jdouble* f = (jdouble*)addr;
- dump_double(writer, *f);
+ jdouble d = obj->double_field(offset);
+ dump_double(writer, d);
break;
}
case JVM_SIGNATURE_INT : {
- jint* i = (jint*)addr;
- writer->write_u4((u4)*i);
+ jint i = obj->int_field(offset);
+ writer->write_u4((u4)i);
break;
}
- case JVM_SIGNATURE_LONG : {
- jlong* l = (jlong*)addr;
- writer->write_u8((u8)*l);
+ case JVM_SIGNATURE_LONG : {
+ jlong l = obj->long_field(offset);
+ writer->write_u8((u8)l);
break;
}
case JVM_SIGNATURE_BOOLEAN : {
- jboolean* b = (jboolean*)addr;
- writer->write_u1((u1)*b);
+ jboolean b = obj->bool_field(offset);
+ writer->write_u1((u1)b);
break;
}
- default : ShouldNotReachHere();
+ default : {
+ ShouldNotReachHere();
+ break;
+ }
}
}
@@ -893,10 +889,7 @@
writer->write_u1(sig2tag(sig)); // type
// value
- int offset = fld.offset();
- address addr = (address)ik->java_mirror() + offset;
-
- dump_field_value(writer, sig->byte_at(0), addr);
+ dump_field_value(writer, sig->byte_at(0), ik->java_mirror(), fld.offset());
}
}
@@ -932,9 +925,7 @@
for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) {
if (!fld.access_flags().is_static()) {
Symbol* sig = fld.signature();
- address addr = (address)o + fld.offset();
-
- dump_field_value(writer, sig->byte_at(0), addr);
+ dump_field_value(writer, sig->byte_at(0), o, fld.offset());
}
}
}
--- a/src/hotspot/share/services/mallocTracker.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/mallocTracker.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,7 +38,7 @@
* records total memory allocation size and number of allocations.
* The counters are updated atomically.
*/
-class MemoryCounter VALUE_OBJ_CLASS_SPEC {
+class MemoryCounter {
private:
volatile size_t _count;
volatile size_t _size;
@@ -89,7 +89,7 @@
* It includes the memory acquired through os::malloc()
* call and arena's backing memory.
*/
-class MallocMemory VALUE_OBJ_CLASS_SPEC {
+class MallocMemory {
private:
MemoryCounter _malloc;
MemoryCounter _arena;
@@ -242,7 +242,7 @@
* which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
*/
-class MallocHeader VALUE_OBJ_CLASS_SPEC {
+class MallocHeader {
#ifdef _LP64
size_t _size : 64;
size_t _flags : 8;
--- a/src/hotspot/share/services/management.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/management.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -26,6 +26,7 @@
#include "jmm.h"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileBroker.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
--- a/src/hotspot/share/services/memBaseline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memBaseline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -42,7 +42,7 @@
/*
* Baseline a memory snapshot
*/
-class MemBaseline VALUE_OBJ_CLASS_SPEC {
+class MemBaseline {
public:
enum BaselineThreshold {
SIZE_THRESHOLD = K // Only allocation size over this threshold will be baselined.
--- a/src/hotspot/share/services/memReporter.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memReporter.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -197,18 +197,18 @@
outputStream* out = output();
const char* scale = current_scale();
- size_t committed = MetaspaceAux::committed_bytes(type);
- size_t used = MetaspaceAux::used_bytes(type);
- size_t free = (MetaspaceAux::capacity_bytes(type) - used)
- + MetaspaceAux::free_chunks_total_bytes(type)
- + MetaspaceAux::free_bytes(type);
+ size_t committed = MetaspaceUtils::committed_bytes(type);
+ size_t used = MetaspaceUtils::used_bytes(type);
+ size_t free = (MetaspaceUtils::capacity_bytes(type) - used)
+ + MetaspaceUtils::free_chunks_total_bytes(type)
+ + MetaspaceUtils::free_bytes(type);
assert(committed >= used + free, "Sanity");
size_t waste = committed - (used + free);
out->print_cr("%27s ( %s)", " ", name);
out->print("%27s ( ", " ");
- print_total(MetaspaceAux::reserved_bytes(type), committed);
+ print_total(MetaspaceUtils::reserved_bytes(type), committed);
out->print_cr(")");
out->print_cr("%27s ( used=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(used), scale);
out->print_cr("%27s ( free=" SIZE_FORMAT "%s)", " ", amount_in_current_scale(free), scale);
--- a/src/hotspot/share/services/memTracker.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memTracker.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -246,7 +246,7 @@
if (addr != NULL) {
// uses thread stack malloc slot for book keeping number of threads
MallocMemorySummary::record_malloc(0, mtThreadStack);
- record_virtual_memory_reserve(addr, size, CALLER_PC, mtThreadStack);
+ record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
}
}
--- a/src/hotspot/share/services/memoryManager.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memoryManager.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
--- a/src/hotspot/share/services/memoryPool.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memoryPool.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -194,12 +194,12 @@
MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
MemoryUsage MetaspacePool::get_memory_usage() {
- size_t committed = MetaspaceAux::committed_bytes();
+ size_t committed = MetaspaceUtils::committed_bytes();
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
size_t MetaspacePool::used_in_bytes() {
- return MetaspaceAux::used_bytes();
+ return MetaspaceUtils::used_bytes();
}
size_t MetaspacePool::calculate_max_size() const {
@@ -211,10 +211,10 @@
MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
size_t CompressedKlassSpacePool::used_in_bytes() {
- return MetaspaceAux::used_bytes(Metaspace::ClassType);
+ return MetaspaceUtils::used_bytes(Metaspace::ClassType);
}
MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
- size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+ size_t committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
--- a/src/hotspot/share/services/memoryService.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memoryService.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "memory/memRegion.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "services/classLoadingService.hpp"
#include "services/lowMemoryDetector.hpp"
--- a/src/hotspot/share/services/memoryUsage.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/memoryUsage.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
// memory for memory management could be less than the amount of
// committed memory. Its value may be undefined.
-class MemoryUsage VALUE_OBJ_CLASS_SPEC {
+class MemoryUsage {
private:
size_t _initSize;
size_t _used;
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -38,12 +38,6 @@
::new ((void*)_snapshot) VirtualMemorySnapshot();
}
-void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
- // Snapshot current thread stacks
- VirtualMemoryTracker::snapshot_thread_stacks();
- as_snapshot()->copy_to(s);
-}
-
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
@@ -292,26 +286,6 @@
}
}
-address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
- assert(flag() == mtThreadStack, "Only for thread stack");
- LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
- address bottom = base();
- address top = base() + size();
- while (head != NULL) {
- address committed_top = head->data()->base() + head->data()->size();
- if (committed_top < top) {
- // committed stack guard pages, skip them
- bottom = head->data()->base() + head->data()->size();
- head = head->next();
- } else {
- assert(top == committed_top, "Sanity");
- break;
- }
- }
-
- return bottom;
-}
-
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
if (level >= NMT_summary) {
VirtualMemorySummary::initialize();
@@ -486,32 +460,6 @@
}
}
-// Walk all known thread stacks, snapshot their committed ranges.
-class SnapshotThreadStackWalker : public VirtualMemoryWalker {
-public:
- SnapshotThreadStackWalker() {}
-
- bool do_allocation_site(const ReservedMemoryRegion* rgn) {
- if (rgn->flag() == mtThreadStack) {
- address stack_bottom = rgn->thread_stack_uncommitted_bottom();
- size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
- size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
- if (committed_size > 0) {
- ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
- NativeCallStack ncs; // empty stack
-
- // Stack grows downward
- region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
- }
- }
- return true;
- }
-};
-
-void VirtualMemoryTracker::snapshot_thread_stacks() {
- SnapshotThreadStackWalker walker;
- walk_virtual_memory(&walker);
-}
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
assert(_reserved_regions != NULL, "Sanity check");
@@ -562,13 +510,13 @@
void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
assert_valid_metadata_type(type);
- mss._reserved_in_bytes[type] = MetaspaceAux::reserved_bytes(type);
- mss._committed_in_bytes[type] = MetaspaceAux::committed_bytes(type);
- mss._used_in_bytes[type] = MetaspaceAux::used_bytes(type);
+ mss._reserved_in_bytes[type] = MetaspaceUtils::reserved_bytes(type);
+ mss._committed_in_bytes[type] = MetaspaceUtils::committed_bytes(type);
+ mss._used_in_bytes[type] = MetaspaceUtils::used_bytes(type);
- size_t free_in_bytes = (MetaspaceAux::capacity_bytes(type) - MetaspaceAux::used_bytes(type))
- + MetaspaceAux::free_chunks_total_bytes(type)
- + MetaspaceAux::free_bytes(type);
+ size_t free_in_bytes = (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
+ + MetaspaceUtils::free_chunks_total_bytes(type)
+ + MetaspaceUtils::free_bytes(type);
mss._free_in_bytes[type] = free_in_bytes;
}
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -39,7 +39,7 @@
/*
* Virtual memory counter
*/
-class VirtualMemory VALUE_OBJ_CLASS_SPEC {
+class VirtualMemory {
private:
size_t _reserved;
size_t _committed;
@@ -160,7 +160,9 @@
as_snapshot()->by_type(to)->commit_memory(size);
}
- static void snapshot(VirtualMemorySnapshot* s);
+ static inline void snapshot(VirtualMemorySnapshot* s) {
+ as_snapshot()->copy_to(s);
+ }
static VirtualMemorySnapshot* as_snapshot() {
return (VirtualMemorySnapshot*)_snapshot;
@@ -175,7 +177,7 @@
/*
* A virtual memory region
*/
-class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
+class VirtualMemoryRegion {
private:
address _base_address;
size_t _size;
@@ -334,9 +336,6 @@
return compare(rgn) == 0;
}
- // uncommitted thread stack bottom, above guard pages if there is any.
- address thread_stack_uncommitted_bottom() const;
-
bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
bool remove_uncommitted_region(address addr, size_t size);
@@ -390,7 +389,6 @@
// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
friend class VirtualMemoryTrackerTest;
- friend class ThreadStackTrackingTest;
public:
static bool initialize(NMT_TrackingLevel level);
@@ -410,9 +408,6 @@
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
- // Snapshot current thread stacks
- static void snapshot_thread_stacks();
-
private:
static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};
--- a/src/hotspot/share/services/writeableFlags.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/services/writeableFlags.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
+#include "memory/allocation.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/commandLineFlagRangeList.hpp"
#include "runtime/java.hpp"
--- a/src/hotspot/share/trace/traceevents.xml Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/trace/traceevents.xml Tue Mar 20 04:36:44 2018 +0100
@@ -512,7 +512,6 @@
<value type="G1HEAPREGIONTYPE" field="to" label="To" />
<value type="ADDRESS" field="start" label="Start" />
<value type="BYTES64" field="used" label="Used" />
- <value type="UINT" field="allocationContext" label="Allocation Context" />
</event>
<!-- Compiler events -->
--- a/src/hotspot/share/utilities/accessFlags.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/accessFlags.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,7 @@
};
-class AccessFlags VALUE_OBJ_CLASS_SPEC {
+class AccessFlags {
friend class VMStructs;
private:
jint _flags;
--- a/src/hotspot/share/utilities/bitMap.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/bitMap.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
//
// The allocation of the backing storage for the BitMap are handled by
// the subclasses. BitMap doesn't allocate or delete backing storage.
-class BitMap VALUE_OBJ_CLASS_SPEC {
+class BitMap {
friend class BitMap2D;
public:
@@ -381,7 +381,7 @@
};
// Convenience class wrapping BitMap which provides multiple bits per slot.
-class BitMap2D VALUE_OBJ_CLASS_SPEC {
+class BitMap2D {
public:
typedef BitMap::idx_t idx_t; // Type used for bit and word indices.
typedef BitMap::bm_word_t bm_word_t; // Element type of array that
@@ -427,7 +427,7 @@
// Closure for iterating over BitMaps
-class BitMapClosure VALUE_OBJ_CLASS_SPEC {
+class BitMapClosure {
public:
// Callback when bit in map is set. Should normally return "true";
// return of false indicates that the bitmap iteration should terminate.
--- a/src/hotspot/share/utilities/constantTag.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/constantTag.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,7 @@
};
-class constantTag VALUE_OBJ_CLASS_SPEC {
+class constantTag {
private:
jbyte _tag;
public:
--- a/src/hotspot/share/utilities/elfFile.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/elfFile.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -76,7 +76,7 @@
class ElfFuncDescTable;
// ELF section, may or may not have cached data
-class ElfSection VALUE_OBJ_CLASS_SPEC {
+class ElfSection {
private:
Elf_Shdr _section_hdr;
void* _section_data;
--- a/src/hotspot/share/utilities/fakeRttiSupport.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/fakeRttiSupport.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
// bound on the size of a class hierarchy this utility can be used
// with.
template<typename T, typename TagType>
-class FakeRttiSupport VALUE_OBJ_CLASS_SPEC {
+class FakeRttiSupport {
friend class VMStructs;
public:
// Construct with the indicated concrete tag, and include the
--- a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -230,7 +230,6 @@
// Portability macros
#define PRAGMA_INTERFACE #pragma interface
#define PRAGMA_IMPLEMENTATION #pragma implementation
-#define VALUE_OBJ_CLASS_SPEC
#if (__GNUC__ == 2) && (__GNUC_MINOR__ < 95)
#define TEMPLATE_TABLE_BUG
--- a/src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -240,7 +240,6 @@
#define PRAGMA_INTERFACE
#define PRAGMA_IMPLEMENTATION
#define PRAGMA_IMPLEMENTATION_(arg)
-#define VALUE_OBJ_CLASS_SPEC : public _ValueObj
// Formatting.
#ifdef _LP64
--- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -151,7 +151,6 @@
#define PRAGMA_INTERFACE
#define PRAGMA_IMPLEMENTATION
#define PRAGMA_IMPLEMENTATION_(arg)
-#define VALUE_OBJ_CLASS_SPEC : public _ValueObj
// Formatting.
#define FORMAT64_MODIFIER "I64"
--- a/src/hotspot/share/utilities/globalDefinitions_xlc.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/globalDefinitions_xlc.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -129,7 +129,6 @@
// Portability macros
#define PRAGMA_INTERFACE #pragma interface
#define PRAGMA_IMPLEMENTATION #pragma implementation
-#define VALUE_OBJ_CLASS_SPEC
// Formatting.
#ifdef _LP64
--- a/src/hotspot/share/utilities/internalVMTests.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/internalVMTests.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -44,8 +44,7 @@
run_unit_test(TestReservedSpace_test);
run_unit_test(TestReserveMemorySpecial_test);
run_unit_test(TestVirtualSpace_test);
- run_unit_test(TestMetaspaceAux_test);
- run_unit_test(TestVirtualSpaceNode_test);
+ run_unit_test(TestMetaspaceUtils_test);
run_unit_test(GCTimer_test);
run_unit_test(ObjectMonitor_test);
run_unit_test(DirectivesParser_test);
--- a/src/hotspot/share/utilities/ostream.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/ostream.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "compiler/compileLog.hpp"
+#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/share/utilities/sizes.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/sizes.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
#ifdef ASSERT
-class ByteSize VALUE_OBJ_CLASS_SPEC {
+class ByteSize {
private:
int _size;
@@ -92,7 +92,7 @@
inline int in_bytes(ByteSize x) { return x._size; }
-class WordSize VALUE_OBJ_CLASS_SPEC {
+class WordSize {
private:
int _size;
--- a/src/hotspot/share/utilities/stack.inline.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/stack.inline.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_UTILITIES_STACK_INLINE_HPP
#define SHARE_VM_UTILITIES_STACK_INLINE_HPP
+#include "memory/allocation.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.hpp"
#include "utilities/copy.hpp"
--- a/src/hotspot/share/utilities/ticks.hpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/ticks.hpp Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
class Ticks;
-class Tickspan VALUE_OBJ_CLASS_SPEC {
+class Tickspan {
friend class Ticks;
friend Tickspan operator-(const Ticks& end, const Ticks& start);
@@ -53,7 +53,7 @@
};
-class Ticks VALUE_OBJ_CLASS_SPEC {
+class Ticks {
private:
jlong _stamp_ticks;
--- a/src/hotspot/share/utilities/vmError.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/vmError.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -29,6 +29,7 @@
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/logConfiguration.hpp"
+#include "memory/resourceArea.hpp"
#include "prims/whitebox.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
@@ -773,7 +774,10 @@
if (desc != NULL) {
desc->print_on(st);
Disassembler::decode(desc->begin(), desc->end(), st);
- } else {
+ } else if (_thread != NULL) {
+    // Disassembling an nmethod requires resource memory allocation;
+    // only do so when the current thread is valid.
+ ResourceMark rm(_thread);
Disassembler::decode(cb, st);
st->cr();
}
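
Disassembler::decode(CodeBlob*, outputStream*) allocates from the current
thread's resource area, so it must not run during error reporting unless a
thread is available to own a ResourceMark. A sketch of the idiom as assumed
here (HotSpot-internal API, not compilable on its own):

    void print_decoded(Thread* thread, CodeBlob* cb, outputStream* st) {
      ResourceMark rm(thread);      // opens a resource-allocation scope
      Disassembler::decode(cb, st); // may allocate in the resource area
    }                               // leaving scope frees the allocations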
--- a/src/hotspot/share/utilities/xmlstream.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/src/hotspot/share/utilities/xmlstream.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -31,6 +31,7 @@
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
--- a/src/jdk.hotspot.agent/share/native/libsaproc/sadis.c Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.hotspot.agent/share/native/libsaproc/sadis.c Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,14 @@
#include <errno.h>
#ifdef _WINDOWS
+#define JVM_MAXPATHLEN _MAX_PATH
+#else
+#include <sys/param.h>
+#define JVM_MAXPATHLEN MAXPATHLEN
+#endif
+
+
+#ifdef _WINDOWS
static int getLastErrorString(char *buf, size_t len)
{
long errval;
@@ -112,7 +120,7 @@
const char *error_message = NULL;
const char *jrepath = NULL;
const char *libname = NULL;
- char buffer[128];
+ char buffer[JVM_MAXPATHLEN];
#ifdef _WINDOWS
HINSTANCE hsdis_handle = (HINSTANCE) NULL;
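
The 128-byte buffer was easily exhausted when composing the hsdis library path
from jrepath, so it is resized to the platform's maximum path length. A
self-contained sketch of the safer pattern (build_lib_path and the "/lib/"
layout are illustrative assumptions, not the file's actual logic):

    #include <stdio.h>
    #ifdef _WINDOWS
    #define JVM_MAXPATHLEN _MAX_PATH
    #else
    #include <sys/param.h>
    #define JVM_MAXPATHLEN MAXPATHLEN
    #endif

    /* Format "<jrepath>/lib/<libname>" into a maximum-path-sized buffer and
     * report truncation instead of silently overflowing a fixed 128 bytes. */
    static int build_lib_path(char buf[JVM_MAXPATHLEN],
                              const char* jrepath, const char* libname) {
      int n = snprintf(buf, JVM_MAXPATHLEN, "%s/lib/%s", jrepath, libname);
      return (n >= 0 && n < JVM_MAXPATHLEN) ? 0 : -1; /* -1: did not fit */
    }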
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/stack/StackIntrospection.java Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/stack/StackIntrospection.java Tue Mar 20 04:36:44 2018 +0100
@@ -27,15 +27,16 @@
public interface StackIntrospection {
/**
- * Accesses the current stack, providing {@link InspectedFrame}s to the visitor that can be used
- * to inspect the stack frames' contents. Iteration continues as long as
+ * Walks the current stack, providing {@link InspectedFrame}s to the visitor that can be used to
+     * inspect the stack frames' contents. Iteration continues as long as
* {@link InspectedFrameVisitor#visitFrame}, which is invoked for every {@link InspectedFrame},
- * returns null. Any non-null result of the visitor indicates that frame iteration should stop.
+ * returns {@code null}. A non-null return value from {@link InspectedFrameVisitor#visitFrame}
+ * indicates that frame iteration should stop.
*
- * @param initialMethods if this is non-{@code null}, then the stack trace will start at these
- * methods
- * @param matchingMethods if this is non-{@code null}, then only matching stack frames are
- * returned
+ * @param initialMethods if this is non-{@code null}, then the stack walk will start at the
+ * first frame whose method is one of these methods.
+ * @param matchingMethods if this is non-{@code null}, then only frames whose methods are in
+     *            this array are visited.
* @param initialSkip the number of matching methods to skip (including the initial method)
* @param visitor the visitor that is called for every matching method
* @return the last result returned by the visitor (which is non-null to indicate that iteration
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.code.InvalidInstalledCodeException;
import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.code.stack.InspectedFrameVisitor;
import jdk.vm.ci.common.InitTimer;
import jdk.vm.ci.common.JVMCIError;
import jdk.vm.ci.meta.JavaType;
@@ -514,13 +515,9 @@
native String getSymbol(long metaspaceSymbol);
/**
- * Looks for the next Java stack frame matching an entry in {@code methods}.
- *
- * @param frame the starting point of the search, where {@code null} refers to the topmost frame
- * @param methods the methods to look for, where {@code null} means that any frame is returned
- * @return the frame, or {@code null} if the end of the stack was reached during the search
+ * @see jdk.vm.ci.code.stack.StackIntrospection#iterateFrames
*/
- native HotSpotStackFrameReference getNextStackFrame(HotSpotStackFrameReference frame, ResolvedJavaMethod[] methods, int initialSkip);
+ native <T> T iterateFrames(ResolvedJavaMethod[] initialMethods, ResolvedJavaMethod[] matchingMethods, int initialSkip, InspectedFrameVisitor<T> visitor);
/**
* Materializes all virtual objects within {@code stackFrame} and updates its locals.
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,6 +71,12 @@
static {
try (InitTimer t = timer("HotSpotJVMCIRuntime.<init>")) {
instance = new HotSpotJVMCIRuntime();
+
+ // Can only do eager initialization of the JVMCI compiler
+ // once the singleton instance is available.
+ if (instance.config.getFlag("EagerJVMCI", Boolean.class)) {
+ instance.getCompiler();
+ }
}
}
}
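
EagerJVMCI creates and initializes the JVMCI compiler during runtime startup
instead of on the first compilation request, moving the initialization cost
out of the first compiles. Hedged usage sketch, assuming the usual JVMCI
unlock flags:

    java -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI -XX:+EagerJVMCI ...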
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotStackFrameReference.java Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotStackFrameReference.java Tue Mar 20 04:36:44 2018 +0100
@@ -30,6 +30,8 @@
public class HotSpotStackFrameReference implements InspectedFrame {
private CompilerToVM compilerToVM;
+ // set in the VM when materializeVirtualObjects is called
+ @SuppressWarnings("unused") private boolean objectsMaterialized;
// information used to find the stack frame
private long stackPointer;
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotStackIntrospection.java Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotStackIntrospection.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,14 +37,6 @@
@Override
public <T> T iterateFrames(ResolvedJavaMethod[] initialMethods, ResolvedJavaMethod[] matchingMethods, int initialSkip, InspectedFrameVisitor<T> visitor) {
CompilerToVM compilerToVM = runtime.getCompilerToVM();
- HotSpotStackFrameReference current = compilerToVM.getNextStackFrame(null, initialMethods, initialSkip);
- while (current != null) {
- T result = visitor.visitFrame(current);
- if (result != null) {
- return result;
- }
- current = compilerToVM.getNextStackFrame(current, matchingMethods, 0);
- }
- return null;
+ return compilerToVM.iterateFrames(initialMethods, matchingMethods, initialSkip, visitor);
}
}
--- a/src/jdk.jcmd/share/classes/sun/tools/jstat/resources/jstat_options Fri Mar 23 11:14:43 2018 -0700
+++ b/src/jdk.jcmd/share/classes/sun/tools/jstat/resources/jstat_options Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -255,8 +255,24 @@
format "0.000"
}
column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+  align right
+ width 5
+ scale raw
+ format "0"
+ }
+ column {
+ header "^CGCT^" /* Concurrent Garbage Collection Time (STW phase) */
+ data sun.gc.collector.2.time/sun.os.hrt.frequency
+ align right
+ width 8
+ scale sec
+ format "0.000"
+ }
+ column {
header "^GCT^" /* Total Garbage Collection Time */
- data (sun.gc.collector.0.time + sun.gc.collector.1.time)/sun.os.hrt.frequency
+ data (sun.gc.collector.0.time + sun.gc.collector.1.time + sun.gc.collector.2.time)/sun.os.hrt.frequency
align right
width 8
scale sec
@@ -408,6 +424,14 @@
scale raw
format "0"
}
+ column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
}
option gccause {
@@ -491,8 +515,24 @@
format "0.000"
}
column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
+ column {
+ header "^CGCT^" /* Concurrent Garbage Collection Time (STW phase) */
+ data sun.gc.collector.2.time/sun.os.hrt.frequency
+ align right
+ width 8
+ scale sec
+ format "0.000"
+ }
+ column {
header "^GCT^" /* Total Garbage Collection Time */
- data (sun.gc.collector.0.time + sun.gc.collector.1.time)/sun.os.hrt.frequency
+ data (sun.gc.collector.0.time + sun.gc.collector.1.time + sun.gc.collector.2.time)/sun.os.hrt.frequency
align right
width 8
scale sec
@@ -690,6 +730,14 @@
scale raw
format "0"
}
+ column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
}
option gcold {
@@ -765,8 +813,24 @@
format "0.000"
}
column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
+ column {
+ header "^CGCT^" /* Concurrent Garbage Collection Time (STW phase) */
+ data sun.gc.collector.2.time/sun.os.hrt.frequency
+ align right
+ width 8
+ scale sec
+ format "0.000"
+ }
+ column {
header "^GCT^" /* Total Garbage Collection Time */
- data (sun.gc.collector.0.time + sun.gc.collector.1.time)/sun.os.hrt.frequency
+ data (sun.gc.collector.0.time + sun.gc.collector.1.time + sun.gc.collector.2.time)/sun.os.hrt.frequency
align right
width 8
scale sec
@@ -831,8 +895,24 @@
format "0.000"
}
column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
+ column {
+ header "^CGCT^" /* Concurrent Garbage Collection Time (STW phase) */
+ data sun.gc.collector.2.time/sun.os.hrt.frequency
+ align right
+ width 8
+ scale sec
+ format "0.000"
+ }
+ column {
header "^GCT^" /* Total Garbage Collection Time */
- data (sun.gc.collector.0.time + sun.gc.collector.1.time)/sun.os.hrt.frequency
+ data (sun.gc.collector.0.time + sun.gc.collector.1.time + sun.gc.collector.2.time)/sun.os.hrt.frequency
align right
width 8
scale sec
@@ -913,8 +993,24 @@
format "0.000"
}
column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
+ column {
+ header "^CGCT^" /* Concurrent Garbage Collection Time (STW phase) */
+ data sun.gc.collector.2.time/sun.os.hrt.frequency
+ align right
+ width 8
+ scale sec
+ format "0.000"
+ }
+ column {
header "^GCT^" /* Total Garbage Collection Time */
- data (sun.gc.collector.0.time + sun.gc.collector.1.time)/sun.os.hrt.frequency
+ data (sun.gc.collector.0.time + sun.gc.collector.1.time + sun.gc.collector.2.time)/sun.os.hrt.frequency
align right
width 8
scale sec
@@ -1003,8 +1099,24 @@
format "0.000"
}
column {
+ header "^CGC^" /* Concurrent Collections (STW phase) */
+ data sun.gc.collector.2.invocations
+ align right
+ width 5
+ scale raw
+ format "0"
+ }
+ column {
+ header "^CGCT^" /* Concurrent Garbage Collection Time (STW phase) */
+ data sun.gc.collector.2.time/sun.os.hrt.frequency
+ align right
+ width 8
+ scale sec
+ format "0.000"
+ }
+ column {
header "^GCT^" /* Total Garbage Collection Time */
- data (sun.gc.collector.0.time + sun.gc.collector.1.time)/sun.os.hrt.frequency
+ data (sun.gc.collector.0.time + sun.gc.collector.1.time + sun.gc.collector.2.time)/sun.os.hrt.frequency
align right
width 8
scale sec
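
Taken together, the hunks above add a CGC column (number of stop-the-world pauses
of concurrent collections, read from the sun.gc.collector.2 counters) to every
option that lists collector invocations, add a CGCT column wherever collection
times are printed, and fold collector 2 into the GCT total. An illustrative tail
of the -gc output after this change (figures invented; whether the sun.gc.collector.2
counters are populated depends on the GC in use):

    $ jstat -gc <pid>
    ...   YGC     YGCT    FGC    FGCT    CGC    CGCT     GCT
    ...    24    0.312      2   0.210      4   0.046   0.568
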
--- a/test/hotspot/gtest/gc/shared/test_oopStorage.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/gtest/gc/shared/test_oopStorage.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -530,7 +530,7 @@
}
#endif // DISABLE_GARBAGE_ALLOCATION_STATUS_TESTS
-class OopStorageTest::CountingIterateClosure VALUE_OBJ_CLASS_SPEC {
+class OopStorageTest::CountingIterateClosure {
public:
size_t _const_count;
size_t _const_non_null;
@@ -672,7 +672,7 @@
const unsigned char OopStorageTestIteration::mark_const;
const unsigned char OopStorageTestIteration::mark_non_const;
-class OopStorageTestIteration::VerifyState VALUE_OBJ_CLASS_SPEC {
+class OopStorageTestIteration::VerifyState {
public:
unsigned char _expected_mark;
const oop* const* _entries;
@@ -742,7 +742,7 @@
}
};
-class OopStorageTestIteration::VerifyFn VALUE_OBJ_CLASS_SPEC {
+class OopStorageTestIteration::VerifyFn {
public:
VerifyFn(VerifyState* state, uint worker_id = 0) :
_state(state),
@@ -762,7 +762,7 @@
uint _worker_id;
};
-class OopStorageTestIteration::VerifyClosure VALUE_OBJ_CLASS_SPEC {
+class OopStorageTestIteration::VerifyClosure {
public:
VerifyClosure(VerifyState* state, uint worker_id = 0) :
_state(state),
--- a/test/hotspot/gtest/logging/logTestFixture.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/gtest/logging/logTestFixture.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -27,6 +27,7 @@
#include "logTestUtils.inline.hpp"
#include "logging/logConfiguration.hpp"
#include "logging/logOutput.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "unittest.hpp"
#include "utilities/ostream.hpp"
--- a/test/hotspot/gtest/memory/test_chunkManager.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, 2017 Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-
-// The test function is only available in debug builds
-#ifdef ASSERT
-
-#include "unittest.hpp"
-
-void ChunkManager_test_list_index();
-
-TEST(ChunkManager, list_index) {
- // The ChunkManager is only available in metaspace.cpp,
- // so the test code is located in that file.
- ChunkManager_test_list_index();
-
-}
-
-extern void* setup_chunkmanager_returntests();
-extern void teardown_chunkmanager_returntests(void*);
-extern void run_chunkmanager_returntests(void* p, float phase_length_factor);
-
-class ChunkManagerReturnTest : public ::testing::Test {
-protected:
- void* _test;
- virtual void SetUp() {
- _test = setup_chunkmanager_returntests();
- }
- virtual void TearDown() {
- teardown_chunkmanager_returntests(_test);
- }
-};
-
-TEST_VM_F(ChunkManagerReturnTest, test00) { run_chunkmanager_returntests(_test, 0.0f); }
-TEST_VM_F(ChunkManagerReturnTest, test05) { run_chunkmanager_returntests(_test, 0.5f); }
-TEST_VM_F(ChunkManagerReturnTest, test10) { run_chunkmanager_returntests(_test, 1.0f); }
-
-#endif // ASSERT
--- a/test/hotspot/gtest/memory/test_metachunk.cpp Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/gtest/memory/test_metachunk.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -41,11 +41,16 @@
};
TEST(Metachunk, basic) {
- size_t size = 2 * 1024 * 1024;
- void* memory = malloc(size);
+ const ChunkIndex chunk_type = MediumIndex;
+ const bool is_class = false;
+ const size_t word_size = get_size_for_nonhumongous_chunktype(chunk_type, is_class);
+ // Allocate the chunk with correct alignment.
+ void* memory = malloc(word_size * BytesPerWord * 2);
ASSERT_TRUE(NULL != memory) << "Failed to malloc 2MB";
- Metachunk* metachunk = ::new (memory) Metachunk(size / BytesPerWord, NULL);
+ void* p_placement = align_up(memory, word_size * BytesPerWord);
+
+ Metachunk* metachunk = ::new (p_placement) Metachunk(chunk_type, is_class, word_size, NULL);
EXPECT_EQ((MetaWord*) metachunk, metachunk->bottom());
EXPECT_EQ((uintptr_t*) metachunk + metachunk->size(), metachunk->end());
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/memory/test_metaspace_allocation.cpp Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspace.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+#include "unittest.hpp"
+
+#define NUM_PARALLEL_METASPACES 50
+#define MAX_PER_METASPACE_ALLOCATION_WORDSIZE (512 * K)
+
+//#define DEBUG_VERBOSE true
+
+#ifdef DEBUG_VERBOSE
+
+struct chunkmanager_statistics_t {
+ int num_specialized_chunks;
+ int num_small_chunks;
+ int num_medium_chunks;
+ int num_humongous_chunks;
+};
+
+extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out);
+
+static void print_chunkmanager_statistics(outputStream* st, Metaspace::MetadataType mdType) {
+ chunkmanager_statistics_t stat;
+ test_metaspace_retrieve_chunkmanager_statistics(mdType, &stat);
+ st->print_cr("free chunks: %d / %d / %d / %d", stat.num_specialized_chunks, stat.num_small_chunks,
+ stat.num_medium_chunks, stat.num_humongous_chunks);
+}
+
+#endif
+
+struct chunk_geometry_t {
+ size_t specialized_chunk_word_size;
+ size_t small_chunk_word_size;
+ size_t medium_chunk_word_size;
+};
+
+extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out);
+
+
+class MetaspaceAllocationTest : public ::testing::Test {
+protected:
+
+ struct {
+ size_t allocated;
+ Mutex* lock;
+ ClassLoaderMetaspace* space;
+ bool is_empty() const { return allocated == 0; }
+ bool is_full() const { return allocated >= MAX_PER_METASPACE_ALLOCATION_WORDSIZE; }
+ } _spaces[NUM_PARALLEL_METASPACES];
+
+ chunk_geometry_t _chunk_geometry;
+
+ virtual void SetUp() {
+ ::memset(_spaces, 0, sizeof(_spaces));
+ test_metaspace_retrieve_chunk_geometry(Metaspace::NonClassType, &_chunk_geometry);
+ }
+
+ virtual void TearDown() {
+ for (int i = 0; i < NUM_PARALLEL_METASPACES; i ++) {
+ if (_spaces[i].space != NULL) {
+ delete _spaces[i].space;
+ delete _spaces[i].lock;
+ }
+ }
+ }
+
+ void create_space(int i) {
+ assert(i >= 0 && i < NUM_PARALLEL_METASPACES, "Sanity");
+ assert(_spaces[i].space == NULL && _spaces[i].allocated == 0, "Sanity");
+ if (_spaces[i].lock == NULL) {
+ _spaces[i].lock = new Mutex(Monitor::native, "gtest-MetaspaceAllocationTest-lock", false, Monitor::_safepoint_check_never);
+ ASSERT_TRUE(_spaces[i].lock != NULL);
+ }
+ // Let every ~10th space be an anonymous one to test different allocation patterns.
+ const Metaspace::MetaspaceType msType = (os::random() % 100 < 10) ?
+ Metaspace::AnonymousMetaspaceType : Metaspace::StandardMetaspaceType;
+ _spaces[i].space = new ClassLoaderMetaspace(_spaces[i].lock, msType);
+ _spaces[i].allocated = 0;
+ ASSERT_TRUE(_spaces[i].space != NULL);
+ }
+
+  // Returns the index of a random space in [0..metaspaces) whose fill grade
+  // matches the requested one (empty, non-empty or full).
+  // Returns -1 if no matching space exists.
+ enum fillgrade { fg_empty, fg_non_empty, fg_full };
+ int get_random_matching_space(int metaspaces, fillgrade fg) {
+ const int start_index = os::random() % metaspaces;
+ int i = start_index;
+ do {
+ if (fg == fg_empty && _spaces[i].is_empty()) {
+ return i;
+ } else if ((fg == fg_full && _spaces[i].is_full()) ||
+ (fg == fg_non_empty && !_spaces[i].is_full() && !_spaces[i].is_empty())) {
+ return i;
+ }
+ i ++;
+ if (i == metaspaces) {
+ i = 0;
+ }
+ } while (i != start_index);
+ return -1;
+ }
+
+  int get_random_empty_space(int metaspaces) { return get_random_matching_space(metaspaces, fg_empty); }
+  int get_random_non_empty_space(int metaspaces) { return get_random_matching_space(metaspaces, fg_non_empty); }
+ int get_random_full_space(int metaspaces) { return get_random_matching_space(metaspaces, fg_full); }
+
+ void do_test(Metaspace::MetadataType mdType, int metaspaces, int phases, int allocs_per_phase,
+ float probability_for_large_allocations // 0.0-1.0
+ ) {
+    // Alternate between breathing in (allocating n blocks for a random Metaspace) and
+    // breathing out (deleting a random Metaspace). The intent is to stress the coalescing
+    // and splitting of free chunks.
+ int phases_done = 0;
+ bool allocating = true;
+ while (phases_done < phases) {
+ bool force_switch = false;
+ if (allocating) {
+ // Allocate space from metaspace, with a preference for completely empty spaces. This
+ // should provide a good mixture of metaspaces in the virtual space.
+        int index = get_random_empty_space(metaspaces);
+ if (index == -1) {
+          index = get_random_non_empty_space(metaspaces);
+ }
+ if (index == -1) {
+ // All spaces are full, switch to freeing.
+ force_switch = true;
+ } else {
+          // Create the space if it does not yet exist.
+ if (_spaces[index].space == NULL) {
+ create_space(index);
+ }
+ // Allocate a bunch of blocks from it. Mostly small stuff but mix in large allocations
+ // to force humongous chunk allocations.
+ int allocs_done = 0;
+ while (allocs_done < allocs_per_phase && !_spaces[index].is_full()) {
+ size_t size = 0;
+ int r = os::random() % 1000;
+ if ((float)r < probability_for_large_allocations * 1000.0) {
+ size = (os::random() % _chunk_geometry.medium_chunk_word_size) + _chunk_geometry.medium_chunk_word_size;
+ } else {
+ size = os::random() % 64;
+ }
+ MetaWord* const p = _spaces[index].space->allocate(size, mdType);
+ if (p == NULL) {
+            // We most likely hit the metaspace "until-gc" limit.
+#ifdef DEBUG_VERBOSE
+ tty->print_cr("OOM for " SIZE_FORMAT " words. ", size);
+#endif
+ // Just switch to deallocation and resume tests.
+ force_switch = true;
+ break;
+ } else {
+ _spaces[index].allocated += size;
+ allocs_done ++;
+ }
+ }
+ }
+ } else {
+        // Freeing: find a metaspace and delete it, with a preference for completely filled spaces.
+ int index = get_random_full_space(metaspaces);
+ if (index == -1) {
+          index = get_random_non_empty_space(metaspaces);
+ }
+ if (index == -1) {
+ force_switch = true;
+ } else {
+ assert(_spaces[index].space != NULL && _spaces[index].allocated > 0, "Sanity");
+ delete _spaces[index].space;
+ _spaces[index].space = NULL;
+ _spaces[index].allocated = 0;
+ }
+ }
+
+ if (force_switch) {
+ allocating = !allocating;
+ } else {
+        // Periodically switch between allocating and freeing, but prefer allocating
+        // because we want to intermingle allocations of multiple metaspaces.
+ allocating = os::random() % 5 < 4;
+ }
+ phases_done ++;
+#ifdef DEBUG_VERBOSE
+ int metaspaces_in_use = 0;
+ size_t total_allocated = 0;
+ for (int i = 0; i < metaspaces; i ++) {
+ if (_spaces[i].allocated > 0) {
+ total_allocated += _spaces[i].allocated;
+ metaspaces_in_use ++;
+ }
+ }
+ tty->print("%u:\tspaces: %d total words: " SIZE_FORMAT "\t\t\t", phases_done, metaspaces_in_use, total_allocated);
+ print_chunkmanager_statistics(tty, mdType);
+#endif
+ }
+#ifdef DEBUG_VERBOSE
+ tty->print_cr("Test finished. ");
+ MetaspaceUtils::print_metaspace_map(tty, mdType);
+ print_chunkmanager_statistics(tty, mdType);
+#endif
+ }
+};
+
+
+
+TEST_F(MetaspaceAllocationTest, chunk_geometry) {
+ ASSERT_GT(_chunk_geometry.specialized_chunk_word_size, (size_t) 0);
+ ASSERT_GT(_chunk_geometry.small_chunk_word_size, _chunk_geometry.specialized_chunk_word_size);
+ ASSERT_EQ(_chunk_geometry.small_chunk_word_size % _chunk_geometry.specialized_chunk_word_size, (size_t)0);
+ ASSERT_GT(_chunk_geometry.medium_chunk_word_size, _chunk_geometry.small_chunk_word_size);
+ ASSERT_EQ(_chunk_geometry.medium_chunk_word_size % _chunk_geometry.small_chunk_word_size, (size_t)0);
+}
+
+
+TEST_VM_F(MetaspaceAllocationTest, single_space_nonclass) {
+ do_test(Metaspace::NonClassType, 1, 1000, 100, 0);
+}
+
+TEST_VM_F(MetaspaceAllocationTest, single_space_class) {
+ do_test(Metaspace::ClassType, 1, 1000, 100, 0);
+}
+
+TEST_VM_F(MetaspaceAllocationTest, multi_space_nonclass) {
+ do_test(Metaspace::NonClassType, NUM_PARALLEL_METASPACES, 100, 1000, 0.0);
+}
+
+TEST_VM_F(MetaspaceAllocationTest, multi_space_class) {
+ do_test(Metaspace::ClassType, NUM_PARALLEL_METASPACES, 100, 1000, 0.0);
+}
+
+TEST_VM_F(MetaspaceAllocationTest, multi_space_nonclass_2) {
+  // Many metaspaces, with humongous chunks mixed in.
+ do_test(Metaspace::NonClassType, NUM_PARALLEL_METASPACES, 100, 1000, .006f);
+}
+
--- a/test/hotspot/gtest/runtime/test_threadstack_tracking.cpp Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-
-// Included early because the NMT flags don't include it.
-#include "utilities/macros.hpp"
-
-#include "runtime/thread.hpp"
-#include "services/memTracker.hpp"
-#include "services/virtualMemoryTracker.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "unittest.hpp"
-
-
-class ThreadStackTrackingTest {
-public:
- static void test() {
- VirtualMemoryTracker::initialize(NMT_detail);
- VirtualMemoryTracker::late_initialize(NMT_detail);
-
- Thread* thr = Thread::current();
- address stack_end = thr->stack_end();
- size_t stack_size = thr->stack_size();
-
- MemTracker::record_thread_stack(stack_end, stack_size);
-
- VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
-
- // snapshot current stack usage
- VirtualMemoryTracker::snapshot_thread_stacks();
-
- ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
- ASSERT_TRUE(rmr != NULL);
-
- ASSERT_EQ(rmr->base(), stack_end);
- ASSERT_EQ(rmr->size(), stack_size);
-
- CommittedRegionIterator iter = rmr->iterate_committed_regions();
- int i = 0;
- address i_addr = (address)&i;
-
- // stack grows downward
- address stack_top = stack_end + stack_size;
- bool found_stack_top = false;
-
- for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
- if (region->base() + region->size() == stack_top) {
- // This should be active part, "i" should be here
- ASSERT_TRUE(i_addr < stack_top && i_addr >= region->base());
- ASSERT_TRUE(region->size() <= stack_size);
- found_stack_top = true;
- }
-
- i++;
- }
-
- // NMT was not turned on when the thread was created, so we don't have guard pages
- ASSERT_TRUE(i == 1);
- ASSERT_TRUE(found_stack_top);
- }
-};
-
-TEST_VM(VirtualMemoryTracker, thread_stack_tracking) {
- ThreadStackTrackingTest::test();
-}
--- a/test/hotspot/jtreg/ProblemList.txt Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/ProblemList.txt Tue Mar 20 04:36:44 2018 +0100
@@ -80,9 +80,13 @@
# :hotspot_serviceability
-serviceability/jdwp/AllModulesCommandTest.java 8170541 generic-all
serviceability/sa/TestRevPtrsForInvokeDynamic.java 8191270 generic-all
serviceability/sa/sadebugd/SADebugDTest.java 8163805 generic-all
+serviceability/tmtools/jstat/GcTest01.java 8199519 generic-all
+serviceability/tmtools/jstat/GcTest02.java 8199519 generic-all
+serviceability/tmtools/jstat/GcCauseTest01.java 8199519 generic-all
+serviceability/tmtools/jstat/GcCauseTest02.java 8199519 generic-all
+serviceability/tmtools/jstat/GcCauseTest03.java 8199519 generic-all
#############################################################################
--- a/test/hotspot/jtreg/TEST.groups Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/TEST.groups Tue Mar 20 04:36:44 2018 +0100
@@ -111,6 +111,7 @@
compiler/types/ \
compiler/uncommontrap/ \
compiler/unsafe/ \
+ compiler/vectorization/ \
-compiler/intrinsics/bmi \
-compiler/intrinsics/mathexact \
-compiler/intrinsics/sha \
@@ -125,6 +126,14 @@
-:tier1_compiler_2 \
-:tier1_compiler_3 \
+ctw_1 = \
+ applications/ctw/modules/ \
+ -:ctw_2
+
+ctw_2 = \
+ applications/ctw/modules/java_base.java \
+ applications/ctw/modules/java_desktop.java
+
tier1_gc = \
:tier1_gc_1 \
:tier1_gc_2 \
--- a/test/hotspot/jtreg/compiler/aot/TEST.properties Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-# TODO: remove as soon as JIB supports concurrent installations
-exclusiveAccess.dirs=.
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/aot/fingerprint/CDSDumper.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package compiler.aot.fingerprint;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+// Usage:
+// java CDSDumper <classpath> <classlist> <archive> <class1> <class2> ...
+public class CDSDumper {
+ public static void main(String[] args) throws Exception {
+ String classpath = args[0];
+ String classlist = args[1];
+ String archive = args[2];
+
+ // Prepare the classlist
+ FileOutputStream fos = new FileOutputStream(classlist);
+ PrintStream ps = new PrintStream(fos);
+
+        for (int i = 3; i < args.length; i++) {
+ ps.println(args[i].replace('.', '/'));
+ }
+ ps.close();
+ fos.close();
+
+ // Dump the archive
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:+IgnoreUnrecognizedVMOptions",
+ "-XX:+UnlockCommercialFeatures",
+ "-XX:+UseAppCDS",
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-cp", classpath,
+ "-XX:ExtraSharedClassListFile=" + classlist,
+ "-XX:SharedArchiveFile=" + archive,
+ "-Xshare:dump",
+ "-Xlog:cds");
+
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Loading classes to share");
+ output.shouldHaveExitValue(0);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/aot/fingerprint/CDSRunner.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package compiler.aot.fingerprint;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+// Usage:
+// java CDSRunner <vmargs> <class> <args> ...
+public class CDSRunner {
+ public static void main(String[] args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ System.out.println("[stdout = " + output.getStdout() + "]");
+ System.out.println("[stderr = " + output.getStderr() + "]");
+
+ output.shouldContain("PASSED");
+ output.shouldHaveExitValue(0);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/aot/fingerprint/SelfChanged.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary AOT methods should be swept if a super class has changed.
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @requires vm.aot
+ * @build compiler.aot.fingerprint.SelfChanged
+ * compiler.aot.AotCompiler
+ *
+ * @run main
+ * compiler.aot.fingerprint.SelfChanged WRITE-UNMODIFIED-CLASS
+ * @run driver compiler.aot.AotCompiler -libname libSelfChanged.so
+ * -class compiler.aot.fingerprint.Blah
+ *
+ * @run main/othervm
+ * compiler.aot.fingerprint.SelfChanged TEST-UNMODIFIED
+ * @run main/othervm -XX:+UseAOT -XX:+PrintAOT -XX:AOTLibrary=./libSelfChanged.so
+ * -Xlog:aot+class+fingerprint=trace -Xlog:aot+class+load=trace
+ * compiler.aot.fingerprint.SelfChanged TEST-UNMODIFIED
+ *
+ * @run main
+ * compiler.aot.fingerprint.SelfChanged WRITE-MODIFIED-CLASS
+ * @run main
+ * compiler.aot.fingerprint.SelfChanged TEST-MODIFIED
+ * @run main/othervm -XX:+UseAOT -XX:+PrintAOT -XX:AOTLibrary=./libSelfChanged.so
+ * -Xlog:aot+class+fingerprint=trace -Xlog:aot+class+load=trace
+ * compiler.aot.fingerprint.SelfChanged TEST-MODIFIED
+ */
+
+package compiler.aot.fingerprint;
+
+import jdk.test.lib.Asserts;
+import jdk.test.lib.compiler.InMemoryJavaCompiler;
+
+import java.io.*;
+
+class Blah {
+ volatile int z;
+ int getX() {
+ for (z = 0; z < 10000; z++) {
+ if (z % 7 == 1) {
+ z += 2;
+ }
+ }
+ return 0;
+ }
+}
+
+public class SelfChanged {
+ public static void main(String args[]) throws Throwable {
+ Blah f = new Blah();
+ System.out.println("f.getX = " + f.getX());
+ switch (args[0]) {
+ case "WRITE-UNMODIFIED-CLASS":
+ compileClass(false);
+ break;
+ case "WRITE-MODIFIED-CLASS":
+ compileClass(true);
+ break;
+ case "TEST-UNMODIFIED":
+ Asserts.assertTrue(f.getX() == 0, "getX from unmodified Blah class should return 0");
+ break;
+ case "TEST-MODIFIED":
+ Asserts.assertTrue(f.getX() == 1, "getX from modified Blah class should return 1");
+ break;
+ default:
+ throw new RuntimeException("unexpected option: " + args[0]);
+ }
+ }
+
+ static void compileClass(boolean isModified) throws Throwable {
+ String src =
+ "package compiler.aot.fingerprint;"
+ + "public class Blah {"
+ + " volatile int z;"
+ + " int getX() {"
+ + " for (z = 0; z < 10000; z++) {"
+ + " if (z % 7 == 1) {"
+ + " z += 2;"
+ + " }"
+ + " }"
+ + " return " + ((isModified) ? "1" : "0") + ";"
+ + " }"
+ + " int getY() {return 255;}"
+
+ // The following is for the SelfChangedCDS.java test case. We always load an unmodified
+ // version of Blah from the CDS archive. However, we would load an AOT library that
+ // was compiled using a modified version of Blah. The getX method in this AOT library should
+ // not be used.
+
+ + " public static void main(String args[]) {"
+ + " Blah b = new Blah();"
+ + " int n = b.getX();"
+ + " if (n != 0) {"
+ + " throw new RuntimeException(args[0] + \" : \" + n);"
+ + " }"
+ + " System.out.println(\"PASSED\");"
+ + " }"
+ + "}";
+
+ String filename = System.getProperty("test.classes") + "/compiler/aot/fingerprint/Blah.class";
+ FileOutputStream fos = new FileOutputStream(filename);
+ fos.write(InMemoryJavaCompiler.compile("compiler.aot.fingerprint.Blah", src));
+ fos.close();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/aot/fingerprint/SelfChangedCDS.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary AOT methods should be swept if a super class has changed (with CDS).
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @requires vm.aot
+ * @build compiler.aot.fingerprint.SelfChanged
+ * compiler.aot.AotCompiler
+ *
+ * @run main compiler.aot.fingerprint.SelfChanged WRITE-UNMODIFIED-CLASS
+ * @run driver compiler.aot.AotCompiler -libname libSelfChanged.so
+ * -class compiler.aot.fingerprint.Blah
+ *
+ * @run driver ClassFileInstaller -jar SelfChangedCDS.jar compiler.aot.fingerprint.Blah
+ * @run main compiler.aot.fingerprint.CDSDumper SelfChangedCDS.jar SelfChangedCDS.classlist SelfChangedCDS.jsa
+ * compiler.aot.fingerprint.Blah
+ *
+ * @run main compiler.aot.fingerprint.CDSRunner -cp SelfChangedCDS.jar
+ * compiler.aot.fingerprint.Blah TEST-UNMODIFIED
+ * @run main compiler.aot.fingerprint.CDSRunner -cp SelfChangedCDS.jar
+ * -XX:+UseAOT -XX:+PrintAOT -XX:AOTLibrary=./libSelfChanged.so
+ * -XX:+UnlockDiagnosticVMOptions -XX:SharedArchiveFile=SelfChangedCDS.jsa
+ * -XX:+IgnoreUnrecognizedVMOptions
+ * -Xshare:auto -XX:+UnlockCommercialFeatures -XX:+UseAppCDS -showversion
+ * -Xlog:aot+class+fingerprint=trace -Xlog:aot+class+load=trace
+ * compiler.aot.fingerprint.Blah TEST-UNMODIFIED
+ *
+ * @run main
+ * compiler.aot.fingerprint.SelfChanged WRITE-MODIFIED-CLASS
+ * @run driver compiler.aot.AotCompiler -libname libSelfChanged.so
+ * -class compiler.aot.fingerprint.Blah
+ *
+ * @run main compiler.aot.fingerprint.CDSRunner -cp SelfChangedCDS.jar
+ * compiler.aot.fingerprint.Blah TEST-MODIFIED
+ * @run main compiler.aot.fingerprint.CDSRunner -cp SelfChangedCDS.jar
+ * -XX:+UseAOT -XX:+PrintAOT -XX:AOTLibrary=./libSelfChanged.so
+ * -XX:+UnlockDiagnosticVMOptions -XX:SharedArchiveFile=SelfChangedCDS.jsa
+ * -XX:+IgnoreUnrecognizedVMOptions
+ * -Xshare:auto -XX:+UnlockCommercialFeatures -XX:+UseAppCDS -showversion
+ * -Xlog:aot+class+fingerprint=trace -Xlog:aot+class+load=trace
+ * compiler.aot.fingerprint.Blah TEST-MODIFIED
+ */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/aot/fingerprint/SuperChanged.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary AOT methods should be swept if a super class has changed.
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @requires vm.aot
+ * @build compiler.aot.fingerprint.SuperChanged
+ * compiler.aot.AotCompiler
+ *
+ * @run main
+ * compiler.aot.fingerprint.SuperChanged WRITE-UNMODIFIED-CLASS
+ * @run driver compiler.aot.AotCompiler -libname libSuperChanged.so
+ * -class compiler.aot.fingerprint.Foo
+ *
+ * @run main
+ * compiler.aot.fingerprint.SuperChanged TEST-UNMODIFIED
+ * @run main/othervm -XX:+UseAOT -XX:+PrintAOT -XX:AOTLibrary=./libSuperChanged.so
+ * -Xlog:aot+class+fingerprint=trace -Xlog:aot+class+load=trace
+ * compiler.aot.fingerprint.SuperChanged TEST-UNMODIFIED
+ *
+ * @run main
+ * compiler.aot.fingerprint.SuperChanged WRITE-MODIFIED-CLASS
+ * @run main
+ * compiler.aot.fingerprint.SuperChanged TEST-MODIFIED
+ * @run main/othervm -XX:+UseAOT -XX:+PrintAOT -XX:AOTLibrary=./libSuperChanged.so
+ * -Xlog:aot+class+fingerprint=trace -Xlog:aot+class+load=trace
+ * compiler.aot.fingerprint.SuperChanged TEST-MODIFIED
+ */
+
+package compiler.aot.fingerprint;
+
+import jdk.test.lib.Asserts;
+import jdk.test.lib.compiler.InMemoryJavaCompiler;
+
+import java.io.*;
+
+class Bar {
+ volatile int x = 0;
+ volatile int y = 1;
+}
+
+class Foo extends Bar {
+
+ volatile int z;
+ int getX() {
+ for (z = 0; z < 10000; z++) {
+ if (z % 7 == 1) {
+ z += 2;
+ }
+ }
+ return x;
+ }
+}
+
+public class SuperChanged {
+ public static void main(String args[]) throws Throwable {
+ Foo f = new Foo();
+ System.out.println("f.getX = " + f.getX());
+ switch (args[0]) {
+ case "WRITE-UNMODIFIED-CLASS":
+ compileClass(false);
+ break;
+ case "WRITE-MODIFIED-CLASS":
+ compileClass(true);
+ break;
+ case "TEST-UNMODIFIED":
+ Asserts.assertTrue(f.getX() == 0, "getX from unmodified Foo class should return 0");
+ break;
+ case "TEST-MODIFIED":
+ Asserts.assertTrue(f.getX() == 1, "getX from modified Foo class should return 1");
+ break;
+ default:
+ throw new RuntimeException("unexpected option: " + args[0]);
+ }
+ }
+
+ static void compileClass(boolean isModified) throws Throwable {
+ String class_src_0 = "package compiler.aot.fingerprint; class Bar {volatile int x = 0; volatile int y = 1;}";
+ String class_src_1 = "package compiler.aot.fingerprint; class Bar {volatile int y = 0; volatile int x = 1;}";
+ String src = (isModified) ? class_src_1 : class_src_0;
+
+ String filename = System.getProperty("test.classes") + "/compiler/aot/fingerprint/Bar.class";
+ FileOutputStream fos = new FileOutputStream(filename);
+ fos.write(InMemoryJavaCompiler.compile("compiler.aot.fingerprint.Bar", src));
+ fos.close();
+ }
+}
--- a/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.code.InvalidInstalledCodeException;
import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.code.stack.InspectedFrameVisitor;
import jdk.vm.ci.meta.ConstantPool;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import java.lang.reflect.Executable;
@@ -258,10 +259,12 @@
return CTVM.getSymbol(metaspaceSymbol);
}
- public static HotSpotStackFrameReference getNextStackFrame(
- HotSpotStackFrameReference frame,
- ResolvedJavaMethod[] methods, int initialSkip) {
- return CTVM.getNextStackFrame(frame, methods, initialSkip);
+ public static <T> T iterateFrames(
+ ResolvedJavaMethod[] initialMethods,
+ ResolvedJavaMethod[] matchingMethods,
+ int initialSkip,
+ InspectedFrameVisitor<T> visitor) {
+ return CTVM.iterateFrames(initialMethods, matchingMethods, initialSkip, visitor);
}
public static void materializeVirtualObjects(
--- a/test/hotspot/jtreg/compiler/jvmci/compilerToVM/GetNextStackFrameTest.java Fri Mar 23 11:14:43 2018 -0700
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,244 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @bug 8136421
- * @requires vm.jvmci
- * @library / /test/lib
- * @library ../common/patches
- * @modules java.base/jdk.internal.misc
- * @modules java.base/jdk.internal.org.objectweb.asm
- * java.base/jdk.internal.org.objectweb.asm.tree
- * jdk.internal.vm.ci/jdk.vm.ci.hotspot
- * jdk.internal.vm.ci/jdk.vm.ci.code
- * jdk.internal.vm.ci/jdk.vm.ci.meta
- * @build jdk.internal.vm.ci/jdk.vm.ci.hotspot.CompilerToVMHelper
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
- * -Djvmci.Compiler=null
- * compiler.jvmci.compilerToVM.GetNextStackFrameTest
- */
-
-package compiler.jvmci.compilerToVM;
-
-import compiler.jvmci.common.CTVMUtilities;
-import jdk.test.lib.Asserts;
-import jdk.vm.ci.hotspot.CompilerToVMHelper;
-import jdk.vm.ci.hotspot.HotSpotStackFrameReference;
-import jdk.vm.ci.meta.ResolvedJavaMethod;
-
-import java.lang.reflect.Method;
-
-public class GetNextStackFrameTest {
- private static final int RECURSION_AMOUNT = 3;
- private static final ResolvedJavaMethod REC_FRAME_METHOD;
- private static final ResolvedJavaMethod FRAME1_METHOD;
- private static final ResolvedJavaMethod FRAME2_METHOD;
- private static final ResolvedJavaMethod FRAME3_METHOD;
- private static final ResolvedJavaMethod FRAME4_METHOD;
- private static final ResolvedJavaMethod RUN_METHOD;
-
- static {
- Method method;
- try {
- Class<?> aClass = GetNextStackFrameTest.class;
- method = aClass.getDeclaredMethod("recursiveFrame", int.class);
- REC_FRAME_METHOD = CTVMUtilities.getResolvedMethod(method);
- method = aClass.getDeclaredMethod("frame1");
- FRAME1_METHOD = CTVMUtilities.getResolvedMethod(method);
- method = aClass.getDeclaredMethod("frame2");
- FRAME2_METHOD = CTVMUtilities.getResolvedMethod(method);
- method = aClass.getDeclaredMethod("frame3");
- FRAME3_METHOD = CTVMUtilities.getResolvedMethod(method);
- method = aClass.getDeclaredMethod("frame4");
- FRAME4_METHOD = CTVMUtilities.getResolvedMethod(method);
- method = Thread.class.getDeclaredMethod("run");
- RUN_METHOD = CTVMUtilities.getResolvedMethod(Thread.class, method);
- } catch (NoSuchMethodException e) {
- throw new Error("TEST BUG: can't find a test method : " + e, e);
- }
- }
-
- public static void main(String[] args) {
- new GetNextStackFrameTest().test();
- }
-
- private void test() {
- // Create new thread to get new clean stack
- Thread thread = new Thread(() -> recursiveFrame(RECURSION_AMOUNT));
- thread.start();
- try {
- thread.join();
- } catch (InterruptedException e) {
- throw new Error("Interrupted while waiting to join", e);
- }
- }
-
- // Helper methods for a longer stack
- private void recursiveFrame(int recursionAmount) {
- if (--recursionAmount != 0) {
- recursiveFrame(recursionAmount);
- } else {
- frame1();
- }
- }
-
- private void frame1() {
- frame2();
- }
-
- private void frame2() {
- frame3();
- }
-
- private void frame3() {
- frame4();
- }
-
- private void frame4() {
- check();
- }
-
- private void check() {
- findFirst();
- walkThrough();
- skipAll();
- findNextSkipped();
- findYourself();
- }
-
- /**
- * Finds the first topmost frame from the list of methods to search
- */
- private void findFirst() {
- checkNextFrameFor(null /* topmost frame */,
- new ResolvedJavaMethod[]
- {FRAME2_METHOD, FRAME3_METHOD, FRAME4_METHOD},
- FRAME4_METHOD, 0);
- }
-
- /**
- * Walks through whole stack and checks that every frame could be found
- * while going down the stack till the end
- */
- private void walkThrough() {
- // Check that we would get a frame 4 starting from the topmost frame
- HotSpotStackFrameReference nextStackFrame = checkNextFrameFor(
- null /* topmost frame */,
- new ResolvedJavaMethod[] {FRAME4_METHOD},
- FRAME4_METHOD, 0);
- // Check that we would get a frame 3 starting from frame 4 when we try
- // to search one of the next two frames
- nextStackFrame = checkNextFrameFor(nextStackFrame,
- new ResolvedJavaMethod[] {FRAME3_METHOD,
- FRAME2_METHOD},
- FRAME3_METHOD, 0);
- // Check that we would get a frame 1
- nextStackFrame = checkNextFrameFor(nextStackFrame,
- new ResolvedJavaMethod[] {FRAME1_METHOD},
- FRAME1_METHOD, 0);
- // Check that we would skip (RECURSION_AMOUNT - 1) methods and find a
- // recursionFrame starting from frame 1
- nextStackFrame = checkNextFrameFor(nextStackFrame,
- new ResolvedJavaMethod[] {REC_FRAME_METHOD},
- REC_FRAME_METHOD, RECURSION_AMOUNT - 1);
- // Check that we would get a Thread::run method frame;
- nextStackFrame = checkNextFrameFor(nextStackFrame,
- new ResolvedJavaMethod[] {RUN_METHOD},
- RUN_METHOD, 0);
- // Check that there are no more frames after thread's run method
- nextStackFrame = CompilerToVMHelper.getNextStackFrame(nextStackFrame,
- null /* any */, 0);
- Asserts.assertNull(nextStackFrame,
- "Found stack frame after Thread::run");
- }
-
- /**
- * Skips all frames to get null at the end of the stack
- */
- private void skipAll() {
- // Skip all frames (stack size) + 2 (getNextStackFrame() itself
- // and from CompilerToVMHelper)
- int initialSkip = Thread.currentThread().getStackTrace().length + 2;
- HotSpotStackFrameReference nextStackFrame = CompilerToVMHelper
- .getNextStackFrame(null /* topmost frame */, null /* any */,
- initialSkip);
- Asserts.assertNull(nextStackFrame, "Unexpected frame");
- }
-
- /**
- * Search for any frame skipping one frame
- */
- private void findNextSkipped() {
- // Get frame 4
- HotSpotStackFrameReference nextStackFrame = CompilerToVMHelper
- .getNextStackFrame(null /* topmost frame */,
- new ResolvedJavaMethod[] {FRAME4_METHOD}, 0);
- // Get frame 2 by skipping one method starting from frame 4
- checkNextFrameFor(nextStackFrame, null /* any */,
- FRAME2_METHOD , 1 /* skip one */);
- }
-
- /**
- * Finds test method in the stack
- */
- private void findYourself() {
- Method method;
- Class<?> aClass = CompilerToVMHelper.CompilerToVMClass();
- try {
- method = aClass.getDeclaredMethod(
- "getNextStackFrame",
- HotSpotStackFrameReference.class,
- ResolvedJavaMethod[].class,
- int.class);
- } catch (NoSuchMethodException e) {
- throw new Error("TEST BUG: can't find getNextStackFrame : " + e, e);
- }
- ResolvedJavaMethod self
- = CTVMUtilities.getResolvedMethod(aClass, method);
- checkNextFrameFor(null /* topmost frame */, null /* any */, self, 0);
- }
-
- /**
- * Searches next frame and checks that it equals to expected
- *
- * @param currentFrame start frame to search from
- * @param searchMethods a list of methods to search
- * @param expected expected frame
- * @param skip amount of frames to be skipped
- * @return frame reference
- */
- private HotSpotStackFrameReference checkNextFrameFor(
- HotSpotStackFrameReference currentFrame,
- ResolvedJavaMethod[] searchMethods,
- ResolvedJavaMethod expected,
- int skip) {
- HotSpotStackFrameReference nextStackFrame = CompilerToVMHelper
- .getNextStackFrame(currentFrame, searchMethods, skip);
- Asserts.assertNotNull(nextStackFrame);
- Asserts.assertTrue(nextStackFrame.isMethod(expected),
- "Unexpected next frame: " + nextStackFrame
- + " from current frame: " + currentFrame);
- return nextStackFrame;
- }
-}
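
The deleted test exercised the removed getNextStackFrame entry point; equivalent
lookups now go through the visitor API added to CompilerToVMHelper above. A rough
sketch of how the deleted checkNextFrameFor() pattern maps onto it (searchMethods
and skip are placeholders):

    HotSpotStackFrameReference next = CompilerToVMHelper.iterateFrames(
            searchMethods, null /* match any */, skip,
            f -> (HotSpotStackFrameReference) f);
    Asserts.assertNotNull(next, "Expected a matching frame");
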
--- a/test/hotspot/jtreg/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -34,6 +34,7 @@
* java.base/jdk.internal.org.objectweb.asm.tree
* jdk.internal.vm.ci/jdk.vm.ci.hotspot
* jdk.internal.vm.ci/jdk.vm.ci.code
+ * jdk.internal.vm.ci/jdk.vm.ci.code.stack
* jdk.internal.vm.ci/jdk.vm.ci.meta
*
* @build jdk.internal.vm.ci/jdk.vm.ci.hotspot.CompilerToVMHelper sun.hotspot.WhiteBox
@@ -91,6 +92,7 @@
import compiler.testlibrary.CompilerUtils;
import compiler.whitebox.CompilerWhiteBoxTest;
import jdk.test.lib.Asserts;
+import jdk.vm.ci.code.stack.InspectedFrame;
import jdk.vm.ci.hotspot.CompilerToVMHelper;
import jdk.vm.ci.hotspot.HotSpotStackFrameReference;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -200,18 +202,30 @@
// Materialize virtual objects on last invocation
if (iteration == COMPILE_THRESHOLD) {
// get frames and check not-null
- HotSpotStackFrameReference materialized = CompilerToVMHelper.getNextStackFrame(
- /* topmost frame */ null, new ResolvedJavaMethod[]{MATERIALIZED_RESOLVED},
- /* don't skip any */ 0);
+ HotSpotStackFrameReference materialized = CompilerToVMHelper.iterateFrames(
+ new ResolvedJavaMethod[] {MATERIALIZED_RESOLVED},
+ null /* any */,
+ 0,
+ f -> (HotSpotStackFrameReference) f);
Asserts.assertNotNull(materialized, getName()
+ " : got null frame for materialized method");
- HotSpotStackFrameReference notMaterialized = CompilerToVMHelper.getNextStackFrame(
- /* topmost frame */ null, new ResolvedJavaMethod[]{NOT_MATERIALIZED_RESOLVED},
- /* don't skip any */ 0);
+ Asserts.assertTrue(materialized.isMethod(MATERIALIZED_RESOLVED),
+ "Expected materialized method but got " + materialized);
+ InspectedFrame notMaterialized = CompilerToVMHelper.iterateFrames(
+ new ResolvedJavaMethod[] {NOT_MATERIALIZED_RESOLVED},
+ null /* any */,
+ 0,
+ f -> f);
Asserts.assertNE(materialized, notMaterialized,
"Got same frame pointer for both tested frames");
+ Asserts.assertTrue(notMaterialized.isMethod(NOT_MATERIALIZED_RESOLVED),
+ "Expected notMaterialized method but got " + notMaterialized);
Asserts.assertNotNull(notMaterialized, getName()
+ " : got null frame for not materialized method");
+ Asserts.assertTrue(WB.isMethodCompiled(MATERIALIZED_METHOD), getName()
+ + " : materialized method not compiled");
+ Asserts.assertTrue(WB.isMethodCompiled(NOT_MATERIALIZED_METHOD),
+ getName() + " : not materialized method not compiled");
// check that frames has virtual objects before materialization stage
Asserts.assertTrue(materialized.hasVirtualObjects(), getName()
+ ": materialized frame has no virtual object before materialization");
--- a/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/compiler/testlibrary/sha/predicate/IntrinsicPredicates.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
import jdk.test.lib.cli.predicate.OrPredicate;
import sun.hotspot.WhiteBox;
+import java.lang.reflect.Method;
import java.util.function.BooleanSupplier;
/**
@@ -100,27 +101,23 @@
IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE));
public static final BooleanSupplier SHA1_INTRINSICS_AVAILABLE
- = new AndPredicate(new AndPredicate(
- IntrinsicPredicates.SHA1_INSTRUCTION_AVAILABLE,
- IntrinsicPredicates.COMPILABLE_BY_C2),
- IntrinsicPredicates.booleanOptionValue("UseSHA1Intrinsics"));
+ = new AndPredicate(IntrinsicPredicates.COMPILABLE_BY_C2,
+ IntrinsicPredicates.isIntrinsicAvailable("sun.security.provider.SHA", "implCompress0"));
public static final BooleanSupplier SHA256_INTRINSICS_AVAILABLE
- = new AndPredicate(new AndPredicate(
- IntrinsicPredicates.SHA256_INSTRUCTION_AVAILABLE,
- IntrinsicPredicates.COMPILABLE_BY_C2),
- IntrinsicPredicates.booleanOptionValue("UseSHA256Intrinsics"));
+ = new AndPredicate(IntrinsicPredicates.COMPILABLE_BY_C2,
+ IntrinsicPredicates.isIntrinsicAvailable("sun.security.provider.SHA2", "implCompress0"));
public static final BooleanSupplier SHA512_INTRINSICS_AVAILABLE
- = new AndPredicate(new AndPredicate(
- IntrinsicPredicates.SHA512_INSTRUCTION_AVAILABLE,
- IntrinsicPredicates.COMPILABLE_BY_C2),
- IntrinsicPredicates.booleanOptionValue("UseSHA512Intrinsics"));
+ = new AndPredicate(IntrinsicPredicates.COMPILABLE_BY_C2,
+ IntrinsicPredicates.isIntrinsicAvailable("sun.security.provider.SHA5", "implCompress0"));
- private static BooleanSupplier booleanOptionValue(String option) {
- return () -> IntrinsicPredicates.WHITE_BOX.getBooleanVMFlag(option);
- }
-
- private IntrinsicPredicates() {
- }
+ private static BooleanSupplier isIntrinsicAvailable(String klass, String method) {
+ try {
+ Method m = Class.forName(klass).getDeclaredMethod(method, byte[].class, int.class);
+ return () -> WHITE_BOX.isIntrinsicAvailable(m, (int)IntrinsicPredicates.TIERED_MAX_LEVEL);
+    } catch (Exception e) {
+      throw new RuntimeException("Intrinsified method " + klass + "::" + method + " not found!", e);
+    }
+  }
}
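
The predicates now ask the VM directly, through WhiteBox.isIntrinsicAvailable,
whether the top-tier compiler will intrinsify implCompress0, rather than inferring
that from CPU-feature checks and the UseSHA*Intrinsics flags. A hedged usage
sketch (the surrounding test scaffolding is illustrative):

    // Guard SHA-1-specific assertions on actual intrinsic availability.
    if (!IntrinsicPredicates.SHA1_INTRINSICS_AVAILABLE.getAsBoolean()) {
        System.out.println("SHA-1 intrinsic unavailable; skipping checks");
        return;
    }
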
--- a/test/hotspot/jtreg/compiler/uncommontrap/TestDeoptOOM.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/compiler/uncommontrap/TestDeoptOOM.java Tue Mar 20 04:36:44 2018 +0100
@@ -23,10 +23,10 @@
/*
* @test
- * @bug 6898462
+ * @bug 6898462 8198826
* @summary failed reallocations of scalar replaced objects during deoptimization causes crash
*
- * @run main/othervm -XX:-BackgroundCompilation -Xmx128M
+ * @run main/othervm -XX:-BackgroundCompilation -Xmx128M -XX:+IgnoreUnrecognizedVMOptions -XX:+VerifyStack
* -XX:CompileCommand=exclude,compiler.uncommontrap.TestDeoptOOM::main
* -XX:CompileCommand=exclude,compiler.uncommontrap.TestDeoptOOM::m9_1
* compiler.uncommontrap.TestDeoptOOM
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/vectorization/TestPopCountVector.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8199421
+ * @summary Test vectorization of popcount
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UsePopCountInstruction
+ * compiler.vectorization.TestPopCountVector
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UsePopCountInstruction
+ * -XX:MaxVectorSize=8 compiler.vectorization.TestPopCountVector
+ */
+
+package compiler.vectorization;
+
+public class TestPopCountVector {
+ private int[] input;
+ private int[] output;
+ private static final int LEN = 1024;
+
+ public static void main(String args[]) {
+ TestPopCountVector test = new TestPopCountVector();
+
+ for (int i = 0; i < 10_000; ++i) {
+ test.vectorizeBitCount();
+ }
+ System.out.println("Checking popcount result");
+ test.checkResult();
+
+ for (int i = 0; i < 10_000; ++i) {
+ test.vectorizeBitCount();
+ }
+ System.out.println("Checking popcount result");
+ test.checkResult();
+ }
+
+ public TestPopCountVector() {
+ input = new int[LEN];
+ output = new int[LEN];
+ for (int i = 0; i < LEN; ++i) {
+ input[i] = i % 2 == 0 ? i : -1 * i;
+ }
+ }
+
+ public void vectorizeBitCount() {
+ for (int i = 0; i < LEN; ++i) {
+ output[i] = Integer.bitCount(input[i]);
+ }
+ }
+
+ public void checkResult() {
+ for (int i = 0; i < LEN; ++i) {
+ int expected = Integer.bitCount(input[i]);
+ if (output[i] != expected) {
+ throw new RuntimeException("Invalid result: output[" + i + "] = " + output[i] + " != " + expected);
+ }
+ }
+ }
+}
+
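For reference, Integer.bitCount is specified by the classic SWAR reduction below; with
-XX:+UsePopCountInstruction the JIT replaces it with a hardware popcount and may
auto-vectorize the loop above. A minimal scalar sketch (not part of the test; it mirrors
the well-known algorithm, not the generated vector code):

    static int popCount(int i) {
        i = i - ((i >>> 1) & 0x55555555);                // sums of adjacent bit pairs
        i = (i & 0x33333333) + ((i >>> 2) & 0x33333333); // sums within each nibble
        i = (i + (i >>> 4)) & 0x0f0f0f0f;                // sums within each byte
        i = i + (i >>> 8);
        i = i + (i >>> 16);
        return i & 0x3f;                                 // 0..32 fits in six bits
    }
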
--- a/test/hotspot/jtreg/gc/TestVerifySubSet.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/gc/TestVerifySubSet.java Tue Mar 20 04:36:44 2018 +0100
@@ -69,7 +69,7 @@
output.shouldContain("Heap");
output.shouldContain("Threads");
output.shouldContain("CodeCache");
- output.shouldContain("MetaspaceAux");
+ output.shouldContain("MetaspaceUtils");
output.shouldNotContain("SymbolTable");
output.shouldNotContain("StringTable");
output.shouldNotContain("SystemDictionary");
@@ -80,7 +80,7 @@
output.shouldContain("memory sub-system is unknown, please correct it");
output.shouldNotContain("Threads");
output.shouldNotContain("CodeCache");
- output.shouldNotContain("MetaspaceAux");
+ output.shouldNotContain("MetaspaceUtils");
output.shouldHaveExitValue(1);
}
}
--- a/test/hotspot/jtreg/runtime/SharedArchiveFile/DumpSharedDictionary.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/SharedArchiveFile/DumpSharedDictionary.java Tue Mar 20 04:36:44 2018 +0100
@@ -81,7 +81,7 @@
output = CDSTestUtils.executeAndLog(pb, "jcmd-systemdictionary-verbose");
try {
output.shouldContain("Shared Dictionary");
- output.shouldContain("Dictionary for class loader 0x");
+ output.shouldContain("Dictionary for loader data: 0x");
output.shouldContain("^java.lang.String");
} catch (RuntimeException e) {
output.shouldContain("Unknown diagnostic command");
--- a/test/hotspot/jtreg/runtime/SharedArchiveFile/DumpSymbolAndStringTable.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/SharedArchiveFile/DumpSymbolAndStringTable.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
pb.command(new String[] {JDKToolFinder.getJDKTool("jcmd"), pid, "VM.systemdictionary", "-verbose"});
output = CDSTestUtils.executeAndLog(pb, "jcmd-systemdictionary");
try {
- output.shouldContain("Dictionary for class loader 0x");
+ output.shouldContain("Dictionary for loader data: 0x");
output.shouldContain("^java.lang.String");
} catch (RuntimeException e) {
output.shouldContain("Unknown diagnostic command");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/StackGap/T.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class T {
+
+ public static void test(int n) {
+ if (n == 0) return;
+        System.out.println(n);
+        test(n - 1);
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/StackGap/exestack-gap.c Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+JNIEnv* create_vm(JavaVM **jvm, char *extra_option)
+{
+ JNIEnv* env;
+ JavaVMInitArgs args;
+ JavaVMOption options[4];
+ args.version = JNI_VERSION_1_8;
+ args.nOptions = 3 + (extra_option != NULL);
+ options[0].optionString = "-Xss2048k";
+ char classpath[4096];
+ snprintf(classpath, sizeof classpath,
+ "-Djava.class.path=%s", getenv("CLASSPATH"));
+ options[1].optionString = classpath;
+ options[2].optionString = "-XX:+UnlockExperimentalVMOptions";
+ if (extra_option) {
+ options[3].optionString = extra_option;
+ }
+ args.options = &options[0];
+ args.ignoreUnrecognized = 0;
+ int rv;
+ rv = JNI_CreateJavaVM(jvm, (void**)&env, &args);
+ if (rv < 0) return NULL;
+ return env;
+}
+
+void run(char *extra_arg) {
+ JavaVM *jvm;
+ jclass T_class;
+ jmethodID test_method;
+ JNIEnv *env = create_vm(&jvm, extra_arg);
+ if (env == NULL)
+ exit(1);
+ T_class = (*env)->FindClass(env, "T");
+ test_method = (*env)->GetStaticMethodID(env, T_class, "test", "(I)V");
+  // Recurse 1000 frames; the kernel stack-guard gap used to make this SEGV on x86-32.
+  (*env)->CallStaticVoidMethod(env, T_class, test_method, 1000);
+}
+
+
+int main(int argc, char **argv)
+{
+ if (argc > 1) {
+ run(argv[1]);
+ } else {
+ run(NULL);
+ }
+
+ return 0;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/StackGap/testme.sh Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,49 @@
+# Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#!/bin/sh
+
+#
+# @test testme.sh
+# @summary Linux kernel stack guard should not cause segfaults on x86-32
+# @compile T.java
+# @run shell testme.sh
+#
+
+if [ "${TESTSRC}" = "" ]
+then
+ TESTSRC=${PWD}
+  echo "TESTSRC not set. Using ${TESTSRC} as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../../test_env.sh
+
+if [ "${VM_OS}" != "linux" ]
+then
+ echo "Test only valid for Linux"
+ exit 0
+fi
+
+LD_LIBRARY_PATH=.:${TESTJAVA}/lib/${VM_TYPE}:/usr/lib:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH
+
+${TESTNATIVEPATH}/stack-gap || exit $?
+${TESTNATIVEPATH}/stack-gap -XX:+DisablePrimordialThreadGuardPages || exit $?
--- a/test/hotspot/jtreg/runtime/appcds/ClassLoaderTest.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/appcds/ClassLoaderTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,15 +62,10 @@
TestCommon.dump(appJar, appClasses, bootClassPath);
- OutputAnalyzer runtimeOutput = TestCommon.execCommon(
+ TestCommon.run(
"-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
- "-cp", appJar, bootClassPath, "-Xlog:class+load", "HelloWB");
-
- if (!TestCommon.isUnableToMap(runtimeOutput)) {
- runtimeOutput.shouldNotContain(
- "[class,load] HelloWB source: shared objects file by jdk/internal/misc/ClassLoaders$AppClassLoader");
- runtimeOutput.shouldContain("[class,load] HelloWB source: shared objects file");
- }
+ "-cp", appJar, bootClassPath, "HelloWB")
+ .assertNormalExit(output -> output.shouldContain("HelloWB.class.getClassLoader() = null"));
}
public void testDefiningLoader() throws Exception {
@@ -84,9 +79,11 @@
String bootClassPath = "-Xbootclasspath/a:" + helloJar +
File.pathSeparator + whiteBoxJar;
+ // Archive the "Hello" class from the appended bootclasspath
TestCommon.dump(helloJar, TestCommon.list("Hello"), bootClassPath);
- TestCommon.execCommon("-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
- "-cp", appJar, bootClassPath, "-XX:+TraceClassPaths", "ForNameTest");
+ TestCommon.run("-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
+ "-cp", appJar, bootClassPath, "-Xlog:class+path=trace", "ForNameTest")
+ .assertNormalExit();
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/GraalWithLimitedMetaspace.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test dumping with limited metaspace while loading JVMCI-related classes.
+ *          The VM should not crash, but the CDS dump will abort upon failure to
+ *          allocate metaspace.
+ * @requires vm.cds & vm.graal.enabled
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * jdk.jartool/sun.tools.jar
+ * @build UseAppCDS_Test
+ * @run driver ClassFileInstaller -jar test.jar UseAppCDS_Test
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
+ * -XX:+TieredCompilation -XX:+UseJVMCICompiler -Djvmci.Compiler=graal
+ * GraalWithLimitedMetaspace
+ */
+
+import jdk.test.lib.cds.CDSTestUtils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.io.*;
+
+public class GraalWithLimitedMetaspace {
+
+ // Class UseAppCDS_Test is loaded by the App loader
+
+ static final String TEST_OUT = "UseAppCDS_Test.main--executed";
+
+ private static final String TESTJAR = "./test.jar";
+ private static final String TESTNAME = "UseAppCDS_Test";
+ private static final String TESTCLASS = TESTNAME + ".class";
+
+ private static final String CLASSLIST_FILE = "./GraalWithLimitedMetaspace.classlist";
+ private static final String ARCHIVE_FILE = "./GraalWithLimitedMetaspace.jsa";
+ private static final String BOOTCLASS = "java.lang.Class";
+
+ public static void main(String[] args) throws Exception {
+
+ // dump loaded classes into a classlist file
+ dumpLoadedClasses(new String[] { BOOTCLASS, TESTNAME });
+
+
+ // create an archive using the classlist
+ dumpArchive();
+
+ }
+
+ public static List<String> toClassNames(String filename) throws IOException {
+ ArrayList<String> classes = new ArrayList<>();
+ try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(filename)))) {
+ for (; ; ) {
+ String line = br.readLine();
+ if (line == null) {
+ break;
+ }
+ classes.add(line.replaceAll("/", "."));
+ }
+ }
+ return classes;
+ }
+
+ static void dumpLoadedClasses(String[] expectedClasses) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true,
+ TestCommon.makeCommandLineForAppCDS(
+ "-XX:DumpLoadedClassList=" + CLASSLIST_FILE,
+ // trigger JVMCI runtime init so that JVMCI classes will be
+ // included in the classlist
+ "-XX:+EagerJVMCI",
+ "-cp",
+ TESTJAR,
+ "-XX:+UseAppCDS",
+ TESTNAME,
+ TEST_OUT));
+
+ OutputAnalyzer output = TestCommon.executeAndLog(pb, "dump-loaded-classes")
+ .shouldHaveExitValue(0)
+ .shouldContain(TEST_OUT);
+
+ List<String> dumpedClasses = toClassNames(CLASSLIST_FILE);
+
+ for (String clazz : expectedClasses) {
+ if (!dumpedClasses.contains(clazz)) {
+ throw new RuntimeException(clazz + " missing in " +
+ CLASSLIST_FILE);
+ }
+ }
+ }
+
+ static void dumpArchive() throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true,
+ TestCommon.makeCommandLineForAppCDS(
+ "-cp",
+ TESTJAR,
+ "-XX:+UseAppCDS",
+ "-XX:SharedClassListFile=" + CLASSLIST_FILE,
+ "-XX:SharedArchiveFile=" + ARCHIVE_FILE,
+ "-Xlog:cds",
+ "-Xshare:dump",
+ "-XX:MetaspaceSize=12M",
+ "-XX:MaxMetaspaceSize=12M"));
+
+ OutputAnalyzer output = TestCommon.executeAndLog(pb, "dump-archive")
+ .shouldHaveExitValue(1)
+ .shouldContain("Failed allocating metaspace object type");
+ }
+}
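For context: the classlist written by -XX:DumpLoadedClassList holds one class per line
in internal (slash-separated) form, which toClassNames() above converts to dotted names
before the membership check. An illustrative excerpt (actual contents vary by run):

    java/lang/Object
    java/lang/String
    UseAppCDS_Test
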
--- a/test/hotspot/jtreg/runtime/appcds/UseAppCDS.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/appcds/UseAppCDS.java Tue Mar 20 04:36:44 2018 +0100
@@ -87,8 +87,11 @@
// Next tests rely on the classlist we just dumped
// Test 3: No AppCDS - "test" classes in classlist ignored when dumping
+ // Although AppCDS isn't used, all classes will be found during dumping
+ // after the fix for JDK-8193434. Classes which are not in the boot
+ // loader dictionary will not be saved into the archive.
dumpArchive(false, new String[] { BOOTCLASS },
- new String[] { TESTNAME});
+ new String[0]);
// Test 4: AppCDS - "test" classes in classlist are dumped
dumpArchive(true, new String[] { BOOTCLASS, TESTNAME },
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/CheckCachedMirrorApp.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.io.File;
+import java.net.URL;
+import java.net.URLClassLoader;
+import sun.hotspot.WhiteBox;
+
+//
+// Test class mirror objects are cached when open archive heap objects are mapped:
+// - Well-known shared library classes:
+// java.lang.Object
+// java.lang.String
+// - Shared application class loaded by the system class loader
+// - Shared application class loaded by a user-defined class loader
+//
+public class CheckCachedMirrorApp {
+ static WhiteBox wb;
+ public static void main(String args[]) throws Exception {
+ String path = args[0];
+ URL url = new File(path).toURI().toURL();
+ URL[] urls = new URL[] {url};
+
+ URLClassLoader loader = new URLClassLoader(urls);
+ Class hello = loader.loadClass("Hello");
+ System.out.println("Loaded " + hello + " from " + url + " using loader " + loader);
+
+ wb = WhiteBox.getWhiteBox();
+
+ if (!wb.areOpenArchiveHeapObjectsMapped()) {
+ System.out.println("Archived open_archive_heap objects are not mapped.");
+ System.out.println("This may happen during normal operation. Test Skipped.");
+ return;
+ }
+
+ // Well-known shared library classes
+ Class object_class = Object.class;
+ checkMirror(object_class, true);
+ Class string_class = String.class;
+ checkMirror(string_class, true);
+
+ // Shared app class
+ Class app_class = CheckCachedMirrorApp.class;
+ checkMirror(app_class, true);
+
+        // Hello is a shared class loaded by the 'loader' defined in the current app.
+ // It should not have cached resolved_references.
+ Class class_with_user_defined_loader = hello;
+ checkMirror(class_with_user_defined_loader, false);
+ }
+
+ static void checkMirror(Class c, boolean mirrorShouldBeArchived) {
+ System.out.print("Check cached mirror for " + c);
+ if (wb.isSharedClass(c)) {
+ // Check if the Class object is cached
+ if (mirrorShouldBeArchived && wb.isShared(c)) {
+ System.out.println(c + " mirror is cached. Expected.");
+ } else if (!mirrorShouldBeArchived && !wb.isShared(c)) {
+ System.out.println(c + " mirror is not cached. Expected.");
+ } else if (mirrorShouldBeArchived && !wb.isShared(c)) {
+ throw new RuntimeException(
+ "FAILED. " + c + " mirror is not cached.");
+ } else {
+ throw new RuntimeException(
+ "FAILED. " + c + " mirror should not be cached.");
+ }
+ } else {
+            System.out.println("Class " + c + " is not shared, skipping the check for mirror");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/CheckCachedMirrorTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test archived mirror
+ * @requires vm.cds.archived.java.heap
+ * @requires vm.cds.custom.loaders
+ * @library /test/lib /test/hotspot/jtreg/runtime/appcds
+ * @modules java.base/jdk.internal.misc
+ * @modules java.management
+ * jdk.jartool/sun.tools.jar
+ * @build sun.hotspot.WhiteBox
+ * @compile CheckCachedMirrorApp.java
+ * @compile ../test-classes/Hello.java
+ * @run driver ClassFileInstaller -jar app.jar CheckCachedMirrorApp
+ * @run driver ClassFileInstaller -jar hello.jar Hello
+ * @run driver ClassFileInstaller -jar WhiteBox.jar sun.hotspot.WhiteBox
+ * @run main CheckCachedMirrorTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import sun.hotspot.WhiteBox;
+
+public class CheckCachedMirrorTest {
+ public static void main(String[] args) throws Exception {
+ String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
+ String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
+ String appJar = ClassFileInstaller.getJarPath("app.jar");
+ String helloJarPath = ClassFileInstaller.getJarPath("hello.jar");
+
+ String classlist[] = new String[] {
+ "CheckCachedMirrorApp", // built-in app loader
+ "java/lang/Object id: 1", // boot loader
+ "Hello id: 2 super: 1 source: " + helloJarPath // custom loader
+ };
+
+ TestCommon.testDump(appJar, classlist, use_whitebox_jar);
+ OutputAnalyzer output = TestCommon.exec(appJar, use_whitebox_jar,
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-Xlog:cds=debug",
+ "CheckCachedMirrorApp",
+ helloJarPath);
+ TestCommon.checkExec(output);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/MirrorWithReferenceFieldsApp.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.io.File;
+import java.net.URL;
+import sun.hotspot.WhiteBox;
+
+//
+// - Test that a static final String field with an initial value in a cached mirror is also archived.
+// - GC should not crash when reference fields in cached mirror are updated at runtime
+// - Reference fields are updated to point to runtime created objects
+// - Reference fields are nullified
+//
+public class MirrorWithReferenceFieldsApp {
+
+ // Static String field with initial value
+ static final String archived_field = "abc";
+
+ // Static object field
+ static Object non_archived_field_1;
+
+ // Instance field
+ Integer non_archived_field_2;
+
+ public MirrorWithReferenceFieldsApp() {
+ non_archived_field_1 = new Object();
+ non_archived_field_2 = new Integer(1);
+ }
+
+ public static void main(String args[]) throws Exception {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+
+ if (!wb.areOpenArchiveHeapObjectsMapped()) {
+ System.out.println("Archived open_archive_heap objects are not mapped.");
+ System.out.println("This may happen during normal operation. Test Skipped.");
+ return;
+ }
+
+ MirrorWithReferenceFieldsApp m = new MirrorWithReferenceFieldsApp();
+ m.test(wb);
+ }
+
+ public void test(WhiteBox wb) {
+ Class c = MirrorWithReferenceFieldsApp.class;
+ if (wb.isSharedClass(c)) {
+ // Check if the Class object is cached
+ if (wb.isShared(c)) {
+ System.out.println(c + " mirror is cached. Expected.");
+ } else {
+ throw new RuntimeException(
+ "FAILED. " + c + " mirror should be cached.");
+ }
+
+ // Check fields
+
+ if (wb.isShared(archived_field)) {
+                System.out.println("archived_field is archived as expected");
+ } else {
+ throw new RuntimeException(
+ "FAILED. archived_field is not archived.");
+ }
+
+ // GC should not crash
+ System.gc();
+ System.gc();
+ System.gc();
+
+ non_archived_field_1 = null;
+ non_archived_field_2 = null;
+
+ System.gc();
+ System.gc();
+ System.gc();
+
+ System.out.println("Done.");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/MirrorWithReferenceFieldsTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test archived mirror with reference fields
+ * @requires vm.cds.archived.java.heap
+ * @library /test/lib /test/hotspot/jtreg/runtime/appcds
+ * @modules java.base/jdk.internal.misc
+ * @modules java.management
+ * jdk.jartool/sun.tools.jar
+ * @build sun.hotspot.WhiteBox
+ * @compile MirrorWithReferenceFieldsApp.java
+ * @run driver ClassFileInstaller -jar app.jar MirrorWithReferenceFieldsApp
+ * @run driver ClassFileInstaller -jar WhiteBox.jar sun.hotspot.WhiteBox
+ * @run main MirrorWithReferenceFieldsTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import sun.hotspot.WhiteBox;
+
+public class MirrorWithReferenceFieldsTest {
+ public static void main(String[] args) throws Exception {
+ String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
+ String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
+ String appJar = ClassFileInstaller.getJarPath("app.jar");
+
+ String classlist[] = new String[] {
+ "MirrorWithReferenceFieldsApp",
+ };
+
+ TestCommon.testDump(appJar, classlist, use_whitebox_jar);
+ OutputAnalyzer output = TestCommon.exec(appJar, use_whitebox_jar,
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-XX:+VerifyAfterGC",
+ "MirrorWithReferenceFieldsApp");
+ try {
+ TestCommon.checkExec(output, "Done");
+ } catch (Exception e) {
+ output.shouldContain("Archived open_archive_heap objects are not mapped");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/PrimitiveTypesApp.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.lang.reflect.Field;
+import sun.hotspot.WhiteBox;
+
+//
+// Test primitive type class mirror objects are cached when open archive heap
+// objects are mapped.
+//
+public class PrimitiveTypesApp {
+ public static void main(String[] args) {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ if (!wb.areOpenArchiveHeapObjectsMapped()) {
+ System.out.println("Archived open_archive_heap objects are not mapped.");
+ System.out.println("This may happen during normal operation. Test Skipped.");
+ return;
+ }
+
+ FieldsTest ft = new FieldsTest();
+ ft.testBoolean(wb);
+ ft.testByte(wb);
+ ft.testChar(wb);
+ ft.testInt(wb);
+ ft.testShort(wb);
+ ft.testLong(wb);
+ ft.testFloat(wb);
+ ft.testDouble(wb);
+ }
+}
+
+class FieldsTest {
+ public boolean f_boolean;
+ public byte f_byte;
+ public char f_char;
+ public int f_int;
+ public short f_short;
+ public long f_long;
+ public float f_float;
+ public double f_double;
+
+ FieldsTest() {
+ f_byte = 1;
+ f_boolean = false;
+ f_char = 'a';
+ f_int = 1;
+ f_short = 100;
+ f_long = 2018L;
+ f_float = 1.0f;
+ f_double = 2.5;
+ }
+
+ void testBoolean(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_boolean");
+ f.setBoolean(this, true);
+ if (!f_boolean) {
+ throw new RuntimeException("FAILED. Field f_boolean has unexpected value: " + f_boolean);
+ }
+ checkPrimitiveType(wb, f, Boolean.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testByte(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_byte");
+ f.setByte(this, (byte)9);
+ if (f_byte != (byte)9) {
+ throw new RuntimeException("FAILED. Field f_byte has unexpected value: " + f_byte);
+ }
+ checkPrimitiveType(wb, f, Byte.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testChar(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_char");
+ f.setChar(this, 'b');
+ if (f_char != 'b') {
+ throw new RuntimeException("FAILED. Field f_char has unexpected value: " + f_char);
+ }
+ checkPrimitiveType(wb, f, Character.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testInt(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_int");
+ f.setInt(this, 9999);
+ if (f_int != 9999) {
+ throw new RuntimeException("FAILED. Field f_int has unexpected value: " + f_int);
+ }
+ checkPrimitiveType(wb, f, Integer.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testShort(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_short");
+ f.setShort(this, (short)99);
+ if (f_short != 99) {
+ throw new RuntimeException("FAILED. Field f_short has unexpected value: " + f_short);
+ }
+ checkPrimitiveType(wb, f, Short.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testLong(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_long");
+ f.setLong(this, 99L);
+ if (f_long != 99L) {
+ throw new RuntimeException("FAILED. Field f_long has unexpected value: " + f_long);
+ }
+ checkPrimitiveType(wb, f, Long.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testFloat(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_float");
+ f.setFloat(this, 9.9f);
+ if (f_float != 9.9f) {
+ throw new RuntimeException("FAILED. Field f_float has unexpected value: " + f_float);
+ }
+ checkPrimitiveType(wb, f, Float.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void testDouble(WhiteBox wb) {
+ try {
+ Field f = this.getClass().getDeclaredField("f_double");
+ f.setDouble(this, 9.9);
+ if (f_double != 9.9) {
+ throw new RuntimeException("FAILED. Field f_double has unexpected value: " + f_double);
+ }
+ checkPrimitiveType(wb, f, Double.TYPE);
+ } catch (NoSuchFieldException nsfe) {
+ throw new RuntimeException(nsfe);
+ } catch (IllegalAccessException iae) {
+ throw new RuntimeException(iae);
+ }
+ }
+
+ void checkPrimitiveType(WhiteBox wb, Field f, Class t) {
+ Class c = f.getType();
+ if (!(c.isPrimitive() && c == t)) {
+ throw new RuntimeException("FAILED. " + c + " is not primitive type " + t);
+ }
+ if (wb.isShared(c)) {
+ System.out.println(c + " is cached, expected");
+ } else {
+ throw new RuntimeException("FAILED. " + c + " is not cached.");
+ }
+ }
+}
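The eight per-type methods above are kept separate because each primitive needs its own
typed Field setter; the mirror check itself is uniform. A hedged generic sketch of just
that check (illustration only; the name checkPrimitiveCached is not from the test):

    static void checkPrimitiveCached(WhiteBox wb, Class<?> t) {
        if (!t.isPrimitive()) {
            throw new RuntimeException("FAILED. " + t + " is not a primitive type");
        }
        if (wb.isShared(t)) {   // same WhiteBox probe the test uses above
            System.out.println(t + " is cached, expected");
        } else {
            throw new RuntimeException("FAILED. " + t + " is not cached.");
        }
    }
    // e.g. checkPrimitiveCached(wb, Boolean.TYPE); ... checkPrimitiveCached(wb, Double.TYPE);
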
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/PrimitiveTypesTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test archived primitive type mirrors
+ * @requires vm.cds.archived.java.heap
+ * @library /test/lib /test/hotspot/jtreg/runtime/appcds
+ * @modules java.base/jdk.internal.misc
+ * @modules java.management
+ * jdk.jartool/sun.tools.jar
+ * @build sun.hotspot.WhiteBox
+ * @compile PrimitiveTypesApp.java
+ * @run driver ClassFileInstaller -jar app.jar PrimitiveTypesApp FieldsTest
+ * @run driver ClassFileInstaller -jar WhiteBox.jar sun.hotspot.WhiteBox
+ * @run main PrimitiveTypesTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import sun.hotspot.WhiteBox;
+
+public class PrimitiveTypesTest {
+ public static void main(String[] args) throws Exception {
+ String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
+ String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
+ String appJar = ClassFileInstaller.getJarPath("app.jar");
+
+ String classlist[] = new String[] {
+ "PrimitiveTypesApp",
+ "FieldsTest"
+ };
+
+ TestCommon.testDump(appJar, classlist, use_whitebox_jar);
+ OutputAnalyzer output = TestCommon.exec(appJar, use_whitebox_jar,
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-XX:+VerifyAfterGC",
+ "PrimitiveTypesApp");
+ TestCommon.checkExec(output);
+ }
+}
--- a/test/hotspot/jtreg/runtime/appcds/cacheObject/RedefineClassApp.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/appcds/cacheObject/RedefineClassApp.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -89,6 +89,16 @@
doTest(group, new Foo(), jar);
}
+ static void checkArchivedMirrorObject(Class klass) {
+ if (wb.areOpenArchiveHeapObjectsMapped()) {
+ if (!wb.isShared(klass)) {
+ failed ++;
+ System.out.println("FAILED. " + klass + " mirror object is not archived");
+ return;
+ }
+ }
+ }
+
static void doTest(String group, Intf object, File jar) throws Throwable {
numTests ++;
@@ -101,6 +111,9 @@
System.out.println("Test is shared = " + wb.isSharedClass(klass));
System.out.println("++++++++++++++++++++++++++");
+ // Check archived mirror object before redefine
+ checkArchivedMirrorObject(klass);
+
// Call get() before redefine. All strings in archived classes are shared.
String res = object.get();
System.out.println("get() returns " + res);
@@ -144,6 +157,9 @@
System.gc();
System.gc();
+ // Check archived mirror object after redefine and GC
+ checkArchivedMirrorObject(klass);
+
System.out.println("++++++++++++++++++++++++++++++++++++++++++++++++ (done)\n\n");
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/condy/CondyHello.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * This test uses ldc to load an integer value via a condy bootstrap method.
+ */
+
+class CondyHello
+ version 55:0
+{
+
+public static Method condyBSM:"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/Class;)Ljava/lang/Object;"
+ throws java/lang/Throwable
+ stack 4 locals 6
+{
+ sipush 12345;
+ invokestatic Method java/lang/Integer.valueOf:"(I)Ljava/lang/Integer;";
+ areturn;
+}
+
+public static Method I:"()I"
+ stack 1 locals 0
+{
+ ldc Dynamic REF_invokeStatic:CondyHello.condyBSM:"(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/Class;)Ljava/lang/Object;":I:"I";
+ ireturn;
+}
+
+public static Method doit:"()I"
+ stack 4 locals 1
+{
+ invokestatic Method I:"()I";
+ ireturn;
+}
+
+} // end Class CondyHello
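Java source has no direct syntax for a CONSTANT_Dynamic entry, which is why the class is
written in jasm. For orientation, the bootstrap method above corresponds to this Java
shape (a sketch only; the ldc Dynamic in I() makes the JVM invoke it once and convert the
returned Object to int):

    import java.lang.invoke.MethodHandles;

    public class CondyHelloSketch {  // illustration; the real class is the jasm above
        public static Object condyBSM(MethodHandles.Lookup lookup, String name, Class<?> type)
                throws Throwable {
            return Integer.valueOf(12345);   // the constant that ldc Dynamic resolves to
        }
    }
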
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/condy/CondyHelloApp.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+public class CondyHelloApp {
+ public static void main(String[] args) throws Throwable {
+ int n;
+ if ((n = CondyHello.doit()) != 12345) {
+ throw new java.lang.RuntimeException("Expected 12345 but got " + n);
+ } else {
+ System.out.println("n = " + n + " (as expected)");
+ }
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ if (wb.isSharedClass(CondyHelloApp.class)) {
+ if (!wb.isSharedClass(CondyHello.class)) {
+ throw new java.lang.RuntimeException("CondyHello should be in shared space but is not.");
+ } else {
+ System.out.println("CondyHello.class is shared (as expected)");
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/condy/CondyHelloTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Make sure CDS works with a minimal test case that uses a CONSTANT_Dynamic constant-pool entry
+ * @requires os.arch != "sparcv9"
+ * @modules java.base/jdk.internal.misc
+ * @library /test/lib /test/hotspot/jtreg/runtime/appcds
+ * @build CondyHello
+ * @build sun.hotspot.WhiteBox CondyHelloTest CondyHelloApp
+ * @run driver ClassFileInstaller -jar condy_hello.jar CondyHello CondyHelloApp
+ * @run driver ClassFileInstaller -jar WhiteBox.jar sun.hotspot.WhiteBox
+ * @run main CondyHelloTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+
+public class CondyHelloTest {
+
+ static String classes[] = {
+ "CondyHello",
+ "CondyHelloApp",
+ };
+
+ public static void main(String[] args) throws Exception {
+ String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
+ String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
+ String appJar = ClassFileInstaller.getJarPath("condy_hello.jar");
+
+ TestCommon.dump(appJar, TestCommon.list(classes), use_whitebox_jar);
+
+ TestCommon.run("-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-cp", appJar,
+ use_whitebox_jar,
+ "CondyHelloApp")
+ .assertNormalExit();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/javaldr/AnonVmClassesDuringDump.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @summary When dumping the CDS archive, try to load VM anonymous classes to make sure they
+ * are handled properly. Note: these are not "anonymous inner classes" in the Java source code,
+ * but rather classes that are not recorded in any ClassLoaderData::dictionary(),
+ * such as classes that are generated for Lambda expressions.
+ * See https://blogs.oracle.com/jrose/anonymous-classes-in-the-vm.
+ * @library /test/lib /test/hotspot/jtreg/runtime/appcds /test/hotspot/jtreg/runtime/appcds/test-classes
+ * @requires vm.cds
+ * @requires vm.flavor != "minimal"
+ * @modules java.base/jdk.internal.misc
+ * jdk.jartool/sun.tools.jar
+ * java.management
+ * @build AnonVmClassesDuringDumpTransformer Hello
+ * @run main/othervm AnonVmClassesDuringDump
+ */
+
+public class AnonVmClassesDuringDump {
+ public static String appClasses[] = {
+ "Hello",
+ };
+ public static String agentClasses[] = {
+ "AnonVmClassesDuringDumpTransformer",
+ };
+
+ public static void main(String[] args) throws Throwable {
+ String agentJar =
+ ClassFileInstaller.writeJar("AnonVmClassesDuringDumpTransformer.jar",
+ ClassFileInstaller.Manifest.fromSourceFile("AnonVmClassesDuringDumpTransformer.mf"),
+ agentClasses);
+
+ String appJar =
+ ClassFileInstaller.writeJar("AnonVmClassesDuringDumpApp.jar", appClasses);
+
+ TestCommon.testDump(appJar, TestCommon.list("Hello"),
+ "-javaagent:" + agentJar,
+ // Set the following property to see logs for dynamically generated classes
+ // in STDOUT
+ "-Djava.lang.invoke.MethodHandle.DUMP_CLASS_FILES=true");
+ TestCommon.run("-cp", appJar, "Hello")
+ .assertNormalExit();
+ }
+}
+
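To observe such a VM-anonymous class outside the harness, a hedged probe (hypothetical
class, not part of the test; the exact name suffix varies by JDK version):

    public class LambdaNameProbe {
        public static void main(String[] args) {
            Runnable r = () -> System.out.println("Invoked inside a Lambda");
            // A lambda's implementation class has no dictionary entry and prints with
            // a '/'-suffixed identity, e.g. LambdaNameProbe$$Lambda$1/0x..., rather
            // than a plain binary name.
            System.out.println(r.getClass().getName());
            r.run();
        }
    }
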
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/javaldr/AnonVmClassesDuringDumpTransformer.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.lang.instrument.ClassFileTransformer;
+import java.lang.instrument.Instrumentation;
+import java.lang.instrument.IllegalClassFormatException;
+import java.security.ProtectionDomain;
+
+public class AnonVmClassesDuringDumpTransformer implements ClassFileTransformer {
+ public byte[] transform(ClassLoader loader, String name, Class<?> classBeingRedefined,
+ ProtectionDomain pd, byte[] buffer) throws IllegalClassFormatException {
+ return null;
+ }
+
+ private static Instrumentation savedInstrumentation;
+
+ public static void premain(String agentArguments, Instrumentation instrumentation) {
+ System.out.println("ClassFileTransformer.premain() is called");
+ instrumentation.addTransformer(new AnonVmClassesDuringDumpTransformer(), /*canRetransform=*/true);
+ savedInstrumentation = instrumentation;
+
+ // This will create a Lambda, which will result in some Anonymous VM Classes
+ // being generated.
+ //
+ // Look for something like these in the STDOUT:
+ // ----------------
+ // ClassFileTransformer.premain() is called
+ // Dumping class files to DUMP_CLASS_FILES/...
+ // dump: DUMP_CLASS_FILES/java/lang/invoke/LambdaForm$MH000.class
+ // dump: DUMP_CLASS_FILES/java/lang/invoke/LambdaForm$MH001.class
+ // Invoked inside a Lambda
+ // ----------------
+ Runnable r = () -> {
+ System.out.println("Invoked inside a Lambda");
+ };
+ r.run();
+ }
+
+ public static Instrumentation getInstrumentation() {
+ return savedInstrumentation;
+ }
+
+ public static void agentmain(String args, Instrumentation inst) throws Exception {
+ premain(args, inst);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/appcds/javaldr/AnonVmClassesDuringDumpTransformer.mf Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,5 @@
+Manifest-Version: 1.0
+Premain-Class: AnonVmClassesDuringDumpTransformer
+Agent-Class: AnonVmClassesDuringDumpTransformer
+Can-Retransform-Classes: true
+Can-Redefine-Classes: true
--- a/test/hotspot/jtreg/runtime/appcds/test-classes/ForNameTest.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/appcds/test-classes/ForNameTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
public class ForNameTest {
public static void main(String[] args) throws Throwable {
// Hello is on the bootclasspath. The defining classloader is
- // the NULL classloader. See AppCDSClassLoaderTest.
+ // the NULL classloader. See ../ClassLoaderTest.java
Class c = Class.forName("Hello");
ClassLoader cl = c.getClassLoader();
if (cl != null) {
@@ -36,10 +36,10 @@
}
WhiteBox wb = WhiteBox.getWhiteBox();
- if (!wb.isSharedClass(c)) {
- System.out.println("As expected, Hello.class is not in shared space.");
+ if (wb.isSharedClass(c)) {
+ System.out.println("As expected, Hello.class is in shared space.");
} else {
- throw new java.lang.RuntimeException("Hello.class shouldn't be in shared space.");
+ throw new java.lang.RuntimeException("Hello.class must be in shared space.");
}
}
}
--- a/test/hotspot/jtreg/runtime/appcds/test-classes/HelloWB.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/appcds/test-classes/HelloWB.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
public class HelloWB {
public static void main(String[] args) throws Throwable {
+ System.out.println("HelloWB.class.getClassLoader() = " + HelloWB.class.getClassLoader());
WhiteBox wb = WhiteBox.getWhiteBox();
if (wb.isSharedClass(HelloWB.class)) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/Dockerfile-BasicTest-aarch64 Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,8 @@
+# Use generic ubuntu Linux on AArch64
+FROM aarch64/ubuntu
+
+COPY /jdk /jdk
+
+ENV JAVA_HOME=/jdk
+
+CMD ["/bin/bash"]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AME1_E.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* Method aFunctionOfMyInterface:"()Ljava/lang/String;" is missing in this implementation to cause error. */
+
+class AME1_E extends AME1_B implements AME1_C {
+
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokespecial Method AME1_B."<init>":"()V";
+ return;
+ }
+
+ public Method firstAbstractMethod:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method secondAbstractMethod:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ /* Missing to cause error.
+ public Method anAbstractMethod:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+ */
+
+ public Method firstFunctionOfMyInterface0:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method secondFunctionOfMyInterface0:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method firstFunctionOfMyInterface:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method secondFunctionOfMyInterface:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ /* Missing to cause error.
+ public Method aFunctionOfMyInterface:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+ */
+}
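For orientation, a hedged sketch of how the omission surfaces at runtime (AME1_B, AME1_C
and the test driver live elsewhere in this test directory; their shapes are assumed from
the jasm above):

    AME1_C obj = new AME1_E();       // links fine: jasm permits the non-abstract class
    String s = obj.aFunctionOfMyInterface();  // selection finds no implementation
                                              // -> java.lang.AbstractMethodError
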
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AME2_C.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* Method aFunctionOfMyInterface() is missing in this implementation to cause error. */
+
+class AME2_C extends AME2_B {
+
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokespecial Method AME2_B."<init>":"()V";
+ return;
+ }
+
+ public Method fun2:"()V"
+ stack 0 locals 1
+ {
+ return;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AME3_C.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* Method ma() is missing in this implementation to cause error. */
+
+class AME3_C extends AME3_B {
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+        invokespecial Method AME3_B."<init>":"()V";
+ return;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AME4_E.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/* Method ma() is missing in this implementation to cause error. */
+
+class AME4_E extends AME4_B {
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+        invokespecial Method AME4_B."<init>":"()V";
+ return;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AME5_B.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* Method mc() is missing in this implementation to cause error. */
+
+class AME5_B extends AME5_A {
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+        invokespecial Method AME5_A."<init>":"()V";
+ return;
+ }
+
+ public Method ma:"()V"
+ stack 2 locals 1
+ {
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ ldc String "B.ma() ";
+ invokevirtual Method java/io/PrintStream.print:"(Ljava/lang/String;)V";
+ return;
+ }
+
+ public Method mb:"()V"
+ stack 2 locals 1
+ {
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ ldc String "B.mb() ";
+ invokevirtual Method java/io/PrintStream.print:"(Ljava/lang/String;)V";
+ return;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AME6_B.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* Method mc() is missing in this implementation to cause error. */
+
+class AME6_B implements AME6_A {
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokespecial Method java/lang/Object."<init>":"()V";
+ return;
+ }
+
+ public Method ma:"()V"
+ stack 2 locals 1
+ {
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ ldc String "B.ma() ";
+ invokevirtual Method java/io/PrintStream.print:"(Ljava/lang/String;)V";
+ return;
+ }
+
+ public Method mb:"()V"
+ stack 2 locals 1
+ {
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ ldc String "B.mb() ";
+ invokevirtual Method java/io/PrintStream.print:"(Ljava/lang/String;)V";
+ return;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AbstractMethodErrorTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @summary Check that the verbose message of the AME is printed correctly.
+ * @requires !(os.arch=="arm") & vm.flavor == "server" & !vm.emulatedClient & vm.compMode=="Xmixed" & (!vm.graal.enabled | vm.opt.TieredCompilation == true) & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel==4)
+ * @library /test/lib /
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @compile AbstractMethodErrorTest.java
+ * @compile AME1_E.jasm AME2_C.jasm AME3_C.jasm AME4_E.jasm AME5_B.jasm AME6_B.jasm
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -XX:CompileThreshold=1000 -XX:-BackgroundCompilation -XX:-Inline
+ * -XX:CompileCommand=exclude,AbstractMethodErrorTest::test_ame1
+ * AbstractMethodErrorTest
+ */
+
+import sun.hotspot.WhiteBox;
+import compiler.whitebox.CompilerWhiteBoxTest;
+import java.lang.reflect.Method;
+
+// This test assembles an erroneous installation of classes.
+// First, compile the test by @compile. This results in a legal set
+// of classes.
+// Then, with jasm, generate incompatible classes that overwrite
+// the class files in the build directory.
+// Last, call the real test throwing an AbstractMethodError and
+// check the message generated.
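+//
+// For example, AME1_E below is fully implemented in Java, while AME1_E.jasm
+// deliberately omits anAbstractMethod() and aFunctionOfMyInterface(); since
+// the .jasm files are compiled after this file (see the @compile order
+// above), the erroneous AME1_E.class is the version that gets loaded.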
+public class AbstractMethodErrorTest {
+
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+ private static boolean enableChecks = true;
+
+ public static void setup_test() {
+        // Ensure all exceptions are loaded.
+ new AbstractMethodError();
+ new IncompatibleClassChangeError();
+
+ enableChecks = false;
+ // Warmup
+ System.out.println("warmup:");
+ test_ame5_compiled_vtable_stub();
+ test_ame6_compiled_itable_stub();
+ enableChecks = true;
+
+ // Compile
+ try {
+ Method method = AbstractMethodErrorTest.class.getMethod("test_ame5_compiled_vtable_stub");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException(method.getName() + " is not compiled");
+ }
+ method = AbstractMethodErrorTest.class.getMethod("test_ame6_compiled_itable_stub");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException(method.getName() + " is not compiled");
+ }
+ method = AME5_C.class.getMethod("c");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("AME5_C." + method.getName() + " is not compiled");
+ }
+ method = AME5_D.class.getMethod("c");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("AME5_D." + method.getName() + " is not compiled");
+ }
+ method = AME5_E.class.getMethod("c");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("AME5_E." + method.getName() + " is not compiled");
+ }
+ method = AME6_C.class.getMethod("c");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("AME6_C." + method.getName() + " is not compiled");
+ }
+ method = AME6_D.class.getMethod("c");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("AME6_D." + method.getName() + " is not compiled");
+ }
+ method = AME6_E.class.getMethod("c");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("AME6_E." + method.getName() + " is not compiled");
+ }
+ } catch (NoSuchMethodException e) { }
+ }
+
+ private static String expectedErrorMessageAME1_1 =
+ "Missing implementation of resolved method abstract " +
+ "anAbstractMethod()Ljava/lang/String; of abstract class AME1_B.";
+ private static String expectedErrorMessageAME1_2 =
+ "Receiver class AME1_E does not define or inherit an implementation of the " +
+ "resolved method abstract aFunctionOfMyInterface()Ljava/lang/String; of " +
+ "interface AME1_C.";
+
+ public static void test_ame1() {
+ AME1_B objectAbstract = new AME1_D();
+ AME1_C objectInterface = new AME1_D();
+ objectInterface.secondFunctionOfMyInterface();
+ objectAbstract.anAbstractMethod();
+ objectInterface.aFunctionOfMyInterface();
+
+ try {
+ objectAbstract = new AME1_E();
+ // AbstractMethodError gets thrown in the interpreter at:
+ // InterpreterGenerator::generate_abstract_entry
+ objectAbstract.anAbstractMethod();
+            throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ } catch (AbstractMethodError e) {
+ String errorMsg = e.getMessage();
+ if (errorMsg == null) {
+ throw new RuntimeException("Caught AbstractMethodError with empty message.");
+ } else if (!errorMsg.equals(expectedErrorMessageAME1_1)) {
+ System.out.println("Expected: " + expectedErrorMessageAME1_1 + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ } catch (RuntimeException e) {
+ throw e;
+ } catch (Throwable e) {
+ throw new RuntimeException("Caught unexpected exception: " + e);
+ }
+
+ try {
+ objectInterface = new AME1_E();
+ // AbstractMethodError gets thrown in:
+ // TemplateTable::invokeinterface or C-Interpreter loop
+ objectInterface.aFunctionOfMyInterface();
+            throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ } catch (AbstractMethodError e) {
+ String errorMsg = e.getMessage();
+ if (errorMsg == null) {
+ throw new RuntimeException("Caught AbstractMethodError with empty message.");
+ } else if (!errorMsg.equals(expectedErrorMessageAME1_2)) {
+ // Thrown via InterpreterRuntime::throw_AbstractMethodErrorVerbose().
+ System.out.println("Expected: " + expectedErrorMessageAME1_2 + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ } else {
+ System.out.println("Passed with message: " + errorMsg);
+ }
+ } catch (Throwable e) {
+ throw new RuntimeException("Caught unexpected exception: " + e);
+ }
+ }
+
+ private static String expectedErrorMessageAME2_Interpreted =
+ "Missing implementation of resolved method abstract " +
+ "aFunctionOfMyInterface()V of interface AME2_A.";
+ private static String expectedErrorMessageAME2_Compiled =
+ "Receiver class AME2_C does not define or inherit an implementation of the resolved method " +
+ "abstract aFunctionOfMyInterface()V of interface AME2_A.";
+
+ public AbstractMethodErrorTest() throws InstantiationException, IllegalAccessException {
+ try {
+ AME2_B myAbstract = new ImplementsAllFunctions();
+ myAbstract.fun2();
+ myAbstract.aFunctionOfMyInterface();
+
+ // AME2_C does not implement the method
+ // aFunctionOfMyInterface(). Expected runtime behavior is
+ // throwing an AbstractMethodError.
+ // The error will be thrown via throw_AbstractMethodErrorWithMethod()
+ // if the template interpreter calls an abstract method by
+ // entering the abstract method entry.
+ myAbstract = new AME2_C();
+ myAbstract.fun2();
+ myAbstract.aFunctionOfMyInterface();
+ } catch (SecurityException e) {
+ e.printStackTrace();
+ }
+ }
+
+    // Loop so that the method eventually gets compiled/OSRed.
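+    // With -XX:CompileThreshold=1000 and -XX:-BackgroundCompilation (see the
+    // @run line above), the 10000 iterations below should be plenty for the
+    // relevant methods to reach compiled code.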
+ public static void test_ame2() throws Exception {
+ boolean seenInterpreted = false;
+ boolean seenCompiled = false;
+
+        // Loop to test both the interpreted and the compiled case.
+ for (int i = 0; i < 10000 && !(seenInterpreted && seenCompiled); ++i) {
+ try {
+ // Supposed to throw AME with verbose message.
+ new AbstractMethodErrorTest();
+
+ throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ } catch (AbstractMethodError e) {
+ String errorMsg = e.getMessage();
+
+ // Check the message obtained.
+ if (errorMsg == null) {
+ throw new RuntimeException("Caught AbstractMethodError with empty message.");
+ } else if (errorMsg.equals(expectedErrorMessageAME2_Interpreted)) {
+ seenInterpreted = true;
+ } else if (errorMsg.equals(expectedErrorMessageAME2_Compiled)) {
+ // Sparc and the other platforms behave differently here:
+ // Sparc throws the exception via SharedRuntime::handle_wrong_method_abstract(),
+ // x86, ppc and s390 via LinkResolver::runtime_resolve_virtual_method(). Thus,
+ // sparc misses the test case for LinkResolver::runtime_resolve_virtual_method().
+ seenCompiled = true;
+ } else {
+ System.out.println("Expected: " + expectedErrorMessageAME2_Interpreted + "\n" +
+ "or: " + expectedErrorMessageAME2_Compiled + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ }
+ }
+ if (!(seenInterpreted && seenCompiled)) {
+ if (seenInterpreted) { System.out.println("Saw interpreted message."); }
+ if (seenCompiled) { System.out.println("Saw compiled message."); }
+            throw new RuntimeException("Test did not produce a wrong error message for AbstractMethodError, " +
+                                       "but it did not exercise both cases (interpreted and compiled).");
+ }
+ }
+
+ private static String expectedErrorMessageAME3_1 =
+ "Receiver class AME3_C does not define or inherit an implementation of the resolved method " +
+ "ma()V of class AME3_A. Selected method is abstract AME3_B.ma()V.";
+
+ // Testing abstract class that extends a class that has an implementation.
+ public static void test_ame3_1() throws Exception {
+ AME3_A c = new AME3_C();
+
+ try {
+ // Supposed to throw AME with verbose message.
+ c.ma();
+
+ throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ } catch (AbstractMethodError e) {
+ String errorMsg = e.getMessage();
+
+ // Check the message obtained.
+ if (errorMsg == null) {
+ throw new RuntimeException("Caught AbstractMethodError with empty message.");
+ } else if (errorMsg.equals(expectedErrorMessageAME3_1)) {
+ // Expected test case thrown via LinkResolver::runtime_resolve_virtual_method().
+ System.out.println("Passed with message: " + errorMsg);
+ } else {
+ System.out.println("Expected: " + expectedErrorMessageAME3_1 + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ }
+ }
+
+ private static String expectedErrorMessageAME3_2 =
+ "Receiver class AME3_C does not define or inherit an implementation of " +
+ "the resolved method abstract ma()V of abstract class AME3_B.";
+
+ // Testing abstract class that extends a class that has an implementation.
+ public static void test_ame3_2() throws Exception {
+ AME3_C c = new AME3_C();
+
+ try {
+ // Supposed to throw AME with verbose message.
+ c.ma();
+
+ throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ } catch (AbstractMethodError e) {
+ String errorMsg = e.getMessage();
+
+ // Check the message obtained.
+ if (errorMsg == null) {
+ throw new RuntimeException("Caught AbstractMethodError with empty message.");
+ } else if (errorMsg.equals(expectedErrorMessageAME3_2)) {
+ // Expected test case thrown via LinkResolver::runtime_resolve_virtual_method().
+ System.out.println("Passed with message: " + errorMsg);
+ } else {
+ System.out.println("Expected: " + expectedErrorMessageAME3_2 + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ }
+ }
+
+ private static String expectedErrorMessageAME4 =
+ "Missing implementation of resolved method abstract ma()V of " +
+ "abstract class AME4_B.";
+
+ // Testing abstract class that extends a class that has an implementation.
+ public static void test_ame4() throws Exception {
+ AME4_C c = new AME4_C();
+ AME4_D d = new AME4_D();
+        AME4_E e = new AME4_E(); // Erroneous.
+
+ AME4_A a;
+ try {
+            // Test: calls erroneous e.ma() in the last iteration.
+ final int iterations = 10;
+ for (int i = 0; i < iterations; i++) {
+ a = e;
+ if (i % 2 == 0 && i < iterations - 1) {
+ a = c;
+ }
+ if (i % 2 == 1 && i < iterations - 1) {
+ a = d;
+ }
+
+ // AbstractMethodError gets thrown in the interpreter at:
+ // InterpreterGenerator::generate_abstract_entry
+ a.ma();
+ }
+
+ throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ } catch (AbstractMethodError exc) {
+ System.out.println();
+ String errorMsg = exc.getMessage();
+
+ // Check the message obtained.
+ if (enableChecks && errorMsg == null) {
+ throw new RuntimeException("Caught AbstractMethodError with empty message.");
+ } else if (errorMsg.equals(expectedErrorMessageAME4)) {
+ // Expected test case.
+ System.out.println("Passed with message: " + errorMsg);
+ } else if (enableChecks) {
+ System.out.println("Expected: " + expectedErrorMessageAME4 + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ }
+ }
+
+ private static String expectedErrorMessageAME5_VtableStub =
+ "Receiver class AME5_B does not define or inherit an implementation of the resolved method abstract mc()V " +
+ "of abstract class AME5_A.";
+
+ // AbstractMethodErrors detected in vtable stubs.
+ // Note: How can we verify that we really stepped through the vtable stub?
+ // - Bimorphic inlining should not happen since we have no profiling data when
+ // we compile the method
+ // - As a result, an inline cache call should be generated
+ // - This inline cache call is patched into a real vtable call at the first
+ // re-resolve, which happens constantly during the first 10 iterations of the loop.
+ // => we should be fine! :-)
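+    // (If in doubt, one way to inspect the generated stubs by hand is
+    // -XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly, which assumes an
+    // external hsdis disassembler library; the test itself does not rely on this.)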
+ public static void test_ame5_compiled_vtable_stub() {
+        // Allocate the objects we need and call a valid method.
+ boolean caught_ame = false;
+ AME5_B b = new AME5_B();
+ AME5_C c = new AME5_C();
+ AME5_D d = new AME5_D();
+ AME5_E e = new AME5_E();
+ b.ma();
+ c.ma();
+ d.ma();
+ e.ma();
+
+ try {
+ final int iterations = 10;
+            // Test: calls b.mc() in the last iteration.
+ for (int i = 0; i < iterations; i++) {
+ AME5_A a = b;
+ if (i % 3 == 0 && i < iterations - 1) {
+ a = c;
+ }
+ if (i % 3 == 1 && i < iterations - 1) {
+ a = d;
+ }
+ if (i % 3 == 2 && i < iterations - 1) {
+ a = e;
+ }
+
+ a.mc();
+ }
+ System.out.println();
+ } catch (AbstractMethodError exc) {
+ caught_ame = true;
+ System.out.println();
+ String errorMsg = exc.getMessage();
+ if (enableChecks && errorMsg == null) {
+ System.out.println(exc);
+ throw new RuntimeException("Empty error message of AbstractMethodError.");
+ }
+ if (enableChecks &&
+ !errorMsg.equals(expectedErrorMessageAME5_VtableStub)) {
+ // Thrown via SharedRuntime::handle_wrong_method_abstract().
+ System.out.println("Expected: " + expectedErrorMessageAME5_VtableStub + "\n" +
+ "but got: " + errorMsg);
+ System.out.println(exc);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ if (enableChecks) {
+ System.out.println("Passed with message: " + errorMsg);
+ }
+ } catch (Throwable exc) {
+ throw exc;
+ }
+
+ // Check that we got the exception at some point.
+ if (enableChecks && !caught_ame) {
+ throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ }
+ }
+
+ private static String expectedErrorMessageAME6_ItableStub =
+ "Receiver class AME6_B does not define or inherit an implementation of the resolved" +
+ " method abstract mc()V of interface AME6_A.";
+
+ // -------------------------------------------------------------------------
+ // AbstractMethodErrors detected in itable stubs.
+ // Note: How can we verify that we really stepped through the itable stub?
+ // - Bimorphic inlining should not happen since we have no profiling data when
+ // we compile the method
+ // - As a result, an inline cache call should be generated
+    // - This inline cache call is patched into a real itable call at the first
+ // re-resolve, which happens constantly during the first 10 iterations of the loop.
+ // => we should be fine! :-)
+ public static void test_ame6_compiled_itable_stub() {
+        // Allocate the objects we need and call a valid method.
+ boolean caught_ame = false;
+ AME6_B b = new AME6_B();
+ AME6_C c = new AME6_C();
+ AME6_D d = new AME6_D();
+ AME6_E e = new AME6_E();
+ b.ma();
+ c.ma();
+ d.ma();
+ e.ma();
+
+ try {
+ final int iterations = 10;
+            // Test: calls b.mc() in the last iteration.
+ for (int i = 0; i < iterations; i++) {
+ AME6_A a = b;
+ if (i % 3 == 0 && i < iterations - 1) {
+ a = c;
+ }
+ if (i % 3 == 1 && i < iterations - 1) {
+ a = d;
+ }
+ if (i % 3 == 2 && i < iterations - 1) {
+ a = e;
+ }
+ a.mc();
+ }
+ System.out.println();
+ } catch (AbstractMethodError exc) {
+ caught_ame = true;
+ System.out.println();
+ String errorMsg = exc.getMessage();
+ if (enableChecks && errorMsg == null) {
+ System.out.println(exc);
+ throw new RuntimeException("Empty error message of AbstractMethodError.");
+ }
+ if (enableChecks &&
+ !errorMsg.equals(expectedErrorMessageAME6_ItableStub)) {
+ // Thrown via LinkResolver::runtime_resolve_interface_method().
+ System.out.println("Expected: " + expectedErrorMessageAME6_ItableStub + "\n" +
+ "but got: " + errorMsg);
+ System.out.println(exc);
+ throw new RuntimeException("Wrong error message of AbstractMethodError.");
+ }
+ if (enableChecks) {
+ System.out.println("Passed with message: " + errorMsg);
+ }
+ } catch (Throwable exc) {
+ throw exc;
+ }
+
+ // Check that we got the exception at some point.
+ if (enableChecks && !caught_ame) {
+ throw new RuntimeException("Expected AbstractMethodError was not thrown.");
+ }
+ }
+
+
+ public static void main(String[] args) throws Exception {
+ setup_test();
+ test_ame1();
+ test_ame2();
+ test_ame3_1();
+ test_ame3_2();
+ test_ame4();
+ test_ame5_compiled_vtable_stub();
+ test_ame6_compiled_itable_stub();
+ }
+}
+
+// Helper classes to test abstract method error.
+//
+// Erroneous versions of these classes are implemented in Java
+// assembler (jasm).
+
+
+// -------------------------------------------------------------------------
+// This error should be detected in interpreted code.
+//
+// Class hierarchy:
+//
+// C // interface, defines aFunctionOfMyInterface()
+// |
+// A | // interface
+// | |
+// B | // abstract class, defines anAbstractMethod()
+// \ /
+//  E     // erroneous class implementation lacks methods C::aFunctionOfMyInterface()
+// B::anAbstractMethod()
+interface AME1_A {
+
+ public String firstFunctionOfMyInterface0();
+
+ public String secondFunctionOfMyInterface0();
+}
+
+abstract class AME1_B implements AME1_A {
+
+ abstract public String firstAbstractMethod();
+
+ abstract public String secondAbstractMethod();
+
+ abstract public String anAbstractMethod();
+}
+
+interface AME1_C {
+
+ public String firstFunctionOfMyInterface();
+
+ public String secondFunctionOfMyInterface();
+
+ public String aFunctionOfMyInterface();
+}
+
+class AME1_D extends AME1_B implements AME1_C {
+
+ public AME1_D() {
+ }
+
+ public String firstAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String secondAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String anAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String firstFunctionOfMyInterface0() {
+ return this.getClass().getName();
+ }
+
+ public String secondFunctionOfMyInterface0() {
+ return this.getClass().getName();
+ }
+
+ public String firstFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+
+ public String secondFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+
+ public String aFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+}
+
+class AME1_E extends AME1_B implements AME1_C {
+
+ public AME1_E() {
+ }
+
+ public String firstAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String secondAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ // This method is missing in the .jasm implementation.
+ public String anAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String firstFunctionOfMyInterface0() {
+ return this.getClass().getName();
+ }
+
+ public String secondFunctionOfMyInterface0() {
+ return this.getClass().getName();
+ }
+
+ public String firstFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+
+ public String secondFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+
+ // This method is missing in the .jasm implementation.
+ public String aFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+}
+
+// -------------------------------------------------------------------------
+// This error should be detected in interpreted code.
+//
+// Class hierarchy:
+//
+// A // an interface declaring aFunctionOfMyInterface()
+// |
+// B // an abstract class
+// |
+// C   // erroneous implementation lacks method A::aFunctionOfMyInterface()
+//
+interface AME2_A {
+ public void aFunctionOfMyInterface();
+}
+
+abstract class AME2_B implements AME2_A {
+ abstract public void fun2();
+}
+
+class ImplementsAllFunctions extends AME2_B {
+
+ public ImplementsAllFunctions() {}
+
+ public void fun2() {
+ //System.out.print("You called public void ImplementsAllFunctions::fun2().\n");
+ }
+
+ public void aFunctionOfMyInterface() {
+ //System.out.print("You called public void ImplementsAllFunctions::aFunctionOfMyInterface()\n");
+ }
+}
+
+class AME2_C extends AME2_B {
+
+ public AME2_C() {}
+
+ public void fun2() {
+ //System.out.print("You called public void AME2_C::fun2().\n");
+ }
+
+ // This method is missing in the .jasm implementation.
+ public void aFunctionOfMyInterface() {
+ //System.out.print("You called public void AME2_C::aFunctionOfMyInterface()\n");
+ }
+}
+
+// -----------------------------------------------------------------------
+// Test AbstractMethodError shadowing an existing implementation.
+//
+// Class hierarchy:
+//
+// A   // a class implementing ma()
+// |
+// B   // an abstract class defining ma() abstract
+// |
+// C   // an erroneous class lacking an implementation of ma()
+//
+class AME3_A {
+ public void ma() {
+ System.out.print("A.ma() ");
+ }
+}
+
+abstract class AME3_B extends AME3_A {
+ public abstract void ma();
+}
+
+class AME3_C extends AME3_B {
+ // This method is missing in the .jasm implementation.
+ public void ma() {
+ System.out.print("C.ma() ");
+ }
+}
+
+// -----------------------------------------------------------------------
+// Test AbstractMethodError shadowing an existing implementation. In
+// this test there are several subclasses of the abstract class.
+//
+// Class hierarchy:
+//
+// A // A: a class implementing ma()
+// |
+// B // B: an abstract class defining ma() abstract
+// / | \
+// C  D  E   // E: an erroneous class lacking an implementation of ma()
+//
+class AME4_A {
+ public void ma() {
+ System.out.print("A.ma() ");
+ }
+}
+
+abstract class AME4_B extends AME4_A {
+ public abstract void ma();
+}
+
+class AME4_C extends AME4_B {
+ public void ma() {
+ System.out.print("C.ma() ");
+ }
+}
+
+class AME4_D extends AME4_B {
+ public void ma() {
+ System.out.print("D.ma() ");
+ }
+}
+
+class AME4_E extends AME4_B {
+ // This method is missing in the .jasm implementation.
+ public void ma() {
+ System.out.print("E.ma() ");
+ }
+}
+
+// -------------------------------------------------------------------------
+// This error should be detected while processing the vtable stub.
+//
+// Class hierarchy:
+//
+// A__ // abstract
+// /|\ \
+// C D E \
+// B // Bad class, missing method implementation.
+//
+// Test:
+// - Call C.mc() / D.mc() / E.mc() several times to force real vtable call construction
+// - Call erroneous B.mc() in the end to raise the AbstractMethodError
+
+abstract class AME5_A {
+ abstract void ma();
+ abstract void mb();
+ abstract void mc();
+}
+
+class AME5_B extends AME5_A {
+ void ma() {
+ System.out.print("B.ma() ");
+ }
+
+ void mb() {
+ System.out.print("B.mb() ");
+ }
+
+ // This method is missing in the .jasm implementation.
+ void mc() {
+ System.out.print("B.mc() ");
+ }
+}
+
+class AME5_C extends AME5_A {
+ void ma() {
+ System.out.print("C.ma() ");
+ }
+
+ void mb() {
+ System.out.print("C.mb() ");
+ }
+
+ void mc() {
+ System.out.print("C.mc() ");
+ }
+}
+
+class AME5_D extends AME5_A {
+ void ma() {
+ System.out.print("D.ma() ");
+ }
+
+ void mb() {
+ System.out.print("D.mb() ");
+ }
+
+ void mc() {
+ System.out.print("D.mc() ");
+ }
+}
+
+class AME5_E extends AME5_A {
+ void ma() {
+ System.out.print("E.ma() ");
+ }
+
+ void mb() {
+ System.out.print("E.mb() ");
+ }
+
+ void mc() {
+ System.out.print("E.mc() ");
+ }
+}
+
+//-------------------------------------------------------------------------
+// Test AbstractMethodError detected while processing
+// the itable stub.
+//
+// Class hierarchy:
+//
+// A__ (interface)
+// /|\ \
+// C D E \
+// B (bad class, missing method)
+//
+// Test:
+// - Call C.mc() / D.mc() / E.mc() several times to force real itable call construction
+// - Call erroneous B.mc() in the end to raise the AbstractMethodError
+
+interface AME6_A {
+ abstract void ma();
+ abstract void mb();
+ abstract void mc();
+}
+
+class AME6_B implements AME6_A {
+ public void ma() {
+ System.out.print("B.ma() ");
+ }
+
+ public void mb() {
+ System.out.print("B.mb() ");
+ }
+
+ // This method is missing in the .jasm implementation.
+ public void mc() {
+ System.out.print("B.mc() ");
+ }
+}
+
+class AME6_C implements AME6_A {
+ public void ma() {
+ System.out.print("C.ma() ");
+ }
+
+ public void mb() {
+ System.out.print("C.mb() ");
+ }
+
+ public void mc() {
+ System.out.print("C.mc() ");
+ }
+}
+
+class AME6_D implements AME6_A {
+ public void ma() {
+ System.out.print("D.ma() ");
+ }
+
+ public void mb() {
+ System.out.print("D.mb() ");
+ }
+
+ public void mc() {
+ System.out.print("D.mc() ");
+ }
+}
+
+class AME6_E implements AME6_A {
+ public void ma() {
+ System.out.print("E.ma() ");
+ }
+
+ public void mb() {
+ System.out.print("E.mb() ");
+ }
+
+ public void mc() {
+ System.out.print("E.mc() ");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/IncompatibleClassChangeError/ICC_B.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+class ICC_B implements ICC_iA {
+
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokespecial Method java/lang/Object."<init>":"()V";
+ return;
+ }
+
+ public Method a:"()V"
+ stack 2 locals 1
+ {
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ ldc String "B.a()";
+ invokevirtual Method java/io/PrintStream.print:"(Ljava/lang/String;)V";
+ return;
+ }
+
+ public Method b:"()V"
+ stack 2 locals 1
+ {
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ ldc String "B.b()";
+ invokevirtual Method java/io/PrintStream.print:"(Ljava/lang/String;)V";
+ return;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/IncompatibleClassChangeError/ImplementsSomeInterfaces.jasm Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* The "implements InterfaceICCE1" clause as well as the methods
+   anAbstractMethod() and aFunctionOfMyInterface() are missing in this
+   implementation to cause the error. */
+class ImplementsSomeInterfaces extends AbstractICCE0 {
+
+ public Method "<init>":"()V"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokespecial Method AbstractICCE0."<init>":"()V";
+ return;
+ }
+
+ public Method firstAbstractMethod:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method secondAbstractMethod:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method firstFunctionOfMyInterface0:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method secondFunctionOfMyInterface0:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method firstFunctionOfMyInterface:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+
+ public Method secondFunctionOfMyInterface:"()Ljava/lang/String;"
+ stack 1 locals 1
+ {
+ aload_0;
+ invokevirtual Method java/lang/Object.getClass:"()Ljava/lang/Class;";
+ invokevirtual Method java/lang/Class.getName:"()Ljava/lang/String;";
+ areturn;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/IncompatibleClassChangeError/IncompatibleClassChangeErrorTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @summary Check that the verbose message of ICCE is printed correctly.
+ * The test forces errors in itable stubs and the interpreter.
+ * @requires !(os.arch=="arm") & vm.flavor == "server" & !vm.emulatedClient & vm.compMode=="Xmixed" & (!vm.graal.enabled | vm.opt.TieredCompilation == true) & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel==4)
+ * @library /test/lib /
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @compile IncompatibleClassChangeErrorTest.java
+ * @compile ImplementsSomeInterfaces.jasm ICC_B.jasm
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -XX:CompileThreshold=1000 -XX:-BackgroundCompilation -XX:-Inline
+ * -XX:CompileCommand=exclude,IncompatibleClassChangeErrorTest::test_iccInt
+ * IncompatibleClassChangeErrorTest
+ */
+
+import sun.hotspot.WhiteBox;
+import compiler.whitebox.CompilerWhiteBoxTest;
+import java.lang.reflect.Method;
+
+// This test assembles an erroneous installation of classes.
+// First, compile the test by @compile. This results in a legal set
+// of classes.
+// Then, with jasm, generate incompatible classes that overwrite
+// the class files in the build directory.
+// Last, call the real tests throwing IncompatibleClassChangeErrors
+// and check the messages generated.
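+//
+// For example, ImplementsSomeInterfaces below declares "implements
+// InterfaceICCE1", while ImplementsSomeInterfaces.jasm omits that clause;
+// the invokeinterface in test_iccInt() then sees a receiver class that does
+// not implement the requested interface.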
+public class IncompatibleClassChangeErrorTest {
+
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+ private static boolean enableChecks = true;
+
+ private static String expectedErrorMessageInterpreted =
+ "Class ImplementsSomeInterfaces " +
+ "does not implement the requested interface InterfaceICCE1";
+ private static String expectedErrorMessageCompiled =
+ "Class ICC_B does not implement the requested interface ICC_iB";
+ // old message: "vtable stub"
+
+ public static void setup_test() {
+        // Ensure all exceptions are loaded.
+ new AbstractMethodError();
+ new IncompatibleClassChangeError();
+
+ enableChecks = false;
+ // Warmup
+ System.out.println("warmup:");
+ test_iccInt();
+ test_icc_compiled_itable_stub();
+ enableChecks = true;
+
+ // Compile
+ try {
+ Method method = IncompatibleClassChangeErrorTest.class.getMethod("test_icc_compiled_itable_stub");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException(method.getName() + " is not compiled");
+ }
+ method = ICC_C.class.getMethod("b");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("ICC_C." + method.getName() + " is not compiled");
+ }
+ method = ICC_D.class.getMethod("b");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("ICC_D." + method.getName() + " is not compiled");
+ }
+ method = ICC_E.class.getMethod("b");
+ WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException("ICC_E." + method.getName() + " is not compiled");
+ }
+ } catch (NoSuchMethodException e) { }
+ System.out.println("warmup done.");
+ }
+
+ // Should never be compiled.
+ public static void test_iccInt() {
+ boolean caught_icc = false;
+ try {
+ InterfaceICCE1 objectInterface = new ImplementsSomeInterfaces();
+ // IncompatibleClassChangeError gets thrown in
+ // - TemplateTable::invokeinterface()
+ // - LinkResolver::runtime_resolve_interface_method()
+ objectInterface.aFunctionOfMyInterface();
+ } catch (IncompatibleClassChangeError e) {
+ String errorMsg = e.getMessage();
+ if (enableChecks && !errorMsg.equals(expectedErrorMessageInterpreted)) {
+ System.out.println("Expected: " + expectedErrorMessageInterpreted + "\n" +
+ "but got: " + errorMsg);
+ throw new RuntimeException("Wrong error message of IncompatibleClassChangeError.");
+ }
+ caught_icc = true;
+ } catch (Throwable e) {
+ throw new RuntimeException("Caught unexpected exception: " + e);
+ }
+
+ // Check we got the exception.
+ if (!caught_icc) {
+ throw new RuntimeException("Expected IncompatibleClassChangeError was not thrown.");
+ }
+ }
+
+ // -------------------------------------------------------------------------
+    // Test IncompatibleClassChangeErrors detected in itable stubs.
+    // Note: How can we verify that we really stepped through the itable stub?
+ // - Bimorphic inlining should not happen since we have no profiling data when
+ // we compile the method
+ // - As a result, an inline cache call should be generated
+    // - This inline cache call is patched into a real itable call at the first
+ // re-resolve, which happens constantly during the first 10 iterations of the loop.
+ // => we should be fine! :-)
+ public static void test_icc_compiled_itable_stub() {
+        // Allocate the objects we need and call a valid method.
+ boolean caught_icc = false;
+ ICC_B b = new ICC_B();
+ ICC_C c = new ICC_C();
+ ICC_D d = new ICC_D();
+ ICC_E e = new ICC_E();
+ b.a();
+ c.a();
+ d.a();
+ e.a();
+
+ try {
+ final int iterations = 10;
+ // Test: calls b.b() in the last iteration.
+ for (int i = 0; i < iterations; i++) {
+ ICC_iB a = b;
+ if (i % 3 == 0 && i < iterations - 1) {
+ a = c;
+ }
+ if (i % 3 == 1 && i < iterations - 1) {
+ a = d;
+ }
+ if (i % 3 == 2 && i < iterations - 1) {
+ a = e;
+ }
+ a.b();
+ }
+ } catch (AbstractMethodError exc) {
+ // It's a subclass of IncompatibleClassChangeError, so we must catch this first.
+ System.out.println();
+ System.out.println(exc);
+ if (enableChecks) {
+ String errorMsg = exc.getMessage();
+ if (errorMsg == null) {
+ throw new RuntimeException("Caught unexpected AbstractMethodError with empty message.");
+ }
+ throw new RuntimeException("Caught unexpected AbstractMethodError.");
+ }
+ } catch (IncompatibleClassChangeError exc) {
+ caught_icc = true;
+ System.out.println();
+ String errorMsg = exc.getMessage();
+ if (enableChecks && errorMsg == null) {
+ System.out.println(exc);
+ throw new RuntimeException("Empty error message of IncompatibleClassChangeError.");
+ }
+ if (enableChecks &&
+ !errorMsg.equals(expectedErrorMessageCompiled)) {
+ System.out.println("Expected: " + expectedErrorMessageCompiled + "\n" +
+ "but got: " + errorMsg);
+ System.out.println(exc);
+ throw new RuntimeException("Wrong error message of IncompatibleClassChangeError.");
+ }
+ if (enableChecks) {
+ System.out.println("Passed with message: " + errorMsg);
+ }
+ } catch (Throwable exc) {
+            throw exc;
+ }
+
+ // Check we got the exception at some point.
+ if (enableChecks && !caught_icc) {
+ throw new RuntimeException("Expected IncompatibleClassChangeError was not thrown.");
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ setup_test();
+ test_iccInt();
+ test_icc_compiled_itable_stub();
+ }
+}
+
+
+// Helper classes to test incompatible class change in interpreter.
+//
+// The test also contains .jasm files with implementations
+// of the classes that shall generate the errors.
+
+
+// I0 // interface defining aFunctionOfMyInterface()
+// |
+// | I1 // interface
+// | |
+// A0 | // abstract class
+// \ /
+// C // class not implementing I1 and
+// not implementing I0::aFunctionOfMyInterface()
+//
+// Test is expected to throw error because of missing interface and not
+// because of missing method.
+
+interface InterfaceICCE0 {
+ public String firstFunctionOfMyInterface0();
+ public String secondFunctionOfMyInterface0();
+}
+
+interface InterfaceICCE1 {
+
+ public String firstFunctionOfMyInterface();
+
+ public String secondFunctionOfMyInterface();
+
+ public String aFunctionOfMyInterface();
+}
+
+abstract class AbstractICCE0 implements InterfaceICCE0 {
+ abstract public String firstAbstractMethod();
+ abstract public String secondAbstractMethod();
+
+ abstract public String anAbstractMethod();
+}
+
+class ImplementsSomeInterfaces extends
+ AbstractICCE0
+ // This interface is missing in the .jasm implementation.
+ implements InterfaceICCE1
+{
+
+ public String firstAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String secondAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ // This method is missing in the .jasm implementation.
+ public String anAbstractMethod() {
+ return this.getClass().getName();
+ }
+
+ public String firstFunctionOfMyInterface0() {
+ return this.getClass().getName();
+ }
+
+ public String secondFunctionOfMyInterface0() {
+ return this.getClass().getName();
+ }
+
+ public String firstFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+
+ public String secondFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+
+ // This method is missing in the .jasm implementation.
+ public String aFunctionOfMyInterface() {
+ return this.getClass().getName();
+ }
+}
+
+// Helper classes to test incompatible class change in itable stub.
+//
+// Class hierarchy:
+//
+// iA,iB (interfaces)
+// /|\ \
+// C D E \
+// B (bad class, missing interface implementation)
+
+interface ICC_iA {
+ public void a();
+}
+
+interface ICC_iB {
+ public void b();
+}
+
+// This is the erroneous class. A variant of it not
+// implementing ICC_iB is copied into the test before
+// it is run.
+class ICC_B implements ICC_iA,
+ // This interface is missing in the .jasm implementation.
+ ICC_iB {
+ public void a() {
+ System.out.print("B.a() ");
+ }
+
+ public void b() {
+ System.out.print("B.b() ");
+ }
+}
+
+class ICC_C implements ICC_iA, ICC_iB {
+ public void a() {
+ System.out.print("C.a() ");
+ }
+
+ public void b() {
+ System.out.print("C.b() ");
+ }
+}
+
+class ICC_D implements ICC_iA, ICC_iB {
+ public void a() {
+ System.out.print("D.a() ");
+ }
+
+ public void b() {
+ System.out.print("D.b() ");
+ }
+}
+
+class ICC_E implements ICC_iA, ICC_iB {
+ public void a() {
+ System.out.print("E.a() ");
+ }
+
+ public void b() {
+ System.out.print("E.b() ");
+ }
+}
--- a/test/hotspot/jtreg/runtime/logging/ClassLoadUnloadTest.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/runtime/logging/ClassLoadUnloadTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,7 @@
// -Xlog:class+loader+data=trace
pb = exec("-Xlog:class+loader+data=trace");
- checkFor("[class,loader,data]", "create class loader data");
+ checkFor("[class,loader,data]", "create loader data");
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/stringtable/StringTableVerifyTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8199137
+ * @summary VerifyStringTableAtExit should not crash
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * @run main StringTableVerifyTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class StringTableVerifyTest {
+ public static void main(String[] args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UnlockDiagnosticVMOptions", "-XX:+VerifyStringTableAtExit", "-version");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldHaveExitValue(0);
+ }
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/dcmd/framework/TEST.properties Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,2 @@
+exclusiveAccess.dirs=.
+
--- a/test/hotspot/jtreg/serviceability/jdwp/JdwpReply.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/jdwp/JdwpReply.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,17 @@
int dataLength = length - HEADER_LEN;
if (dataLength > 0) {
data = new byte[dataLength];
- ds.read(data, 0, dataLength);
+ int bytesRead = ds.read(data, 0, dataLength);
+ // For large data JDWP agent sends two packets: 1011 bytes in
+ // the first packet (1000 + HEADER_LEN) and the rest in the
+ // second packet.
+ if (bytesRead > 0 && bytesRead < dataLength) {
+ System.out.println("[" + getClass().getName() + "] Only " +
+ bytesRead + " bytes of " + dataLength + " were " +
+ "read in the first packet. Reading the rest...");
+ ds.read(data, bytesRead, dataLength - bytesRead);
+ }
+
parseData(new DataInputStream(new ByteArrayInputStream(data)));
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/FieldAccessWatch/FieldAccessWatch.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8193369
+ * @summary Tests that all FieldAccess and FieldModification notifications
+ *          are generated.
+ * @compile FieldAccessWatch.java
+ * @run main/othervm/native -agentlib:FieldAccessWatch FieldAccessWatch
+ */
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+
+public class FieldAccessWatch {
+
+ private static final String agentLib = "FieldAccessWatch";
+
+ private static class MyItem {
+ }
+
+ private static class MyList {
+ public List<MyItem> items = new ArrayList<>();
+ }
+
+ public static void main(String[] args) throws Exception {
+ try {
+ System.loadLibrary(agentLib);
+ } catch (UnsatisfiedLinkError ex) {
+ System.err.println("Failed to load " + agentLib + " lib");
+ System.err.println("java.library.path: " + System.getProperty("java.library.path"));
+ throw ex;
+ }
+
+ if (!initWatchers(MyList.class, MyList.class.getDeclaredField("items"))) {
+ throw new RuntimeException("Watchers initializations error");
+ }
+
+ MyList list = new MyList();
+
+ test("[1]items.add(0, object)",() -> list.items.add(0, new MyItem()));
+ test("[2]items.add(object)", () -> list.items.add(new MyItem()));
+ test("[3]items.add(1, object)", () -> list.items.add(1, new MyItem()));
+ test("[4]items.add(object)", () -> list.items.add(new MyItem()));
+ test("[5]items.add(1, object)", () -> list.items.add(1, new MyItem()));
+ }
+
+ private static void log(String msg) {
+ System.out.println(msg);
+ System.out.flush();
+ }
+
+    // For every access/modify notification, the native part tries to locate a
+    // boolean "<field_name>_access"/"<field_name>_modify" field and set it to true.
+ private static class TestResult {
+ // MyList.items
+ public boolean items_access;
+
+ // AbstractList.modCount
+ public boolean modCount_access;
+ public boolean modCount_modify;
+
+ // ArrayList.size
+ public boolean size_access;
+ public boolean size_modify;
+
+ // ArrayList.elementData
+ public boolean elementData_access;
+
+ // verify that all fields are set to true
+ public void verify() {
+ Arrays.stream(this.getClass().getDeclaredFields()).forEach(f -> verify(f));
+ }
+
+ private void verify(Field f) {
+ try {
+ if (!f.getBoolean(this)) {
+ throw new RuntimeException(f.getName() + " notification is missed");
+ }
+ } catch (IllegalAccessException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ }
+
+ @FunctionalInterface
+ private interface TestAction {
+ void apply();
+ }
+
+ private static void test(String descr, TestAction action) throws Exception {
+ log(descr + ": starting");
+ TestResult result = new TestResult();
+ if (!startTest(result)) {
+ throw new RuntimeException("startTest failed");
+ }
+ action.apply();
+ // wait some time to ensure all posted events are handled
+ Thread.sleep(500);
+
+ stopTest();
+
+ // check the results
+ result.verify();
+
+ log(descr + ": OK");
+ }
+
+ private static native boolean initWatchers(Class cls, Field field);
+ private static native boolean startTest(TestResult results);
+ private static native void stopTest();
+
+}
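The contract between this test and its native agent is purely name-based: for a watched field f, the agent sets the boolean f + "_access" or f + "_modify" on the TestResult instance. A Java-side sketch of the same lookup the agent performs with GetFieldID/SetBooleanField (illustrative only; the real update happens in native code):

    static void markNotification(Object result, String fieldName, boolean modified)
            throws ReflectiveOperationException {
        String flag = fieldName + (modified ? "_modify" : "_access");
        Field f = result.getClass().getDeclaredField(flag);
        f.setBoolean(result, true); // mirrors SetBooleanField(..., JNI_TRUE)
    }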
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/jvmti/FieldAccessWatch/libFieldAccessWatch.c Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include "jvmti.h"
+#include "jni.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+static jvmtiEnv *jvmti = NULL;
+
+// valid while a test is executed
+static JNIEnv *javaEnv = NULL;
+static jobject testResultObject = NULL;
+static jclass testResultClass = NULL;
+
+
+static void reportError(const char *msg, int err) {
+ printf("%s, error: %d\n", msg, err);
+}
+
+
+// logs the notification and updates currentTestResult
+static void handleNotification(jmethodID method,
+ jfieldID field,
+ jclass field_klass,
+ int modified,
+ jlocation location)
+{
+ jvmtiError err;
+ char *name = NULL;
+ char *mname = NULL;
+ char *mgensig = NULL;
+ jclass methodClass = NULL;
+ char *csig = NULL;
+
+ if (testResultObject == NULL) {
+        // no test is currently running
+ return;
+ }
+
+ err = (*jvmti)->GetFieldName(jvmti, field_klass, field, &name, NULL, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("GetFieldName failed", err);
+ return;
+ }
+
+ err = (*jvmti)->GetMethodName(jvmti, method, &mname, NULL, &mgensig);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("GetMethodName failed", err);
+ return;
+ }
+
+ err = (*jvmti)->GetMethodDeclaringClass(jvmti, method, &methodClass);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("GetMethodDeclaringClass failed", err);
+ return;
+ }
+
+ err = (*jvmti)->GetClassSignature(jvmti, methodClass, &csig, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("GetClassSignature failed", err);
+ return;
+ }
+
+ printf("\"class: %s method: %s%s\" %s field: \"%s\", location: %d\n",
+ csig, mname, mgensig, modified ? "modified" : "accessed", name, (int)location);
+
+ // set TestResult
+ if (javaEnv != NULL && testResultObject != NULL && testResultClass != NULL) {
+ jfieldID fieldID;
+ // field names in TestResult are "<field_name>_access"/"<field_name>_modify"
+        char *fieldName = (char *)malloc(strlen(name) + 16); /* room for "_access"/"_modify" + NUL */
+ strcpy(fieldName, name);
+ strcat(fieldName, modified ? "_modify" : "_access");
+
+ fieldID = (*javaEnv)->GetFieldID(javaEnv, testResultClass, fieldName, "Z");
+ if (fieldID != NULL) {
+ (*javaEnv)->SetBooleanField(javaEnv, testResultObject, fieldID, JNI_TRUE);
+ } else {
+ // the field is not interesting for the test
+ }
+ // clear any possible exception
+ (*javaEnv)->ExceptionClear(javaEnv);
+
+ free(fieldName);
+ }
+
+ (*jvmti)->Deallocate(jvmti, (unsigned char*)csig);
+ (*jvmti)->Deallocate(jvmti, (unsigned char*)mname);
+ (*jvmti)->Deallocate(jvmti, (unsigned char*)mgensig);
+ (*jvmti)->Deallocate(jvmti, (unsigned char*)name);
+}
+
+// recursively sets access and modification watchers for all
+// fields of the object specified.
+void setWatchers(JNIEnv *jni_env, const jobject obj)
+{
+ jclass klass;
+
+ if (obj == NULL) {
+ return;
+ }
+
+ klass = (*jni_env)->GetObjectClass(jni_env, obj);
+ do {
+ jfieldID* klassFields = NULL;
+ jint fieldCount = 0;
+ int i;
+ jvmtiError err = (*jvmti)->GetClassFields(jvmti, klass, &fieldCount, &klassFields);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("Failed to get class fields", err);
+ return;
+ }
+
+ for (i = 0; i < fieldCount; ++i) {
+ char *sig = NULL;
+ err = (*jvmti)->SetFieldModificationWatch(jvmti, klass, klassFields[i]);
+ if (err != JVMTI_ERROR_NONE && err != JVMTI_ERROR_DUPLICATE) {
+ reportError("Failed to set field modification", err);
+ return;
+ }
+
+ err = (*jvmti)->SetFieldAccessWatch(jvmti, klass, klassFields[i]);
+ if (err != JVMTI_ERROR_NONE && err != JVMTI_ERROR_DUPLICATE) {
+ reportError("Failed to set field access", err);
+ return;
+ }
+
+ err = (*jvmti)->GetFieldName(jvmti, klass, klassFields[i], NULL, &sig, NULL);
+ if (sig) {
+            if (sig[0] == 'L') { /* follow object-typed fields; array fields ('[') are not traversed */
+ jobject fieldVal = (*jni_env)->GetObjectField(jni_env, obj, klassFields[i]);
+ setWatchers(jni_env, fieldVal);
+ }
+ (*jvmti)->Deallocate(jvmti, (unsigned char*)sig);
+ }
+ }
+
+ (*jvmti)->Deallocate(jvmti, (unsigned char*)klassFields);
+
+ klass = (*jni_env)->GetSuperclass(jni_env, klass);
+ } while (klass != NULL);
+}
+
+
+static void JNICALL
+onFieldAccess(jvmtiEnv *jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread,
+ jmethodID method,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field)
+{
+ handleNotification(method, field, field_klass, 0, location);
+}
+
+
+static void JNICALL
+onFieldModification(jvmtiEnv *jvmti_env,
+ JNIEnv* jni_env,
+ jthread thread,
+ jmethodID method,
+ jlocation location,
+ jclass field_klass,
+ jobject object,
+ jfieldID field,
+ char signature_type,
+ jvalue new_value)
+{
+ handleNotification(method, field, field_klass, 1, location);
+
+ if (signature_type == 'L') {
+ jobject newObject = new_value.l;
+ setWatchers(jni_env, newObject);
+ }
+}
+
+
+JNIEXPORT jint JNICALL
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved)
+{
+ jvmtiError err;
+ jvmtiCapabilities caps = {0};
+ jvmtiEventCallbacks callbacks = {0};
+ jint res = (*jvm)->GetEnv(jvm, (void **) &jvmti, JVMTI_VERSION_1_1);
+ if (res != JNI_OK || jvmti == NULL) {
+ reportError("GetEnv failed", res);
+ return JNI_ERR;
+ }
+
+ caps.can_generate_field_modification_events = 1;
+ caps.can_generate_field_access_events = 1;
+ caps.can_tag_objects = 1;
+ err = (*jvmti)->AddCapabilities(jvmti, &caps);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("Failed to set capabilities", err);
+ return JNI_ERR;
+ }
+
+ callbacks.FieldModification = &onFieldModification;
+ callbacks.FieldAccess = &onFieldAccess;
+
+ err = (*jvmti)->SetEventCallbacks(jvmti, &callbacks, sizeof(callbacks));
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("Failed to set event callbacks", err);
+ return JNI_ERR;
+ }
+
+ err = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, JVMTI_EVENT_FIELD_ACCESS, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("Failed to set access notifications", err);
+ return JNI_ERR;
+ }
+
+ err = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, JVMTI_EVENT_FIELD_MODIFICATION, NULL);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("Failed to set modification notifications", err);
+ return JNI_ERR;
+ }
+ setbuf(stdout, NULL);
+ return JNI_OK;
+}
+
+
+JNIEXPORT jboolean JNICALL
+Java_FieldAccessWatch_initWatchers(JNIEnv *env, jclass thisClass, jclass cls, jobject field)
+{
+ jfieldID fieldId;
+ jvmtiError err;
+
+ if (jvmti == NULL) {
+ reportError("jvmti is NULL", 0);
+ return JNI_FALSE;
+ }
+
+ fieldId = (*env)->FromReflectedField(env, field);
+
+ err = (*jvmti)->SetFieldModificationWatch(jvmti, cls, fieldId);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("SetFieldModificationWatch failed", err);
+ return JNI_FALSE;
+ }
+
+ err = (*jvmti)->SetFieldAccessWatch(jvmti, cls, fieldId);
+ if (err != JVMTI_ERROR_NONE) {
+ reportError("SetFieldAccessWatch failed", err);
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+
+JNIEXPORT jboolean JNICALL
+Java_FieldAccessWatch_startTest(JNIEnv *env, jclass thisClass, jobject testResults)
+{
+ javaEnv = env;
+ testResultObject = (*javaEnv)->NewGlobalRef(javaEnv, testResults);
+ testResultClass = (jclass)(*javaEnv)->NewGlobalRef(javaEnv, (*javaEnv)->GetObjectClass(javaEnv, testResultObject));
+
+ return JNI_TRUE;
+}
+
+JNIEXPORT void JNICALL
+Java_FieldAccessWatch_stopTest(JNIEnv *env, jclass thisClass)
+{
+ if (testResultObject != NULL) {
+ (*env)->DeleteGlobalRef(env, testResultObject);
+ testResultObject = NULL;
+ }
+ if (testResultClass != NULL) {
+ (*env)->DeleteGlobalRef(env, testResultClass);
+ testResultClass = NULL;
+ }
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/utils/JstatGcCauseResults.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/utils/JstatGcCauseResults.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,23 +25,25 @@
* Results of running the JstatGcTool ("jstat -gccause <pid>")
*
* Output example:
- * S0 S1 E O M CCS YGC YGCT FGC FGCT GCT LGCC GCC
- * 0.00 6.25 46.19 0.34 57.98 54.63 15305 1270.551 0 0.000 1270.551 Allocation Failure No GC
+ * S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT LGCC GCC
+ * 0.00 6.25 46.19 0.34 57.98 54.63 15305 1270.551 0 0.000 0 0.00 1270.551 Allocation Failure No GC
* Output description:
- * S0 Survivor space 0 utilization as a percentage of the space's current capacity.
- * S1 Survivor space 1 utilization as a percentage of the space's current capacity.
- * E Eden space utilization as a percentage of the space's current capacity.
- * O Old space utilization as a percentage of the space's current capacity.
- * M Metaspace utilization as a percentage of the space's current capacity.
+ * S0 Survivor space 0 utilization as a percentage of the space's current capacity.
+ * S1 Survivor space 1 utilization as a percentage of the space's current capacity.
+ * E Eden space utilization as a percentage of the space's current capacity.
+ * O Old space utilization as a percentage of the space's current capacity.
+ * M Metaspace utilization as a percentage of the space's current capacity.
* CCS Compressed Class Space
* YGC Number of young generation GC events.
* YGCT Young generation garbage collection time.
- * FGC Number of full GC events.
+ * FGC Number of full GC events.
* FGCT Full garbage collection time.
- * GCT Total garbage collection time.
+ * CGC         Number of concurrent GC events (STW phase).
+ * CGCT        Concurrent garbage collection time (STW phase).
+ * GCT Total garbage collection time.
* LGCC Cause of last Garbage Collection.
- * GCC Cause of current Garbage Collection.
+ * GCC Cause of current Garbage Collection.
*/
package utils;
@@ -72,6 +74,13 @@
assertThat(GCT >= 0, "Incorrect time value for GCT");
assertThat(GCT >= YGCT, "GCT < YGCT (total garbage collection time < young generation garbage collection time)");
+ int CGC = getIntValue("CGC");
+ float CGCT = getFloatValue("CGCT");
+ assertThat(CGCT >= 0, "Incorrect time value for CGCT");
+ if (CGC > 0) {
+ assertThat(CGCT > 0, "Number of concurrent GC events is " + CGC + ", but CGCT is 0");
+ }
+
int FGC = getIntValue("FGC");
float FGCT = getFloatValue("FGCT");
assertThat(FGCT >= 0, "Incorrect time value for FGCT");
@@ -81,7 +90,7 @@
assertThat(GCT >= FGCT, "GCT < YGCT (total garbage collection time < full generation garbage collection time)");
- assertThat(checkFloatIsSum(GCT, YGCT, FGCT), "GCT != (YGCT + FGCT) " + "(GCT = " + GCT + ", YGCT = " + YGCT
- + ", FGCT = " + FGCT + ", (YCGT + FGCT) = " + (YGCT + FGCT) + ")");
+        assertThat(checkFloatIsSum(GCT, YGCT, CGCT, FGCT), "GCT != (YGCT + CGCT + FGCT) (GCT = " + GCT + ", YGCT = " + YGCT
+                + ", CGCT = " + CGCT + ", FGCT = " + FGCT + ", (YGCT + CGCT + FGCT) = " + (YGCT + CGCT + FGCT) + ")");
}
}
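checkFloatIsSum now has to accept three addends (YGCT + CGCT + FGCT). Comparing printed float totals exactly would be fragile, so the helper presumably compares within a tolerance; a sketch under that assumption (the real helper lives in the tmtools utils, and the signature and epsilon here are guesses):

    // True if sum is approximately the total of the addends, absorbing the
    // rounding of jstat's printed values.
    static boolean checkFloatIsSum(float sum, float... addends) {
        float total = 0;
        for (float a : addends) {
            total += a;
        }
        return Math.abs(sum - total) < 0.01f; // assumed tolerance
    }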
--- a/test/hotspot/jtreg/serviceability/tmtools/jstat/utils/JstatGcResults.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/hotspot/jtreg/serviceability/tmtools/jstat/utils/JstatGcResults.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
* Results of running the JstatGcTool ("jstat -gc <pid>")
*
* Output example:
- * (S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT GCT
- * 512.0 512.0 32.0 0.0 288768.0 168160.6 83968.0 288.1 4864.0 2820.3 512.0 279.7 18510 1559.208 0 0.000 1559.208
+ * S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT CGC CGCT GCT
+ * 512.0 512.0 32.0 0.0 288768.0 168160.6 83968.0 288.1 4864.0 2820.3 512.0 279.7 18510 1559.208 0 0.000 0 0.0 1559.208
*
* Output description:
* S0C Current survivor space 0 capacity (KB).
@@ -45,6 +45,8 @@
* YGCT Young generation garbage collection time.
* FGC Number of full GC events.
* FGCT Full garbage collection time.
+ * CGC         Number of concurrent GC events (STW phase).
+ * CGCT        Concurrent garbage collection time (STW phase).
* GCT Total garbage collection time.
*
*/
@@ -101,6 +103,13 @@
assertThat(GCT >= 0, "Incorrect time value for GCT");
assertThat(GCT >= YGCT, "GCT < YGCT (total garbage collection time < young generation garbage collection time)");
+ int CGC = getIntValue("CGC");
+ float CGCT = getFloatValue("CGCT");
+ assertThat(CGCT >= 0, "Incorrect time value for CGCT");
+ if (CGC > 0) {
+ assertThat(CGCT > 0, "Number of concurrent GC events is " + CGC + ", but CGCT is 0");
+ }
+
int FGC = getIntValue("FGC");
float FGCT = getFloatValue("FGCT");
assertThat(FGCT >= 0, "Incorrect time value for FGCT");
@@ -110,7 +119,7 @@
assertThat(GCT >= FGCT, "GCT < YGCT (total garbage collection time < full generation garbage collection time)");
- assertThat(checkFloatIsSum(GCT, YGCT, FGCT), "GCT != (YGCT + FGCT) " + "(GCT = " + GCT + ", YGCT = " + YGCT
- + ", FGCT = " + FGCT + ", (YCGT + FGCT) = " + (YGCT + FGCT) + ")");
+        assertThat(checkFloatIsSum(GCT, YGCT, CGCT, FGCT), "GCT != (YGCT + CGCT + FGCT) (GCT = " + GCT + ", YGCT = " + YGCT
+                + ", CGCT = " + CGCT + ", FGCT = " + FGCT + ", (YGCT + CGCT + FGCT) = " + (YGCT + CGCT + FGCT) + ")");
}
}
--- a/test/jdk/ProblemList.txt Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/ProblemList.txt Tue Mar 20 04:36:44 2018 +0100
@@ -493,9 +493,6 @@
java/lang/String/nativeEncoding/StringPlatformChars.java 8182569 windows-all,solaris-all
-java/lang/invoke/condy/CondyRepeatFailedResolution.java 8197944 windows-all
-java/lang/invoke/condy/CondyReturnPrimitiveTest.java 8197944 windows-all
-
############################################################################
# jdk_instrument
--- a/test/jdk/java/lang/StackWalker/LocalsAndOperands.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/java/lang/StackWalker/LocalsAndOperands.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,13 @@
* @modules java.base/java.lang:open
* @run testng/othervm -Xint -DtestUnused=true LocalsAndOperands
* @run testng/othervm -Xcomp LocalsAndOperands
+ */
+
+/*
+ * @test
+ * @bug 8020968 8147039 8156073
+ * @modules java.base/java.lang:open
+ * @requires !vm.graal.enabled
* @run testng/othervm -Xcomp -XX:-TieredCompilation LocalsAndOperands
*/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/lang/invoke/condy/CondyInterfaceWithOverpassMethods.java Tue Mar 20 04:36:44 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8186046
+ * @summary Test for an interface using condy with default overpass methods
+ * @library /lib/testlibrary/bytecode
+ * @build jdk.experimental.bytecode.BasicClassBuilder
+ * @run testng CondyInterfaceWithOverpassMethods
+ * @run testng/othervm -XX:+UnlockDiagnosticVMOptions -XX:UseBootstrapCallInfo=3 CondyInterfaceWithOverpassMethods
+ */
+
+import jdk.experimental.bytecode.BasicClassBuilder;
+import jdk.experimental.bytecode.Flag;
+import jdk.experimental.bytecode.TypedCodeBuilder;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+@Test
+public class CondyInterfaceWithOverpassMethods {
+ interface A {
+ int a();
+
+ default int x() {
+ return 1;
+ }
+ }
+
+
+ // Generated class with methods containing condy ldc
+ Class<?> gc;
+
+ public static Object bsm(MethodHandles.Lookup l, String name, Class<?> type) {
+ return name;
+ }
+
+ @BeforeClass
+ public void generateClass() throws Exception {
+// interface B extends A {
+// // Overpass for method A.a
+//
+//        default String y() {
+// // ldc to Dynamic
+// }
+// }
+ Class<?> thisClass = CondyInterfaceWithOverpassMethods.class;
+
+ String genClassName = thisClass.getSimpleName() + "$Code";
+ String bsmClassName = thisClass.getCanonicalName().replace('.', '/');
+ String bsmMethodName = "bsm";
+ String bsmDescriptor = MethodType.methodType(Object.class, MethodHandles.Lookup.class,
+ String.class, Class.class).toMethodDescriptorString();
+
+ byte[] byteArray = new BasicClassBuilder(genClassName, 55, 0)
+ .withFlags(Flag.ACC_INTERFACE, Flag.ACC_ABSTRACT)
+ .withSuperclass("java/lang/Object")
+ .withSuperinterface(thisClass.getCanonicalName().replace('.', '/') + "$" + A.class.getSimpleName())
+ .withMethod("y", "()Ljava/lang/String;", M ->
+ M.withFlags(Flag.ACC_PUBLIC)
+ .withCode(TypedCodeBuilder::new, C ->
+ C.ldc("String", "Ljava/lang/String;", bsmClassName, bsmMethodName, bsmDescriptor,
+ S -> {})
+ .areturn()
+ ))
+ .build();
+
+ gc = MethodHandles.lookup().defineClass(byteArray);
+ }
+
+ @Test
+ public void testClass() throws Exception {
+ // Trigger initialization
+ Class.forName(gc.getName());
+ }
+}
--- a/test/jdk/java/lang/invoke/condy/CondyRepeatFailedResolution.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/java/lang/invoke/condy/CondyRepeatFailedResolution.java Tue Mar 20 04:36:44 2018 +0100
@@ -39,8 +39,6 @@
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
-import java.io.File;
-import java.io.FileOutputStream;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.InvocationTargetException;
@@ -217,9 +215,6 @@
))
.build();
- // For debugging purposes
- new FileOutputStream(new File(genClassName + ".class")).write(byteArray);
-
gc = MethodHandles.lookup().defineClass(byteArray);
}
--- a/test/jdk/java/lang/invoke/condy/CondyReturnPrimitiveTest.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/java/lang/invoke/condy/CondyReturnPrimitiveTest.java Tue Mar 20 04:36:44 2018 +0100
@@ -39,8 +39,6 @@
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
-import java.io.File;
-import java.io.FileOutputStream;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.Method;
@@ -218,9 +216,6 @@
))
.build();
- // For debugging purposes
- new FileOutputStream(new File(genClassName + ".class")).write(byteArray);
-
gc = MethodHandles.lookup().defineClass(byteArray);
}
--- a/test/jdk/sun/tools/jstat/gcCapacityOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcCapacityOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,19 +3,19 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# NGCMN NGCMX NGC S0C S1C EC OGCMN OGCMX OGC OC MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC
-# 4096.0 657408.0 8192.0 512.0 512.0 3072.0 6144.0 1312768.0 6144.0 6144.0 512.0 132096.0 5120.0 512.0 131072.0 512.0 1 0
+# NGCMN NGCMX NGC S0C S1C EC OGCMN OGCMX OGC OC MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC CGC
+# 4096.0 657408.0 8192.0 512.0 512.0 3072.0 6144.0 1312768.0 6144.0 6144.0 512.0 132096.0 5120.0 512.0 131072.0 512.0 1 0 0
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ NGCMN NGCMX NGC S0C S1C EC OGCMN OGCMX OGC OC MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC $/ {
+/^ NGCMN NGCMX NGC S0C S1C EC OGCMN OGCMX OGC OC MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC CGC $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+$/ {
datalines++;
}
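The awk changes in this and the jstat scripts below are mechanical: each new integer column (CGC) appends one more

    [ ]*[0-9]+

group to the data-line pattern, and each new time column (CGCT) appends one more [ ]*[0-9]+\.[0-9]+ group, at the same position where the column name was added to the header-line pattern.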
--- a/test/jdk/sun/tools/jstat/gcCauseOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcCauseOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,15 +3,15 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# S0 S1 E O M CCS YGC YGCT FGC FGCT GCT LGCC GCC
-# 0.00 0.00 0.00 9.97 90.94 87.70 2 0.013 0 0.000 0.013 Allocation Failure No GC
+# S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT LGCC GCC
+# 0.00 0.00 0.00 9.97 90.94 87.70 2 0.013 0 0.000 0 0.000 0.013 Allocation Failure No GC
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT LGCC GCC $/ {
+/^ S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT LGCC GCC $/ {
headerlines++;
}
--- a/test/jdk/sun/tools/jstat/gcMetaCapacityOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcMetaCapacityOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,18 +3,18 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC FGCT GCT
-# 512.0 132096.0 5120.0 512.0 131072.0 512.0 1 0 0.000 0.004
+# MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC FGCT CGC CGCT GCT
+# 512.0 132096.0 5120.0 512.0 131072.0 512.0 1 0 0.000 0 0.000 0.004
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC FGCT GCT $/ {
+/^ MCMN MCMX MC CCSMN CCSMX CCSC YGC FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/gcNewCapacityOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcNewCapacityOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,19 +3,19 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# NGCMN NGCMX NGC S0CMX S0C S1CMX S1C ECMX EC YGC FGC
-# 2176.0 7232.0 2176.0 192.0 64.0 192.0 64.0 6848.0 2048.0 1 0
+# NGCMN NGCMX NGC S0CMX S0C S1CMX S1C ECMX EC YGC FGC CGC
+# 2176.0 7232.0 2176.0 192.0 64.0 192.0 64.0 6848.0 2048.0 1 0 0
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ NGCMN NGCMX NGC S0CMX S0C S1CMX S1C ECMX EC YGC FGC $/ {
+/^ NGCMN NGCMX NGC S0CMX S0C S1CMX S1C ECMX EC YGC FGC CGC $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/gcOldCapacityOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcOldCapacityOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,18 +3,18 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# OGCMN OGCMX OGC OC YGC FGC FGCT GCT
-# 6016.0 58304.0 6016.0 6016.0 1 0 0.000 0.030
+# OGCMN OGCMX OGC OC YGC FGC FGCT CGC CGCT GCT
+# 6016.0 58304.0 6016.0 6016.0 1 0 0.000 0 0.000 0.030
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ OGCMN OGCMX OGC OC YGC FGC FGCT GCT $/ {
+/^ OGCMN OGCMX OGC OC YGC FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/gcOldOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcOldOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,7 +3,7 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# MC MU CCSC CCSU OC OU YGC FGC FGCT GCT
+# MC MU CCSC CCSU OC OU YGC FGC FGCT CGC CGCT GCT
# 5120.0 4152.0 512.0 397.9 6144.0 200.0 1 0 0.000 0.005
@@ -11,11 +11,11 @@
headerlines=0; datalines=0; totallines=0
}
-/^ MC MU CCSC CCSU OC OU YGC FGC FGCT GCT $/ {
+/^ MC MU CCSC CCSU OC OU YGC FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/gcOutput1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/gcOutput1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,19 +3,19 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT GCT
-# 512.0 512.0 0.0 496.0 3072.0 615.5 6144.0 280.0 5120.0 4176.0 512.0 401.0 1 0.005 0 0.000 0.005
+# S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT CGC CGCT GCT
+# 512.0 512.0 0.0 496.0 3072.0 615.5 6144.0 280.0 5120.0 4176.0 512.0 401.0 1 0.005 0 0.000 0 0.000 0.005
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT GCT $/ {
+/^ S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/lineCounts1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/lineCounts1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,22 +3,22 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# S0 S1 E O M CCS YGC YGCT FGC FGCT GCT
-# 0.00 93.76 28.80 1.82 77.74 68.02 1 0.005 0 0.000 0.005
-# 0.00 93.76 73.04 1.82 77.74 68.02 1 0.005 0 0.000 0.005
-# 0.00 93.76 73.04 1.82 77.74 68.02 1 0.005 0 0.000 0.005
-# 0.00 93.76 73.04 1.82 77.74 68.02 1 0.005 0 0.000 0.005
-# 0.00 93.76 75.00 1.82 77.74 68.02 1 0.005 0 0.000 0.005
+# S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT
+# 0.00 93.76 28.80 1.82 77.74 68.02 1 0.005 0 0.000 0 0.000 0.005
+# 0.00 93.76 73.04 1.82 77.74 68.02 1 0.005 0 0.000 0 0.000 0.005
+# 0.00 93.76 73.04 1.82 77.74 68.02 1 0.005 0 0.000 0 0.000 0.005
+# 0.00 93.76 73.04 1.82 77.74 68.02 1 0.005 0 0.000 0 0.000 0.005
+# 0.00 93.76 75.00 1.82 77.74 68.02 1 0.005 0 0.000 0 0.000 0.005
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT $/ {
+/^ S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/lineCounts2.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/lineCounts2.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,18 +3,18 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# S0 S1 E O M CCS YGC YGCT FGC FGCT GCT
-# 0.00 93.76 28.40 1.82 77.74 68.02 1 0.005 0 0.000 0.005
+# S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT
+# 0.00 93.76 28.40 1.82 77.74 68.02 1 0.005 0 0.000 0 0.000 0.005
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT $/ {
+/^ S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/lineCounts3.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/lineCounts3.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,27 +3,27 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# S0 S1 E O M CCS YGC YGCT FGC FGCT GCT
-# 0.00 93.76 26.48 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 71.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 73.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 73.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 73.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 75.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 75.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 77.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 77.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
-# 0.00 93.76 77.58 1.95 77.78 68.02 1 0.006 0 0.000 0.006
+# S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT
+# 0.00 93.76 26.48 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 71.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 73.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 73.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 73.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 75.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 75.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 77.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 77.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
+# 0.00 93.76 77.58 1.95 77.78 68.02 1 0.006 0 0.000 0 0.000 0.006
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT $/ {
+/^ S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstat/lineCounts4.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/lineCounts4.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,30 +3,30 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-# S0 S1 E O M CCS YGC YGCT FGC FGCT GCT
-# 0.00 96.88 66.55 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 71.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 73.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 73.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 73.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 75.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 75.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 77.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 77.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# 0.00 96.88 77.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
-# S0 S1 E O M CCS YGC YGCT FGC FGCT GCT
-# 0.00 96.88 79.58 2.34 77.78 68.02 1 0.003 0 0.000 0.003
+# S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT
+# 0.00 96.88 66.55 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 71.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 73.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 73.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 73.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 75.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 75.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 77.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 77.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# 0.00 96.88 77.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
+# S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT
+# 0.00 96.88 79.58 2.34 77.78 68.02 1 0.003 0 0.000 0 0.000 0.003
BEGIN {
headerlines=0; datalines=0; totallines=0
datalines2=0;
}
-/^ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT $/ {
+/^ S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
if (headerlines == 2) {
datalines2++;
}
--- a/test/jdk/sun/tools/jstat/timeStamp1.awk Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstat/timeStamp1.awk Tue Mar 20 04:36:44 2018 +0100
@@ -3,18 +3,18 @@
# that the numerical values conform to a specific pattern, rather than
# specific values.
#
-#Timestamp S0 S1 E O M CCS YGC YGCT FGC FGCT GCT
-# 0.3 0.00 100.00 68.74 1.95 77.73 68.02 1 0.004 0 0.000 0.004
+#Timestamp S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT
+# 0.3 0.00 100.00 68.74 1.95 77.73 68.02 1 0.004 0 0.000 0 0.000 0.004
BEGIN {
headerlines=0; datalines=0; totallines=0
}
-/^Timestamp S0 S1 E O M CCS YGC YGCT FGC FGCT GCT $/ {
+/^Timestamp S0 S1 E O M CCS YGC YGCT FGC FGCT CGC CGCT GCT $/ {
headerlines++;
}
-/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
+/^[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*([0-9]+\.[0-9]+)|-[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+[ ]*[0-9]+\.[0-9]+[ ]*[0-9]+\.[0-9]+$/ {
datalines++;
}
--- a/test/jdk/sun/tools/jstatd/JstatGCUtilParser.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jdk/sun/tools/jstatd/JstatGCUtilParser.java Tue Mar 20 04:36:44 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,8 @@
YGCT(GcStatisticsType.DOUBLE),
FGC(GcStatisticsType.INTEGER),
FGCT(GcStatisticsType.DOUBLE),
+ CGC(GcStatisticsType.INTEGER),
+ CGCT(GcStatisticsType.DOUBLE),
GCT(GcStatisticsType.DOUBLE);
private final GcStatisticsType type;
--- a/test/jtreg-ext/requires/VMProps.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/jtreg-ext/requires/VMProps.java Tue Mar 20 04:36:44 2018 +0100
@@ -366,18 +366,28 @@
* @return true if docker is supported in a given environment
*/
protected String dockerSupport() {
- // currently docker testing is only supported for Linux-x64, Linux-s390x and Linux-ppc64le
- String arch = System.getProperty("os.arch");
- if (! (Platform.isLinux() && (Platform.isX64() || Platform.isS390x() || arch.equals("ppc64le")))) {
- return "false";
+ boolean isSupported = false;
+ if (Platform.isLinux()) {
+            // currently docker testing is supported only on Linux,
+            // and only on certain architectures
+
+ String arch = System.getProperty("os.arch");
+
+            if (Platform.isX64()
+                    || Platform.isAArch64()
+                    || Platform.isS390x()
+                    || arch.equals("ppc64le")) {
+                isSupported = true;
+            }
}
- boolean isSupported;
- try {
- isSupported = checkDockerSupport();
- } catch (Exception e) {
- isSupported = false;
- }
+ if (isSupported) {
+ try {
+ isSupported = checkDockerSupport();
+ } catch (Exception e) {
+ isSupported = false;
+ }
+ }
return (isSupported) ? "true" : "false";
}
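The platform gate above only establishes that docker testing is plausible; checkDockerSupport() then probes the environment at runtime. That method is outside this hunk; one plausible shape for it, assuming it simply runs a docker command and checks the exit status (the command and all details here are assumptions):

    private static boolean checkDockerSupport() throws Exception {
        // hypothetical probe: treat a zero exit status from the docker
        // client as "docker is usable in this environment"
        Process p = new ProcessBuilder("docker", "-v").start();
        return p.waitFor() == 0;
    }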
--- a/test/lib/sun/hotspot/WhiteBox.java Fri Mar 23 11:14:43 2018 -0700
+++ b/test/lib/sun/hotspot/WhiteBox.java Tue Mar 20 04:36:44 2018 +0100
@@ -439,16 +439,6 @@
// CPU features
public native String getCPUFeatures();
- // Native extensions
- public native long getHeapUsageForContext(int context);
- public native long getHeapRegionCountForContext(int context);
- private native int getContextForObject0(Object obj);
- public int getContextForObject(Object obj) {
- Objects.requireNonNull(obj);
- return getContextForObject0(obj);
- }
- public native void printRegionInfo(int context);
-
// VM flags
public native boolean isConstantVMFlag(String name);
public native boolean isLockedVMFlag(String name);