--- a/hotspot/make/windows/build.make Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/make/windows/build.make Fri Dec 05 15:32:59 2008 -0800
@@ -200,29 +200,6 @@
checkSA::
@echo Not building SA: ARCH = ia64
-!elseif exist("$(MSVCDIR)\PlatformSDK\Include\dbgeng.h")
-# These don't have to be set because the default
-# setting of INCLUDE and LIB already contain the needed dirs.
-SA_INCLUDE =
-SA_LIB =
-
-!elseif exist("$(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h")
-# These don't have to be set because the default
-# setting of INCLUDE and LIB already contain the needed dirs.
-SA_INCLUDE =
-SA_LIB =
-
-!else
-checkSA::
- @echo .
- @echo ERROR: Can't build SA because dbgeng.h does not exist here:
- @echo $(MSVCDIR)\PlatformSDK\Include\dbgeng.h
- @echo nor here:
- @echo $(SYSTEMROOT)\..\Program Files\Microsoft SDK\include\dbgeng.h
- @echo You must use Vis. Studio .Net 2003 on Win 32, and you must
- @echo have the Microsoft SDK installed on Win amd64.
- @echo You can disable building of SA by specifying BUILD_WIN_SA = 0
- @echo . && false
!endif # ! "$(BUILD_WIN_SA)" != "1"
#########################################################################
--- a/hotspot/make/windows/makefiles/defs.make Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/make/windows/makefiles/defs.make Fri Dec 05 15:32:59 2008 -0800
@@ -119,7 +119,7 @@
# we want to release it. If we build it here,
# the SDK makefiles will copy it over and put it into
# the created image.
-BUILD_WIN_SA = 0
+BUILD_WIN_SA = 1
ifneq ($(ALT_BUILD_WIN_SA),)
BUILD_WIN_SA = $(ALT_BUILD_WIN_SA)
endif
--- a/hotspot/make/windows/makefiles/sa.make Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/make/windows/makefiles/sa.make Fri Dec 05 15:32:59 2008 -0800
@@ -49,6 +49,9 @@
default:: $(GENERATED)\sa-jdi.jar
+# Keep no space between $(SA_BUILD_VERSION_PROP) and the > below: a space there
+# becomes trailing whitespace in the SA version string and causes a version mismatch with the target VM version.
+
$(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar
@@ -56,15 +59,15 @@
@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
@$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
- $(QUIETLY) echo $(SA_BUILD_VERSION_PROP) > $(SA_PROPERTIES)
- $(RUN_JAR) cf $@ -C saclasses .
- $(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
+ $(QUIETLY) echo $(SA_BUILD_VERSION_PROP)> $(SA_PROPERTIES)
$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
- $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
- $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
- $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
- $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
+ $(QUIETLY) rm -rf $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+ $(QUIETLY) mkdir $(SA_CLASSDIR)\sun\jvm\hotspot\ui\resources
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+ $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)
+ $(RUN_JAR) cf $@ -C saclasses .
+ $(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
@@ -93,7 +96,7 @@
SA_CFLAGS = /nologo $(MS_RUNTIME_OPTION) /W3 /Gm $(GX_OPTION) /ZI /Od /D "WIN32" /D "_WINDOWS" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /GZ /c
!endif
!if "$(MT)" != ""
- SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS)
+SA_LINK_FLAGS = /manifest $(SA_LINK_FLAGS)
!endif
SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
SA_LFLAGS = $(SA_LINK_FLAGS) /nologo /subsystem:console /map /debug /machine:$(MACHINE)
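
The echo change above is subtle enough to warrant an illustration: under nmake/cmd.exe, every character between the echo argument and the redirection operator is written to the file, so `echo $(PROP) > file` emits a trailing space. A minimal standalone C++ sketch of the comparison that then fails (the version strings are hypothetical, not taken from this patch):

    #include <cassert>
    #include <string>

    int main() {
        std::string sa_version = "14.0-b09 ";  // as written by `echo $(PROP) > file`
        std::string vm_version = "14.0-b09";   // as reported by the target VM
        assert(sa_version != vm_version);      // the mismatch the comment describes
        sa_version.erase(sa_version.find_last_not_of(' ') + 1);  // trimming would also fix it
        assert(sa_version == vm_version);
        return 0;
    }
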
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -2085,7 +2085,7 @@
} else {
if (has_tos) {
// save object pointer before call_VM() clobbers it
- __ mov(Otos_i, Lscratch);
+ __ push_ptr(Otos_i); // put object on tos where GC wants it.
} else {
// Load top of stack (do not pop the value off the stack);
__ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
@@ -2097,7 +2097,7 @@
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
Otos_i, Rcache);
if (!is_static && has_tos) {
- __ mov(Lscratch, Otos_i); // restore object pointer
+ __ pop_ptr(Otos_i); // restore object pointer
__ verify_oop(Otos_i);
}
__ get_cache_and_index_at_bcp(Rcache, index, 1);
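
Why push_ptr/pop_ptr instead of a scratch register: call_VM() can trigger a GC, and a moving collector only fixes up references it can find in its root set; the interpreter's expression stack is scanned, Lscratch is not. A standalone C++ sketch of the failure mode (illustrative only, not HotSpot code):

    #include <cassert>
    #include <vector>

    struct Obj { int payload; };

    std::vector<Obj*> gc_scanned_stack;   // roots the collector walks (the expression stack)

    Obj* moving_gc(Obj* old_loc) {        // relocate the object, fix only scanned roots
        Obj* new_loc = new Obj(*old_loc);
        for (Obj*& root : gc_scanned_stack)
            if (root == old_loc) root = new_loc;
        old_loc->payload = -1;            // poison the stale copy
        return new_loc;
    }

    int main() {
        Obj* obj = new Obj{42};
        gc_scanned_stack.push_back(obj);  // analogous to push_ptr(Otos_i)
        Obj* stale = obj;                 // analogous to mov(Otos_i, Lscratch)
        moving_gc(obj);
        obj = gc_scanned_stack.back();    // analogous to pop_ptr(Otos_i)
        gc_scanned_stack.pop_back();
        assert(obj->payload == 42);       // root seen by the GC was updated
        assert(stale->payload == -1);     // the scratch copy is now garbage
        return 0;
    }
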
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -2272,7 +2272,9 @@
uncommit_memory(addr, bytes);
}
-void os::numa_make_global(char *addr, size_t bytes) { }
+void os::numa_make_global(char *addr, size_t bytes) {
+ Linux::numa_interleave_memory(addr, bytes);
+}
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
@@ -2314,7 +2316,7 @@
extern "C" void numa_warn(int number, char *where, ...) { }
extern "C" void numa_error(char *where) { }
-void os::Linux::libnuma_init() {
+bool os::Linux::libnuma_init() {
// sched_getcpu() should be in libc.
set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
dlsym(RTLD_DEFAULT, "sched_getcpu")));
@@ -2330,31 +2332,51 @@
dlsym(handle, "numa_available")));
set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
dlsym(handle, "numa_tonode_memory")));
+ set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
+ dlsym(handle, "numa_interleave_memory")));
+
+
if (numa_available() != -1) {
+ set_numa_all_nodes((unsigned long*)dlsym(handle, "numa_all_nodes"));
// Create a cpu -> node mapping
_cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
rebuild_cpu_to_node_map();
+ return true;
}
}
}
+ return false;
}
// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
// The table is later used in get_node_by_cpu().
void os::Linux::rebuild_cpu_to_node_map() {
- int cpu_num = os::active_processor_count();
+ const size_t NCPUS = 32768; // The buffer size computation in libnuma is
+ // very obscure (possible values start at 16 and
+ // continue up through the powers of 2, staying below
+ // the maximum number of CPUs supported by the kernel),
+ // and it is subject to change (in libnuma version 2
+ // the requirements are more reasonable), so we just
+ // hardcode the number the library uses.
+ const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
+
+ size_t cpu_num = os::active_processor_count();
+ size_t cpu_map_size = NCPUS / BitsPerCLong;
+ size_t cpu_map_valid_size =
+ MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
+
cpu_to_node()->clear();
cpu_to_node()->at_grow(cpu_num - 1);
- int node_num = numa_get_groups_num();
- int cpu_map_size = (cpu_num + BitsPerLong - 1) / BitsPerLong;
+ size_t node_num = numa_get_groups_num();
+
unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
- for (int i = 0; i < node_num; i++) {
+ for (size_t i = 0; i < node_num; i++) {
if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
- for (int j = 0; j < cpu_map_size; j++) {
+ for (size_t j = 0; j < cpu_map_valid_size; j++) {
if (cpu_map[j] != 0) {
- for (int k = 0; k < BitsPerLong; k++) {
+ for (size_t k = 0; k < BitsPerCLong; k++) {
if (cpu_map[j] & (1UL << k)) {
- cpu_to_node()->at_put(j * BitsPerLong + k, i);
+ cpu_to_node()->at_put(j * BitsPerCLong + k, i);
}
}
}
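
The decode loop above walks a libnuma CPU bitmask: bit k of word j means CPU number j * BitsPerCLong + k belongs to the node, and cpu_map_valid_size clamps the scan to the words that can actually hold configured CPUs. A small standalone sketch of the decoding (mask values are hypothetical):

    #include <climits>
    #include <cstddef>
    #include <cstdio>

    int main() {
        const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
        // Hypothetical mask for one node: bits 0 and 1 of word 0, bit 1 of word 1.
        unsigned long cpu_map[2] = { 0x3UL, 0x2UL };
        for (size_t j = 0; j < 2; j++)
            for (size_t k = 0; k < BitsPerCLong; k++)
                if (cpu_map[j] & (1UL << k))
                    std::printf("cpu %zu is on this node\n", j * BitsPerCLong + k);
        return 0;
    }
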
@@ -2377,7 +2399,8 @@
os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
os::Linux::numa_available_func_t os::Linux::_numa_available;
os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
-
+os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
+unsigned long* os::Linux::_numa_all_nodes;
bool os::uncommit_memory(char* addr, size_t size) {
return ::mmap(addr, size,
@@ -3695,7 +3718,17 @@
}
if (UseNUMA) {
- Linux::libnuma_init();
+ if (!Linux::libnuma_init()) {
+ UseNUMA = false;
+ } else {
+ if ((Linux::numa_max_node() < 1)) {
+ // There's only one node (they start from 0), so disable NUMA.
+ UseNUMA = false;
+ }
+ }
+ if (!UseNUMA && ForceNUMA) {
+ UseNUMA = true;
+ }
}
if (MaxFDLimit) {
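
libnuma_init() follows the usual optional-dependency pattern: resolve the entry points at runtime and report failure (so UseNUMA gets turned off) when anything is missing. A minimal sketch of that pattern, assuming the libnuma 1.x ABI this patch targets (compile with -ldl):

    #include <dlfcn.h>
    #include <cstddef>

    typedef int  (*numa_available_func_t)(void);
    typedef void (*numa_interleave_memory_func_t)(void* start, size_t size,
                                                  unsigned long* nodemask);

    static numa_available_func_t         numa_available_fn;
    static numa_interleave_memory_func_t numa_interleave_fn;
    static unsigned long*                numa_all_nodes_ptr;

    bool libnuma_init_sketch() {
        void* handle = dlopen("libnuma.so", RTLD_LAZY);   // same name the patch uses
        if (handle == NULL) return false;
        numa_available_fn  = (numa_available_func_t) dlsym(handle, "numa_available");
        numa_interleave_fn = (numa_interleave_memory_func_t)
                                 dlsym(handle, "numa_interleave_memory");
        numa_all_nodes_ptr = (unsigned long*) dlsym(handle, "numa_all_nodes");
        return numa_available_fn != NULL && numa_available_fn() != -1
            && numa_interleave_fn != NULL && numa_all_nodes_ptr != NULL;
    }
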
--- a/hotspot/src/os/linux/vm/os_linux.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -146,7 +146,7 @@
static bool is_floating_stack() { return _is_floating_stack; }
static void libpthread_init();
- static void libnuma_init();
+ static bool libnuma_init();
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
@@ -240,20 +240,23 @@
typedef int (*numa_max_node_func_t)(void);
typedef int (*numa_available_func_t)(void);
typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
-
+ typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
static sched_getcpu_func_t _sched_getcpu;
static numa_node_to_cpus_func_t _numa_node_to_cpus;
static numa_max_node_func_t _numa_max_node;
static numa_available_func_t _numa_available;
static numa_tonode_memory_func_t _numa_tonode_memory;
+ static numa_interleave_memory_func_t _numa_interleave_memory;
+ static unsigned long* _numa_all_nodes;
static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
-
+ static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
+ static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
public:
static int sched_getcpu() { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
@@ -264,6 +267,11 @@
static int numa_tonode_memory(void *start, size_t size, int node) {
return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
}
+ static void numa_interleave_memory(void *start, size_t size) {
+ if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
+ _numa_interleave_memory(start, size, _numa_all_nodes);
+ }
+ }
static int get_node_by_cpu(int cpu_id);
};
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -4638,7 +4638,7 @@
}
}
-void os::Solaris::liblgrp_init() {
+bool os::Solaris::liblgrp_init() {
void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
if (handle != NULL) {
os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
@@ -4653,9 +4653,9 @@
lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
set_lgrp_cookie(c);
- } else {
- warning("your OS does not support NUMA");
- }
+ return true;
+ }
+ return false;
}
void os::Solaris::misc_sym_init() {
@@ -4824,9 +4824,25 @@
vm_page_size()));
Solaris::libthread_init();
+
if (UseNUMA) {
- Solaris::liblgrp_init();
- }
+ if (!Solaris::liblgrp_init()) {
+ UseNUMA = false;
+ } else {
+ size_t lgrp_limit = os::numa_get_groups_num();
+ int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
+ size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
+ FREE_C_HEAP_ARRAY(int, lgrp_ids);
+ if (lgrp_num < 2) {
+ // There's only one locality group, so disable NUMA.
+ UseNUMA = false;
+ }
+ }
+ if (!UseNUMA && ForceNUMA) {
+ UseNUMA = true;
+ }
+ }
+
Solaris::misc_sym_init();
Solaris::signal_sets_init();
Solaris::init_signal_mem();
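
Both the Linux and Solaris hunks now share the same decision shape: NUMA support is disabled when the library is absent or only one locality group exists, and ForceNUMA re-enables it for testing. A compact sketch of that logic (the probe arguments are stand-ins for numa_max_node()/numa_get_leaf_groups()):

    #include <cassert>

    // lib_ok: library loaded; groups: locality groups found; force: the ForceNUMA flag.
    bool decide_use_numa(bool lib_ok, unsigned groups, bool force) {
        bool use_numa = lib_ok && groups >= 2;    // need at least two groups to matter
        if (!use_numa && force) use_numa = true;  // ForceNUMA overrides, for testing
        return use_numa;
    }

    int main() {
        assert(!decide_use_numa(true, 1, false)); // single group: NUMA off
        assert( decide_use_numa(true, 2, false)); // two groups: NUMA on
        assert( decide_use_numa(false, 0, true)); // ForceNUMA wins regardless
        return 0;
    }
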
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -176,7 +176,7 @@
public:
static void libthread_init();
static void synchronization_init();
- static void liblgrp_init();
+ static bool liblgrp_init();
// Load miscellaneous symbols.
static void misc_sym_init();
// This boolean allows users to forward their own non-matching signals
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -2217,15 +2217,10 @@
// We only expect null pointers in the stubs (vtable)
// the rest are checked explicitly now.
//
- CodeBlob* cb = CodeCache::find_blob(pc);
- if (cb != NULL) {
- if (VtableStubs::stub_containing(pc) != NULL) {
- if (((uintptr_t)addr) < os::vm_page_size() ) {
- // an access to the first page of VM--assume it is a null pointer
- return Handle_Exception(exceptionInfo,
- SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
- }
- }
+ if (((uintptr_t)addr) < os::vm_page_size() ) {
+ // an access to the first page of VM--assume it is a null pointer
+ address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+ if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
}
} // in_java
@@ -2241,9 +2236,8 @@
// Windows 98 reports faulting addresses incorrectly
if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
!os::win32::is_nt()) {
-
- return Handle_Exception(exceptionInfo,
- SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL));
+ address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+ if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
}
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
@@ -3353,6 +3347,10 @@
// initialize thread priority policy
prio_init();
+ if (UseNUMA && !ForceNUMA) {
+ UseNUMA = false; // Currently unsupported.
+ }
+
return JNI_OK;
}
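
The restructured handler above asks SharedRuntime for a continuation stub first and only raises the exception when one exists, instead of assuming every faulting PC has one. The heuristic both branches key off is the first-page test; a standalone sketch (the page size is illustrative):

    #include <cassert>
    #include <cstdint>

    // A fault address below the VM page size is taken to be a null-pointer
    // dereference (an offset into a field of a null object); page zero is never mapped.
    bool looks_like_null_deref(uintptr_t fault_addr, uintptr_t vm_page_size) {
        return fault_addr < vm_page_size;
    }

    int main() {
        const uintptr_t page = 4096;                    // illustrative page size
        assert( looks_like_null_deref(0x28, page));     // null->field access
        assert(!looks_like_null_deref(0x10000, page));  // a genuinely wild pointer
        return 0;
    }
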
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -676,21 +676,6 @@
}
-void GraphBuilder::kill_field(ciField* field) {
- if (UseLocalValueNumbering) {
- vmap()->kill_field(field);
- }
-}
-
-
-void GraphBuilder::kill_array(Value value) {
- if (UseLocalValueNumbering) {
- vmap()->kill_array(value->type());
- }
- _memory->store_value(value);
-}
-
-
void GraphBuilder::kill_all() {
if (UseLocalValueNumbering) {
vmap()->kill_all();
@@ -987,8 +972,8 @@
length = append(new ArrayLength(array, lock_stack()));
}
StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
- kill_array(value); // invalidate all CSEs that are memory accesses of the same type
append(result);
+ _memory->store_value(value);
}
@@ -1478,9 +1463,6 @@
case Bytecodes::_putstatic:
{ Value val = pop(type);
append(new StoreField(append(obj), offset, field, val, true, lock_stack(), state_copy, is_loaded, is_initialized));
- if (UseLocalValueNumbering) {
- vmap()->kill_field(field); // invalidate all CSEs that are memory accesses
- }
}
break;
case Bytecodes::_getfield :
@@ -1503,7 +1485,6 @@
if (is_loaded) store = _memory->store(store);
if (store != NULL) {
append(store);
- kill_field(field); // invalidate all CSEs that are accesses of this field
}
}
break;
@@ -1900,6 +1881,8 @@
assert(i2->bci() != -1, "should already be linked");
return i2;
}
+ ValueNumberingEffects vne(vmap());
+ i1->visit(&vne);
}
if (i1->as_Phi() == NULL && i1->as_Local() == NULL) {
@@ -1926,14 +1909,8 @@
assert(_last == i1, "adjust code below");
StateSplit* s = i1->as_StateSplit();
if (s != NULL && i1->as_BlockEnd() == NULL) {
- // Continue CSE across certain intrinsics
- Intrinsic* intrinsic = s->as_Intrinsic();
- if (UseLocalValueNumbering) {
- if (intrinsic == NULL || !intrinsic->preserves_state()) {
- vmap()->kill_all(); // for now, hopefully we need this only for calls eventually
- }
- }
if (EliminateFieldAccess) {
+ Intrinsic* intrinsic = s->as_Intrinsic();
if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) {
_memory->kill();
}
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -283,8 +283,6 @@
Dependencies* dependency_recorder() const; // = compilation()->dependencies()
bool direct_compare(ciKlass* k);
- void kill_field(ciField* field);
- void kill_array(Value value);
void kill_all();
ValueStack* lock_stack();
--- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -133,53 +133,77 @@
virtual void kill_array(ValueType* type) = 0;
// visitor functions
- void do_StoreField (StoreField* x) { kill_field(x->field()); };
- void do_StoreIndexed (StoreIndexed* x) { kill_array(x->type()); };
- void do_MonitorEnter (MonitorEnter* x) { kill_memory(); };
- void do_MonitorExit (MonitorExit* x) { kill_memory(); };
- void do_Invoke (Invoke* x) { kill_memory(); };
- void do_UnsafePutRaw (UnsafePutRaw* x) { kill_memory(); };
- void do_UnsafePutObject(UnsafePutObject* x) { kill_memory(); };
- void do_Intrinsic (Intrinsic* x) { if (!x->preserves_state()) kill_memory(); };
+ void do_StoreField (StoreField* x) {
+ if (!x->is_initialized()) {
+ kill_memory();
+ } else {
+ kill_field(x->field());
+ }
+ }
+ void do_StoreIndexed (StoreIndexed* x) { kill_array(x->type()); }
+ void do_MonitorEnter (MonitorEnter* x) { kill_memory(); }
+ void do_MonitorExit (MonitorExit* x) { kill_memory(); }
+ void do_Invoke (Invoke* x) { kill_memory(); }
+ void do_UnsafePutRaw (UnsafePutRaw* x) { kill_memory(); }
+ void do_UnsafePutObject(UnsafePutObject* x) { kill_memory(); }
+ void do_Intrinsic (Intrinsic* x) { if (!x->preserves_state()) kill_memory(); }
- void do_Phi (Phi* x) { /* nothing to do */ };
- void do_Local (Local* x) { /* nothing to do */ };
- void do_Constant (Constant* x) { /* nothing to do */ };
- void do_LoadField (LoadField* x) { /* nothing to do */ };
- void do_ArrayLength (ArrayLength* x) { /* nothing to do */ };
- void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ };
- void do_NegateOp (NegateOp* x) { /* nothing to do */ };
- void do_ArithmeticOp (ArithmeticOp* x) { /* nothing to do */ };
- void do_ShiftOp (ShiftOp* x) { /* nothing to do */ };
- void do_LogicOp (LogicOp* x) { /* nothing to do */ };
- void do_CompareOp (CompareOp* x) { /* nothing to do */ };
- void do_IfOp (IfOp* x) { /* nothing to do */ };
- void do_Convert (Convert* x) { /* nothing to do */ };
- void do_NullCheck (NullCheck* x) { /* nothing to do */ };
- void do_NewInstance (NewInstance* x) { /* nothing to do */ };
- void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ };
- void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ };
- void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ };
- void do_CheckCast (CheckCast* x) { /* nothing to do */ };
- void do_InstanceOf (InstanceOf* x) { /* nothing to do */ };
- void do_BlockBegin (BlockBegin* x) { /* nothing to do */ };
- void do_Goto (Goto* x) { /* nothing to do */ };
- void do_If (If* x) { /* nothing to do */ };
- void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ };
- void do_TableSwitch (TableSwitch* x) { /* nothing to do */ };
- void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ };
- void do_Return (Return* x) { /* nothing to do */ };
- void do_Throw (Throw* x) { /* nothing to do */ };
- void do_Base (Base* x) { /* nothing to do */ };
- void do_OsrEntry (OsrEntry* x) { /* nothing to do */ };
- void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ };
- void do_RoundFP (RoundFP* x) { /* nothing to do */ };
- void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ };
- void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ };
- void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ };
- void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
- void do_ProfileCall (ProfileCall* x) { /* nothing to do */ };
- void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ };
+ void do_Phi (Phi* x) { /* nothing to do */ }
+ void do_Local (Local* x) { /* nothing to do */ }
+ void do_Constant (Constant* x) { /* nothing to do */ }
+ void do_LoadField (LoadField* x) {
+ if (!x->is_initialized()) {
+ kill_memory();
+ }
+ }
+ void do_ArrayLength (ArrayLength* x) { /* nothing to do */ }
+ void do_LoadIndexed (LoadIndexed* x) { /* nothing to do */ }
+ void do_NegateOp (NegateOp* x) { /* nothing to do */ }
+ void do_ArithmeticOp (ArithmeticOp* x) { /* nothing to do */ }
+ void do_ShiftOp (ShiftOp* x) { /* nothing to do */ }
+ void do_LogicOp (LogicOp* x) { /* nothing to do */ }
+ void do_CompareOp (CompareOp* x) { /* nothing to do */ }
+ void do_IfOp (IfOp* x) { /* nothing to do */ }
+ void do_Convert (Convert* x) { /* nothing to do */ }
+ void do_NullCheck (NullCheck* x) { /* nothing to do */ }
+ void do_NewInstance (NewInstance* x) { /* nothing to do */ }
+ void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }
+ void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }
+ void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }
+ void do_CheckCast (CheckCast* x) { /* nothing to do */ }
+ void do_InstanceOf (InstanceOf* x) { /* nothing to do */ }
+ void do_BlockBegin (BlockBegin* x) { /* nothing to do */ }
+ void do_Goto (Goto* x) { /* nothing to do */ }
+ void do_If (If* x) { /* nothing to do */ }
+ void do_IfInstanceOf (IfInstanceOf* x) { /* nothing to do */ }
+ void do_TableSwitch (TableSwitch* x) { /* nothing to do */ }
+ void do_LookupSwitch (LookupSwitch* x) { /* nothing to do */ }
+ void do_Return (Return* x) { /* nothing to do */ }
+ void do_Throw (Throw* x) { /* nothing to do */ }
+ void do_Base (Base* x) { /* nothing to do */ }
+ void do_OsrEntry (OsrEntry* x) { /* nothing to do */ }
+ void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }
+ void do_RoundFP (RoundFP* x) { /* nothing to do */ }
+ void do_UnsafeGetRaw (UnsafeGetRaw* x) { /* nothing to do */ }
+ void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }
+ void do_UnsafePrefetchRead (UnsafePrefetchRead* x) { /* nothing to do */ }
+ void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
+ void do_ProfileCall (ProfileCall* x) { /* nothing to do */ }
+ void do_ProfileCounter (ProfileCounter* x) { /* nothing to do */ }
+};
+
+
+class ValueNumberingEffects: public ValueNumberingVisitor {
+ private:
+ ValueMap* _map;
+
+ public:
+ // implementation for abstract methods of ValueNumberingVisitor
+ void kill_memory() { _map->kill_memory(); }
+ void kill_field(ciField* field) { _map->kill_field(field); }
+ void kill_array(ValueType* type) { _map->kill_array(type); }
+
+ ValueNumberingEffects(ValueMap* map): _map(map) {}
};
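
The point of ValueNumberingEffects is that value-map invalidation moves out of hand-placed kill_field()/kill_array() calls in GraphBuilder and into a visitor driven by the instruction itself (see the i1->visit(&vne) call added to c1_GraphBuilder.cpp above), so a newly added instruction kind cannot silently skip its kills. A standalone C++ sketch of the shape (names simplified; not the real class hierarchy):

    #include <cstdio>

    struct EffectsVisitor {
        virtual void kill_memory() = 0;
        virtual void kill_field(const char* f) = 0;
        virtual ~EffectsVisitor() {}
    };

    struct Instruction {
        virtual void visit(EffectsVisitor* v) = 0;
        virtual ~Instruction() {}
    };

    struct StoreField : Instruction {
        const char* field; bool initialized;
        StoreField(const char* f, bool init) : field(f), initialized(init) {}
        void visit(EffectsVisitor* v) {
            if (!initialized) v->kill_memory();   // class init can run arbitrary code
            else              v->kill_field(field);
        }
    };

    struct MapEffects : EffectsVisitor {          // the ValueNumberingEffects analogue
        void kill_memory()             { std::printf("kill all CSEs\n"); }
        void kill_field(const char* f) { std::printf("kill CSEs for field %s\n", f); }
    };

    int main() {
        MapEffects effects;
        StoreField s("Foo.bar", true);
        s.visit(&effects);   // each appended instruction reports its own effects
        return 0;
    }
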
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -325,24 +325,30 @@
// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
-// processing phase of the CMS final checkpoint step.
+// processing phase of the CMS final checkpoint step, as
+// well as during the concurrent precleaning of the discovered
+// reference lists.
class CMSKeepAliveClosure: public OopClosure {
private:
CMSCollector* _collector;
const MemRegion _span;
CMSMarkStack* _mark_stack;
CMSBitMap* _bit_map;
+ bool _concurrent_precleaning;
protected:
DO_OOP_WORK_DEFN
public:
CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
- CMSBitMap* bit_map, CMSMarkStack* mark_stack):
+ CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+ bool cpc):
_collector(collector),
_span(span),
_bit_map(bit_map),
- _mark_stack(mark_stack) {
+ _mark_stack(mark_stack),
+ _concurrent_precleaning(cpc) {
assert(!_span.is_empty(), "Empty span could spell trouble");
}
+ bool concurrent_precleaning() const { return _concurrent_precleaning; }
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -538,6 +538,7 @@
_survivor_chunk_capacity(0), // -- ditto --
_survivor_chunk_index(0), // -- ditto --
_ser_pmc_preclean_ovflw(0),
+ _ser_kac_preclean_ovflw(0),
_ser_pmc_remark_ovflw(0),
_par_pmc_remark_ovflw(0),
_ser_kac_ovflw(0),
@@ -1960,6 +1961,7 @@
ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery();
+ ref_processor()->setup_policy(clear_all_soft_refs);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
// collection, clear the _modUnionTable.
@@ -2383,6 +2385,9 @@
Universe::verify(true);
}
+ // Snapshot the soft reference policy to be used in this collection cycle.
+ ref_processor()->setup_policy(clear_all_soft_refs);
+
bool init_mark_was_synchronous = false; // until proven otherwise
while (_collectorState != Idling) {
if (TraceCMSState) {
@@ -4388,10 +4393,10 @@
CMSPrecleanRefsYieldClosure yield_cl(this);
assert(rp->span().equals(_span), "Spans should be equal");
CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
- &_markStack);
+ &_markStack, true /* preclean */);
CMSDrainMarkingStackClosure complete_trace(this,
- _span, &_markBitMap, &_markStack,
- &keep_alive);
+ _span, &_markBitMap, &_markStack,
+ &keep_alive, true /* preclean */);
// We don't want this step to interfere with a young
// collection because we don't want to take CPU
@@ -4590,11 +4595,11 @@
if (!dirtyRegion.is_empty()) {
assert(numDirtyCards > 0, "consistency check");
HeapWord* stop_point = NULL;
+ stopTimer();
+ CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+ bitMapLock());
+ startTimer();
{
- stopTimer();
- CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
- bitMapLock());
- startTimer();
verify_work_stacks_empty();
verify_overflow_empty();
sample_eden();
@@ -4611,10 +4616,6 @@
assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) ||
(_collectorState == AbortablePreclean && should_abort_preclean()),
"Unparsable objects should only be in perm gen.");
-
- stopTimer();
- CMSTokenSyncWithLocks ts(true, bitMapLock());
- startTimer();
_modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
if (should_abort_preclean()) {
break; // out of preclean loop
@@ -4852,17 +4853,19 @@
// recurrence of that condition.
assert(_markStack.isEmpty(), "No grey objects");
size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
- _ser_kac_ovflw;
+ _ser_kac_ovflw + _ser_kac_preclean_ovflw;
if (ser_ovflw > 0) {
if (PrintCMSStatistics != 0) {
gclog_or_tty->print_cr("Marking stack overflow (benign) "
- "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
+ "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
+ ", kac_preclean="SIZE_FORMAT")",
_ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
- _ser_kac_ovflw);
+ _ser_kac_ovflw, _ser_kac_preclean_ovflw);
}
_markStack.expand();
_ser_pmc_remark_ovflw = 0;
_ser_pmc_preclean_ovflw = 0;
+ _ser_kac_preclean_ovflw = 0;
_ser_kac_ovflw = 0;
}
if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
@@ -5675,40 +5678,29 @@
ResourceMark rm;
HandleMark hm;
- ReferencePolicy* soft_ref_policy;
-
- assert(!ref_processor()->enqueuing_is_done(), "Enqueuing should not be complete");
- // Process weak references.
- if (clear_all_soft_refs) {
- soft_ref_policy = new AlwaysClearPolicy();
- } else {
-#ifdef COMPILER2
- soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
- }
- verify_work_stacks_empty();
ReferenceProcessor* rp = ref_processor();
assert(rp->span().equals(_span), "Spans should be equal");
+ assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
+ // Process weak references.
+ rp->setup_policy(clear_all_soft_refs);
+ verify_work_stacks_empty();
+
CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
- &_markStack);
+ &_markStack, false /* !preclean */);
CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
_span, &_markBitMap, &_markStack,
- &cmsKeepAliveClosure);
+ &cmsKeepAliveClosure, false /* !preclean */);
{
TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
if (rp->processing_is_mt()) {
CMSRefProcTaskExecutor task_executor(*this);
- rp->process_discovered_references(soft_ref_policy,
- &_is_alive_closure,
+ rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
&task_executor);
} else {
- rp->process_discovered_references(soft_ref_policy,
- &_is_alive_closure,
+ rp->process_discovered_references(&_is_alive_closure,
&cmsKeepAliveClosure,
&cmsDrainMarkingStackClosure,
NULL);
@@ -6163,8 +6155,8 @@
#endif
size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
- assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
- "missing Printezis mark?");
+ assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
+ "missing Printezis mark?");
HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
size_t size = pointer_delta(nextOneAddr + 1, addr);
assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
@@ -8302,8 +8294,29 @@
}
)
if (simulate_overflow || !_mark_stack->push(obj)) {
- _collector->push_on_overflow_list(obj);
- _collector->_ser_kac_ovflw++;
+ if (_concurrent_precleaning) {
+ // We dirty the overflowed object's card(s) and let the
+ // remark phase deal with it.
+ assert(_collector->overflow_list_is_empty(), "Error");
+ // In the case of object arrays, we need to dirty all of
+ // the cards that the object spans. No locking or atomics
+ // are needed since no one else can be mutating the mod union
+ // table.
+ if (obj->is_objArray()) {
+ size_t sz = obj->size();
+ HeapWord* end_card_addr =
+ (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
+ MemRegion redirty_range = MemRegion(addr, end_card_addr);
+ assert(!redirty_range.is_empty(), "Arithmetical tautology");
+ _collector->_modUnionTable.mark_range(redirty_range);
+ } else {
+ _collector->_modUnionTable.mark(addr);
+ }
+ _collector->_ser_kac_preclean_ovflw++;
+ } else {
+ _collector->push_on_overflow_list(obj);
+ _collector->_ser_kac_ovflw++;
+ }
}
}
}
@@ -8400,6 +8413,8 @@
void CMSDrainMarkingStackClosure::do_void() {
// the max number to take from overflow list at a time
const size_t num = _mark_stack->capacity()/4;
+ assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
+ "Overflow list should be NULL during concurrent phases");
while (!_mark_stack->isEmpty() ||
// if stack is empty, check the overflow list
_collector->take_from_overflow_list(num, _mark_stack)) {
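
In the new preclean overflow path, the object is not pushed on the overflow list (which must stay empty during concurrent phases); instead its mod-union-table cards are dirtied so remark rescans it, and for object arrays the dirtied range is rounded up to cover every card the array spans. A standalone sketch of that rounding (addresses are illustrative; HotSpot's CardTableModRefBS::card_size is 512 bytes):

    #include <cassert>
    #include <cstdint>

    int main() {
        const intptr_t card_size = 512;          // bytes per card
        intptr_t obj_start = 4096 + 40;          // object start address
        intptr_t obj_end   = obj_start + 3000;   // start + size
        // round_to(obj_end, card_size): round up to the next card boundary.
        intptr_t end_card_addr = (obj_end + card_size - 1) & ~(card_size - 1);
        assert(end_card_addr % card_size == 0);  // aligned to a card boundary
        assert(end_card_addr >= obj_end);        // covers the whole object
        assert(end_card_addr - obj_start > 0);   // the "arithmetical tautology"
        return 0;
    }
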
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -592,6 +592,7 @@
size_t _ser_pmc_preclean_ovflw;
size_t _ser_pmc_remark_ovflw;
size_t _par_pmc_remark_ovflw;
+ size_t _ser_kac_preclean_ovflw;
size_t _ser_kac_ovflw;
size_t _par_kac_ovflw;
NOT_PRODUCT(size_t _num_par_pushes;)
@@ -1749,21 +1750,30 @@
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
+// This closure is currently used both during the final
+// remark stop-world phase and during the concurrent
+// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
CMSCollector* _collector;
MemRegion _span;
CMSMarkStack* _mark_stack;
CMSBitMap* _bit_map;
CMSKeepAliveClosure* _keep_alive;
+ bool _concurrent_precleaning;
public:
CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
CMSBitMap* bit_map, CMSMarkStack* mark_stack,
- CMSKeepAliveClosure* keep_alive):
+ CMSKeepAliveClosure* keep_alive,
+ bool cpc):
_collector(collector),
_span(span),
_bit_map(bit_map),
_mark_stack(mark_stack),
- _keep_alive(keep_alive) { }
+ _keep_alive(keep_alive),
+ _concurrent_precleaning(cpc) {
+ assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
+ "Mismatch");
+ }
void do_void();
};
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -811,6 +811,7 @@
ReferenceProcessor* rp = g1h->ref_processor();
rp->verify_no_references_recorded();
rp->enable_discovery(); // enable ("weak") refs discovery
+ rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
satb_mq_set.set_process_completed_threshold(G1SATBProcessCompletedThreshold);
@@ -1829,32 +1830,21 @@
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
ResourceMark rm;
HandleMark hm;
- ReferencePolicy* soft_ref_policy;
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ ReferenceProcessor* rp = g1h->ref_processor();
// Process weak references.
- if (clear_all_soft_refs) {
- soft_ref_policy = new AlwaysClearPolicy();
- } else {
-#ifdef COMPILER2
- soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif
- }
+ rp->setup_policy(clear_all_soft_refs);
assert(_markStack.isEmpty(), "mark stack should be empty");
- G1CollectedHeap* g1 = G1CollectedHeap::heap();
- G1CMIsAliveClosure g1IsAliveClosure(g1);
-
- G1CMKeepAliveClosure g1KeepAliveClosure(g1, this, nextMarkBitMap());
+ G1CMIsAliveClosure g1IsAliveClosure (g1h);
+ G1CMKeepAliveClosure g1KeepAliveClosure(g1h, this, nextMarkBitMap());
G1CMDrainMarkingStackClosure
g1DrainMarkingStackClosure(nextMarkBitMap(), &_markStack,
&g1KeepAliveClosure);
// XXXYYY Also: copy the parallel ref processing code from CMS.
- ReferenceProcessor* rp = g1->ref_processor();
- rp->process_discovered_references(soft_ref_policy,
- &g1IsAliveClosure,
+ rp->process_discovered_references(&g1IsAliveClosure,
&g1KeepAliveClosure,
&g1DrainMarkingStackClosure,
NULL);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -891,6 +891,7 @@
ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
ref_processor()->enable_discovery();
+ ref_processor()->setup_policy(clear_all_soft_refs);
// Do collection work
{
@@ -2463,7 +2464,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
- // We want to turn off ref discovere, if necessary, and turn it back on
+ // We want to turn off ref discovery, if necessary, and turn it back
// on again later if we do.
bool was_enabled = ref_processor()->discovery_enabled();
if (was_enabled) ref_processor()->disable_discovery();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -33,8 +33,9 @@
// hook up weak ref data so it can be used during Mark-Sweep
assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
+ assert(rp != NULL, "should be non-NULL");
GenMarkSweep::_ref_processor = rp;
- assert(rp != NULL, "should be non-NULL");
+ rp->setup_policy(clear_all_softrefs);
// When collecting the permanent generation methodOops may be moving,
// so we either have to flush all bcp data or convert it into bci.
@@ -121,23 +122,12 @@
&GenMarkSweep::follow_root_closure);
// Process reference objects found during marking
- ReferencePolicy *soft_ref_policy;
- if (clear_all_softrefs) {
- soft_ref_policy = new AlwaysClearPolicy();
- } else {
-#ifdef COMPILER2
- soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif
- }
- assert(soft_ref_policy != NULL,"No soft reference policy");
- GenMarkSweep::ref_processor()->process_discovered_references(
- soft_ref_policy,
- &GenMarkSweep::is_alive,
- &GenMarkSweep::keep_alive,
- &GenMarkSweep::follow_stack_closure,
- NULL);
+ ReferenceProcessor* rp = GenMarkSweep::ref_processor();
+ rp->setup_policy(clear_all_softrefs);
+ rp->process_discovered_references(&GenMarkSweep::is_alive,
+ &GenMarkSweep::keep_alive,
+ &GenMarkSweep::follow_stack_closure,
+ NULL);
// Follow system dictionary roots and unload classes
bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -759,17 +759,12 @@
thread_state_set.steals(),
thread_state_set.pops()+thread_state_set.steals());
}
- assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
+ assert(thread_state_set.pushes() == thread_state_set.pops()
+ + thread_state_set.steals(),
"Or else the queues are leaky.");
- // For now, process discovered weak refs sequentially.
-#ifdef COMPILER2
- ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
// Process (weak) reference objects found during scavenge.
+ ReferenceProcessor* rp = ref_processor();
IsAliveClosure is_alive(this);
ScanWeakRefClosure scan_weak_ref(this);
KeepAliveClosure keep_alive(&scan_weak_ref);
@@ -778,18 +773,17 @@
set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
&scan_without_gc_barrier, &scan_with_gc_barrier);
- if (ref_processor()->processing_is_mt()) {
+ rp->setup_policy(clear_all_soft_refs);
+ if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
- ref_processor()->process_discovered_references(
- soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
- &task_executor);
+ rp->process_discovered_references(&is_alive, &keep_alive,
+ &evacuate_followers, &task_executor);
} else {
thread_state_set.flush();
gch->set_par_threads(0); // 0 ==> non-parallel.
gch->save_marks();
- ref_processor()->process_discovered_references(
- soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
- NULL);
+ rp->process_discovered_references(&is_alive, &keep_alive,
+ &evacuate_followers, NULL);
}
if (!promotion_failed()) {
// Swap the survivor spaces.
@@ -851,14 +845,14 @@
SpecializationStats::print();
- ref_processor()->set_enqueuing_is_done(true);
- if (ref_processor()->processing_is_mt()) {
+ rp->set_enqueuing_is_done(true);
+ if (rp->processing_is_mt()) {
ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
- ref_processor()->enqueue_discovered_references(&task_executor);
+ rp->enqueue_discovered_references(&task_executor);
} else {
- ref_processor()->enqueue_discovered_references(NULL);
+ rp->enqueue_discovered_references(NULL);
}
- ref_processor()->verify_no_references_recorded();
+ rp->verify_no_references_recorded();
}
static int sum;
@@ -1211,7 +1205,7 @@
int n = 0;
while (cur != NULL) {
oop obj_to_push = cur->forwardee();
- oop next = oop(cur->klass());
+ oop next = oop(cur->klass_or_null());
cur->set_klass(obj_to_push->klass());
if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
obj_to_push = cur;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -172,6 +172,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
+ ref_processor()->setup_policy(clear_all_softrefs);
mark_sweep_phase1(clear_all_softrefs);
@@ -517,20 +518,9 @@
// Process reference objects found during marking
{
- ReferencePolicy *soft_ref_policy;
- if (clear_all_softrefs) {
- soft_ref_policy = new AlwaysClearPolicy();
- } else {
-#ifdef COMPILER2
- soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
- }
- assert(soft_ref_policy != NULL,"No soft reference policy");
+ ref_processor()->setup_policy(clear_all_softrefs);
ref_processor()->process_discovered_references(
- soft_ref_policy, is_alive_closure(), mark_and_push_closure(),
- follow_stack_closure(), NULL);
+ is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
}
// Follow system dictionary roots and unload classes
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -1578,6 +1578,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
+ ref_processor()->setup_policy(maximum_heap_compaction);
bool marked_for_unloading = false;
@@ -1894,26 +1895,14 @@
// Process reference objects found during marking
{
TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
- ReferencePolicy *soft_ref_policy;
- if (maximum_heap_compaction) {
- soft_ref_policy = new AlwaysClearPolicy();
- } else {
-#ifdef COMPILER2
- soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
- }
- assert(soft_ref_policy != NULL, "No soft reference policy");
if (ref_processor()->processing_is_mt()) {
RefProcTaskExecutor task_executor;
ref_processor()->process_discovered_references(
- soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
- &follow_stack_closure, &task_executor);
+ is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
+ &task_executor);
} else {
ref_processor()->process_discovered_references(
- soft_ref_policy, is_alive_closure(), &mark_and_push_closure,
- &follow_stack_closure, NULL);
+ is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
}
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -330,6 +330,7 @@
COMPILER2_PRESENT(DerivedPointerTable::clear());
reference_processor()->enable_discovery();
+ reference_processor()->setup_policy(false);
// We track how much was promoted to the next generation for
// the AdaptiveSizePolicy.
@@ -394,24 +395,16 @@
// Process reference objects discovered during scavenge
{
-#ifdef COMPILER2
- ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
+ reference_processor()->setup_policy(false); // not always_clear
PSKeepAliveClosure keep_alive(promotion_manager);
PSEvacuateFollowersClosure evac_followers(promotion_manager);
- assert(soft_ref_policy != NULL,"No soft reference policy");
if (reference_processor()->processing_is_mt()) {
PSRefProcTaskExecutor task_executor;
reference_processor()->process_discovered_references(
- soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
- &task_executor);
+ &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
} else {
reference_processor()->process_discovered_references(
- soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
- NULL);
+ &_is_alive_closure, &keep_alive, &evac_followers, NULL);
}
}
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -414,9 +414,20 @@
if (limit > 0) {
limit = round_down(limit, page_size());
if (chunk_size > current_chunk_size(i)) {
- chunk_size = MIN2((off_t)chunk_size, (off_t)current_chunk_size(i) + (off_t)limit);
+ size_t upper_bound = pages_available * page_size();
+ if (upper_bound > limit &&
+ current_chunk_size(i) < upper_bound - limit) {
+ // The resulting upper bound should not exceed the available
+ // amount of memory (pages_available * page_size()).
+ upper_bound = current_chunk_size(i) + limit;
+ }
+ chunk_size = MIN2(chunk_size, upper_bound);
} else {
- chunk_size = MAX2((off_t)chunk_size, (off_t)current_chunk_size(i) - (off_t)limit);
+ size_t lower_bound = page_size();
+ if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
+ lower_bound = current_chunk_size(i) - limit;
+ }
+ chunk_size = MAX2(chunk_size, lower_bound);
}
}
assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
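
The rewritten bounds drop the off_t casts and the overflow/underflow they papered over: growth is capped at current + limit but never above pages_available * page_size(), and shrinkage at current - limit but never below one page, all in size_t arithmetic that checks before subtracting. A standalone sketch with the same structure (values are illustrative):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    size_t clamp_chunk(size_t desired, size_t current, size_t limit,
                       size_t available, size_t page) {
        if (desired > current) {
            size_t upper = available;                      // absolute ceiling
            if (upper > limit && current < upper - limit)  // no size_t overflow
                upper = current + limit;
            return std::min(desired, upper);
        }
        size_t lower = page;                               // absolute floor
        if (current > limit) lower = current - limit;      // no size_t underflow
        return std::max(desired, lower);
    }

    int main() {
        assert(clamp_chunk(1000, 100, 200, 500, 8) == 300); // growth capped at +200
        assert(clamp_chunk(10, 500, 200, 600, 8) == 300);   // shrink capped at -200
        return 0;
    }
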
--- a/hotspot/src/share/vm/includeDB_core Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/includeDB_core Fri Dec 05 15:32:59 2008 -0800
@@ -2303,6 +2303,7 @@
javaCalls.cpp interfaceSupport.hpp
javaCalls.cpp interpreter.hpp
javaCalls.cpp javaCalls.hpp
+javaCalls.cpp jniCheck.hpp
javaCalls.cpp linkResolver.hpp
javaCalls.cpp mutexLocker.hpp
javaCalls.cpp nmethod.hpp
@@ -3434,6 +3435,7 @@
referenceProcessor.cpp systemDictionary.hpp
referenceProcessor.hpp instanceRefKlass.hpp
+referenceProcessor.hpp referencePolicy.hpp
reflection.cpp arguments.hpp
reflection.cpp handles.inline.hpp
--- a/hotspot/src/share/vm/includeDB_features Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/includeDB_features Fri Dec 05 15:32:59 2008 -0800
@@ -115,6 +115,8 @@
heapInspection.cpp os.hpp
heapInspection.cpp resourceArea.hpp
+javaCalls.cpp jniCheck.hpp
+
jniCheck.cpp fieldDescriptor.hpp
jniCheck.cpp handles.hpp
jniCheck.cpp instanceKlass.hpp
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -540,14 +540,6 @@
assert(gch->no_allocs_since_save_marks(0),
"save marks have not been newly set.");
- // Weak refs.
- // FIXME: Are these storage leaks, or are they resource objects?
-#ifdef COMPILER2
- ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
// Not very pretty.
CollectorPolicy* cp = gch->collector_policy();
@@ -574,8 +566,10 @@
evacuate_followers.do_void();
FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
- ref_processor()->process_discovered_references(
- soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
+ ReferenceProcessor* rp = ref_processor();
+ rp->setup_policy(clear_all_soft_refs);
+ rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
+ NULL);
if (!promotion_failed()) {
// Swap the survivor spaces.
eden()->clear(SpaceDecorator::Mangle);
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -525,8 +525,9 @@
if (rp->discovery_is_atomic()) {
rp->verify_no_references_recorded();
rp->enable_discovery();
+ rp->setup_policy(clear_all_soft_refs);
} else {
- // collect() will enable discovery as appropriate
+ // collect() below will enable discovery as appropriate
}
_gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
if (!rp->enqueuing_is_done()) {
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -31,8 +31,9 @@
// hook up weak ref data so it can be used during Mark-Sweep
assert(ref_processor() == NULL, "no stomping");
+ assert(rp != NULL, "should be non-NULL");
_ref_processor = rp;
- assert(rp != NULL, "should be non-NULL");
+ rp->setup_policy(clear_all_softrefs);
TraceTime t1("Full GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
@@ -245,20 +246,9 @@
// Process reference objects found during marking
{
- ReferencePolicy *soft_ref_policy;
- if (clear_all_softrefs) {
- soft_ref_policy = new AlwaysClearPolicy();
- } else {
-#ifdef COMPILER2
- soft_ref_policy = new LRUMaxHeapPolicy();
-#else
- soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
- }
- assert(soft_ref_policy != NULL,"No soft reference policy");
+ ref_processor()->setup_policy(clear_all_softrefs);
ref_processor()->process_discovered_references(
- soft_ref_policy, &is_alive, &keep_alive,
- &follow_stack_closure, NULL);
+ &is_alive, &keep_alive, &follow_stack_closure, NULL);
}
// Follow system dictionary roots and unload classes
--- a/hotspot/src/share/vm/memory/referencePolicy.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/referencePolicy.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -26,6 +26,11 @@
# include "incls/_referencePolicy.cpp.incl"
LRUCurrentHeapPolicy::LRUCurrentHeapPolicy() {
+ setup();
+}
+
+// Capture state (of the VM) needed to evaluate the policy
+void LRUCurrentHeapPolicy::setup() {
_max_interval = (Universe::get_heap_free_at_last_gc() / M) * SoftRefLRUPolicyMSPerMB;
assert(_max_interval >= 0,"Sanity check");
}
@@ -47,6 +52,11 @@
/////////////////////// MaxHeap //////////////////////
LRUMaxHeapPolicy::LRUMaxHeapPolicy() {
+ setup();
+}
+
+// Capture state (of the VM) needed to evaluate the policy
+void LRUMaxHeapPolicy::setup() {
size_t max_heap = MaxHeapSize;
max_heap -= Universe::get_heap_used_at_last_gc();
max_heap /= M;
--- a/hotspot/src/share/vm/memory/referencePolicy.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/referencePolicy.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -26,9 +26,11 @@
// should be cleared.
-class ReferencePolicy : public ResourceObj {
+class ReferencePolicy : public CHeapObj {
public:
virtual bool should_clear_reference(oop p) { ShouldNotReachHere(); return true; }
+ // Capture state (of the VM) needed to evaluate the policy
+ virtual void setup() { /* do nothing */ }
};
class NeverClearPolicy : public ReferencePolicy {
@@ -48,6 +50,8 @@
public:
LRUCurrentHeapPolicy();
+ // Capture state (of the VM) needed to evaluate the policy
+ void setup();
bool should_clear_reference(oop p);
};
@@ -58,5 +62,7 @@
public:
LRUMaxHeapPolicy();
+ // Capture state (of the VM) needed to evaluate the policy
+ void setup();
bool should_clear_reference(oop p);
};
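
The switch from ResourceObj to CHeapObj changes the policy lifetime: policies become long-lived singletons allocated once at initialization, and setup() re-snapshots the VM state (free or used heap) at the start of each cycle instead of a fresh allocation per collection. A standalone sketch of that pattern (field values are stand-ins):

    #include <cassert>

    struct ReferencePolicy {
        virtual void setup() { /* capture per-cycle VM state; default: nothing */ }
        virtual ~ReferencePolicy() {}
    };

    struct LRUPolicy : ReferencePolicy {
        long max_interval;
        LRUPolicy() : max_interval(0) { setup(); }
        void setup() { max_interval = 42; /* stand-in for a free-heap-derived value */ }
    };

    static ReferencePolicy* default_policy = new LRUPolicy(); // once, at VM init

    int main() {
        default_policy->setup();  // re-snapshot at the start of each collection cycle
        assert(static_cast<LRUPolicy*>(default_policy)->max_interval == 42);
        return 0;
    }
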
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -25,6 +25,11 @@
# include "incls/_precompiled.incl"
# include "incls/_referenceProcessor.cpp.incl"
+ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
+ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
+oop ReferenceProcessor::_sentinelRef = NULL;
+const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
+
// List of discovered references.
class DiscoveredList {
public:
@@ -47,7 +52,9 @@
}
bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
size_t length() { return _len; }
- void set_length(size_t len) { _len = len; }
+ void set_length(size_t len) { _len = len; }
+ void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+ void dec_length(size_t dec) { _len -= dec; }
private:
// Set value depending on UseCompressedOops. This could be a template class
// but then we have to fix all the instantiations and declarations that use this class.
@@ -56,10 +63,6 @@
size_t _len;
};
-oop ReferenceProcessor::_sentinelRef = NULL;
-
-const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
-
void referenceProcessor_init() {
ReferenceProcessor::init_statics();
}
@@ -80,6 +83,12 @@
}
assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
"Just constructed it!");
+ _always_clear_soft_ref_policy = new AlwaysClearPolicy();
+ _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
+ NOT_COMPILER2(LRUCurrentHeapPolicy());
+ if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
+ vm_exit_during_initialization("Could not allocate reference policy object");
+ }
guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
RefDiscoveryPolicy == ReferentBasedDiscovery,
"Unrecongnized RefDiscoveryPolicy");
@@ -106,6 +115,7 @@
vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
}
rp->set_is_alive_non_header(is_alive_non_header);
+ rp->setup_policy(false /* default soft ref policy */);
return rp;
}
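
The COMPILER2_PRESENT/NOT_COMPILER2 pair above is HotSpot's configuration-macro idiom: exactly one of the two expressions survives preprocessing, so the assignment selects LRUMaxHeapPolicy on COMPILER2 builds and LRUCurrentHeapPolicy otherwise. A sketch of the macro definitions (mirroring HotSpot's macros.hpp):

    #ifdef COMPILER2
      #define COMPILER2_PRESENT(code) code
      #define NOT_COMPILER2(code)
    #else
      #define COMPILER2_PRESENT(code)
      #define NOT_COMPILER2(code) code
    #endif
    // After preprocessing, the assignment above reads either
    //   _default_soft_ref_policy = new LRUMaxHeapPolicy();      // COMPILER2 builds
    // or
    //   _default_soft_ref_policy = new LRUCurrentHeapPolicy();  // other builds
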
@@ -192,7 +202,6 @@
}
void ReferenceProcessor::process_discovered_references(
- ReferencePolicy* policy,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc,
@@ -207,7 +216,7 @@
// Soft references
{
TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
- process_discovered_reflist(_discoveredSoftRefs, policy, true,
+ process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
is_alive, keep_alive, complete_gc, task_executor);
}
@@ -436,13 +445,13 @@
// The "allow_null_referent" argument tells us to allow for the possibility
// of a NULL referent in the discovered Reference object. This typically
// happens in the case of concurrent collectors that may have done the
- // discovery concurrently or interleaved with mutator execution.
+ // discovery concurrently, or interleaved, with mutator execution.
inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
// Move to the next discovered reference.
inline void next();
- // Remove the current reference from the list and move to the next.
+ // Remove the current reference from the list
inline void remove();
// Make the Reference object active again.
@@ -476,7 +485,6 @@
inline size_t removed() const { return _removed; }
)
-private:
inline void move_to_next();
private:
@@ -553,7 +561,7 @@
oopDesc::store_heap_oop((oop*)_prev_next, _next);
}
NOT_PRODUCT(_removed++);
- move_to_next();
+ _refs_list.dec_length(1);
}
inline void DiscoveredListIterator::move_to_next() {
@@ -591,12 +599,13 @@
gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
iter.obj(), iter.obj()->blueprint()->internal_name());
}
+ // Remove Reference object from list
+ iter.remove();
// Make the Reference object active again
iter.make_active();
// keep the referent around
iter.make_referent_alive();
- // Remove Reference object from list
- iter.remove();
+ iter.move_to_next();
} else {
iter.next();
}
@@ -629,12 +638,13 @@
iter.obj(), iter.obj()->blueprint()->internal_name());
}
// The referent is reachable after all.
+ // Remove Reference object from list.
+ iter.remove();
// Update the referent pointer as necessary: Note that this
// should not entail any recursive marking because the
// referent must already have been traversed.
iter.make_referent_alive();
- // Remove Reference object from list
- iter.remove();
+ iter.move_to_next();
} else {
iter.next();
}
@@ -670,6 +680,7 @@
} else {
keep_alive->do_oop((oop*)next_addr);
}
+ iter.move_to_next();
} else {
iter.next();
}
@@ -832,9 +843,9 @@
}
java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
ref_lists[to_idx].set_head(move_head);
- ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move);
+ ref_lists[to_idx].inc_length(refs_to_move);
ref_lists[from_idx].set_head(new_head);
- ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move);
+ ref_lists[from_idx].dec_length(refs_to_move);
} else {
++to_idx;
}
@@ -923,7 +934,6 @@
void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
assert(!discovery_is_atomic(), "Else why call this method?");
DiscoveredListIterator iter(refs_list, NULL, NULL);
- size_t length = refs_list.length();
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
oop next = java_lang_ref_Reference::next(iter.obj());
@@ -941,12 +951,11 @@
)
// Remove Reference object from list
iter.remove();
- --length;
+ iter.move_to_next();
} else {
iter.next();
}
}
- refs_list.set_length(length);
NOT_PRODUCT(
if (PrintGCDetails && TraceReferenceGC) {
gclog_or_tty->print(
@@ -1024,7 +1033,7 @@
// We have separate lists for enqueueing so no synchronization
// is necessary.
refs_list.set_head(obj);
- refs_list.set_length(refs_list.length() + 1);
+ refs_list.inc_length(1);
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
}
@@ -1090,15 +1099,28 @@
// reachable.
if (is_alive_non_header() != NULL) {
oop referent = java_lang_ref_Reference::referent(obj);
- // We'd like to assert the following:
- // assert(referent != NULL, "Refs with null referents already filtered");
- // However, since this code may be executed concurrently with
- // mutators, which can clear() the referent, it is not
- // guaranteed that the referent is non-NULL.
+ // In the case of non-concurrent discovery, the last
+ // disjunct below should hold. It may not hold in the
+ // case of concurrent discovery because mutators may
+ // concurrently clear() a Reference.
+ assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
+ "Refs with null referents already filtered");
if (is_alive_non_header()->do_object_b(referent)) {
return false; // referent is reachable
}
}
+ if (rt == REF_SOFT) {
+ // For soft refs we can decide now if these are not
+ // current candidates for clearing, in which case we
+ // can mark through them now, rather than delaying that
+ // to the reference-processing phase. Since all current
+ // time-stamp policies advance the soft-ref clock only
+ // at a major collection cycle, this is currently always
+ // accurate.
+ if (!_current_soft_ref_policy->should_clear_reference(obj)) {
+ return false;
+ }
+ }
HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
const oop discovered = java_lang_ref_Reference::discovered(obj);
@@ -1168,7 +1190,7 @@
_bs->write_ref_field((oop*)discovered_addr, current_head);
}
list->set_head(obj);
- list->set_length(list->length() + 1);
+ list->inc_length(1);
}
// In the MT discovery case, it is currently possible to see
@@ -1209,45 +1231,48 @@
TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
+ if (yield->should_return()) {
+ return;
+ }
preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
keep_alive, complete_gc, yield);
}
}
- if (yield->should_return()) {
- return;
- }
// Weak references
{
TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
+ if (yield->should_return()) {
+ return;
+ }
preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
keep_alive, complete_gc, yield);
}
}
- if (yield->should_return()) {
- return;
- }
// Final references
{
TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
+ if (yield->should_return()) {
+ return;
+ }
preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
keep_alive, complete_gc, yield);
}
}
- if (yield->should_return()) {
- return;
- }
// Phantom references
{
TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
false, gclog_or_tty);
for (int i = 0; i < _num_q; i++) {
+ if (yield->should_return()) {
+ return;
+ }
preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
keep_alive, complete_gc, yield);
}
@@ -1256,9 +1281,12 @@
// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL, or which
-// are not active (have a non-NULL next field). NOTE: For this to work
-// correctly, refs discovery can not be happening concurrently with this
-// step.
+// are not active (have a non-NULL next field). NOTE: When we are
+// thus precleaning the ref lists (which happens single-threaded today),
+// we do not disable refs discovery to honour the correct semantics of
+// java.lang.Reference. As a result, we need to be careful below
+// that ref removal steps interleave safely with ref discovery steps
+// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
@@ -1266,7 +1294,6 @@
VoidClosure* complete_gc,
YieldClosure* yield) {
DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
- size_t length = refs_list.length();
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
oop obj = iter.obj();
@@ -1281,7 +1308,6 @@
}
// Remove Reference object from list
iter.remove();
- --length;
// Keep alive its cohort.
iter.make_referent_alive();
if (UseCompressedOops) {
@@ -1291,12 +1317,11 @@
oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
keep_alive->do_oop(next_addr);
}
+ iter.move_to_next();
} else {
iter.next();
}
}
- refs_list.set_length(length);
-
// Close the reachable set
complete_gc->do_void();
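
The iterator rework above separates unlinking from advancing: remove() now only unlinks the current Reference and shrinks the list via dec_length(1), and each caller advances explicitly with move_to_next() (or next() on the keep path), which is also why the trailing set_length(length) bookkeeping disappears. A standalone miniature of the resulting contract, assuming a singly linked list with an external length field (illustrative C++, not HotSpot code):

    #include <cassert>
    #include <cstddef>

    struct Ref  { Ref* next; bool drop; };
    struct List {
      Ref*   head;
      size_t length;
      void dec_length(size_t n) { length -= n; }
    };

    // The patched contract: remove() only unlinks and shrinks the list;
    // the caller advances explicitly afterwards.
    static void scan(List& list, bool (*should_drop)(const Ref*)) {
      Ref** prev_next = &list.head;     // slot the current node hangs off
      Ref*  cur       = list.head;
      while (cur != nullptr) {
        Ref* next = cur->next;
        if (should_drop(cur)) {
          *prev_next = next;            // remove(): unlink current node
          list.dec_length(1);           // keep the length consistent now
          cur = next;                   // move_to_next(): explicit advance
        } else {
          prev_next = &cur->next;       // next(): keep node, advance cursors
          cur = next;
        }
      }
    }

    int main() {
      Ref c = { nullptr, false }, b = { &c, true }, a = { &b, false };
      List l = { &a, 3 };
      scan(l, [](const Ref* r) { return r->drop; });
      assert(l.length == 2 && a.next == &c);
      return 0;
    }

The incremental dec_length()/inc_length() accounting is also what lets the queue-balancing and discovery hunks adjust list lengths without each caller re-deriving a running count.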
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -23,7 +23,7 @@
*/
// ReferenceProcessor class encapsulates the per-"collector" processing
-// of "weak" references for GC. The interface is useful for supporting
+// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
@@ -75,6 +75,14 @@
// all collectors but the CMS collector).
BoolObjectClosure* _is_alive_non_header;
+ // Soft ref clearing policies
+ // . the default policy
+ static ReferencePolicy* _default_soft_ref_policy;
+ // . the "clear all" policy
+ static ReferencePolicy* _always_clear_soft_ref_policy;
+ // . the current policy below is either one of the above
+ ReferencePolicy* _current_soft_ref_policy;
+
// The discovered ref lists themselves
// The MT'ness degree of the queues below
@@ -90,6 +98,12 @@
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
static oop sentinel_ref() { return _sentinelRef; }
static oop* adr_sentinel_ref() { return &_sentinelRef; }
+ ReferencePolicy* setup_policy(bool always_clear) {
+ _current_soft_ref_policy = always_clear ?
+ _always_clear_soft_ref_policy : _default_soft_ref_policy;
+ _current_soft_ref_policy->setup(); // snapshot the policy threshold
+ return _current_soft_ref_policy;
+ }
public:
// Process references with a certain reachability level.
@@ -297,8 +311,7 @@
bool discover_reference(oop obj, ReferenceType rt);
// Process references found during GC (called by the garbage collector)
- void process_discovered_references(ReferencePolicy* policy,
- BoolObjectClosure* is_alive,
+ void process_discovered_references(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
VoidClosure* complete_gc,
AbstractRefProcTaskExecutor* task_executor);
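
Since process_discovered_references() no longer takes a ReferencePolicy*, the policy choice moves to the start of the cycle: a collector calls setup_policy(), the processor snapshots the threshold, and processing later reads _current_soft_ref_policy internally. A hedged sketch of the expected call sequence (closure construction elided; clear_all_soft_refs stands for whatever the collector derives from the GC cause):

    ReferenceProcessor* rp = ref_processor();
    // true selects _always_clear_soft_ref_policy, false the default policy;
    // setup() snapshots the soft-ref clock/threshold for this cycle.
    rp->setup_policy(clear_all_soft_refs);
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, NULL /* serial */);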
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/memory/universe.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -96,7 +96,7 @@
bool Universe::_fully_initialized = false;
size_t Universe::_heap_capacity_at_last_gc;
-size_t Universe::_heap_used_at_last_gc;
+size_t Universe::_heap_used_at_last_gc = 0;
CollectedHeap* Universe::_collectedHeap = NULL;
address Universe::_heap_base = NULL;
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -92,7 +92,7 @@
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
if (UseCompressedOops) {
- _metadata._compressed_klass = encode_heap_oop_not_null(k);
+ _metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling)
} else {
_metadata._klass = (klassOop)k;
}
--- a/hotspot/src/share/vm/opto/callnode.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/callnode.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -395,7 +395,13 @@
OptoReg::regname(OptoReg::c_frame_pointer),
regalloc->reg2offset(box_reg));
}
- format_helper( regalloc, st, obj, "MON-OBJ[", i, &scobjs );
+ const char* obj_msg = "MON-OBJ[";
+ if (EliminateLocks) {
+ while( !box->is_BoxLock() ) box = box->in(1);
+ if (box->as_BoxLock()->is_eliminated())
+ obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
+ }
+ format_helper( regalloc, st, obj, obj_msg, i, &scobjs );
}
for (i = 0; i < (uint)scobjs.length(); i++) {
@@ -908,8 +914,9 @@
add_req(lock->box_node());
add_req(lock->obj_node());
} else {
- add_req(NULL);
- add_req(NULL);
+ Node* top = Compile::current()->top();
+ add_req(top);
+ add_req(top);
}
jvms()->set_scloff(nextmon+MonitorEdges);
jvms()->set_endoff(req());
@@ -1382,7 +1389,7 @@
//
// If we are locking an unescaped object, the lock/unlock is unnecessary
//
- ConnectionGraph *cgr = Compile::current()->congraph();
+ ConnectionGraph *cgr = phase->C->congraph();
PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
if (cgr != NULL)
es = cgr->escape_state(obj_node(), phase);
@@ -1450,6 +1457,7 @@
// Mark it eliminated to update any counters
lock->set_eliminated();
+ lock->set_coarsened();
}
} else if (result != NULL && ctrl->is_Region() &&
iter->_worklist.member(ctrl)) {
@@ -1484,7 +1492,7 @@
//
// If we are unlocking an unescaped object, the lock/unlock is unnecessary.
//
- ConnectionGraph *cgr = Compile::current()->congraph();
+ ConnectionGraph *cgr = phase->C->congraph();
PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
if (cgr != NULL)
es = cgr->escape_state(obj_node(), phase);
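
Both format_helper() above and output.cpp further down walk the monitor's box edge with while( !box->is_BoxLock() ) box = box->in(1) before asking is_eliminated(), presumably because after graph transformations the edge can be a chain of nodes rather than a bare BoxLockNode. A small helper capturing the walk (a sketch; this patch keeps the loop inline at each site):

    // Resolve the BoxLockNode that ultimately backs a monitor's box edge.
    // Assumes the input-1 chain always terminates at a BoxLock, as the
    // inline loops in this patch do.
    static BoxLockNode* resolve_box(Node* box) {
      while (!box->is_BoxLock()) {
        box = box->in(1);
      }
      return box->as_BoxLock();
    }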
--- a/hotspot/src/share/vm/opto/callnode.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/callnode.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -780,7 +780,8 @@
//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
- bool _eliminate; // indicates this lock can be safely eliminated
+ bool _eliminate; // indicates this lock can be safely eliminated
+ bool _coarsened; // indicates this lock was coarsened
#ifndef PRODUCT
NamedCounter* _counter;
#endif
@@ -801,6 +802,7 @@
public:
AbstractLockNode(const TypeFunc *tf)
: CallNode(tf, NULL, TypeRawPtr::BOTTOM),
+ _coarsened(false),
_eliminate(false)
{
#ifndef PRODUCT
@@ -819,6 +821,9 @@
// mark node as eliminated and update the counter if there is one
void set_eliminated();
+ bool is_coarsened() { return _coarsened; }
+ void set_coarsened() { _coarsened = true; }
+
// locking does not modify its arguments
virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase){ return false;}
--- a/hotspot/src/share/vm/opto/compile.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/compile.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -1532,11 +1532,6 @@
if (failing()) return;
- // get rid of the connection graph since it's information is not
- // updated by optimizations
- _congraph = NULL;
-
-
// Loop transforms on the ideal graph. Range Check Elimination,
// peeling, unrolling, etc.
--- a/hotspot/src/share/vm/opto/escape.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/escape.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -199,7 +199,8 @@
es = ptnode_adr(idx)->escape_state();
// if we have already computed a value, return it
- if (es != PointsToNode::UnknownEscape)
+ if (es != PointsToNode::UnknownEscape &&
+ ptnode_adr(idx)->node_type() == PointsToNode::JavaObject)
return es;
// PointsTo() calls n->uncast() which can return a new ideal node.
--- a/hotspot/src/share/vm/opto/locknode.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/locknode.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -44,10 +44,15 @@
_inmask.Insert(reg);
}
+//-----------------------------hash--------------------------------------------
+uint BoxLockNode::hash() const {
+ return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
+}
+
//------------------------------cmp--------------------------------------------
uint BoxLockNode::cmp( const Node &n ) const {
const BoxLockNode &bn = (const BoxLockNode &)n;
- return bn._slot == _slot;
+ return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}
OptoReg::Name BoxLockNode::stack_slot(Node* box_node) {
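
The hash() change keeps GVN's basic invariant intact: cmp() now distinguishes boxes by (_slot, _is_eliminated), so hash() must fold in the same pair, otherwise equal-hashing but unequal-comparing nodes could collide indefinitely. Compile::current()->fixed_slots() simply serves as an offset large enough to push eliminated boxes into different buckets. A standalone illustration of the invariant (plain C++, not HotSpot code; 1024 stands in for fixed_slots()):

    #include <cassert>
    #include <cstddef>

    struct Box {
      int  slot;
      bool eliminated;
      // cmp() analogue: equality keys on both fields after this patch.
      bool equals(const Box& o) const {
        return slot == o.slot && eliminated == o.eliminated;
      }
      // hash() analogue: must fold in the same fields cmp() compares.
      size_t hash() const { return slot + (eliminated ? 1024 : 0); }
    };

    int main() {
      Box live = { 8, false };
      Box dead = { 8, true };   // same stack slot, eliminated clone
      assert(!live.equals(dead));          // cmp() keeps them distinct...
      assert(live.hash() != dead.hash());  // ...and hash() separates buckets
      return 0;
    }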
--- a/hotspot/src/share/vm/opto/locknode.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/locknode.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -36,7 +36,7 @@
virtual const RegMask &in_RegMask(uint) const;
virtual const RegMask &out_RegMask() const;
virtual uint size_of() const;
- virtual uint hash() const { return Node::hash() + _slot; }
+ virtual uint hash() const;
virtual uint cmp( const Node &n ) const;
virtual const class Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
virtual uint ideal_reg() const { return Op_RegP; }
--- a/hotspot/src/share/vm/opto/macro.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/macro.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -59,7 +59,7 @@
for (uint i = old_dbg_start; i < oldcall->req(); i++) {
Node* old_in = oldcall->in(i);
// Clone old SafePointScalarObjectNodes, adjusting their field contents.
- if (old_in->is_SafePointScalarObject()) {
+ if (old_in != NULL && old_in->is_SafePointScalarObject()) {
SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
uint old_unique = C->unique();
Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
@@ -1509,21 +1509,63 @@
if (!alock->is_eliminated()) {
return false;
}
- // Mark the box lock as eliminated if all correspondent locks are eliminated
- // to construct correct debug info.
- BoxLockNode* box = alock->box_node()->as_BoxLock();
- if (!box->is_eliminated()) {
- bool eliminate = true;
- for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) {
- Node *lck = box->fast_out(i);
- if (lck->is_Lock() && !lck->as_AbstractLock()->is_eliminated()) {
- eliminate = false;
- break;
- }
- }
- if (eliminate)
- box->set_eliminated();
- }
+ if (alock->is_Lock() && !alock->is_coarsened()) {
+ // Create new "eliminated" BoxLock node and use it
+ // in monitor debug info for the same object.
+ BoxLockNode* oldbox = alock->box_node()->as_BoxLock();
+ Node* obj = alock->obj_node();
+ if (!oldbox->is_eliminated()) {
+ BoxLockNode* newbox = oldbox->clone()->as_BoxLock();
+ newbox->set_eliminated();
+ transform_later(newbox);
+ // Replace old box node with new box for all users
+ // of the same object.
+ for (uint i = 0; i < oldbox->outcnt();) {
+
+ bool next_edge = true;
+ Node* u = oldbox->raw_out(i);
+ if (u == alock) {
+ i++;
+ continue; // It will be removed below
+ }
+ if (u->is_Lock() &&
+ u->as_Lock()->obj_node() == obj &&
+ // oldbox could be referenced in debug info also
+ u->as_Lock()->box_node() == oldbox) {
+ assert(u->as_Lock()->is_eliminated(), "sanity");
+ _igvn.hash_delete(u);
+ u->set_req(TypeFunc::Parms + 1, newbox);
+ next_edge = false;
+#ifdef ASSERT
+ } else if (u->is_Unlock() && u->as_Unlock()->obj_node() == obj) {
+ assert(u->as_Unlock()->is_eliminated(), "sanity");
+#endif
+ }
+ // Replace old box in monitor debug info.
+ if (u->is_SafePoint() && u->as_SafePoint()->jvms()) {
+ SafePointNode* sfn = u->as_SafePoint();
+ JVMState* youngest_jvms = sfn->jvms();
+ int max_depth = youngest_jvms->depth();
+ for (int depth = 1; depth <= max_depth; depth++) {
+ JVMState* jvms = youngest_jvms->of_depth(depth);
+ int num_mon = jvms->nof_monitors();
+ // Loop over monitors
+ for (int idx = 0; idx < num_mon; idx++) {
+ Node* obj_node = sfn->monitor_obj(jvms, idx);
+ Node* box_node = sfn->monitor_box(jvms, idx);
+ if (box_node == oldbox && obj_node == obj) {
+ int j = jvms->monitor_box_offset(idx);
+ _igvn.hash_delete(u);
+ u->set_req(j, newbox);
+ next_edge = false;
+ }
+ } // for (int idx = 0;
+ } // for (int depth = 1;
+ } // if (u->is_SafePoint()
+ if (next_edge) i++;
+ } // for (uint i = 0; i < oldbox->outcnt();)
+ } // if (!oldbox->is_eliminated())
+ } // if (alock->is_Lock() && !alock->is_coarsened())
#ifndef PRODUCT
if (PrintEliminateLocks) {
@@ -1562,6 +1604,15 @@
_igvn.subsume_node(ctrlproj, fallthroughproj);
_igvn.hash_delete(memproj);
_igvn.subsume_node(memproj, memproj_fallthrough);
+
+ // Delete FastLock node also if this Lock node is unique user
+ // (a loop peeling may clone a Lock node).
+ Node* flock = alock->as_Lock()->fastlock_node();
+ if (flock->outcnt() == 1) {
+ assert(flock->unique_out() == alock, "sanity");
+ _igvn.hash_delete(flock);
+ _igvn.subsume_node(flock, top());
+ }
}
// Search for MemBarRelease node and delete it also.
@@ -1887,7 +1938,7 @@
bool PhaseMacroExpand::expand_macro_nodes() {
if (C->macro_count() == 0)
return false;
- // attempt to eliminate allocations
+ // First, attempt to eliminate locks
bool progress = true;
while (progress) {
progress = false;
@@ -1895,6 +1946,26 @@
Node * n = C->macro_node(i-1);
bool success = false;
debug_only(int old_macro_count = C->macro_count(););
+ if (n->is_AbstractLock()) {
+ success = eliminate_locking_node(n->as_AbstractLock());
+ } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
+ _igvn.add_users_to_worklist(n);
+ _igvn.hash_delete(n);
+ _igvn.subsume_node(n, n->in(1));
+ success = true;
+ }
+ assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
+ progress = progress || success;
+ }
+ }
+ // Next, attempt to eliminate allocations
+ progress = true;
+ while (progress) {
+ progress = false;
+ for (int i = C->macro_count(); i > 0; i--) {
+ Node * n = C->macro_node(i-1);
+ bool success = false;
+ debug_only(int old_macro_count = C->macro_count(););
switch (n->class_id()) {
case Node::Class_Allocate:
case Node::Class_AllocateArray:
@@ -1902,17 +1973,10 @@
break;
case Node::Class_Lock:
case Node::Class_Unlock:
- success = eliminate_locking_node(n->as_AbstractLock());
+ assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
break;
default:
- if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
- _igvn.add_users_to_worklist(n);
- _igvn.hash_delete(n);
- _igvn.subsume_node(n, n->in(1));
- success = true;
- } else {
- assert(false, "unknown node type in macro list");
- }
+ assert(false, "unknown node type in macro list");
}
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
progress = progress || success;
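
expand_macro_nodes() now makes two elimination sweeps instead of one: locks (plus Opaque1/Opaque2 nodes) first, then allocations, so scalar replacement runs against a graph whose removable monitors are already gone; the Lock/Unlock arms of the second switch degrade to an assert. A condensed sketch of the reordered loop, using the patch's own names:

    // Pass 1: locks and Opaque nodes (condensed from the hunk above).
    bool progress = true;
    while (progress) {
      progress = false;
      for (int i = C->macro_count(); i > 0; i--) {
        Node* n = C->macro_node(i - 1);
        bool success = false;
        if (n->is_AbstractLock()) {
          success = eliminate_locking_node(n->as_AbstractLock());
        } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
          success = true;  // node subsumed by its input
        }
        progress = progress || success;
      }
    }
    // Pass 2 repeats the same loop shape for Allocate/AllocateArray via
    // eliminate_allocate_node(); any surviving Lock/Unlock must be
    // non-eliminated by this point, hence the assert in the switch.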
--- a/hotspot/src/share/vm/opto/output.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/opto/output.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -849,10 +849,8 @@
// Loop over monitors and insert into array
for(idx = 0; idx < num_mon; idx++) {
// Grab the node that defines this monitor
- Node* box_node;
- Node* obj_node;
- box_node = sfn->monitor_box(jvms, idx);
- obj_node = sfn->monitor_obj(jvms, idx);
+ Node* box_node = sfn->monitor_box(jvms, idx);
+ Node* obj_node = sfn->monitor_obj(jvms, idx);
// Create ScopeValue for object
ScopeValue *scval = NULL;
@@ -890,6 +888,7 @@
OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg));
+ while( !box_node->is_BoxLock() ) box_node = box_node->in(1);
monarray->append(new MonitorValue(scval, basic_lock, box_node->as_BoxLock()->is_eliminated()));
}
--- a/hotspot/src/share/vm/prims/jniCheck.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -112,18 +112,6 @@
static const char * fatal_non_string = "JNI string operation received a non-string";
-
-// Report a JNI failure caught by -Xcheck:jni. Perform a core dump.
-// Note: two variations -- one to be called when in VM state (e.g. when
-// within IN_VM macro), one to be called when in NATIVE state.
-
-// When in VM state:
-static void ReportJNIFatalError(JavaThread* thr, const char *msg) {
- tty->print_cr("FATAL ERROR in native method: %s", msg);
- thr->print_stack();
- os::abort(true);
-}
-
// When in VM state:
static void ReportJNIWarning(JavaThread* thr, const char *msg) {
tty->print_cr("WARNING in native method: %s", msg);
--- a/hotspot/src/share/vm/prims/jniCheck.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/prims/jniCheck.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -22,6 +22,19 @@
*
*/
+extern "C" {
+ // Report a JNI failure caught by -Xcheck:jni. Perform a core dump.
+ // Note: two variations -- one to be called when in VM state (e.g. when
+ // within IN_VM macro), one to be called when in NATIVE state.
+
+ // When in VM state:
+ static void ReportJNIFatalError(JavaThread* thr, const char *msg) {
+ tty->print_cr("FATAL ERROR in native method: %s", msg);
+ thr->print_stack();
+ os::abort(true);
+ }
+}
+
//
// Checked JNI routines that are useful for outside of checked JNI
//
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -2322,7 +2322,12 @@
return JNI_ERR;
}
}
-
+ // Change the default value for flags which have different default values
+ // when working with older JDKs.
+ if (JDK_Version::current().compare_major(6) <= 0 &&
+ FLAG_IS_DEFAULT(UseVMInterruptibleIO)) {
+ FLAG_SET_DEFAULT(UseVMInterruptibleIO, true);
+ }
return JNI_OK;
}
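
The guard pattern here is what preserves explicit command-line choices: FLAG_IS_DEFAULT() is only true when the user never set -XX:±UseVMInterruptibleIO, so the JDK-6-and-earlier default is applied without overriding anyone. A standalone illustration of the pattern, with the flag machinery reduced to a struct (illustrative names, not HotSpot's macros):

    // is_default flips to false the moment the user sets the flag on the
    // command line, which blocks the version-dependent rewrite below.
    struct Flag {
      bool value;
      bool is_default;
    };

    static void apply_legacy_defaults(Flag& use_vm_interruptible_io,
                                      int jdk_major_version) {
      // Mirrors the hunk above: rewrite the default only if untouched.
      if (jdk_major_version <= 6 && use_vm_interruptible_io.is_default) {
        use_vm_interruptible_io.value = true;  // JDK 6 and earlier behaviour
      }
    }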
--- a/hotspot/src/share/vm/runtime/biasedLocking.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -582,13 +582,19 @@
if (heuristics == HR_NOT_BIASED) {
return NOT_BIASED;
} else if (heuristics == HR_SINGLE_REVOKE) {
- if (mark->biased_locker() == THREAD) {
+ Klass *k = Klass::cast(obj->klass());
+ markOop prototype_header = k->prototype_header();
+ if (mark->biased_locker() == THREAD &&
+ prototype_header->bias_epoch() == mark->bias_epoch()) {
// A thread is trying to revoke the bias of an object biased
// toward it, again likely due to an identity hash code
// computation. We can again avoid a safepoint in this case
// since we are only going to walk our own stack. There are no
// races with revocations occurring in other threads because we
// reach no safepoints in the revocation path.
+ // Also check the epoch because even if threads match, another thread
+ // can come in with a CAS to steal the bias of an object that has a
+ // stale epoch.
ResourceMark rm;
if (TraceBiasedLocking) {
tty->print_cr("Revoking bias by walking my own stack:");
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -342,6 +342,9 @@
product(bool, UseNUMA, false, \
"Use NUMA if available") \
\
+ product(bool, ForceNUMA, false, \
+ "Force NUMA optimizations on single-node/UMA systems") \
+ \
product(intx, NUMAChunkResizeWeight, 20, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for " \
@@ -1474,7 +1477,7 @@
"CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \
" ratio") \
\
- product(bool, CMSPrecleanRefLists1, false, \
+ product(bool, CMSPrecleanRefLists1, true, \
"Preclean ref lists during (initial) preclean phase") \
\
product(bool, CMSPrecleanRefLists2, false, \
@@ -3262,9 +3265,10 @@
diagnostic(bool, PrintDTraceDOF, false, \
"Print the DTrace DOF passed to the system for JSDT probes") \
\
- product(bool, UseVMInterruptibleIO, true, \
+ product(bool, UseVMInterruptibleIO, false, \
"(Unstable, Solaris-specific) Thread interrupt before or with " \
- "EINTR for I/O operations results in OS_INTRPT")
+ "EINTR for I/O operations results in OS_INTRPT. The default value"\
+ " of this flag is true for JDK 6 and earliers")
/*
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -309,8 +309,12 @@
CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
- // Make sure that the arguments have the right type
- debug_only(args->verify(method, result->get_type(), thread));
+ // Verify the arguments
+
+ if (CheckJNICalls) {
+ args->verify(method, result->get_type(), thread);
+ }
+ else debug_only(args->verify(method, result->get_type(), thread));
// Ignore call if method is empty
if (method->is_empty_method()) {
@@ -431,24 +435,26 @@
return TaggedStackInterpreter ? _parameters : _value;
}
-//--------------------------------------------------------------------------------------
-// Non-Product code
-#ifndef PRODUCT
class SignatureChekker : public SignatureIterator {
private:
bool *_is_oop;
int _pos;
BasicType _return_type;
+ intptr_t* _value;
+ Thread* _thread;
public:
bool _is_return;
- SignatureChekker(symbolHandle signature, BasicType return_type, bool is_static, bool* is_oop) : SignatureIterator(signature) {
+ SignatureChekker(symbolHandle signature, BasicType return_type, bool is_static, bool* is_oop, intptr_t* value, Thread* thread) : SignatureIterator(signature) {
_is_oop = is_oop;
_is_return = false;
_return_type = return_type;
_pos = 0;
+ _value = value;
+ _thread = thread;
+
if (!is_static) {
check_value(true); // Receiver must be an oop
}
@@ -489,6 +495,24 @@
check_return_type(t);
return;
}
+
+ // verify handle and the oop pointed to by handle
+ int p = _pos;
+ bool bad = false;
+ // If argument is oop
+ if (_is_oop[p]) {
+ intptr_t v = _value[p];
+ if (v != 0 ) {
+ size_t t = (size_t)v;
+ bad = (t < (size_t)os::vm_page_size() ) || !(*(oop*)v)->is_oop_or_null(true);
+ if (CheckJNICalls && bad) {
+ ReportJNIFatalError((JavaThread*)_thread, "Bad JNI oop argument");
+ }
+ }
+ // for the regular debug case.
+ assert(!bad, "Bad JNI oop argument");
+ }
+
check_value(true);
}
@@ -505,6 +529,7 @@
void do_array(int begin, int end) { check_obj(T_OBJECT); }
};
+
void JavaCallArguments::verify(methodHandle method, BasicType return_type,
Thread *thread) {
guarantee(method->size_of_parameters() == size_of_parameters(), "wrong no. of arguments pushed");
@@ -515,10 +540,9 @@
// Check that oop information is correct
symbolHandle signature (thread, method->signature());
- SignatureChekker sc(signature, return_type, method->is_static(),_is_oop);
+ SignatureChekker sc(signature, return_type, method->is_static(),_is_oop, _value, thread);
sc.iterate_parameters();
sc.check_doing_return(true);
sc.iterate_returntype();
}
-#endif // PRODUCT
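
With SignatureChekker no longer compiled out of product builds, -Xcheck:jni can validate oop arguments at call time; the check first rejects values inside the first VM page before dereferencing, then validates the resolved oop with is_oop_or_null(). A standalone miniature of just the address screen (illustrative; the real check also dereferences and validates the oop):

    // Reject "handles" that cannot possibly be valid before dereferencing:
    // anything inside the first VM page is never a real handle address.
    static bool suspicious_handle(intptr_t v, size_t vm_page_size) {
      if (v == 0) return false;            // null arguments are acceptable
      return (size_t)v < vm_page_size;     // low addresses fail immediately
    }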
--- a/hotspot/src/share/vm/runtime/javaCalls.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/runtime/javaCalls.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -150,7 +150,7 @@
int size_of_parameters() const { return _size; }
// Verify that pushed arguments fits a given method
- void verify(methodHandle method, BasicType return_type, Thread *thread) PRODUCT_RETURN;
+ void verify(methodHandle method, BasicType return_type, Thread *thread);
};
// All calls to Java have to go via JavaCalls. Sets up the stack frame
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Fri Dec 05 15:32:59 2008 -0800
@@ -556,7 +556,10 @@
// the caller was at a call site, it's safe to destroy all
// caller-saved registers, as these entry points do.
VtableStub* vt_stub = VtableStubs::stub_containing(pc);
- guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub");
+
+ // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
+ if (vt_stub == NULL) return NULL;
+
if (vt_stub->is_abstract_method_error(pc)) {
assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
return StubRoutines::throw_AbstractMethodError_entry();
@@ -565,7 +568,9 @@
}
} else {
CodeBlob* cb = CodeCache::find_blob(pc);
- guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)");
+
+ // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
+ if (cb == NULL) return NULL;
// Exception happened in CodeCache. Must be either:
// 1. Inline-cache check in C2I handler blob,
@@ -574,7 +579,7 @@
if (!cb->is_nmethod()) {
guarantee(cb->is_adapter_blob(),
- "exception happened outside interpreter, nmethods and vtable stubs (2)");
+ "exception happened outside interpreter, nmethods and vtable stubs (1)");
// There is no handler here, so we will simply unwind.
return StubRoutines::throw_NullPointerException_at_call_entry();
}
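
Replacing the guarantees with NULL returns changes the contract of continuation_for_implicit_exception(): a NULL result now means "this fault is not an implicit exception we recognize", and the platform signal handler is expected to fall through to ordinary SEGV reporting instead of dying inside a guarantee(). A hedged sketch of a caller honouring that contract (handler structure simplified):

    // Hypothetical signal-handler fragment, assuming the usual shape:
    address stub = SharedRuntime::continuation_for_implicit_exception(
                       thread, pc, SharedRuntime::IMPLICIT_NULL);
    if (stub == NULL) {
      return false;  // not ours: let normal SEGV/crash reporting run
    }
    // otherwise redirect execution to the returned stub entry point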
--- a/hotspot/src/share/vm/utilities/macros.hpp Thu Dec 04 11:10:13 2008 -0800
+++ b/hotspot/src/share/vm/utilities/macros.hpp Fri Dec 05 15:32:59 2008 -0800
@@ -65,8 +65,10 @@
// COMPILER2 variant
#ifdef COMPILER2
#define COMPILER2_PRESENT(code) code
+#define NOT_COMPILER2(code)
#else // COMPILER2
#define COMPILER2_PRESENT(code)
+#define NOT_COMPILER2(code) code
#endif // COMPILER2
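
NOT_COMPILER2 completes the macro pair so call sites can supply both build variants inline instead of an #ifdef COMPILER2 / #else / #endif block. A small usage sketch (the statements are made up for illustration; exactly one expansion survives preprocessing):

    COMPILER2_PRESENT(record_c2_only_statistic();)
    NOT_COMPILER2(record_non_c2_statistic();)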
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6756768/Test6756768.java Fri Dec 05 15:32:59 2008 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6756768
+ * @summary C1 generates invalid code
+ *
+ * @run main/othervm -Xcomp Test6756768
+ */
+
+class Test6756768a
+{
+ static boolean var_1 = true;
+}
+
+final class Test6756768b
+{
+ static boolean var_24 = false;
+ static int var_25 = 0;
+
+ static boolean var_temp1 = Test6756768a.var_1 = false;
+}
+
+public final class Test6756768 extends Test6756768a
+{
+ final static int var = var_1 ^ (Test6756768b.var_24 ? var_1 : var_1) ? Test6756768b.var_25 : 1;
+
+ static public void main(String[] args) {
+ if (var != 0) {
+ throw new InternalError("var = " + var);
+ }
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6756768/Test6756768_2.java Fri Dec 05 15:32:59 2008 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6756768
+ * @summary C1 generates invalid code
+ *
+ * @run main/othervm -Xcomp Test6756768_2
+ */
+
+class Test6756768_2a {
+ static int var = ++Test6756768_2.var;
+}
+
+public class Test6756768_2 {
+ static int var = 1;
+
+ static Object d2 = null;
+
+ static void test_static_field() {
+ int v = var;
+ int v2 = Test6756768_2a.var;
+ int v3 = var;
+ var = v3;
+ }
+
+ public static void main(String[] args) {
+ var = 1;
+ test_static_field();
+ if (var != 2) {
+ throw new InternalError();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6775880/Test.java Fri Dec 05 15:32:59 2008 -0800
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6775880
+ * @summary EA +DeoptimizeALot: assert(mon_info->owner()->is_locked(),"object must be locked now")
+ * @compile -source 1.4 -target 1.4 Test.java
+ * @run main/othervm -server -Xbatch -XX:+DoEscapeAnalysis -XX:+DeoptimizeALot -XX:CompileCommand=exclude,java.lang.AbstractStringBuilder::append Test
+ */
+
+public class Test {
+
+ int cnt;
+ int b[];
+ String s;
+
+ String test() {
+ String res="";
+ for (int i=0; i < cnt; i++) {
+ if (i != 0) {
+ res = res +".";
+ }
+ res = res + b[i];
+ }
+ return res;
+ }
+
+ public static void main(String[] args) {
+ Test t = new Test();
+ t.cnt = 3;
+ t.b = new int[3];
+ t.b[0] = 0;
+ t.b[1] = 1;
+ t.b[2] = 2;
+ int j=0;
+ t.s = "";
+ for (int i=0; i<10001; i++) {
+ t.s = "c";
+ t.s = t.test();
+ }
+ System.out.println("After s=" + t.s);
+ }
+}
+
+