--- a/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -248,3 +248,4 @@
7f655f31f9bcee618cf832f08176ad8c1ed3fdd3 jdk9-b03
099891b1d86f3719e116ac717ffdafc90d037fb7 jdk9-b04
dd311791ad6895a3989020dd6c6c46db87972ab8 jdk9-b05
+85dbdc227c5e11429b4fc4a8ba763f50107edd6e jdk9-b06
--- a/.hgtags-top-repo Mon Apr 14 08:24:28 2014 +0200
+++ b/.hgtags-top-repo Sun Apr 13 23:39:22 2014 -0700
@@ -248,3 +248,4 @@
fd8d51bdf9aadf7ae83e65e8655c53581017c363 jdk9-b03
cb4c3440bc2748101923e2488506e61009ab1bf5 jdk9-b04
8c63f0b6ada282f27e3a80125e53c3be603f9af7 jdk9-b05
+d0b525cd31b87abeb6d5b7e3516953eeb13b323c jdk9-b06
--- a/corba/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/corba/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -248,3 +248,4 @@
d338b892a13db19b093f85cf5f949a4504e4d31f jdk9-b03
1ed19de263e1e0772da0269118cdd9deeb9fff04 jdk9-b04
167c39eb44731a5d66770d0f00e231164653a2ff jdk9-b05
+a4bf701ac316946c2e5e83138ad8e687da6a4b30 jdk9-b06
--- a/hotspot/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -408,3 +408,4 @@
b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
3812c088b9456ee22c933e88aee1ece71f4e783a jdk9-b04
bdc5311e1db7598589b77015119b821bf8c828bd jdk9-b05
+52377a30a3f87b62d6135706997b8c7a47366e37 jdk9-b06
--- a/hotspot/src/os/aix/vm/os_aix.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/aix/vm/os_aix.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2811,18 +2811,13 @@
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
-void os::yield_all(int attempts) {
+void os::yield_all() {
// Yields to all threads, including threads with lower priorities
// Threads on Linux are all with same priority. The Solaris style
// os::yield_all() with nanosleep(1ms) is not necessary.
sched_yield();
}
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
- os::yield_all(attempts);
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@@ -3079,7 +3074,7 @@
for (int n = 0; !osthread->sr.is_suspended(); n++) {
for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
- os::yield_all(i);
+ os::yield_all();
}
// timeout, try to cancel the request
@@ -3113,7 +3108,7 @@
if (sr_notify(osthread) == 0) {
for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
- os::yield_all(i);
+ os::yield_all();
}
}
} else {
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -917,9 +917,20 @@
//////////////////////////////////////////////////////////////////////////////
// thread local storage
+// Restore the thread pointer if the destructor is called. This handles the
+// case where JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer,
+// we will hang or crash. When detachCurrentThread is called, the key will be
+// set to null and we will not be called again. If detachCurrentThread is never
+// called, we could loop forever, depending on the pthread implementation.
+static void restore_thread_pointer(void* p) {
+ Thread* thread = (Thread*) p;
+ os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+
int os::allocate_thread_local_storage() {
pthread_key_t key;
- int rslt = pthread_key_create(&key, NULL);
+ int rslt = pthread_key_create(&key, restore_thread_pointer);
assert(rslt == 0, "cannot allocate thread local storage");
return (int)key;
}
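The same restore_thread_pointer change is made in os_linux.cpp below. As background, a pthread TLS destructor registered with pthread_key_create runs at thread exit for every key whose value is non-NULL; if the destructor stores a value back, typical implementations re-run the destructor pass, bounded by PTHREAD_DESTRUCTOR_ITERATIONS. A minimal standalone sketch, not HotSpot code, with illustrative names only:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_key_t key;

    // Destructor: called at thread exit with the previous value, after the key
    // has already been cleared. Storing the value back keeps it visible to any
    // other exit-time code; it also makes the destructor pass repeat, bounded
    // by PTHREAD_DESTRUCTOR_ITERATIONS on typical implementations.
    static void restore_pointer(void* p) {
      printf("destructor called with %p\n", p);
      pthread_setspecific(key, p);
    }

    static void* worker(void*) {
      static int dummy;
      pthread_setspecific(key, &dummy);
      return NULL;  // destructors run after the thread function returns
    }

    int main() {
      pthread_key_create(&key, restore_pointer);
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);
      return 0;
    }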
@@ -2551,18 +2562,13 @@
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
-void os::yield_all(int attempts) {
+void os::yield_all() {
// Yields to all threads, including threads with lower priorities
// Threads on Bsd are all with same priority. The Solaris style
// os::yield_all() with nanosleep(1ms) is not necessary.
sched_yield();
}
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
- os::yield_all(attempts);
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
--- a/hotspot/src/os/linux/vm/os_linux.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1032,9 +1032,20 @@
//////////////////////////////////////////////////////////////////////////////
// thread local storage
+// Restore the thread pointer if the destructor is called. This handles the
+// case where JNI code sets up a destructor with pthread_key_create to run
+// detachCurrentThread on thread death. Unless we restore the thread pointer,
+// we will hang or crash. When detachCurrentThread is called, the key will be
+// set to null and we will not be called again. If detachCurrentThread is never
+// called, we could loop forever, depending on the pthread implementation.
+static void restore_thread_pointer(void* p) {
+ Thread* thread = (Thread*) p;
+ os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
+
int os::allocate_thread_local_storage() {
pthread_key_t key;
- int rslt = pthread_key_create(&key, NULL);
+ int rslt = pthread_key_create(&key, restore_thread_pointer);
assert(rslt == 0, "cannot allocate thread local storage");
return (int)key;
}
@@ -3781,18 +3792,13 @@
os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
-void os::yield_all(int attempts) {
+void os::yield_all() {
// Yields to all threads, including threads with lower priorities
// Threads on Linux are all with same priority. The Solaris style
// os::yield_all() with nanosleep(1ms) is not necessary.
sched_yield();
}
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
- os::yield_all(attempts);
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
--- a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -29,6 +29,7 @@
#include "services/dtraceAttacher.hpp"
#include <door.h>
+#include <limits.h>
#include <string.h>
#include <signal.h>
#include <sys/types.h>
@@ -668,11 +669,13 @@
out->print_cr("No probe specified");
return JNI_ERR;
} else {
- int probe_typess = atoi(probe);
- if (errno) {
+ char *end;
+ long val = strtol(probe, &end, 10);
+ if (end == probe || val < 0 || val > INT_MAX) {
out->print_cr("invalid probe type");
return JNI_ERR;
} else {
+ int probe_typess = (int) val;
DTrace::enable_dprobes(probe_typess);
return JNI_OK;
}
@@ -703,8 +706,9 @@
bool flag = true;
const char* arg1;
if ((arg1 = op->arg(1)) != NULL) {
- flag = (atoi(arg1) != 0);
- if (errno) {
+ char *end;
+ flag = (strtol(arg1, &end, 10) != 0);
+ if (arg1 == end) {
out->print_cr("flag value has to be an integer");
return JNI_ERR;
}
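Background on the two parsing changes above: atoi() has no defined error reporting (it is not required to set errno), so the previous checks of errno after atoi were unreliable. strtol reports "no digits" through its end pointer and range problems through errno and its clamped return value. A small illustrative helper under the assumption that only non-negative int values are acceptable; parse_probe_type is a hypothetical name, not part of the patch:

    #include <errno.h>
    #include <limits.h>
    #include <stdlib.h>

    // Parse a non-negative int; return false for non-numeric or out-of-range input.
    static bool parse_probe_type(const char* s, int* out) {
      char* end;
      errno = 0;
      long val = strtol(s, &end, 10);
      if (end == s             // no digits were consumed
          || errno == ERANGE   // value did not fit in a long
          || val < 0 || val > INT_MAX) {
        return false;
      }
      *out = (int) val;
      return true;
    }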
--- a/hotspot/src/os/solaris/vm/osThread_solaris.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -49,16 +49,6 @@
// copied from synchronizer.cpp
-void OSThread::handle_spinlock_contention(int tries) {
- if (NoYieldsInMicrolock) return;
-
- if (tries > 10) {
- os::yield_all(tries); // Yield to threads of any priority
- } else if (tries > 5) {
- os::yield(); // Yield to threads of same or higher priority
- }
-}
-
void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
os::Solaris::SR_handler(thread, uc);
}
--- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -82,8 +82,6 @@
void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
static void SR_handler(Thread* thread, ucontext_t* uc);
- static void handle_spinlock_contention(int tries); // Used for thread local eden locking
-
// ***************************************************************
// Platform dependent initialization and cleanup
// ***************************************************************
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -969,9 +969,6 @@
return true;
}
-// _T2_libthread is true if we believe we are running with the newer
-// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
-bool os::Solaris::_T2_libthread = false;
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
// Allocate the OSThread object
@@ -1056,71 +1053,10 @@
thread->set_osthread(osthread);
// Create the Solaris thread
- // explicit THR_BOUND for T2_libthread case in case
- // that assumption is not accurate, but our alternate signal stack
- // handling is based on it which must have bound threads
thread_t tid = 0;
- long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
- | ((UseBoundThreads || os::Solaris::T2_libthread() ||
- (thr_type == vm_thread) ||
- (thr_type == cgc_thread) ||
- (thr_type == pgc_thread) ||
- (thr_type == compiler_thread && BackgroundCompilation)) ?
- THR_BOUND : 0);
+ long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
int status;
- // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
- //
- // On multiprocessors systems, libthread sometimes under-provisions our
- // process with LWPs. On a 30-way systems, for instance, we could have
- // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
- // to our process. This can result in under utilization of PEs.
- // I suspect the problem is related to libthread's LWP
- // pool management and to the kernel's SIGBLOCKING "last LWP parked"
- // upcall policy.
- //
- // The following code is palliative -- it attempts to ensure that our
- // process has sufficient LWPs to take advantage of multiple PEs.
- // Proper long-term cures include using user-level threads bound to LWPs
- // (THR_BOUND) or using LWP-based synchronization. Note that there is a
- // slight timing window with respect to sampling _os_thread_count, but
- // the race is benign. Also, we should periodically recompute
- // _processors_online as the min of SC_NPROCESSORS_ONLN and the
- // the number of PEs in our partition. You might be tempted to use
- // THR_NEW_LWP here, but I'd recommend against it as that could
- // result in undesirable growth of the libthread's LWP pool.
- // The fix below isn't sufficient; for instance, it doesn't take into count
- // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
- //
- // Some pathologies this scheme doesn't handle:
- // * Threads can block, releasing the LWPs. The LWPs can age out.
- // When a large number of threads become ready again there aren't
- // enough LWPs available to service them. This can occur when the
- // number of ready threads oscillates.
- // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
- //
- // Finally, we should call thr_setconcurrency() periodically to refresh
- // the LWP pool and thwart the LWP age-out mechanism.
- // The "+3" term provides a little slop -- we want to slightly overprovision.
-
- if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
- if (!(flags & THR_BOUND)) {
- thr_setconcurrency (os::Solaris::_os_thread_count); // avoid starvation
- }
- }
- // Although this doesn't hurt, we should warn of undefined behavior
- // when using unbound T1 threads with schedctl(). This should never
- // happen, as the compiler and VM threads are always created bound
- DEBUG_ONLY(
- if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
- (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
- ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
- (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
- warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
- }
- );
-
-
// Mark that we don't have an lwp or thread id yet.
// In case we attempt to set the priority before the thread starts.
osthread->set_lwp_id(-1);
@@ -1145,13 +1081,6 @@
// Remember that we created this thread so we can set priority on it
osthread->set_vm_created();
- // Set the default thread priority. If using bound threads, setting
- // lwp priority will be delayed until thread start.
- set_native_priority(thread,
- DefaultThreadPriority == -1 ?
- java_to_os_priority[NormPriority] :
- DefaultThreadPriority);
-
// Initial thread state is INITIALIZED, not SUSPENDED
osthread->set_state(INITIALIZED);
@@ -1333,39 +1262,8 @@
jt->set_stack_size(stack_size);
}
- // 5/22/01: Right now alternate signal stacks do not handle
- // throwing stack overflow exceptions, see bug 4463178
- // Until a fix is found for this, T2 will NOT imply alternate signal
- // stacks.
- // If using T2 libthread threads, install an alternate signal stack.
- // Because alternate stacks associate with LWPs on Solaris,
- // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
- // we prefer to explicitly stack bang.
- // If not using T2 libthread, but using UseBoundThreads any threads
- // (primordial thread, jni_attachCurrentThread) we do not create,
- // probably are not bound, therefore they can not have an alternate
- // signal stack. Since our stack banging code is generated and
- // is shared across threads, all threads must be bound to allow
- // using alternate signal stacks. The alternative is to interpose
- // on _lwp_create to associate an alt sig stack with each LWP,
- // and this could be a problem when the JVM is embedded.
- // We would prefer to use alternate signal stacks with T2
- // Since there is currently no accurate way to detect T2
- // we do not. Assuming T2 when running T1 causes sig 11s or assertions
- // on installing alternate signal stacks
-
-
- // 05/09/03: removed alternate signal stack support for Solaris
- // The alternate signal stack mechanism is no longer needed to
- // handle stack overflow. This is now handled by allocating
- // guard pages (red zone) and stackbanging.
- // Initially the alternate signal stack mechanism was removed because
- // it did not work with T1 llibthread. Alternate
- // signal stacks MUST have all threads bound to lwps. Applications
- // can create their own threads and attach them without their being
- // bound under T1. This is frequently the case for the primordial thread.
- // If we were ever to reenable this mechanism we would need to
- // use the dynamic check for T2 libthread.
+ // With the T2 libthread (T1 is no longer supported), threads are always bound
+ // and we use stack banging in all cases.
os::Solaris::init_thread_fpu_state();
std::set_terminate(_handle_uncaught_cxx_exception);
@@ -2092,12 +1990,7 @@
}
void os::Solaris::print_libversion_info(outputStream* st) {
- if (os::Solaris::T2_libthread()) {
- st->print(" (T2 libthread)");
- }
- else {
- st->print(" (T1 libthread)");
- }
+ st->print(" (T2 libthread)");
st->cr();
}
@@ -3323,41 +3216,10 @@
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
-
-// On Solaris we found that yield_all doesn't always yield to all other threads.
-// There have been cases where there is a thread ready to execute but it doesn't
-// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
-// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
-// SIGWAITING signal which will cause a new lwp to be created. So we count the
-// number of times yield_all is called in the one loop and increase the sleep
-// time after 8 attempts. If this fails too we increase the concurrency level
-// so that the starving thread would get an lwp
-
-void os::yield_all(int attempts) {
+void os::yield_all() {
// Yields to all threads, including threads with lower priorities
- if (attempts == 0) {
- os::sleep(Thread::current(), 1, false);
- } else {
- int iterations = attempts % 30;
- if (iterations == 0 && !os::Solaris::T2_libthread()) {
- // thr_setconcurrency and _getconcurrency make sense only under T1.
- int noofLWPS = thr_getconcurrency();
- if (noofLWPS < (Threads::number_of_threads() + 2)) {
- thr_setconcurrency(thr_getconcurrency() + 1);
- }
- } else if (iterations < 25) {
- os::sleep(Thread::current(), 1, false);
- } else {
- os::sleep(Thread::current(), 10, false);
- }
- }
-}
-
-// Called from the tight loops to possibly influence time-sharing heuristics
-void os::loop_breaker(int attempts) {
- os::yield_all(attempts);
-}
-
+ os::sleep(Thread::current(), 1, false);
+}
// Interface for setting lwp priorities. If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
@@ -3365,6 +3227,9 @@
// function is meaningless in this mode so we must adjust the real lwp's priority
// The routines below implement the getting and setting of lwp priorities.
//
+// Note: T2 is now the only supported libthread. The UseBoundThreads flag is
+// being deprecated and all threads are now bound threads.
+//
// Note: There are three priority scales used on Solaris. Java priotities
// which range from 1 to 10, libthread "thr_setprio" scale which range
// from 0 to 127, and the current scheduling class of the process we
@@ -3437,29 +3302,19 @@
if (!UseThreadPriorities) return 0;
- // We are using Bound threads, we need to determine our priority ranges
- if (os::Solaris::T2_libthread() || UseBoundThreads) {
- // If ThreadPriorityPolicy is 1, switch tables
- if (ThreadPriorityPolicy == 1) {
- for (i = 0 ; i < CriticalPriority+1; i++)
- os::java_to_os_priority[i] = prio_policy1[i];
- }
- if (UseCriticalJavaThreadPriority) {
- // MaxPriority always maps to the FX scheduling class and criticalPrio.
- // See set_native_priority() and set_lwp_class_and_priority().
- // Save original MaxPriority mapping in case attempt to
- // use critical priority fails.
- java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
- // Set negative to distinguish from other priorities
- os::java_to_os_priority[MaxPriority] = -criticalPrio;
- }
- }
- // Not using Bound Threads, set to ThreadPolicy 1
- else {
- for ( i = 0 ; i < CriticalPriority+1; i++ ) {
+ // If ThreadPriorityPolicy is 1, switch tables
+ if (ThreadPriorityPolicy == 1) {
+ for (i = 0 ; i < CriticalPriority+1; i++)
os::java_to_os_priority[i] = prio_policy1[i];
- }
- return 0;
+ }
+ if (UseCriticalJavaThreadPriority) {
+ // MaxPriority always maps to the FX scheduling class and criticalPrio.
+ // See set_native_priority() and set_lwp_class_and_priority().
+ // Save original MaxPriority mapping in case attempt to
+ // use critical priority fails.
+ java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
+ // Set negative to distinguish from other priorities
+ os::java_to_os_priority[MaxPriority] = -criticalPrio;
}
// Get IDs for a set of well-known scheduling classes.
@@ -3583,10 +3438,6 @@
// set_lwp_class_and_priority
-//
-// Set the class and priority of the lwp. This call should only
-// be made when using bound threads (T2 threads are bound by default).
-//
int set_lwp_class_and_priority(int ThreadID, int lwpid,
int newPrio, int new_class, bool scale) {
int rslt;
@@ -3812,23 +3663,20 @@
status = thr_setprio(thread->osthread()->thread_id(), newpri);
}
- if (os::Solaris::T2_libthread() ||
- (UseBoundThreads && osthread->is_vm_created())) {
- int lwp_status =
- set_lwp_class_and_priority(osthread->thread_id(),
- osthread->lwp_id(),
- newpri,
- fxcritical ? fxLimits.schedPolicy : myClass,
- !fxcritical);
- if (lwp_status != 0 && fxcritical) {
- // Try again, this time without changing the scheduling class
- newpri = java_MaxPriority_to_os_priority;
- lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
- osthread->lwp_id(),
- newpri, myClass, false);
- }
- status |= lwp_status;
- }
+ int lwp_status =
+ set_lwp_class_and_priority(osthread->thread_id(),
+ osthread->lwp_id(),
+ newpri,
+ fxcritical ? fxLimits.schedPolicy : myClass,
+ !fxcritical);
+ if (lwp_status != 0 && fxcritical) {
+ // Try again, this time without changing the scheduling class
+ newpri = java_MaxPriority_to_os_priority;
+ lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
+ osthread->lwp_id(),
+ newpri, myClass, false);
+ }
+ status |= lwp_status;
return (status == 0) ? OS_OK : OS_ERR;
}
@@ -4495,13 +4343,6 @@
}
}
-// (Static) wrappers for the new libthread API
-int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
-int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
-int_fnP_thread_t_i os::Solaris::_thr_setmutator;
-int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
-int_fnP_thread_t os::Solaris::_thr_continue_mutator;
-
// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
@@ -4536,78 +4377,9 @@
return addr;
}
-
-
-// isT2_libthread()
-//
-// Routine to determine if we are currently using the new T2 libthread.
-//
-// We determine if we are using T2 by reading /proc/self/lstatus and
-// looking for a thread with the ASLWP bit set. If we find this status
-// bit set, we must assume that we are NOT using T2. The T2 team
-// has approved this algorithm.
-//
-// We need to determine if we are running with the new T2 libthread
-// since setting native thread priorities is handled differently
-// when using this library. All threads created using T2 are bound
-// threads. Calling thr_setprio is meaningless in this case.
-//
-bool isT2_libthread() {
- static prheader_t * lwpArray = NULL;
- static int lwpSize = 0;
- static int lwpFile = -1;
- lwpstatus_t * that;
- char lwpName [128];
- bool isT2 = false;
-
-#define ADR(x) ((uintptr_t)(x))
-#define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
-
- lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
- if (lwpFile < 0) {
- if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
- return false;
- }
- lwpSize = 16*1024;
- for (;;) {
- ::lseek64 (lwpFile, 0, SEEK_SET);
- lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
- if (::read(lwpFile, lwpArray, lwpSize) < 0) {
- if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
- break;
- }
- if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
- // We got a good snapshot - now iterate over the list.
- int aslwpcount = 0;
- for (int i = 0; i < lwpArray->pr_nent; i++ ) {
- that = LWPINDEX(lwpArray,i);
- if (that->pr_flags & PR_ASLWP) {
- aslwpcount++;
- }
- }
- if (aslwpcount == 0) isT2 = true;
- break;
- }
- lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
- FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); // retry.
- }
-
- FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
- ::close (lwpFile);
- if (ThreadPriorityVerbose) {
- if (isT2) tty->print_cr("We are running with a T2 libthread\n");
- else tty->print_cr("We are not running with a T2 libthread\n");
- }
- return isT2;
-}
-
-
void os::Solaris::libthread_init() {
address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
- // Determine if we are running with the new T2 libthread
- os::Solaris::set_T2_libthread(isT2_libthread());
-
lwp_priocntl_init();
// RTLD_DEFAULT was not defined on some early versions of 5.5.1
@@ -4618,22 +4390,6 @@
guarantee(func != NULL, "libthread.so is too old.");
}
- // Initialize the new libthread getstate API wrappers
- func = resolve_symbol("thr_getstate");
- os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
-
- func = resolve_symbol("thr_setstate");
- os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
-
- func = resolve_symbol("thr_setmutator");
- os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
-
- func = resolve_symbol("thr_suspend_mutator");
- os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
-
- func = resolve_symbol("thr_continue_mutator");
- os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
-
int size;
void (*handler_info_func)(address *, int *);
handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
@@ -5536,11 +5292,7 @@
}
bool os::is_thread_cpu_time_supported() {
- if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
- return true;
- } else {
- return false;
- }
+ return true;
}
// System loadavg support. Returns -1 if load average cannot be obtained.
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -41,19 +41,6 @@
#define TRS_LWPID 2
#define TRS_INVALID 3
- // _T2_libthread is true if we believe we are running with the newer
- // SunSoft lib/lwp/libthread: default Solaris 9, available Solaris 8
- // which is a lightweight libthread that also supports all T1
- static bool _T2_libthread;
- // These refer to new libthread interface functions
- // They get intialized if we dynamically detect new libthread
- static int_fnP_thread_t_iP_uP_stack_tP_gregset_t _thr_getstate;
- static int_fnP_thread_t_i_gregset_t _thr_setstate;
- static int_fnP_thread_t_i _thr_setmutator;
- static int_fnP_thread_t _thr_suspend_mutator;
- static int_fnP_thread_t _thr_continue_mutator;
- // libthread_init sets the above, if the new functionality is detected
-
// initialized to libthread or lwp synchronization primitives depending on UseLWPSychronization
static int_fnP_mutex_tP _mutex_lock;
static int_fnP_mutex_tP _mutex_trylock;
@@ -214,29 +201,6 @@
static struct sigaction *get_chained_signal_action(int sig);
static bool chained_handler(int sig, siginfo_t *siginfo, void *context);
- // The following allow us to link against both the old and new libthread (2.8)
- // and exploit the new libthread functionality if available.
-
- static bool T2_libthread() { return _T2_libthread; }
- static void set_T2_libthread(bool T2_libthread) { _T2_libthread = T2_libthread; }
-
- static int thr_getstate(thread_t tid, int *flag, unsigned *lwp, stack_t *ss, gregset_t rs)
- { return _thr_getstate(tid, flag, lwp, ss, rs); }
- static void set_thr_getstate(int_fnP_thread_t_iP_uP_stack_tP_gregset_t func)
- { _thr_getstate = func; }
-
- static int thr_setstate(thread_t tid, int flag, gregset_t rs) { return _thr_setstate(tid, flag, rs); }
- static void set_thr_setstate(int_fnP_thread_t_i_gregset_t func) { _thr_setstate = func; }
-
- static int thr_setmutator(thread_t tid, int enabled) { return _thr_setmutator(tid, enabled); }
- static void set_thr_setmutator(int_fnP_thread_t_i func) { _thr_setmutator = func; }
-
- static int thr_suspend_mutator(thread_t tid) { return _thr_suspend_mutator(tid); }
- static void set_thr_suspend_mutator(int_fnP_thread_t func) { _thr_suspend_mutator = func; }
-
- static int thr_continue_mutator(thread_t tid) { return _thr_continue_mutator(tid); }
- static void set_thr_continue_mutator(int_fnP_thread_t func) { _thr_continue_mutator = func; }
-
// Allows us to switch between lwp and thread -based synchronization
static int mutex_lock(mutex_t *mx) { return _mutex_lock(mx); }
static int mutex_trylock(mutex_t *mx) { return _mutex_trylock(mx); }
--- a/hotspot/src/os/windows/vm/os_windows.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -3518,7 +3518,7 @@
void os::yield() { os::NakedYield(); }
-void os::yield_all(int attempts) {
+void os::yield_all() {
// Yields to all threads, including threads with lower priorities
Sleep(1);
}
@@ -3864,12 +3864,6 @@
win32::setmode_streams();
init_page_sizes((size_t) win32::vm_page_size());
- // For better scalability on MP systems (must be called after initialize_system_info)
-#ifndef PRODUCT
- if (is_MP()) {
- NoYieldsInMicrolock = true;
- }
-#endif
// This may be overridden later when argument processing is done.
FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
os::win32::is_windows_2003());
--- a/hotspot/src/os/windows/vm/os_windows.inline.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.inline.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -52,9 +52,6 @@
return (void*)::GetProcAddress((HMODULE)lib, name);
}
-// Used to improve time-sharing on some systems
-inline void os::loop_breaker(int attempts) {}
-
inline bool os::obsolete_option(const JavaVMOption *option) {
return false;
}
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -270,31 +270,6 @@
}
}
-static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
- char lwpstatusfile[PROCFILE_LENGTH];
- int lwpfd, err;
-
- if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
- return (err);
- if (*flags == TRS_LWPID) {
- sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
- *lwp);
- if ((lwpfd = ::open(lwpstatusfile, O_RDONLY)) < 0) {
- perror("thr_mutator_status: open lwpstatus");
- return (EINVAL);
- }
- if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
- sizeof (lwpstatus_t)) {
- perror("thr_mutator_status: read lwpstatus");
- (void) ::close(lwpfd);
- return (EINVAL);
- }
- (void) ::close(lwpfd);
- }
- return (0);
-}
-
-
bool os::is_allocatable(size_t bytes) {
#ifdef _LP64
return true;
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -256,30 +256,6 @@
}
}
-static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
- char lwpstatusfile[PROCFILE_LENGTH];
- int lwpfd, err;
-
- if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
- return (err);
- if (*flags == TRS_LWPID) {
- sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
- *lwp);
- if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
- perror("thr_mutator_status: open lwpstatus");
- return (EINVAL);
- }
- if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
- sizeof (lwpstatus_t)) {
- perror("thr_mutator_status: read lwpstatus");
- (void) close(lwpfd);
- return (EINVAL);
- }
- (void) close(lwpfd);
- }
- return (0);
-}
-
#ifndef AMD64
// Detecting SSE support by OS
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -4180,8 +4180,12 @@
clear_class_metadata();
- // deallocate the klass if already created.
- MetadataFactory::free_metadata(_loader_data, _klass);
+ // deallocate the klass if already created. Don't directly deallocate, but add
+ // to the deallocate list so that the klass is removed from the CLD::_klasses list
+ // at a safepoint.
+ if (_klass != NULL) {
+ _loader_data->add_to_deallocate_list(_klass);
+ }
_klass = NULL;
}
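The comment above is the key point: the partially-built klass may already be linked into the ClassLoaderData::_klasses list, so freeing it immediately would leave a dangling entry that other code walking that list could still reach. A generic sketch of the deferred-free pattern, using simplified stand-in types rather than the HotSpot classes:

    #include <cstddef>
    #include <vector>

    // Stand-in for an object that is registered in a shared list on creation.
    struct Node {
      void unlink_from_shared_list() { /* remove this node from the shared list */ }
    };

    class DeallocateList {
      std::vector<Node*> _pending;
     public:
      // Called when construction fails; the node stays visible for now.
      void add(Node* n) { _pending.push_back(n); }

      // Called at a point where no other thread can be walking the shared
      // list (a safepoint): unlink first, then actually release the memory.
      void process() {
        for (size_t i = 0; i < _pending.size(); i++) {
          _pending[i]->unlink_from_shared_list();
          delete _pending[i];
        }
        _pending.clear();
      }
    };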
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -464,25 +464,26 @@
void java_lang_String::print(oop java_string, outputStream* st) {
assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string");
typeArrayOop value = java_lang_String::value(java_string);
- int offset = java_lang_String::offset(java_string);
- int length = java_lang_String::length(java_string);
-
- int end = MIN2(length, 100);
+
if (value == NULL) {
// This can happen if, e.g., printing a String
// object before its initializer has been called
- st->print_cr("NULL");
- } else {
- st->print("\"");
- for (int index = 0; index < length; index++) {
- st->print("%c", value->char_at(index + offset));
- }
- st->print("\"");
+ st->print("NULL");
+ return;
}
-}
-
-static void initialize_static_field(fieldDescriptor* fd, TRAPS) {
- Handle mirror (THREAD, fd->field_holder()->java_mirror());
+
+ int offset = java_lang_String::offset(java_string);
+ int length = java_lang_String::length(java_string);
+
+ st->print("\"");
+ for (int index = 0; index < length; index++) {
+ st->print("%c", value->char_at(index + offset));
+ }
+ st->print("\"");
+}
+
+
+static void initialize_static_field(fieldDescriptor* fd, Handle mirror, TRAPS) {
assert(mirror.not_null() && fd->is_static(), "just checking");
if (fd->has_initial_value()) {
BasicType t = fd->field_type();
@@ -549,21 +550,45 @@
create_mirror(k, Handle(NULL), CHECK);
}
-oop java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
+void java_lang_Class::initialize_mirror_fields(KlassHandle k,
+ Handle mirror,
+ Handle protection_domain,
+ TRAPS) {
+ // Allocate a simple java object for a lock.
+ // This needs to be a java object because during class initialization
+ // it can be held across a java call.
+ typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+ set_init_lock(mirror(), r);
+
+ // Set protection domain also
+ set_protection_domain(mirror(), protection_domain());
+
+ // Initialize static fields
+ InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK);
+}
+
+void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) {
assert(k->java_mirror() == NULL, "should only assign mirror once");
// Use this moment of initialization to cache modifier_flags also,
// to support Class.getModifiers(). Instance classes recalculate
// the cached flags after the class file is parsed, but before the
// class is put into the system dictionary.
- int computed_modifiers = k->compute_modifier_flags(CHECK_0);
+ int computed_modifiers = k->compute_modifier_flags(CHECK);
k->set_modifier_flags(computed_modifiers);
// Class_klass has to be loaded because it is used to allocate
// the mirror.
if (SystemDictionary::Class_klass_loaded()) {
// Allocate mirror (java.lang.Class instance)
- Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK_0);
+ Handle mirror = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->allocate_instance(k, CHECK);
+
+ // Set up the indirection from mirror->klass
+ if (!k.is_null()) {
+ java_lang_Class::set_klass(mirror(), k());
+ }
InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
+ assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
+
java_lang_Class::set_static_oop_field_count(mirror(), mk->compute_static_oop_field_count(mirror()));
// It might also have a component mirror. This mirror must already exist.
@@ -576,29 +601,32 @@
assert(k->oop_is_objArray(), "Must be");
Klass* element_klass = ObjArrayKlass::cast(k())->element_klass();
assert(element_klass != NULL, "Must have an element klass");
- comp_mirror = element_klass->java_mirror();
+ comp_mirror = element_klass->java_mirror();
}
assert(comp_mirror.not_null(), "must have a mirror");
- // Two-way link between the array klass and its component mirror:
+ // Two-way link between the array klass and its component mirror:
ArrayKlass::cast(k())->set_component_mirror(comp_mirror());
set_array_klass(comp_mirror(), k());
} else {
assert(k->oop_is_instance(), "Must be");
- // Allocate a simple java object for a lock.
- // This needs to be a java object because during class initialization
- // it can be held across a java call.
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_NULL);
- set_init_lock(mirror(), r);
-
- // Set protection domain also
- set_protection_domain(mirror(), protection_domain());
-
- // Initialize static fields
- InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, CHECK_NULL);
+ initialize_mirror_fields(k, mirror, protection_domain, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ // If any of the fields throws an exception like OOM, remove the klass field
+ // from the mirror so GC doesn't follow it after the klass has been deallocated.
+ // This mirror looks like a primitive type, which logically it is because
+ // it represents no class.
+ java_lang_Class::set_klass(mirror(), NULL);
+ return;
+ }
}
- return mirror();
+
+ // Set up the indirection from klass->mirror last,
+ // after all allocations that can throw exceptions have completed.
+ if (!k.is_null()) {
+ k->set_java_mirror(mirror());
+ }
} else {
if (fixup_mirror_list() == NULL) {
GrowableArray<Klass*>* list =
@@ -606,12 +634,10 @@
set_fixup_mirror_list(list);
}
fixup_mirror_list()->push(k());
- return NULL;
}
}
-
int java_lang_Class::oop_size(oop java_class) {
assert(_oop_size_offset != 0, "must be set");
return java_class->int_field(_oop_size_offset);
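The restructuring of create_mirror() above follows a publish-last pattern: the mirror->klass forward link is set as soon as the mirror exists, but the klass->mirror back link is set only after every allocation that can throw, and the forward link is cleared again if field initialization fails, so the mirror then looks like a primitive-type mirror. A simplified sketch with hypothetical Klass/Mirror types, not the HotSpot ones:

    #include <cstddef>

    struct Mirror;
    struct Klass  { Mirror* java_mirror; Klass()  : java_mirror(NULL) {} };
    struct Mirror { Klass*  klass;       Mirror() : klass(NULL) {} };

    // Stand-in for the static-field/init-lock setup; may fail (e.g. on OOM).
    static bool initialize_fields(Mirror*, Klass*) { return true; }

    static Mirror* create_mirror(Klass* k) {
      Mirror* mirror = new Mirror();
      mirror->klass = k;                  // forward link: safe to set early
      if (!initialize_fields(mirror, k)) {
        mirror->klass = NULL;             // failure: mirror now represents no class
        return NULL;                      // back link was never published
        // (in HotSpot the unreferenced mirror is reclaimed by GC)
      }
      k->java_mirror = mirror;            // publish klass->mirror last
      return mirror;
    }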
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -246,11 +246,12 @@
static void set_init_lock(oop java_class, oop init_lock);
static void set_protection_domain(oop java_class, oop protection_domain);
+ static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS);
public:
static void compute_offsets();
// Instance creation
- static oop create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
+ static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS);
static void fixup_mirror(KlassHandle k, TRAPS);
static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
// Conversion
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -810,11 +810,11 @@
const int limit = the_table()->table_size();
assert(0 <= start_idx && start_idx <= limit,
- err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+ err_msg("start_idx (%d) is out of bounds", start_idx));
assert(0 <= end_idx && end_idx <= limit,
- err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+ err_msg("end_idx (%d) is out of bounds", end_idx));
assert(start_idx <= end_idx,
- err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+ err_msg("Index ordering: start_idx=%d, end_idx=%d",
start_idx, end_idx));
for (int i = start_idx; i < end_idx; i += 1) {
@@ -833,11 +833,11 @@
const int limit = the_table()->table_size();
assert(0 <= start_idx && start_idx <= limit,
- err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+ err_msg("start_idx (%d) is out of bounds", start_idx));
assert(0 <= end_idx && end_idx <= limit,
- err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+ err_msg("end_idx (%d) is out of bounds", end_idx));
assert(start_idx <= end_idx,
- err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+ err_msg("Index ordering: start_idx=%d, end_idx=%d",
start_idx, end_idx));
for (int i = start_idx; i < end_idx; ++i) {
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -826,47 +826,6 @@
}
} // load_instance_class loop
- if (HAS_PENDING_EXCEPTION) {
- // An exception, such as OOM could have happened at various places inside
- // load_instance_class. We might have partially initialized a shared class
- // and need to clean it up.
- if (class_loader.is_null()) {
- // In some cases k may be null. Let's find the shared class again.
- instanceKlassHandle ik(THREAD, find_shared_class(name));
- if (ik.not_null()) {
- if (ik->class_loader_data() == NULL) {
- // We didn't go as far as Klass::restore_unshareable_info(),
- // so nothing to clean up.
- } else {
- Klass *kk;
- {
- MutexLocker mu(SystemDictionary_lock, THREAD);
- kk = find_class(d_index, d_hash, name, ik->class_loader_data());
- }
- if (kk != NULL) {
- // No clean up is needed if the shared class has been entered
- // into system dictionary, as load_shared_class() won't be called
- // again.
- } else {
- // This must be done outside of the SystemDictionary_lock to
- // avoid deadlock.
- //
- // Note that Klass::restore_unshareable_info (called via
- // load_instance_class above) is also called outside
- // of SystemDictionary_lock. Other threads are blocked from
- // loading this class because they are waiting on the
- // SystemDictionary_lock until this thread removes
- // the placeholder below.
- //
- // This need to be re-thought when parallel-capable non-boot
- // classloaders are supported by CDS (today they're not).
- clean_up_shared_class(ik, class_loader, THREAD);
- }
- }
- }
- }
- }
-
if (load_instance_added == true) {
// clean up placeholder entries for LOAD_INSTANCE success or error
// This brackets the SystemDictionary updates for both defining
@@ -1272,19 +1231,6 @@
return ik;
}
-void SystemDictionary::clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS) {
- // Updating methods must be done under a lock so multiple
- // threads don't update these in parallel
- // Shared classes are all currently loaded by the bootstrap
- // classloader, so this will never cause a deadlock on
- // a custom class loader lock.
- {
- Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
- check_loader_lock_contention(lockObject, THREAD);
- ObjectLocker ol(lockObject, THREAD, true);
- ik->remove_unshareable_info();
- }
-}
instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -617,7 +617,6 @@
Handle class_loader, TRAPS);
static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
Handle class_loader, TRAPS);
- static void clean_up_shared_class(instanceKlassHandle ik, Handle class_loader, TRAPS);
static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -57,10 +57,10 @@
_threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
- int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+ uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
ConcurrentG1RefineThread *next = NULL;
- for (int i = _n_threads - 1; i >= 0; i--) {
+ for (uint i = _n_threads - 1; i != UINT_MAX; i--) {
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
assert(t != NULL, "Conc refine should have been created");
if (t->osthread() == NULL) {
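A note on the loop rewritten above: with an unsigned counter, the usual reverse-loop condition i >= 0 is always true, so the loop instead relies on well-defined unsigned wrap-around: decrementing 0 yields UINT_MAX, which terminates the loop, and if _n_threads is 0 the initial value is already UINT_MAX, so the body never runs. Minimal standalone illustration:

    #include <limits.h>
    #include <stdio.h>

    int main() {
      unsigned int n = 3;
      // Counts down 2, 1, 0 and stops when 0 - 1 wraps to UINT_MAX.
      for (unsigned int i = n - 1; i != UINT_MAX; i--) {
        printf("index %u\n", i);
      }
      return 0;
    }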
@@ -87,7 +87,7 @@
void ConcurrentG1Refine::stop() {
if (_threads != NULL) {
- for (int i = 0; i < _n_threads; i++) {
+ for (uint i = 0; i < _n_threads; i++) {
_threads[i]->stop();
}
}
@@ -96,7 +96,7 @@
void ConcurrentG1Refine::reinitialize_threads() {
reset_threshold_step();
if (_threads != NULL) {
- for (int i = 0; i < _n_threads; i++) {
+ for (uint i = 0; i < _n_threads; i++) {
_threads[i]->initialize();
}
}
@@ -104,7 +104,7 @@
ConcurrentG1Refine::~ConcurrentG1Refine() {
if (_threads != NULL) {
- for (int i = 0; i < _n_threads; i++) {
+ for (uint i = 0; i < _n_threads; i++) {
delete _threads[i];
}
FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);
@@ -113,7 +113,7 @@
void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
if (_threads != NULL) {
- for (int i = 0; i < _n_threads; i++) {
+ for (uint i = 0; i < _n_threads; i++) {
tc->do_thread(_threads[i]);
}
}
@@ -121,20 +121,20 @@
void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
if (_threads != NULL) {
- for (int i = 0; i < worker_thread_num(); i++) {
+ for (uint i = 0; i < worker_thread_num(); i++) {
tc->do_thread(_threads[i]);
}
}
}
-int ConcurrentG1Refine::thread_num() {
- int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
+uint ConcurrentG1Refine::thread_num() {
+ uint n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
: ParallelGCThreads;
- return MAX2<int>(n_threads, 1);
+ return MAX2<uint>(n_threads, 1);
}
void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
- for (int i = 0; i < _n_threads; ++i) {
+ for (uint i = 0; i < _n_threads; ++i) {
_threads[i]->print_on(st);
st->cr();
}
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -39,8 +39,8 @@
class ConcurrentG1Refine: public CHeapObj<mtGC> {
ConcurrentG1RefineThread** _threads;
- int _n_threads;
- int _n_worker_threads;
+ uint _n_threads;
+ uint _n_worker_threads;
/*
* The value of the update buffer queue length falls into one of 3 zones:
* green, yellow, red. If the value is in [0, green) nothing is
@@ -88,7 +88,7 @@
// The RS sampling thread
ConcurrentG1RefineThread * sampling_thread() const;
- static int thread_num();
+ static uint thread_num();
void print_worker_threads_on(outputStream* st) const;
@@ -100,8 +100,8 @@
int yellow_zone() const { return _yellow_zone; }
int red_zone() const { return _red_zone; }
- int total_thread_num() const { return _n_threads; }
- int worker_thread_num() const { return _n_worker_threads; }
+ uint total_thread_num() const { return _n_threads; }
+ uint worker_thread_num() const { return _n_worker_threads; }
int thread_threshold_step() const { return _thread_threshold_step; }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -33,7 +33,7 @@
ConcurrentG1RefineThread::
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
- int worker_id_offset, int worker_id) :
+ uint worker_id_offset, uint worker_id) :
ConcurrentGCThread(),
_worker_id_offset(worker_id_offset),
_worker_id(worker_id),
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -38,8 +38,8 @@
double _vtime_start; // Initial virtual time.
double _vtime_accum; // Initial virtual time.
- int _worker_id;
- int _worker_id_offset;
+ uint _worker_id;
+ uint _worker_id_offset;
// The refinement threads collection is linked list. A predecessor can activate a successor
// when the number of the rset update buffer crosses a certain threshold. A successor
@@ -71,7 +71,7 @@
virtual void run();
// Constructor
ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
- int worker_id_offset, int worker_id);
+ uint worker_id_offset, uint worker_id);
void initialize();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -567,8 +567,8 @@
_root_regions.init(_g1h, this);
if (ConcGCThreads > ParallelGCThreads) {
- warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
- "than ParallelGCThreads (" UINT32_FORMAT ").",
+ warning("Can't have more ConcGCThreads (" UINTX_FORMAT ") "
+ "than ParallelGCThreads (" UINTX_FORMAT ").",
ConcGCThreads, ParallelGCThreads);
return;
}
@@ -1804,7 +1804,6 @@
class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
G1CollectedHeap* _g1;
- int _worker_num;
size_t _max_live_bytes;
uint _regions_claimed;
size_t _freed_bytes;
@@ -1817,10 +1816,9 @@
public:
G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
- int worker_num,
FreeRegionList* local_cleanup_list,
HRRSCleanupTask* hrrs_cleanup_task) :
- _g1(g1), _worker_num(worker_num),
+ _g1(g1),
_max_live_bytes(0), _regions_claimed(0),
_freed_bytes(0),
_claimed_region_time(0.0), _max_region_time(0.0),
@@ -1893,7 +1891,7 @@
double start = os::elapsedTime();
FreeRegionList local_cleanup_list("Local Cleanup List");
HRRSCleanupTask hrrs_cleanup_task;
- G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
+ G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
&hrrs_cleanup_task);
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
@@ -2145,7 +2143,7 @@
G1CollectedHeap* g1h = G1CollectedHeap::heap();
- _cleanup_list.verify_list();
+ _cleanup_list.verify_optional();
FreeRegionList tmp_free_list("Tmp Free List");
if (G1ConcRegionFreeingVerbose) {
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -34,12 +34,12 @@
bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
bool consume,
- size_t worker_i) {
+ uint worker_i) {
bool res = true;
if (_buf != NULL) {
res = apply_closure_to_buffer(cl, _buf, _index, _sz,
consume,
- (int) worker_i);
+ worker_i);
if (res && consume) _index = _sz;
}
return res;
@@ -49,7 +49,7 @@
void** buf,
size_t index, size_t sz,
bool consume,
- int worker_i) {
+ uint worker_i) {
if (cl == NULL) return true;
for (size_t i = index; i < sz; i += oopSize) {
int ind = byte_index_to_index((int)i);
@@ -79,8 +79,8 @@
}
// Determines how many mutator threads can process the buffers in parallel.
-size_t DirtyCardQueueSet::num_par_ids() {
- return os::processor_count();
+uint DirtyCardQueueSet::num_par_ids() {
+ return (uint)os::processor_count();
}
void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -103,7 +103,7 @@
}
void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
- size_t worker_i) {
+ uint worker_i) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
for(JavaThread* t = Threads::first(); t; t = t->next()) {
bool b = t->dirty_card_queue().apply_closure(_closure, consume);
@@ -126,11 +126,11 @@
// We get the the number of any par_id that this thread
// might have already claimed.
- int worker_i = thread->get_claimed_par_id();
+ uint worker_i = thread->get_claimed_par_id();
- // If worker_i is not -1 then the thread has already claimed
+ // If worker_i is not UINT_MAX then the thread has already claimed
// a par_id. We make note of it using the already_claimed value
- if (worker_i != -1) {
+ if (worker_i != UINT_MAX) {
already_claimed = true;
} else {
@@ -142,7 +142,7 @@
}
bool b = false;
- if (worker_i != -1) {
+ if (worker_i != UINT_MAX) {
b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
_sz, true, worker_i);
if (b) Atomic::inc(&_processed_buffers_mut);
@@ -154,8 +154,8 @@
// we release the id
_free_ids->release_par_id(worker_i);
- // and set the claimed_id in the thread to -1
- thread->set_claimed_par_id(-1);
+ // and set the claimed_id in the thread to UINT_MAX
+ thread->set_claimed_par_id(UINT_MAX);
}
}
return b;
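The hunk above switches the claimed par_id from a signed -1 sentinel to UINT_MAX, matching the new unsigned worker-id type. A small sketch of the sentinel idiom with hypothetical names (WorkerSlot is not a HotSpot class):

    #include <limits.h>

    class WorkerSlot {
      unsigned int _claimed_par_id;
     public:
      WorkerSlot() : _claimed_par_id(UINT_MAX) {}        // nothing claimed yet
      bool has_claimed() const { return _claimed_par_id != UINT_MAX; }
      void claim(unsigned int id)  { _claimed_par_id = id; }
      void release()               { _claimed_par_id = UINT_MAX; }
      unsigned int par_id() const  { return _claimed_par_id; }
    };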
@@ -186,7 +186,7 @@
bool DirtyCardQueueSet::
apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
- int worker_i,
+ uint worker_i,
BufferNode* nd) {
if (nd != NULL) {
void **buf = BufferNode::make_buffer_from_node(nd);
@@ -208,7 +208,7 @@
}
bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
- int worker_i,
+ uint worker_i,
int stop_at,
bool during_pause) {
assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
@@ -218,7 +218,7 @@
return res;
}
-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
+bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
int stop_at,
bool during_pause) {
return apply_closure_to_completed_buffer(_closure, worker_i,
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -36,7 +36,7 @@
public:
// Process the card whose card table entry is "card_ptr". If returns
// "false", terminate the iteration early.
- virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
+ virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
};
// A ptrQueue whose elements are "oops", pointers to object heads.
@@ -53,7 +53,7 @@
// deletes processed entries from logs.
bool apply_closure(CardTableEntryClosure* cl,
bool consume = true,
- size_t worker_i = 0);
+ uint worker_i = 0);
// Apply the closure to all elements of "buf", down to "index"
// (inclusive.) If returns "false", then a closure application returned
@@ -63,7 +63,7 @@
static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
void** buf, size_t index, size_t sz,
bool consume = true,
- int worker_i = 0);
+ uint worker_i = 0);
void **get_buf() { return _buf;}
void set_buf(void **buf) {_buf = buf;}
size_t get_index() { return _index;}
@@ -98,7 +98,7 @@
// The number of parallel ids that can be claimed to allow collector or
// mutator threads to do card-processing work.
- static size_t num_par_ids();
+ static uint num_par_ids();
static void handle_zero_index_for_thread(JavaThread* t);
@@ -115,7 +115,7 @@
// change in the future.) If "consume" is true, processed entries are
// discarded.
void iterate_closure_all_threads(bool consume = true,
- size_t worker_i = 0);
+ uint worker_i = 0);
// If there exists some completed buffer, pop it, then apply the
// registered closure to all its elements, nulling out those elements
@@ -124,7 +124,7 @@
// but is only partially completed before a "yield" happens, the
// partially completed buffer (with its processed elements set to NULL)
// is returned to the completed buffer set, and this call returns false.
- bool apply_closure_to_completed_buffer(int worker_i = 0,
+ bool apply_closure_to_completed_buffer(uint worker_i = 0,
int stop_at = 0,
bool during_pause = false);
@@ -136,13 +136,13 @@
// partially completed buffer (with its processed elements set to NULL)
// is returned to the completed buffer set, and this call returns false.
bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
- int worker_i = 0,
+ uint worker_i = 0,
int stop_at = 0,
bool during_pause = false);
// Helper routine for the above.
bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
- int worker_i,
+ uint worker_i,
BufferNode* nd);
BufferNode* get_completed_buffer(int stop_at);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -304,26 +304,26 @@
if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
guarantee(entry > N_words,
err_msg("Should be in logarithmic region - "
- "entry: " UINT32_FORMAT ", "
- "_array->offset_array(c): " UINT32_FORMAT ", "
- "N_words: " UINT32_FORMAT,
- entry, _array->offset_array(c), N_words));
+ "entry: %u, "
+ "_array->offset_array(c): %u, "
+ "N_words: %u",
+ (uint)entry, (uint)_array->offset_array(c), (uint)N_words));
}
size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
size_t landing_card = c - backskip;
guarantee(landing_card >= (start_card - 1), "Inv");
if (landing_card >= start_card) {
guarantee(_array->offset_array(landing_card) <= entry,
- err_msg("Monotonicity - landing_card offset: " UINT32_FORMAT ", "
- "entry: " UINT32_FORMAT,
- _array->offset_array(landing_card), entry));
+ err_msg("Monotonicity - landing_card offset: %u, "
+ "entry: %u",
+ (uint)_array->offset_array(landing_card), (uint)entry));
} else {
guarantee(landing_card == start_card - 1, "Tautology");
// Note that N_words is the maximum offset value
guarantee(_array->offset_array(landing_card) <= N_words,
- err_msg("landing card offset: " UINT32_FORMAT ", "
- "N_words: " UINT32_FORMAT,
- _array->offset_array(landing_card), N_words));
+ err_msg("landing card offset: %u, "
+ "N_words: %u",
+ (uint)_array->offset_array(landing_card), (uint)N_words));
}
}
}
@@ -554,21 +554,20 @@
(_array->offset_array(orig_index) > 0 &&
_array->offset_array(orig_index) <= N_words),
err_msg("offset array should have been set - "
- "orig_index offset: " UINT32_FORMAT ", "
+ "orig_index offset: %u, "
"blk_start: " PTR_FORMAT ", "
"boundary: " PTR_FORMAT,
- _array->offset_array(orig_index),
+ (uint)_array->offset_array(orig_index),
blk_start, boundary));
for (size_t j = orig_index + 1; j <= end_index; j++) {
assert(_array->offset_array(j) > 0 &&
_array->offset_array(j) <=
(u_char) (N_words+BlockOffsetArray::N_powers-1),
err_msg("offset array should have been set - "
- UINT32_FORMAT " not > 0 OR "
- UINT32_FORMAT " not <= " UINT32_FORMAT,
- _array->offset_array(j),
- _array->offset_array(j),
- (u_char) (N_words+BlockOffsetArray::N_powers-1)));
+ "%u not > 0 OR %u not <= %u",
+ (uint) _array->offset_array(j),
+ (uint) _array->offset_array(j),
+ (uint) (N_words+BlockOffsetArray::N_powers-1)));
}
#endif
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -146,8 +146,8 @@
void check_offset(size_t offset, const char* msg) const {
assert(offset <= N_words,
err_msg("%s - "
- "offset: " UINT32_FORMAT", N_words: " UINT32_FORMAT,
- msg, offset, N_words));
+ "offset: " SIZE_FORMAT", N_words: %u",
+ msg, offset, (uint)N_words));
}
// Bounds checking accessors:
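
The hunks above trade UINT32_FORMAT for a format that matches the argument's actual type, casting where needed. A standalone sketch of the rule being enforced, using plain printf rather than err_msg:

    #include <cstdio>
    #include <cstddef>

    int main() {
      size_t offset = 42;          // like the size_t offsets above
      unsigned char entry = 17;    // like the u_char offset-array entries

      // printf("%u", offset) would pass a size_t where unsigned int is
      // expected, which is undefined behavior on LP64. Either cast to the
      // type the format names ...
      std::printf("entry: %u\n", (unsigned int)entry);
      // ... or pick a format that matches the argument (SIZE_FORMAT plays
      // the role of "%zu" in the HotSpot code).
      std::printf("offset: %zu\n", offset);
      return 0;
    }
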
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -102,7 +102,7 @@
ConcurrentG1Refine* cg1r) :
_sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
{}
- bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
// This path is executed by the concurrent refine or mutator threads,
// concurrently, and so we do not care if card_ptr contains references
@@ -131,7 +131,7 @@
{
for (int i = 0; i < 256; i++) _histo[i] = 0;
}
- bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
_calls++;
unsigned char* ujb = (unsigned char*)card_ptr;
@@ -160,7 +160,7 @@
RedirtyLoggedCardTableEntryClosure() :
_calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
- bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
_calls++;
*card_ptr = 0;
@@ -1288,7 +1288,7 @@
print_heap_before_gc();
trace_heap_before_gc(gc_tracer);
- size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+ size_t metadata_prev_used = MetaspaceAux::used_bytes();
verify_region_sets_optional();
@@ -2314,7 +2314,7 @@
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
DirtyCardQueue* into_cset_dcq,
bool concurrent,
- int worker_i) {
+ uint worker_i) {
// Clean cards in the hot card cache
G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
@@ -2843,7 +2843,7 @@
// Given the id of a worker, obtain or calculate a suitable
// starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
+HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
assert(get_gc_time_stamp() > 0, "should have been updated by now");
HeapRegion* result = NULL;
@@ -5103,7 +5103,7 @@
OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs,
G1KlassScanClosure* scan_klasses,
- int worker_i) {
+ uint worker_i) {
// First scan the strong roots
double ext_roots_start = os::elapsedTime();
@@ -5207,10 +5207,10 @@
~G1StringSymbolTableUnlinkTask() {
guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
- err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
+ err_msg("claim value %d after unlink less than initial string table size %d",
StringTable::parallel_claimed_index(), _initial_string_table_size));
guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
- err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
+ err_msg("claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
}
@@ -5275,7 +5275,7 @@
class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
public:
- bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
*card_ptr = CardTableModRefBS::dirty_card_val();
return true;
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -845,7 +845,7 @@
OopClosure* scan_non_heap_roots,
OopsInHeapRegionClosure* scan_rs,
G1KlassScanClosure* scan_klasses,
- int worker_i);
+ uint worker_i);
// Notifies all the necessary spaces that the committed space has
// been updated (either expanded or shrunk). It should be called
@@ -1139,7 +1139,7 @@
void iterate_dirty_card_closure(CardTableEntryClosure* cl,
DirtyCardQueue* into_cset_dcq,
- bool concurrent, int worker_i);
+ bool concurrent, uint worker_i);
// The shared block offset table array.
G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
@@ -1370,7 +1370,7 @@
// Given the id of a worker, obtain or calculate a suitable
// starting region for iterating over the current collection set.
- HeapRegion* start_cset_region_for_worker(int worker_i);
+ HeapRegion* start_cset_region_for_worker(uint worker_i);
// This is a convenience method that is used by the
// HeapRegionIterator classes to calculate the starting region for
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1204,7 +1204,7 @@
(_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
if (full) {
- _metaspace_used_bytes_before_gc = MetaspaceAux::allocated_used_bytes();
+ _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
}
}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -147,7 +147,7 @@
void WorkerDataArray<T>::verify() {
for (uint i = 0; i < _length; i++) {
assert(_data[i] != _uninitialized,
- err_msg("Invalid data for worker " UINT32_FORMAT ", data: %lf, uninitialized: %lf",
+ err_msg("Invalid data for worker %u, data: %lf, uninitialized: %lf",
i, (double)_data[i], (double)_uninitialized));
}
}
@@ -246,8 +246,8 @@
LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
}
-void G1GCPhaseTimes::print_stats(int level, const char* str, double value, int workers) {
- LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
+void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
+ LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
}
double G1GCPhaseTimes::accounted_time_ms() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -161,7 +161,7 @@
// Helper methods for detailed logging
void print_stats(int level, const char* str, double value);
- void print_stats(int level, const char* str, double value, int workers);
+ void print_stats(int level, const char* str, double value, uint workers);
public:
G1GCPhaseTimes(uint max_gc_threads);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -44,9 +44,9 @@
_hot_cache_idx = 0;
// For refining the cards in the hot cache in parallel
- int n_workers = (ParallelGCThreads > 0 ?
+ uint n_workers = (ParallelGCThreads > 0 ?
_g1h->workers()->total_workers() : 1);
- _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
+ _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
_hot_cache_par_claimed_idx = 0;
_card_counts.initialize();
@@ -89,7 +89,7 @@
return res;
}
-void G1HotCardCache::drain(int worker_i,
+void G1HotCardCache::drain(uint worker_i,
G1RemSet* g1rs,
DirtyCardQueue* into_cset_dcq) {
if (!default_use_cache()) {
@@ -122,8 +122,8 @@
// RSet updating while within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
- assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
- err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
+ assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
+ err_msg("incorrect worker id: %u", worker_i));
into_cset_dcq->enqueue(card_ptr);
}
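
A small sketch of why the cast to int appears in the MAX2 call above: MAX2 is, in effect, a single-type template, so the now-unsigned worker count has to be brought back to the chunk size's type before the two arguments can be compared (max2 below is a stand-in for MAX2):

    template <class T> static T max2(T a, T b) { return a > b ? a : b; }

    static int chunk_size(int hot_cache_size, unsigned int n_workers) {
      // max2(1, hot_cache_size / n_workers) would not deduce a single T
      // (int vs. unsigned int); casting n_workers keeps the division and
      // the comparison signed, matching the hunk above.
      return max2(1, hot_cache_size / (int)n_workers);
    }
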
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -99,7 +99,7 @@
// Refine the cards that have delayed as a result of
// being in the cache.
- void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+ void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
// Set up for parallel processing of the cards in the hot cache
void reset_hot_cache_claimed_index() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -234,14 +234,14 @@
HeapRegion* _from;
OopsInHeapRegionClosure* _push_ref_cl;
bool _record_refs_into_cset;
- int _worker_i;
+ uint _worker_i;
public:
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
bool record_refs_into_cset,
- int worker_i = 0);
+ uint worker_i = 0);
void set_from(HeapRegion* from) {
assert(from != NULL, "from region must be non-NULL");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -113,14 +113,14 @@
G1SATBCardTableModRefBS *_ct_bs;
double _strong_code_root_scan_time_sec;
- int _worker_i;
+ uint _worker_i;
int _block_size;
bool _try_claimed;
public:
ScanRSClosure(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
- int worker_i) :
+ uint worker_i) :
_oc(oc),
_code_root_cl(code_root_cl),
_strong_code_root_scan_time_sec(0.0),
@@ -162,7 +162,7 @@
void printCard(HeapRegion* card_region, size_t card_index,
HeapWord* card_start) {
- gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
+ gclog_or_tty->print_cr("T %u Region [" PTR_FORMAT ", " PTR_FORMAT ") "
"RS names card %p: "
"[" PTR_FORMAT ", " PTR_FORMAT ")",
_worker_i,
@@ -241,7 +241,7 @@
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
- int worker_i) {
+ uint worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
@@ -274,13 +274,13 @@
DirtyCardQueue* into_cset_dcq) :
_g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
{}
- bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
// The only time we care about recording cards that
// contain references that point into the collection set
// is during RSet updating within an evacuation pause.
// In this case worker_i should be the id of a GC worker thread.
assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
- assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
+ assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
if (_g1rs->refine_card(card_ptr, worker_i, true)) {
// 'card_ptr' contains references that point into the collection
@@ -295,7 +295,7 @@
}
};
-void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
double start = os::elapsedTime();
// Apply the given closure to all remaining log entries.
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@@ -320,14 +320,14 @@
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
- int worker_i) {
+ uint worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
#endif
// We cache the value of 'oc' closure into the appropriate slot in the
// _cset_rs_update_cl for this worker
- assert(worker_i < (int)n_workers(), "sanity");
+ assert(worker_i < n_workers(), "sanity");
_cset_rs_update_cl[worker_i] = oc;
// A DirtyCardQueue that is used to hold cards containing references
@@ -399,7 +399,7 @@
_g1(g1), _ct_bs(bs)
{ }
- bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+ bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
// Construct the region representing the card.
HeapWord* start = _ct_bs->addr_for(card_ptr);
// And find the region containing it.
@@ -543,7 +543,7 @@
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
bool record_refs_into_cset,
- int worker_i) :
+ uint worker_i) :
_g1(g1h), _g1_rem_set(rs), _from(NULL),
_record_refs_into_cset(record_refs_into_cset),
_push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
@@ -552,7 +552,7 @@
// into the collection set, if we're checking for such references;
// false otherwise.
-bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
bool check_for_refs_into_cset) {
// If the card is no longer dirty, nothing to do.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -97,7 +97,7 @@
// In the sequential case this param will be ignored.
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
CodeBlobToOopClosure* code_root_cl,
- int worker_i);
+ uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
@@ -109,9 +109,9 @@
void scanRS(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
- int worker_i);
+ uint worker_i);
- void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+ void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
CardTableModRefBS* ct_bs() { return _ct_bs; }
size_t cardsScanned() { return _total_cards_scanned; }
@@ -138,7 +138,7 @@
// if the given card contains oops that have references into the
// current collection set.
virtual bool refine_card(jbyte* card_ptr,
- int worker_i,
+ uint worker_i,
bool check_for_refs_into_cset);
// Print accumulated summary info from the start of the VM.
@@ -171,12 +171,12 @@
class UpdateRSOopClosure: public ExtendedOopClosure {
HeapRegion* _from;
G1RemSet* _rs;
- int _worker_i;
+ uint _worker_i;
template <class T> void do_oop_work(T* p);
public:
- UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
+ UpdateRSOopClosure(G1RemSet* rs, uint worker_i = 0) :
_from(NULL), _rs(rs), _worker_i(worker_i)
{}
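
With worker ids unsigned end to end, the bounds asserts in this changeset drop their casts and a negative id can no longer be passed in by mistake. A minimal stand-in for the check, assuming the thread count arrives as an unsigned value:

    #include <cassert>

    static void check_worker_id(unsigned int worker_i,
                                unsigned int parallel_gc_threads) {
      // Zero GC threads is treated as a single worker, as in the asserts above.
      unsigned int n = (parallel_gc_threads == 0) ? 1 : parallel_gc_threads;
      assert(worker_i < n && "incorrect worker id");
    }
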
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -390,7 +390,7 @@
void FromCardCache::print(outputStream* out) {
for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
for (uint j = 0; j < _max_regions; j++) {
- out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
+ out->print_cr("_from_card_cache[%u][%u] = %d.",
i, j, at(i, j));
}
}
@@ -430,7 +430,7 @@
int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
if (G1TraceHeapRegionRememberedSet) {
- gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
+ gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
hr()->bottom(), from_card,
FromCardCache::at((uint)tid, cur_hrs_ind));
}
@@ -853,13 +853,13 @@
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
- return (uint)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
+ return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa),
- _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
+ _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true),
_code_roots(), _other_regions(hr, &_m) {
reset_for_par_iteration();
}
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -30,7 +30,7 @@
inline void HeapRegionSetBase::add(HeapRegion* hr) {
check_mt_safety();
assert(hr->containing_set() == NULL, hrs_ext_msg(this, "should not already have a containing set %u"));
- assert(hr->next() == NULL, hrs_ext_msg(this, "should not already be linked"));
+ assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should not already be linked"));
_count.increment(1u, hr->capacity());
hr->set_containing_set(this);
@@ -40,7 +40,7 @@
inline void HeapRegionSetBase::remove(HeapRegion* hr) {
check_mt_safety();
verify_region(hr);
- assert(hr->next() == NULL, hrs_ext_msg(this, "should already be unlinked"));
+ assert(hr->next() == NULL && hr->prev() == NULL, hrs_ext_msg(this, "should already be unlinked"));
hr->set_containing_set(NULL);
assert(_count.length() > 0, hrs_ext_msg(this, "pre-condition"));
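
The strengthened asserts above check both links now that regions on a list carry a prev pointer as well as a next pointer. A self-contained sketch of the invariant (RegionNode is a stand-in, not the HeapRegion type):

    #include <cassert>
    #include <cstddef>

    struct RegionNode {
      RegionNode* next;
      RegionNode* prev;
      RegionNode() : next(NULL), prev(NULL) {}
      bool is_unlinked() const { return next == NULL && prev == NULL; }
    };

    int main() {
      RegionNode r, other;
      assert(r.is_unlinked());     // safe to add to a set
      r.prev = &other;             // half-linked: next is still NULL
      assert(!r.is_unlinked());    // a next-only check would miss this
      return 0;
    }
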
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -290,7 +290,7 @@
shared_satb_queue()->apply_closure_and_empty(_closure);
}
-void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
+void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
SharedHeap* sh = SharedHeap::heap();
int parity = sh->strong_roots_parity();
@@ -315,7 +315,7 @@
}
bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
- int worker) {
+ uint worker) {
BufferNode* nd = NULL;
{
MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
--- a/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/satbQueue.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -84,7 +84,7 @@
// Utility function to support sequential and parallel versions. If
// "par" is true, then "worker" is the par thread id; if "false", worker
// is ignored.
- bool apply_closure_to_completed_buffer_work(bool par, int worker);
+ bool apply_closure_to_completed_buffer_work(bool par, uint worker);
#ifdef ASSERT
void dump_active_states(bool expected_active);
@@ -124,7 +124,7 @@
// be called serially and at a safepoint.
void iterate_closure_all_threads();
// Parallel version of the above.
- void par_iterate_closure_all_threads(int worker);
+ void par_iterate_closure_all_threads(uint worker);
// If there exists some completed buffer, pop it, then apply the
// registered closure to all its elements, and return true. If no
@@ -133,7 +133,7 @@
return apply_closure_to_completed_buffer_work(false, 0);
}
// Parallel version of the above.
- bool par_apply_closure_to_completed_buffer(int worker) {
+ bool par_apply_closure_to_completed_buffer(uint worker) {
return apply_closure_to_completed_buffer_work(true, worker);
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -184,7 +184,7 @@
size_t prev_used = heap->used();
// Capture metadata size before collection for sizing.
- size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+ size_t metadata_prev_used = MetaspaceAux::used_bytes();
// For PrintGCDetails
size_t old_gen_prev_used = old_gen->used_in_bytes();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -928,7 +928,7 @@
_heap_used = heap->used();
_young_gen_used = heap->young_gen()->used_in_bytes();
_old_gen_used = heap->old_gen()->used_in_bytes();
- _metadata_used = MetaspaceAux::allocated_used_bytes();
+ _metadata_used = MetaspaceAux::used_bytes();
};
size_t heap_used() const { return _heap_used; }
--- a/hotspot/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -62,16 +62,16 @@
};
class MetaspaceSizes : public StackObj {
- size_t _capacity;
+ size_t _committed;
size_t _used;
size_t _reserved;
public:
- MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {}
- MetaspaceSizes(size_t capacity, size_t used, size_t reserved) :
- _capacity(capacity), _used(used), _reserved(reserved) {}
+ MetaspaceSizes() : _committed(0), _used(0), _reserved(0) {}
+ MetaspaceSizes(size_t committed, size_t used, size_t reserved) :
+ _committed(committed), _used(used), _reserved(reserved) {}
- size_t capacity() const { return _capacity; }
+ size_t committed() const { return _committed; }
size_t used() const { return _used; }
size_t reserved() const { return _reserved; }
};
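
The summary type now reports committed rather than capacity in its first slot. A trimmed stand-in showing how the reworked constructor and accessors line up (the real callers appear in gcTraceSend.cpp and collectedHeap.cpp below):

    #include <cstddef>

    class MetaspaceSizesSketch {
      size_t _committed;
      size_t _used;
      size_t _reserved;
    public:
      MetaspaceSizesSketch(size_t committed, size_t used, size_t reserved)
        : _committed(committed), _used(used), _reserved(reserved) {}
      size_t committed() const { return _committed; }
      size_t used() const      { return _used; }
      size_t reserved() const  { return _reserved; }
    };

    // e.g. MetaspaceSizesSketch meta(committed_bytes, used_bytes, reserved_bytes);
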
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -258,7 +258,7 @@
static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
TraceStructMetaspaceSizes meta_sizes;
- meta_sizes.set_capacity(sizes.capacity());
+ meta_sizes.set_committed(sizes.committed());
meta_sizes.set_used(sizes.used());
meta_sizes.set_reserved(sizes.reserved());
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,16 +85,16 @@
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
const MetaspaceSizes meta_space(
- MetaspaceAux::allocated_capacity_bytes(),
- MetaspaceAux::allocated_used_bytes(),
+ MetaspaceAux::committed_bytes(),
+ MetaspaceAux::used_bytes(),
MetaspaceAux::reserved_bytes());
const MetaspaceSizes data_space(
- MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
- MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
+ MetaspaceAux::committed_bytes(Metaspace::NonClassType),
+ MetaspaceAux::used_bytes(Metaspace::NonClassType),
MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
const MetaspaceSizes class_space(
- MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
- MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
+ MetaspaceAux::committed_bytes(Metaspace::ClassType),
+ MetaspaceAux::used_bytes(Metaspace::ClassType),
MetaspaceAux::reserved_bytes(Metaspace::ClassType));
const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
@@ -582,36 +582,6 @@
}
}
-oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
- debug_only(check_for_valid_allocation_state());
- assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
- assert(size >= 0, "int won't convert to size_t");
- HeapWord* obj;
- assert(ScavengeRootsInCode > 0, "must be");
- obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
- post_allocation_setup_common(klass, obj);
- assert(Universe::is_bootstrapping() ||
- !((oop)obj)->is_array(), "must not be an array");
- NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
- oop mirror = (oop)obj;
-
- java_lang_Class::set_oop_size(mirror, size);
-
- // Setup indirections
- if (!real_klass.is_null()) {
- java_lang_Class::set_klass(mirror, real_klass());
- real_klass->set_java_mirror(mirror);
- }
-
- InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
- assert(size == mk->instance_size(real_klass), "should have been set");
-
- // notify jvmti and dtrace
- post_allocation_notify(klass, (oop)obj);
-
- return mirror;
-}
-
/////////////// Unit tests ///////////////
#ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -312,9 +312,6 @@
// May be overridden to set additional parallelism.
virtual void set_par_threads(uint t) { _n_par_threads = t; };
- // Allocate and initialize instances of Class
- static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);
-
// General obj/array allocation facilities.
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -257,6 +257,12 @@
assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment");
assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment");
assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment");
+ assert(_min_gen0_size <= bound_minus_alignment(_min_gen0_size, _min_heap_byte_size),
+ "Ergonomics made minimum young generation larger than minimum heap");
+ assert(_initial_gen0_size <= bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size),
+ "Ergonomics made initial young generation larger than initial heap");
+ assert(_max_gen0_size <= bound_minus_alignment(_max_gen0_size, _max_heap_byte_size),
+ "Ergonomics made maximum young generation lager than maximum heap");
}
void TwoGenerationCollectorPolicy::assert_size_info() {
@@ -267,6 +273,9 @@
assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment");
assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment");
assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes");
+ assert(_min_gen0_size + _min_gen1_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size");
+ assert(_initial_gen0_size + _initial_gen1_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size");
+ assert(_max_gen0_size + _max_gen1_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size");
}
#endif // ASSERT
@@ -303,20 +312,26 @@
}
}
+ // Make sure NewSize allows an old generation to fit even if set on the command line
+ if (FLAG_IS_CMDLINE(NewSize) && NewSize >= _initial_heap_byte_size) {
+ warning("NewSize was set larger than initial heap size, will use initial heap size.");
+ NewSize = bound_minus_alignment(NewSize, _initial_heap_byte_size);
+ }
+
// Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller or unaligned value.
- smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment));
- if (smallest_new_size != NewSize) {
+ uintx bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize);
+ bounded_new_size = MAX2(smallest_new_size, (uintx)align_size_down(bounded_new_size, _gen_alignment));
+ if (bounded_new_size != NewSize) {
// Do not use FLAG_SET_ERGO to update NewSize here, since this will override
// if NewSize was set on the command line or not. This information is needed
// later when setting the initial and minimum young generation size.
- NewSize = smallest_new_size;
+ NewSize = bounded_new_size;
}
+ _min_gen0_size = smallest_new_size;
_initial_gen0_size = NewSize;
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
- uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size);
-
if (MaxNewSize >= MaxHeapSize) {
// Make sure there is room for an old generation
uintx smaller_max_new_size = MaxHeapSize - _gen_alignment;
@@ -330,8 +345,8 @@
FLAG_SET_ERGO(uintx, NewSize, MaxNewSize);
_initial_gen0_size = NewSize;
}
- } else if (MaxNewSize < min_new_size) {
- FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size);
+ } else if (MaxNewSize < _initial_gen0_size) {
+ FLAG_SET_ERGO(uintx, MaxNewSize, _initial_gen0_size);
} else if (!is_size_aligned(MaxNewSize, _gen_alignment)) {
FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment));
}
@@ -361,7 +376,9 @@
GenCollectorPolicy::initialize_flags();
if (!is_size_aligned(OldSize, _gen_alignment)) {
- FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment));
+ // Set OldSize directly here to preserve the information about whether
+ // OldSize was set on the command line.
+ OldSize = align_size_down(OldSize, _gen_alignment);
}
if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
@@ -400,6 +417,20 @@
}
}
+ // Update NewSize, if possible, to avoid sizing gen0 too small when only
+ // OldSize is set on the command line.
+ if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) {
+ if (OldSize < _initial_heap_byte_size) {
+ size_t new_size = _initial_heap_byte_size - OldSize;
+ // Need to compare against the flag value for max since _max_gen0_size
+ // might not have been set yet.
+ if (new_size >= _min_gen0_size && new_size <= MaxNewSize) {
+ FLAG_SET_ERGO(uintx, NewSize, new_size);
+ _initial_gen0_size = NewSize;
+ }
+ }
+ }
+
always_do_update_barrier = UseConcMarkSweepGC;
DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();)
@@ -441,57 +472,37 @@
// Given the maximum gen0 size, determine the initial and
// minimum gen0 sizes.
- if (_max_heap_byte_size == _min_heap_byte_size) {
- // The maximum and minimum heap sizes are the same so the generations
- // minimum and initial must be the same as its maximum.
- _min_gen0_size = max_new_size;
- _initial_gen0_size = max_new_size;
- _max_gen0_size = max_new_size;
+ if (_max_heap_byte_size == _initial_heap_byte_size) {
+ // The maximum and initial heap sizes are the same so the generation's
+ // initial size must be the same as its maximum size. Use NewSize as the
+ // size if set on command line.
+ size_t fixed_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : max_new_size;
+
+ _initial_gen0_size = fixed_young_size;
+ _max_gen0_size = fixed_young_size;
+
+ // Also update the minimum size if min == initial == max.
+ if (_max_heap_byte_size == _min_heap_byte_size) {
+ _min_gen0_size = fixed_young_size;
+ }
} else {
size_t desired_new_size = 0;
if (FLAG_IS_CMDLINE(NewSize)) {
- // If NewSize is set on the command line, we must use it as
- // the initial size and it also makes sense to use it as the
- // lower limit.
- _min_gen0_size = NewSize;
- desired_new_size = NewSize;
- max_new_size = MAX2(max_new_size, NewSize);
- } else if (FLAG_IS_ERGO(NewSize)) {
- // If NewSize is set ergonomically, we should use it as a lower
- // limit, but use NewRatio to calculate the initial size.
- _min_gen0_size = NewSize;
+ // If NewSize is set on the command line, we should use it as
+ // the initial size, but make sure it is within the heap bounds.
desired_new_size =
- MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
- max_new_size = MAX2(max_new_size, NewSize);
+ MIN2(max_new_size, bound_minus_alignment(NewSize, _initial_heap_byte_size));
+ _min_gen0_size = bound_minus_alignment(desired_new_size, _min_heap_byte_size);
} else {
- // For the case where NewSize is the default, use NewRatio
- // to size the minimum and initial generation sizes.
- // Use the default NewSize as the floor for these values. If
- // NewRatio is overly large, the resulting sizes can be too small.
- _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
+ // For the case where NewSize is not set on the command line, use
+ // NewRatio to size the initial generation size. Use the current
+ // NewSize as the floor, because if NewRatio is overly large, the resulting
+ // size can be too small.
desired_new_size =
- MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
+ MIN2(max_new_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize));
}
-
- assert(_min_gen0_size > 0, "Sanity check");
_initial_gen0_size = desired_new_size;
_max_gen0_size = max_new_size;
-
- // At this point the desirable initial and minimum sizes have been
- // determined without regard to the maximum sizes.
-
- // Bound the sizes by the corresponding overall heap sizes.
- _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
- _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
- _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
-
- // At this point all three sizes have been checked against the
- // maximum sizes but have not been checked for consistency among the three.
-
- // Final check min <= initial <= max
- _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
- _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
- _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
}
// Write back to flags if necessary.
@@ -512,33 +523,6 @@
DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}
-// Call this method during the sizing of the gen1 to make
-// adjustments to gen0 because of gen1 sizing policy. gen0 initially has
-// the most freedom in sizing because it is done before the
-// policy for gen1 is applied. Once gen1 policies have been applied,
-// there may be conflicts in the shape of the heap and this method
-// is used to make the needed adjustments. The application of the
-// policies could be more sophisticated (iterative for example) but
-// keeping it simple also seems a worthwhile goal.
-bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
- size_t* gen1_size_ptr,
- const size_t heap_size) {
- bool result = false;
-
- if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) {
- uintx smallest_new_size = young_gen_size_lower_bound();
- if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) &&
- (heap_size >= _min_gen1_size + smallest_new_size)) {
- // Adjust gen0 down to accommodate _min_gen1_size
- *gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment);
- result = true;
- } else {
- *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment);
- }
- }
- return result;
-}
-
// Minimum sizes of the generations may be different than
// the initial sizes. An inconsistency is permitted here
// in the total size that can be specified explicitly by
@@ -564,57 +548,64 @@
// with the overall heap size). In either case make
// the minimum, maximum and initial sizes consistent
// with the gen0 sizes and the overall heap sizes.
- _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment);
- _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment);
+ _min_gen1_size = _gen_alignment;
+ _initial_gen1_size = MIN2(_max_gen1_size, MAX2(_initial_heap_byte_size - _initial_gen0_size, _min_gen1_size));
// _max_gen1_size has already been made consistent above
FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
} else {
- // OldSize has been explicitly set on the command line. Use the
- // OldSize and then determine the consequences.
- _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
- _initial_gen1_size = OldSize;
-
+ // OldSize has been explicitly set on the command line. Use it
+ // for the initial size, but make sure the minimum allows a young
+ // generation to fit as well.
// If the user has explicitly set an OldSize that is inconsistent
// with other command line flags, issue a warning.
// The generation minimums and the overall heap minimum should
// be within one generation alignment.
- if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
- warning("Inconsistency between minimum heap size and minimum "
- "generation sizes: using minimum heap = " SIZE_FORMAT,
- _min_heap_byte_size);
- }
if (OldSize > _max_gen1_size) {
warning("Inconsistency between maximum heap size and maximum "
- "generation sizes: using maximum heap = " SIZE_FORMAT
- " -XX:OldSize flag is being ignored",
- _max_heap_byte_size);
+ "generation sizes: using maximum heap = " SIZE_FORMAT
+ " -XX:OldSize flag is being ignored",
+ _max_heap_byte_size);
+ FLAG_SET_ERGO(uintx, OldSize, _max_gen1_size);
}
- // If there is an inconsistency between the OldSize and the minimum and/or
- // initial size of gen0, since OldSize was explicitly set, OldSize wins.
- if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) {
- if (PrintGCDetails && Verbose) {
- gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
- SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
- _min_gen0_size, _initial_gen0_size, _max_gen0_size);
- }
+
+ _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
+ _initial_gen1_size = OldSize;
+ }
+
+ // The initial generation sizes should match the initial heap size;
+ // if not, issue a warning and resize the generations. This behavior
+ // differs from JDK8 where the generation sizes have higher priority
+ // than the initial heap size.
+ if ((_initial_gen1_size + _initial_gen0_size) != _initial_heap_byte_size) {
+ warning("Inconsistency between generation sizes and heap size, resizing "
+ "the generations to fit the heap.");
+
+ size_t desired_gen0_size = _initial_heap_byte_size - _initial_gen1_size;
+ if (_initial_heap_byte_size < _initial_gen1_size) {
+ // Old gen wants all the memory; use the minimum for young and the rest for old
+ _initial_gen0_size = _min_gen0_size;
+ _initial_gen1_size = _initial_heap_byte_size - _min_gen0_size;
+ } else if (desired_gen0_size > _max_gen0_size) {
+ // Need to increase both young and old generation
+ _initial_gen0_size = _max_gen0_size;
+ _initial_gen1_size = _initial_heap_byte_size - _max_gen0_size;
+ } else if (desired_gen0_size < _min_gen0_size) {
+ // Need to decrease both young and old generation
+ _initial_gen0_size = _min_gen0_size;
+ _initial_gen1_size = _initial_heap_byte_size - _min_gen0_size;
+ } else {
+ // The young generation boundaries allow us to only update the
+ // young generation.
+ _initial_gen0_size = desired_gen0_size;
}
- // The same as above for the old gen initial size.
- if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
- _initial_heap_byte_size)) {
- if (PrintGCDetails && Verbose) {
- gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
- SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
- _min_gen0_size, _initial_gen0_size, _max_gen0_size);
- }
+
+ if (PrintGCDetails && Verbose) {
+ gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 "
+ SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT,
+ _min_gen0_size, _initial_gen0_size, _max_gen0_size);
}
}
- _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
-
- // Make sure that min gen1 <= initial gen1 <= max gen1.
- _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
- _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
-
// Write back to flags if necessary
if (NewSize != _initial_gen0_size) {
FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
@@ -994,56 +985,88 @@
// verify that there are some basic rules for NewSize honored by the policies.
class TestGenCollectorPolicy {
public:
- static void test() {
+ static void test_new_size() {
size_t flag_value;
save_flags();
- // Set some limits that makes the math simple.
- FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
- FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
- Arguments::set_min_heap_size(40 * M);
-
// If NewSize is set on the command line, it should be used
// for both min and initial young size if less than min heap.
flag_value = 20 * M;
+ set_basic_flag_values();
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
- verify_min(flag_value);
- verify_initial(flag_value);
+ verify_gen0_min(flag_value);
+
+ set_basic_flag_values();
+ FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+ verify_gen0_initial(flag_value);
// If NewSize is set on command line, but is larger than the min
// heap size, it should only be used for initial young size.
flag_value = 80 * M;
+ set_basic_flag_values();
FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
- verify_initial(flag_value);
+ verify_gen0_initial(flag_value);
// If NewSize has been ergonomically set, the collector policy
// should use it for min but calculate the initial young size
// using NewRatio.
flag_value = 20 * M;
+ set_basic_flag_values();
FLAG_SET_ERGO(uintx, NewSize, flag_value);
- verify_min(flag_value);
- verify_scaled_initial(InitialHeapSize);
+ verify_gen0_min(flag_value);
+
+ set_basic_flag_values();
+ FLAG_SET_ERGO(uintx, NewSize, flag_value);
+ verify_scaled_gen0_initial(InitialHeapSize);
restore_flags();
+ }
+
+ static void test_old_size() {
+ size_t flag_value;
+
+ save_flags();
+
+ // If OldSize is set on the command line, it should be used
+ // for both min and initial old size if less than min heap.
+ flag_value = 20 * M;
+ set_basic_flag_values();
+ FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+ verify_gen1_min(flag_value);
+
+ set_basic_flag_values();
+ FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+ verify_gen1_initial(flag_value);
+
+ // If MaxNewSize is large, the maximum OldSize will be less than
+ // what's requested on the command line and it should be reset
+ // ergonomically.
+ flag_value = 30 * M;
+ set_basic_flag_values();
+ FLAG_SET_CMDLINE(uintx, OldSize, flag_value);
+ FLAG_SET_CMDLINE(uintx, MaxNewSize, 170*M);
+ // Calculate what we expect the flag to be.
+ flag_value = MaxHeapSize - MaxNewSize;
+ verify_gen1_initial(flag_value);
}
- static void verify_min(size_t expected) {
+ static void verify_gen0_min(size_t expected) {
MarkSweepPolicy msp;
msp.initialize_all();
assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
}
- static void verify_initial(size_t expected) {
+ static void verify_gen0_initial(size_t expected) {
MarkSweepPolicy msp;
msp.initialize_all();
assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
}
- static void verify_scaled_initial(size_t initial_heap_size) {
+ static void verify_scaled_gen0_initial(size_t initial_heap_size) {
MarkSweepPolicy msp;
msp.initialize_all();
@@ -1053,6 +1076,21 @@
err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
}
+ static void verify_gen1_min(size_t expected) {
+ MarkSweepPolicy msp;
+ msp.initialize_all();
+
+ assert(msp.min_gen1_size() <= expected, err_msg("%zu > %zu", msp.min_gen1_size(), expected));
+ }
+
+ static void verify_gen1_initial(size_t expected) {
+ MarkSweepPolicy msp;
+ msp.initialize_all();
+
+ assert(msp.initial_gen1_size() == expected, err_msg("%zu != %zu", msp.initial_gen1_size(), expected));
+ }
+
+
private:
static size_t original_InitialHeapSize;
static size_t original_MaxHeapSize;
@@ -1061,6 +1099,15 @@
static size_t original_NewSize;
static size_t original_OldSize;
+ static void set_basic_flag_values() {
+ FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
+ FLAG_SET_ERGO(uintx, InitialHeapSize, 100 * M);
+ FLAG_SET_ERGO(uintx, OldSize, 4 * M);
+ FLAG_SET_ERGO(uintx, NewSize, 1 * M);
+ FLAG_SET_ERGO(uintx, MaxNewSize, 80 * M);
+ Arguments::set_min_heap_size(40 * M);
+ }
+
static void save_flags() {
original_InitialHeapSize = InitialHeapSize;
original_MaxHeapSize = MaxHeapSize;
@@ -1088,7 +1135,11 @@
size_t TestGenCollectorPolicy::original_OldSize = 0;
void TestNewSize_test() {
- TestGenCollectorPolicy::test();
+ TestGenCollectorPolicy::test_new_size();
+}
+
+void TestOldSize_test() {
+ TestGenCollectorPolicy::test_old_size();
}
#endif
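
Worked numbers for the new OldSize test above, under the flags that set_basic_flag_values() installs:

    MaxHeapSize = 180M, MaxNewSize (command line) = 170M
    largest old generation that still fits = MaxHeapSize - MaxNewSize
                                           = 180M - 170M = 10M

so the requested OldSize of 30M is expected to be reset ergonomically to 10M, which is exactly the flag_value the test verifies.
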
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -248,13 +248,13 @@
// Compute max heap alignment.
size_t compute_max_alignment();
- // Scale the base_size by NewRatio according to
- // result = base_size / (NewRatio + 1)
- // and align by min_alignment()
- size_t scale_by_NewRatio_aligned(size_t base_size);
+ // Scale the base_size by NewRatio according to
+ // result = base_size / (NewRatio + 1)
+ // and align by min_alignment()
+ size_t scale_by_NewRatio_aligned(size_t base_size);
- // Bound the value by the given maximum minus the min_alignment.
- size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
+ // Bound the value by the given maximum minus the min_alignment.
+ size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
public:
GenCollectorPolicy();
@@ -335,10 +335,6 @@
virtual CollectorPolicy::Name kind() {
return CollectorPolicy::TwoGenerationCollectorPolicyKind;
}
-
- // Returns true if gen0 sizes were adjusted
- bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr,
- const size_t heap_size);
};
class MarkSweepPolicy : public TwoGenerationCollectorPolicy {
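
The bound_minus_alignment declaration above is what the reworked sizing code leans on. A rough, hypothetical sketch of its contract, namely clamping a desired size so that at least one alignment unit of the bounding size is left over for the other generation (the real body lives in collectorPolicy.cpp and may differ in detail):

    #include <cstddef>

    static size_t bound_minus_alignment_sketch(size_t desired_size,
                                               size_t maximum_size,
                                               size_t alignment) {
      size_t max_minus = maximum_size - alignment;
      return desired_size < max_minus ? desired_size : max_minus;
    }

    // With a 120M initial heap and 1M alignment, a command-line NewSize of
    // 120M would be bounded back to 119M so an old generation still fits,
    // which is the effect of the new NewSize check in initialize_flags().
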
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -374,7 +374,7 @@
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
- const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
+ const size_t metadata_prev_used = MetaspaceAux::used_bytes();
print_heap_before_gc();
--- a/hotspot/src/share/vm/memory/metaspace.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1447,7 +1447,7 @@
uint current_shrink_factor = _shrink_factor;
_shrink_factor = 0;
- const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
+ const size_t used_after_gc = MetaspaceAux::capacity_bytes();
const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
@@ -2538,8 +2538,8 @@
// MetaspaceAux
-size_t MetaspaceAux::_allocated_capacity_words[] = {0, 0};
-size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
+size_t MetaspaceAux::_capacity_words[] = {0, 0};
+size_t MetaspaceAux::_used_words[] = {0, 0};
size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) {
VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
@@ -2552,38 +2552,38 @@
void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
- assert(words <= allocated_capacity_words(mdtype),
+ assert(words <= capacity_words(mdtype),
err_msg("About to decrement below 0: words " SIZE_FORMAT
- " is greater than _allocated_capacity_words[%u] " SIZE_FORMAT,
- words, mdtype, allocated_capacity_words(mdtype)));
- _allocated_capacity_words[mdtype] -= words;
+ " is greater than _capacity_words[%u] " SIZE_FORMAT,
+ words, mdtype, capacity_words(mdtype)));
+ _capacity_words[mdtype] -= words;
}
void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
assert_lock_strong(SpaceManager::expand_lock());
// Needs to be atomic
- _allocated_capacity_words[mdtype] += words;
+ _capacity_words[mdtype] += words;
}
void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) {
- assert(words <= allocated_used_words(mdtype),
+ assert(words <= used_words(mdtype),
err_msg("About to decrement below 0: words " SIZE_FORMAT
- " is greater than _allocated_used_words[%u] " SIZE_FORMAT,
- words, mdtype, allocated_used_words(mdtype)));
+ " is greater than _used_words[%u] " SIZE_FORMAT,
+ words, mdtype, used_words(mdtype)));
// For CMS deallocation of the Metaspaces occurs during the
// sweep which is a concurrent phase. Protection by the expand_lock()
// is not enough since allocation is on a per Metaspace basis
// and protected by the Metaspace lock.
jlong minus_words = (jlong) - (jlong) words;
- Atomic::add_ptr(minus_words, &_allocated_used_words[mdtype]);
+ Atomic::add_ptr(minus_words, &_used_words[mdtype]);
}
void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) {
- // _allocated_used_words tracks allocations for
+ // _used_words tracks allocations for
// each piece of metadata. Those allocations are
// generally done concurrently by different application
// threads so must be done atomically.
- Atomic::add_ptr(words, &_allocated_used_words[mdtype]);
+ Atomic::add_ptr(words, &_used_words[mdtype]);
}
size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
@@ -2630,16 +2630,16 @@
size_t MetaspaceAux::capacity_bytes_slow() {
#ifdef PRODUCT
- // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+ // Use capacity_bytes() in PRODUCT instead of this function.
guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
#endif
size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
- assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
- err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+ assert(capacity_bytes() == class_capacity + non_class_capacity,
+ err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT
" class_capacity + non_class_capacity " SIZE_FORMAT
" class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
- allocated_capacity_bytes(), class_capacity + non_class_capacity,
+ capacity_bytes(), class_capacity + non_class_capacity,
class_capacity, non_class_capacity));
return class_capacity + non_class_capacity;
@@ -2699,14 +2699,14 @@
"->" SIZE_FORMAT
"(" SIZE_FORMAT ")",
prev_metadata_used,
- allocated_used_bytes(),
+ used_bytes(),
reserved_bytes());
} else {
gclog_or_tty->print(" " SIZE_FORMAT "K"
"->" SIZE_FORMAT "K"
"(" SIZE_FORMAT "K)",
prev_metadata_used/K,
- allocated_used_bytes()/K,
+ used_bytes()/K,
reserved_bytes()/K);
}
@@ -2722,8 +2722,8 @@
"capacity " SIZE_FORMAT "K, "
"committed " SIZE_FORMAT "K, "
"reserved " SIZE_FORMAT "K",
- allocated_used_bytes()/K,
- allocated_capacity_bytes()/K,
+ used_bytes()/K,
+ capacity_bytes()/K,
committed_bytes()/K,
reserved_bytes()/K);
@@ -2734,8 +2734,8 @@
"capacity " SIZE_FORMAT "K, "
"committed " SIZE_FORMAT "K, "
"reserved " SIZE_FORMAT "K",
- allocated_used_bytes(ct)/K,
- allocated_capacity_bytes(ct)/K,
+ used_bytes(ct)/K,
+ capacity_bytes(ct)/K,
committed_bytes(ct)/K,
reserved_bytes(ct)/K);
}
@@ -2837,42 +2837,42 @@
void MetaspaceAux::verify_capacity() {
#ifdef ASSERT
- size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+ size_t running_sum_capacity_bytes = capacity_bytes();
// For purposes of the running sum of capacity, verify against capacity
size_t capacity_in_use_bytes = capacity_bytes_slow();
assert(running_sum_capacity_bytes == capacity_in_use_bytes,
- err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+ err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT
" capacity_bytes_slow()" SIZE_FORMAT,
running_sum_capacity_bytes, capacity_in_use_bytes));
for (Metaspace::MetadataType i = Metaspace::ClassType;
i < Metaspace:: MetadataTypeCount;
i = (Metaspace::MetadataType)(i + 1)) {
size_t capacity_in_use_bytes = capacity_bytes_slow(i);
- assert(allocated_capacity_bytes(i) == capacity_in_use_bytes,
- err_msg("allocated_capacity_bytes(%u) " SIZE_FORMAT
+ assert(capacity_bytes(i) == capacity_in_use_bytes,
+ err_msg("capacity_bytes(%u) " SIZE_FORMAT
" capacity_bytes_slow(%u)" SIZE_FORMAT,
- i, allocated_capacity_bytes(i), i, capacity_in_use_bytes));
+ i, capacity_bytes(i), i, capacity_in_use_bytes));
}
#endif
}
void MetaspaceAux::verify_used() {
#ifdef ASSERT
- size_t running_sum_used_bytes = allocated_used_bytes();
+ size_t running_sum_used_bytes = used_bytes();
// For purposes of the running sum of used, verify against used
size_t used_in_use_bytes = used_bytes_slow();
- assert(allocated_used_bytes() == used_in_use_bytes,
- err_msg("allocated_used_bytes() " SIZE_FORMAT
+ assert(used_bytes() == used_in_use_bytes,
+ err_msg("used_bytes() " SIZE_FORMAT
" used_bytes_slow()" SIZE_FORMAT,
- allocated_used_bytes(), used_in_use_bytes));
+ used_bytes(), used_in_use_bytes));
for (Metaspace::MetadataType i = Metaspace::ClassType;
i < Metaspace:: MetadataTypeCount;
i = (Metaspace::MetadataType)(i + 1)) {
size_t used_in_use_bytes = used_bytes_slow(i);
- assert(allocated_used_bytes(i) == used_in_use_bytes,
- err_msg("allocated_used_bytes(%u) " SIZE_FORMAT
+ assert(used_bytes(i) == used_in_use_bytes,
+ err_msg("used_bytes(%u) " SIZE_FORMAT
" used_bytes_slow(%u)" SIZE_FORMAT,
- i, allocated_used_bytes(i), i, used_in_use_bytes));
+ i, used_bytes(i), i, used_in_use_bytes));
}
#endif
}
--- a/hotspot/src/share/vm/memory/metaspace.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -280,11 +280,11 @@
// allocated to a Metaspace. This is used instead of
// iterating over all the classloaders. One for each
// type of Metadata
- static size_t _allocated_capacity_words[Metaspace:: MetadataTypeCount];
- // Running sum of space in all Metachunks that have
+ static size_t _capacity_words[Metaspace:: MetadataTypeCount];
+ // Running sum of space in all Metachunks that
// are being used for metadata. One for each
// type of Metadata.
- static size_t _allocated_used_words[Metaspace:: MetadataTypeCount];
+ static size_t _used_words[Metaspace:: MetadataTypeCount];
public:
// Decrement and increment _allocated_capacity_words
@@ -308,32 +308,32 @@
static size_t free_chunks_total_bytes();
static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
- static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
- return _allocated_capacity_words[mdtype];
+ static size_t capacity_words(Metaspace::MetadataType mdtype) {
+ return _capacity_words[mdtype];
}
- static size_t allocated_capacity_words() {
- return allocated_capacity_words(Metaspace::NonClassType) +
- allocated_capacity_words(Metaspace::ClassType);
+ static size_t capacity_words() {
+ return capacity_words(Metaspace::NonClassType) +
+ capacity_words(Metaspace::ClassType);
}
- static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
- return allocated_capacity_words(mdtype) * BytesPerWord;
+ static size_t capacity_bytes(Metaspace::MetadataType mdtype) {
+ return capacity_words(mdtype) * BytesPerWord;
}
- static size_t allocated_capacity_bytes() {
- return allocated_capacity_words() * BytesPerWord;
+ static size_t capacity_bytes() {
+ return capacity_words() * BytesPerWord;
}
- static size_t allocated_used_words(Metaspace::MetadataType mdtype) {
- return _allocated_used_words[mdtype];
+ static size_t used_words(Metaspace::MetadataType mdtype) {
+ return _used_words[mdtype];
}
- static size_t allocated_used_words() {
- return allocated_used_words(Metaspace::NonClassType) +
- allocated_used_words(Metaspace::ClassType);
+ static size_t used_words() {
+ return used_words(Metaspace::NonClassType) +
+ used_words(Metaspace::ClassType);
}
- static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
- return allocated_used_words(mdtype) * BytesPerWord;
+ static size_t used_bytes(Metaspace::MetadataType mdtype) {
+ return used_words(mdtype) * BytesPerWord;
}
- static size_t allocated_used_bytes() {
- return allocated_used_words() * BytesPerWord;
+ static size_t used_bytes() {
+ return used_words() * BytesPerWord;
}
static size_t free_bytes();
--- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -66,7 +66,7 @@
MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
size_t MetaspaceCounters::used() {
- return MetaspaceAux::allocated_used_bytes();
+ return MetaspaceAux::used_bytes();
}
size_t MetaspaceCounters::capacity() {
@@ -98,7 +98,7 @@
MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
size_t CompressedClassSpaceCounters::used() {
- return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+ return MetaspaceAux::used_bytes(Metaspace::ClassType);
}
size_t CompressedClassSpaceCounters::capacity() {
--- a/hotspot/src/share/vm/oops/constantPool.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/constantPool.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -144,6 +144,10 @@
// CDS support. Create a new resolved_references array.
void ConstantPool::restore_unshareable_info(TRAPS) {
+ // Only create the new resolved references array and lock if it hasn't been
+ // attempted before
+ if (resolved_references() != NULL) return;
+
// restore the C++ vtable from the shared archive
restore_vtable();
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1289,17 +1289,18 @@
}
-void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle mirror, TRAPS) {
instanceKlassHandle h_this(THREAD, this);
- do_local_static_fields_impl(h_this, f, CHECK);
+ do_local_static_fields_impl(h_this, f, mirror, CHECK);
}
-void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
+void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_k,
+ void f(fieldDescriptor* fd, Handle, TRAPS), Handle mirror, TRAPS) {
for (JavaFieldStream fs(this_k()); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
fieldDescriptor& fd = fs.field_descriptor();
- f(&fd, CHECK);
+ f(&fd, mirror, CHECK);
}
}
}
@@ -2240,9 +2241,7 @@
int num_methods = methods->length();
for (int index2 = 0; index2 < num_methods; ++index2) {
methodHandle m(THREAD, methods->at(index2));
- m()->link_method(m, CHECK);
- // restore method's vtable by calling a virtual function
- m->restore_vtable();
+ m->restore_unshareable_info(CHECK);
}
if (JvmtiExport::has_redefined_a_class()) {
// Reinitialize vtable because RedefineClasses may have changed some
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -802,7 +802,7 @@
// Iterators
void do_local_static_fields(FieldClosure* cl);
void do_nonstatic_fields(FieldClosure* cl); // including inherited fields
- void do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS);
+ void do_local_static_fields(void f(fieldDescriptor*, Handle, TRAPS), Handle, TRAPS);
void methods_do(void f(Method* method));
void array_klasses_do(void f(Klass* k));
@@ -1010,7 +1010,7 @@
static void set_initialization_state_and_notify_impl (instanceKlassHandle this_k, ClassState state, TRAPS);
static void call_class_initializer_impl (instanceKlassHandle this_k, TRAPS);
static Klass* array_klass_impl (instanceKlassHandle this_k, bool or_null, int n, TRAPS);
- static void do_local_static_fields_impl (instanceKlassHandle this_k, void f(fieldDescriptor* fd, TRAPS), TRAPS);
+ static void do_local_static_fields_impl (instanceKlassHandle this_k, void f(fieldDescriptor* fd, Handle, TRAPS), Handle, TRAPS);
/* jni_id_for_impl for jfieldID only */
static JNIid* jni_id_for_impl (instanceKlassHandle this_k, int offset);
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -367,7 +367,12 @@
// Query before forming handle.
int size = instance_size(k);
KlassHandle h_k(THREAD, this);
- instanceOop i = (instanceOop) CollectedHeap::Class_obj_allocate(h_k, size, k, CHECK_NULL);
+ instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+
+ // Since mirrors can be variable sized because of the static fields, store
+ // the size in the mirror itself.
+ java_lang_Class::set_oop_size(i, size);
+
return i;
}
--- a/hotspot/src/share/vm/oops/klass.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/klass.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -475,12 +475,8 @@
}
void Klass::remove_unshareable_info() {
- if (!DumpSharedSpaces) {
- // Clean up after OOM during class loading
- if (class_loader_data() != NULL) {
- class_loader_data()->remove_class(this);
- }
- }
+ assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
+
set_subklass(NULL);
set_next_sibling(NULL);
// Clear the java mirror
@@ -492,17 +488,26 @@
}
void Klass::restore_unshareable_info(TRAPS) {
- ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
- // Restore class_loader_data to the null class loader data
- set_class_loader_data(loader_data);
+ // If an exception happened during CDS restore, some of these fields may already be
+ // set. We leave the class on the CLD list, even if incomplete so that we don't
+ // modify the CLD list outside a safepoint.
+ if (class_loader_data() == NULL) {
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+ // Restore class_loader_data to the null class loader data
+ set_class_loader_data(loader_data);
- // Add to null class loader list first before creating the mirror
- // (same order as class file parsing)
- loader_data->add_class(this);
+ // Add to null class loader list first before creating the mirror
+ // (same order as class file parsing)
+ loader_data->add_class(this);
+ }
// Recreate the class mirror. The protection_domain is always null for
// boot loader, for now.
- java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+ // Only recreate it if not present. A previous attempt to restore may have
+ // gotten an OOM later but keep the mirror if it was created.
+ if (java_mirror() == NULL) {
+ java_lang_Class::create_mirror(this, Handle(NULL), CHECK);
+ }
}
Klass* Klass::array_klass_or_null(int rank) {
--- a/hotspot/src/share/vm/oops/method.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -903,6 +903,19 @@
return adapter->get_c2i_entry();
}
+void Method::restore_unshareable_info(TRAPS) {
+ // Since restore_unshareable_info can be called more than once for a method, don't
+ // redo any work. If this field is restored, there is nothing to do.
+ if (_from_compiled_entry == NULL) {
+ // restore method's vtable by calling a virtual function
+ restore_vtable();
+
+ methodHandle mh(THREAD, this);
+ link_method(mh, CHECK);
+ }
+}
+
+
// The verified_code_entry() must be called when an invoke is resolved
// on this method.
--- a/hotspot/src/share/vm/oops/method.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -123,6 +123,8 @@
void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
bool is_method() const volatile { return true; }
+ void restore_unshareable_info(TRAPS);
+
// accessors for instance variables
ConstMethod* constMethod() const { return _constMethod; }
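
The restore_unshareable_info() changes above (ConstantPool, Klass, Method) all follow the same pattern: guard the expensive re-creation with a null check so that a restore retried after an earlier failure (for example an OOM while creating the mirror) keeps the work already completed and skips the rest. A minimal standalone Java sketch of that guard, using hypothetical names rather than HotSpot types:

    // Illustrative sketch only (hypothetical class, not HotSpot code): the same
    // "restore is re-runnable" guard used by the hunks above.
    public class RestorableEntry {
        private Object mirror;              // null until a restore attempt succeeds

        // May be called again after an earlier attempt failed part-way through;
        // completed work is kept, not redone.
        public void restoreUnshareableInfo() {
            if (mirror != null) {
                return;                     // already restored, nothing to do
            }
            mirror = createMirror();        // may throw; field stays null on failure
        }

        private Object createMirror() {
            return new Object();            // stand-in for the expensive re-creation
        }

        public static void main(String[] args) {
            RestorableEntry e = new RestorableEntry();
            e.restoreUnshareableInfo();     // first call does the work
            e.restoreUnshareableInfo();     // second call is a no-op
        }
    }
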
--- a/hotspot/src/share/vm/prims/jni.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/prims/jni.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -3877,6 +3877,7 @@
void TestMetachunk_test();
void TestVirtualSpaceNode_test();
void TestNewSize_test();
+void TestOldSize_test();
void TestKlass_test();
void TestBitMap_test();
#if INCLUDE_ALL_GCS
@@ -3903,6 +3904,7 @@
run_unit_test(AltHashing::test_alt_hash());
run_unit_test(test_loggc_filename());
run_unit_test(TestNewSize_test());
+ run_unit_test(TestOldSize_test());
run_unit_test(TestKlass_test());
run_unit_test(TestBitMap_test());
#if INCLUDE_VM_STRUCTS
--- a/hotspot/src/share/vm/runtime/arguments.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -307,6 +307,9 @@
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
#endif // PRODUCT
{ "UseVMInterruptibleIO", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UseBoundThreads", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+ { "DefaultThreadPriority", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+ { "NoYieldsInMicrolock", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@@ -2078,17 +2081,6 @@
// Note: Needs platform-dependent factoring.
bool status = true;
- // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
- // builds so the cost of stack banging can be measured.
-#if (defined(PRODUCT) && defined(SOLARIS))
- if (!UseBoundThreads && !UseStackBanging) {
- jio_fprintf(defaultStream::error_stream(),
- "-UseStackBanging conflicts with -UseBoundThreads\n");
-
- status = false;
- }
-#endif
-
if (TLABRefillWasteFraction == 0) {
jio_fprintf(defaultStream::error_stream(),
"TLABRefillWasteFraction should be a denominator, "
--- a/hotspot/src/share/vm/runtime/os.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/os.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -929,6 +929,10 @@
}
void os::print_date_and_time(outputStream *st) {
+ const int secs_per_day = 86400;
+ const int secs_per_hour = 3600;
+ const int secs_per_min = 60;
+
time_t tloc;
(void)time(&tloc);
st->print("time: %s", ctime(&tloc)); // ctime adds newline.
@@ -937,7 +941,17 @@
// NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
// Linux. Must be a bug in glibc ? Workaround is to round "t" to int
// before printf. We lost some precision, but who cares?
- st->print_cr("elapsed time: %d seconds", (int)t);
+ int eltime = (int)t; // elapsed time in seconds
+
+ // print elapsed time in a human-readable format:
+ int eldays = eltime / secs_per_day;
+ int day_secs = eldays * secs_per_day;
+ int elhours = (eltime - day_secs) / secs_per_hour;
+ int hour_secs = elhours * secs_per_hour;
+ int elmins = (eltime - day_secs - hour_secs) / secs_per_min;
+ int minute_secs = elmins * secs_per_min;
+ int elsecs = (eltime - day_secs - hour_secs - minute_secs);
+ st->print_cr("elapsed time: %d seconds (%dd %dh %dm %ds)", eltime, eldays, elhours, elmins, elsecs);
}
// moved from debug.cpp (used to be find()) but still called from there
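
The new print_date_and_time() output breaks the elapsed seconds down with plain integer division and subtraction. A standalone Java sketch of the same arithmetic (not VM code), with a worked example: 93784 seconds prints as 1d 2h 3m 4s.

    // Standalone illustration of the elapsed-time breakdown added above.
    public class ElapsedTime {
        private static final int SECS_PER_DAY  = 86400;
        private static final int SECS_PER_HOUR = 3600;
        private static final int SECS_PER_MIN  = 60;

        static String format(int eltime) {
            int eldays  = eltime / SECS_PER_DAY;
            int elhours = (eltime - eldays * SECS_PER_DAY) / SECS_PER_HOUR;
            int elmins  = (eltime - eldays * SECS_PER_DAY
                                  - elhours * SECS_PER_HOUR) / SECS_PER_MIN;
            int elsecs  = eltime - eldays * SECS_PER_DAY
                                 - elhours * SECS_PER_HOUR - elmins * SECS_PER_MIN;
            return String.format("elapsed time: %d seconds (%dd %dh %dm %ds)",
                                 eltime, eldays, elhours, elmins, elsecs);
        }

        public static void main(String[] args) {
            // 93784 s = 1 day + 2 hours + 3 minutes + 4 seconds
            System.out.println(format(93784)); // elapsed time: 93784 seconds (1d 2h 3m 4s)
        }
    }
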
--- a/hotspot/src/share/vm/runtime/os.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/os.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -450,8 +450,8 @@
// yield that can be used in lieu of blocking.
} ;
static YieldResult NakedYield () ;
- static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
- static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing
+ static void yield_all(); // Yields to all other threads including lower priority
+ // (for the default scheduling policy)
static OSReturn set_priority(Thread* thread, ThreadPriority priority);
static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
--- a/hotspot/src/share/vm/runtime/safepoint.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -319,7 +319,7 @@
if (steps < DeferThrSuspendLoopCount) {
os::NakedYield() ;
} else {
- os::yield_all(steps) ;
+ os::yield_all() ;
// Alternately, the VM thread could transiently depress its scheduling priority or
// transiently increase the priority of the tardy mutator(s).
}
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -924,12 +924,6 @@
JRT_END
#endif // !PRODUCT
-
-JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
- os::yield_all(attempts);
-JRT_END
-
-
JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
assert(obj->is_oop(), "must be a valid oop");
assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -253,9 +253,6 @@
// bytecode tracing is only used by the TraceBytecodes
static intptr_t trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2) PRODUCT_RETURN0;
- // Used to back off a spin lock that is under heavy contention
- static void yield_all(JavaThread* thread, int attempts = 0);
-
static oop retrieve_receiver( Symbol* sig, frame caller );
static void register_finalizer(JavaThread* thread, oopDesc* obj);
--- a/hotspot/src/share/vm/runtime/thread.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -1394,8 +1394,8 @@
void JavaThread::initialize() {
// Initialize fields
- // Set the claimed par_id to -1 (ie not claiming any par_ids)
- set_claimed_par_id(-1);
+ // Set the claimed par_id to UINT_MAX (ie not claiming any par_ids)
+ set_claimed_par_id(UINT_MAX);
set_saved_exception_pc(NULL);
set_threadObj(NULL);
--- a/hotspot/src/share/vm/runtime/thread.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -1778,12 +1778,12 @@
void set_done_attaching_via_jni() { _jni_attach_state = _attached_via_jni; OrderAccess::fence(); }
private:
// This field is used to determine if a thread has claimed
- // a par_id: it is -1 if the thread has not claimed a par_id;
+ // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
// otherwise its value is the par_id that has been claimed.
- int _claimed_par_id;
+ uint _claimed_par_id;
public:
- int get_claimed_par_id() { return _claimed_par_id; }
- void set_claimed_par_id(int id) { _claimed_par_id = id;}
+ uint get_claimed_par_id() { return _claimed_par_id; }
+ void set_claimed_par_id(uint id) { _claimed_par_id = id;}
};
// Inline implementation of JavaThread::current
--- a/hotspot/src/share/vm/runtime/vframe.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vframe.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -321,24 +321,38 @@
}
}
-StackValueCollection* interpretedVFrame::expressions() const {
- int length = fr().interpreter_frame_expression_stack_size();
- if (method()->is_native()) {
- // If the method is native, there is no expression stack
- length = 0;
+StackValueCollection* interpretedVFrame::expressions() const {
+
+ InterpreterOopMap oop_mask;
+
+ if (!method()->is_native()) {
+ // Get oopmap describing oops and int for current bci
+ if (TraceDeoptimization && Verbose) {
+ methodHandle m_h(method());
+ OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
+ } else {
+ method()->mask_for(bci(), &oop_mask);
+ }
+ }
+
+ // If the bci is a call instruction, i.e. any of the invoke* instructions,
+ // the InterpreterOopMap does not include expression/operand stack liveness
+ // info in the oop_mask/bit_mask. This can lead to a discrepancy of what
+ // is actually on the expression stack compared to what is given by the
+ // oop_map. We need to use the length reported in the oop_map.
+ int length = oop_mask.expression_stack_size();
+
+ assert(fr().interpreter_frame_expression_stack_size() >= length,
+ "error in expression stack!");
+
+ StackValueCollection* result = new StackValueCollection(length);
+
+ if (0 == length) {
+ return result;
}
int nof_locals = method()->max_locals();
- StackValueCollection* result = new StackValueCollection(length);
- InterpreterOopMap oop_mask;
- // Get oopmap describing oops and int for current bci
- if (TraceDeoptimization && Verbose) {
- methodHandle m_h(method());
- OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
- } else {
- method()->mask_for(bci(), &oop_mask);
- }
// handle expressions
for(int i=0; i < length; i++) {
// Find stack location
--- a/hotspot/src/share/vm/runtime/vmThread.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -305,6 +305,9 @@
_terminate_lock->notify();
}
+ // Thread destructor usually does this.
+ ThreadLocalStorage::set_thread(NULL);
+
// Deletion must be done synchronously by the JNI DestroyJavaVM thread
// so that the VMThread deletion completes before the main thread frees
// up the CodeHeap.
--- a/hotspot/src/share/vm/services/memoryPool.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/services/memoryPool.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -268,7 +268,7 @@
}
size_t MetaspacePool::used_in_bytes() {
- return MetaspaceAux::allocated_used_bytes();
+ return MetaspaceAux::used_bytes();
}
size_t MetaspacePool::calculate_max_size() const {
@@ -280,7 +280,7 @@
MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
size_t CompressedKlassSpacePool::used_in_bytes() {
- return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+ return MetaspaceAux::used_bytes(Metaspace::ClassType);
}
MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
--- a/hotspot/src/share/vm/trace/trace.xml Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/trace/trace.xml Sun Apr 13 23:39:22 2014 -0700
@@ -185,7 +185,7 @@
</event>
<struct id="MetaspaceSizes">
- <value type="BYTES64" field="capacity" label="Capacity" description="Total available memory to allocate in" />
+ <value type="BYTES64" field="committed" label="Committed" description="Committed memory for this space" />
<value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
<value type="BYTES64" field="reserved" label="Reserved" description="Reserved memory for this space" />
</struct>
--- a/hotspot/src/share/vm/utilities/growableArray.hpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp Sun Apr 13 23:39:22 2014 -0700
@@ -147,6 +147,9 @@
}
};
+template<class E> class GrowableArrayIterator;
+template<class E, class UnaryPredicate> class GrowableArrayFilterIterator;
+
template<class E> class GrowableArray : public GenericGrowableArray {
friend class VMStructs;
@@ -243,6 +246,14 @@
return _data[_len-1];
}
+ GrowableArrayIterator<E> begin() const {
+ return GrowableArrayIterator<E>(this, 0);
+ }
+
+ GrowableArrayIterator<E> end() const {
+ return GrowableArrayIterator<E>(this, length());
+ }
+
void push(const E& elem) { append(elem); }
E pop() {
@@ -412,4 +423,83 @@
tty->print("}\n");
}
+// Custom STL-style iterator to iterate over GrowableArrays
+// It is constructed by invoking GrowableArray::begin() and GrowableArray::end()
+template<class E> class GrowableArrayIterator : public StackObj {
+ friend class GrowableArray<E>;
+ template<class F, class UnaryPredicate> friend class GrowableArrayFilterIterator;
+
+ private:
+ const GrowableArray<E>* _array; // GrowableArray we iterate over
+ int _position; // The current position in the GrowableArray
+
+ // Private constructor used in GrowableArray::begin() and GrowableArray::end()
+ GrowableArrayIterator(const GrowableArray<E>* array, int position) : _array(array), _position(position) {
+ assert(0 <= position && position <= _array->length(), "illegal position");
+ }
+
+ public:
+ GrowableArrayIterator<E>& operator++() { ++_position; return *this; }
+ E operator*() { return _array->at(_position); }
+
+ bool operator==(const GrowableArrayIterator<E>& rhs) {
+ assert(_array == rhs._array, "iterator belongs to different array");
+ return _position == rhs._position;
+ }
+
+ bool operator!=(const GrowableArrayIterator<E>& rhs) {
+ assert(_array == rhs._array, "iterator belongs to different array");
+ return _position != rhs._position;
+ }
+};
+
+// Custom STL-style iterator to iterate over elements of a GrowableArray that satisfy a given predicate
+template<class E, class UnaryPredicate> class GrowableArrayFilterIterator : public StackObj {
+ friend class GrowableArray<E>;
+
+ private:
+ const GrowableArray<E>* _array; // GrowableArray we iterate over
+ int _position; // Current position in the GrowableArray
+ UnaryPredicate _predicate; // Unary predicate the elements of the GrowableArray should satisfy
+
+ public:
+ GrowableArrayFilterIterator(const GrowableArrayIterator<E>& begin, UnaryPredicate filter_predicate)
+ : _array(begin._array), _position(begin._position), _predicate(filter_predicate) {
+ // Advance to first element satisfying the predicate
+ while(_position != _array->length() && !_predicate(_array->at(_position))) {
+ ++_position;
+ }
+ }
+
+ GrowableArrayFilterIterator<E, UnaryPredicate>& operator++() {
+ do {
+ // Advance to next element satisfying the predicate
+ ++_position;
+ } while(_position != _array->length() && !_predicate(_array->at(_position)));
+ return *this;
+ }
+
+ E operator*() { return _array->at(_position); }
+
+ bool operator==(const GrowableArrayIterator<E>& rhs) {
+ assert(_array == rhs._array, "iterator belongs to different array");
+ return _position == rhs._position;
+ }
+
+ bool operator!=(const GrowableArrayIterator<E>& rhs) {
+ assert(_array == rhs._array, "iterator belongs to different array");
+ return _position != rhs._position;
+ }
+
+ bool operator==(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) {
+ assert(_array == rhs._array, "iterator belongs to different array");
+ return _position == rhs._position;
+ }
+
+ bool operator!=(const GrowableArrayFilterIterator<E, UnaryPredicate>& rhs) {
+ assert(_array == rhs._array, "iterator belongs to different array");
+ return _position != rhs._position;
+ }
+};
+
#endif // SHARE_VM_UTILITIES_GROWABLEARRAY_HPP
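
The filter iterator added here skips non-matching elements twice: once in the constructor and once after every increment, so dereferencing always yields an element that satisfies the predicate. A Java analogue of that pattern (illustration only, built on java.util collections rather than GrowableArray):

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import java.util.NoSuchElementException;
    import java.util.function.Predicate;

    // Java analogue of GrowableArrayFilterIterator: advance past non-matching
    // elements on construction and after every step.
    public class FilterIteratorDemo<E> implements Iterator<E> {
        private final List<E> array;
        private final Predicate<E> predicate;
        private int position;

        public FilterIteratorDemo(List<E> array, Predicate<E> predicate) {
            this.array = array;
            this.predicate = predicate;
            this.position = 0;
            skipToMatch();                    // mirrors the C++ constructor loop
        }

        private void skipToMatch() {
            while (position != array.size() && !predicate.test(array.get(position))) {
                ++position;
            }
        }

        @Override
        public boolean hasNext() {
            return position != array.size();  // same end-of-array test as comparing against end()
        }

        @Override
        public E next() {
            if (!hasNext()) throw new NoSuchElementException();
            E current = array.get(position);
            ++position;                       // mirrors operator++: step, then re-filter
            skipToMatch();
            return current;
        }

        public static void main(String[] args) {
            Iterator<Integer> evens =
                new FilterIteratorDemo<>(Arrays.asList(1, 2, 3, 4, 5, 6), n -> n % 2 == 0);
            while (evens.hasNext()) {
                System.out.println(evens.next()); // prints 2, 4, 6
            }
        }
    }
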
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/src/share/vm/utilities/taskqueue.cpp Sun Apr 13 23:39:22 2014 -0700
@@ -239,8 +239,8 @@
#ifdef TRACESPINNING
void ParallelTaskTerminator::print_termination_counts() {
- gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: " UINT32_FORMAT
- " Total spins: " UINT32_FORMAT " Total peeks: " UINT32_FORMAT,
+ gclog_or_tty->print_cr("ParallelTaskTerminator Total yields: %u"
+ " Total spins: %u Total peeks: %u",
total_yields(),
total_spins(),
total_peeks());
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/CorrectnessTest.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CorrectnessTest
+ * @bug 8038418
+ * @library /testlibrary /testlibrary/whitebox
+ * @compile execution/TypeConflict.java execution/TypeProfile.java
+ * execution/MethodHandleDelegate.java
+ * @build CorrectnessTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation
+ * -XX:CompileCommand=exclude,execution/*::methodNotToCompile
+ * -XX:CompileCommand=dontinline,scenarios/Scenario::collectReturnType
+ * CorrectnessTest RETURN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation
+ * -XX:CompileCommand=exclude,execution/*::methodNotToCompile
+ * -XX:CompileCommand=dontinline,scenarios/Scenario::collectReturnType
+ * CorrectnessTest PARAMETERS
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation
+ * -XX:CompileCommand=exclude,execution/*::methodNotToCompile
+ * -XX:CompileCommand=dontinline,scenarios/Scenario::collectReturnType
+ * CorrectnessTest ARGUMENTS
+ * @summary Tests correctness of type usage with type profiling and speculations
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.Platform;
+import execution.Execution;
+import execution.MethodHandleDelegate;
+import execution.TypeConflict;
+import execution.TypeProfile;
+import hierarchies.*;
+import scenarios.*;
+import sun.hotspot.WhiteBox;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiFunction;
+
+public class CorrectnessTest {
+ private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+ public static void main(String[] args) {
+ if (!Platform.isServer()) {
+ System.out.println("ALL TESTS SKIPPED");
+ }
+ Asserts.assertGTE(args.length, 1);
+ ProfilingType profilingType = ProfilingType.valueOf(args[0]);
+ if (runTests(profilingType)) {
+ System.out.println("ALL TESTS PASSED");
+ } else {
+ throw new RuntimeException("SOME TESTS FAILED");
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public static boolean runTests(ProfilingType profilingType) {
+ boolean result = true;
+
+ List<Execution> executionList = new ArrayList<>();
+ executionList.add(new TypeConflict());
+ executionList.add(new TypeProfile());
+ for (int i = 0, n = executionList.size(); i < n; i++) {
+ executionList.add(new MethodHandleDelegate(executionList.get(i)));
+ }
+
+ List<TypeHierarchy> hierarchyList = new ArrayList<>();
+ hierarchyList.add(new DefaultMethodInterface.Hierarchy());
+ hierarchyList.add(new DefaultMethodInterface2.Hierarchy());
+ hierarchyList.add(new Linear.Hierarchy());
+ hierarchyList.add(new Linear2.Hierarchy());
+ hierarchyList.add(new OneRank.Hierarchy());
+ for (int i = 0, n = hierarchyList.size(); i < n; i++) {
+ hierarchyList.add(new NullableType(hierarchyList.get(i)));
+ }
+
+ List<BiFunction<ProfilingType, TypeHierarchy, Scenario<?, ?>>> testCasesConstructors
+ = new ArrayList<>();
+ testCasesConstructors.add(ArrayCopy::new);
+ testCasesConstructors.add(ArrayReferenceStore::new);
+ testCasesConstructors.add(ClassIdentity::new);
+ testCasesConstructors.add(ClassInstanceOf::new);
+ testCasesConstructors.add(ClassIsInstance::new);
+ testCasesConstructors.add(ReceiverAtInvokes::new);
+ testCasesConstructors.add(CheckCast::new);
+
+ for (TypeHierarchy hierarchy : hierarchyList) {
+ for (BiFunction<ProfilingType, TypeHierarchy, Scenario<?, ?>> constructor : testCasesConstructors) {
+ for (Execution execution : executionList) {
+ Scenario<?, ?> scenario = constructor.apply(profilingType, hierarchy);
+ if (scenario.isApplicable()) {
+ result &= executeTest(hierarchy, execution, scenario);
+ }
+ }
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Executes test case
+ *
+ * @param hierarchy type hierarchy for the test
+ * @param execution execution scenario
+ * @param scenario test scenario executed with given Execution
+ */
+ private static boolean executeTest(TypeHierarchy hierarchy, Execution execution, Scenario<?, ?> scenario) {
+ boolean testCaseResult = false;
+ String testName = hierarchy.getClass().getName() + " :: " + scenario.getName() + " @ " + execution.getName();
+ clearAllMethodsState(scenario.getClass());
+ try {
+ execution.execute(scenario);
+ testCaseResult = true;
+ } catch (Exception e) {
+ System.err.println(testName + " failed with exception " + e);
+ e.printStackTrace();
+ }
+ System.out.println((testCaseResult ? "PASSED: " : "FAILED: ") + testName);
+ return testCaseResult;
+ }
+
+ private static void clearAllMethodsState(Class aClass) {
+ while (aClass != null) {
+ for (Method m : aClass.getDeclaredMethods()) {
+ WHITE_BOX.clearMethodState(m);
+ }
+ aClass = aClass.getSuperclass();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/OffTest.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test CorrectnessTest
+ * @bug 8038418
+ * @library /testlibrary /testlibrary/whitebox
+ * @compile execution/TypeConflict.java execution/TypeProfile.java
+ * execution/MethodHandleDelegate.java
+ * @build CorrectnessTest
+ * @build OffTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/timeout=1200 OffTest
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import scenarios.ProfilingType;
+
+import java.util.Random;
+
+public class OffTest {
+ private static final String[] OPTIONS = {
+ "-Xbootclasspath/a:.",
+ "-XX:+IgnoreUnrecognizedVMOptions",
+ "-XX:+UnlockExperimentalVMOptions",
+ "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:+WhiteBoxAPI",
+ "-XX:CompileCommand=exclude,execution/*::methodNotToCompile",
+ "-XX:CompileCommand=dontinline,scenarios/Scenario::collectReturnType",
+ "", // -XX:TypeProfileLevel=?
+ "", // -XX:?UseTypeSpeculation
+ CorrectnessTest.class.getName(),
+ "", // ProfilingType.name()
+ };
+
+ private static final String TYPE_PROFILE_LEVEL = "TypeProfileLevel";
+ private static final String USE_TYPE_SPECULATION = "UseTypeSpeculation";
+ private static final int TYPE_PROFILE_LEVEL_LENGTH = 3;
+ private static final int TYPE_PROFILE_LEVEL_BOUND = 3;
+ private static final int DEFAULT_COUNT = 10;
+ private static final int PROFILING_TYPE_INDEX = OPTIONS.length - 1;
+ private static final int TYPE_PROFILE_INDEX = OPTIONS.length - 4;
+ private static final int USE_TYPE_SPECULATION_INDEX = OPTIONS.length - 3;
+ private static final Random RNG;
+
+ static {
+ String str = System.getProperty("seed");
+ long seed = str != null ? Long.parseLong(str) : new Random().nextLong();
+ RNG = new Random(seed);
+ System.out.printf("-Dseed=%d%n", seed);
+ }
+
+ public static void main(String[] args) throws Exception {
+ int count = DEFAULT_COUNT;
+ if (args.length > 0) {
+ count = Integer.parseInt(args[0]) ;
+ }
+ for (int i = 0; i < count; ++i) {
+ runTest();
+ }
+ }
+
+ private static void runTest() throws Exception {
+ String useTypeSpeculation = "-XX:" + (RNG.nextBoolean() ? "+" : "-") + USE_TYPE_SPECULATION;
+ String typeProfileLevel = "-XX:" + TYPE_PROFILE_LEVEL + "=" + randomTypeProfileLevel();
+ ProfilingType type = randomProfileType();
+ OPTIONS[TYPE_PROFILE_INDEX] = typeProfileLevel;
+ OPTIONS[USE_TYPE_SPECULATION_INDEX] = useTypeSpeculation;
+ OPTIONS[PROFILING_TYPE_INDEX] = type.name();
+ ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(/* addTestVmOptions= */ true, OPTIONS);
+ OutputAnalyzer outputAnalyzer = new OutputAnalyzer(processBuilder.start());
+ outputAnalyzer.shouldHaveExitValue(0);
+ }
+
+ private static ProfilingType randomProfileType() {
+ ProfilingType[] value = ProfilingType.values();
+ return value[RNG.nextInt(value.length)];
+ }
+
+ private static String randomTypeProfileLevel() {
+ StringBuilder stringBuilder = new StringBuilder();
+ for (int i = 0; i < TYPE_PROFILE_LEVEL_LENGTH; ++i) {
+ stringBuilder.append(RNG.nextInt(TYPE_PROFILE_LEVEL_BOUND));
+ }
+ return stringBuilder.toString();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/execution/Execution.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package execution;
+
+import hierarchies.TypeHierarchy;
+import scenarios.Scenario;
+
+/**
+ * Execution scenario represents test methods execution type.
+ * @param <T> parameter type
+ * @param <R> result Type
+ */
+public interface Execution<T extends TypeHierarchy.I, R> {
+ /**
+ * Executes the test code of the given scenario
+ * See {@link scenarios.Scenario#run(T)}
+ *
+ * @param scenario test scenario
+ */
+ void execute(Scenario<T, R> scenario);
+
+ default String getName() {
+ return this.getClass().getName();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/execution/MethodHandleDelegate.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package execution;
+
+import hierarchies.TypeHierarchy;
+import scenarios.Scenario;
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+/**
+ * Executes test scenario using {@link MethodHandle#invoke(Object...)}.
+ * Delegates execution to the given {@link Execution} by creating
+ * new test scenario, see {@link Scenario}
+ */
+public class MethodHandleDelegate<T extends TypeHierarchy.I, R> implements Execution<T, R> {
+ private final Execution<T, R> delegate;
+
+ public MethodHandleDelegate(Execution<T, R> delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void execute(Scenario<T, R> scenario) {
+ delegate.execute(new MHScenario<T, R>(scenario));
+ }
+
+ @Override
+ public String getName() {
+ return "MethodHandleDelegate # " + delegate.getName();
+ }
+
+ private static class MHScenario<T extends TypeHierarchy.I, R> extends Scenario<T, R> {
+ private final Scenario<T, R> scenario;
+ private static final MethodHandle METHOD_HANDLE_RUN;
+
+ static {
+ MethodHandles.Lookup lookup = MethodHandles.lookup();
+ MethodType methodType = MethodType.methodType(Object.class, TypeHierarchy.I.class);
+
+ try {
+ METHOD_HANDLE_RUN = lookup.findVirtual(Scenario.class, "run", methodType);
+ } catch (NoSuchMethodException | IllegalAccessException e) {
+ System.err.println("Failed to get target method run() with " + e);
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Constructor
+ *
+ * @param scenario test scenario to be executed
+ */
+ private MHScenario(Scenario<T, R> scenario) {
+ super("MethodHandle::" + scenario.getName(), scenario.profilingType, scenario.hierarchy);
+ this.scenario = scenario;
+ }
+
+ /**
+ * Runs {@link Scenario#run(T)} with {@link MethodHandle#invoke(Object...)}
+ *
+ * @param t subject of the test
+ * @return result of the underlying {@link Scenario#run(T)} invocation
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public R run(T t) {
+ try {
+ return (R) METHOD_HANDLE_RUN.invoke(scenario, t);
+ } catch (Throwable thr) {
+ System.err.println(scenario.getName()
+ + " failed to invoke target method run() with " + thr);
+ throw new RuntimeException("Invocation failed", thr);
+ }
+ }
+
+ @Override
+ public void check(R r, T t) {
+ scenario.check(r, t);
+ }
+ }
+}
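
MHScenario resolves Scenario.run once via MethodHandles.Lookup.findVirtual and then routes every call through MethodHandle.invoke, with the receiver supplied as the leading argument. A minimal standalone sketch of that lookup/invoke pattern against a plain JDK method (not part of the test sources):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    // Minimal illustration of the lookup/invoke pattern used by MHScenario.
    public class FindVirtualDemo {
        public static void main(String[] args) throws Throwable {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            // String.length() has type ()int; the receiver is implicit in findVirtual.
            MethodHandle length = lookup.findVirtual(String.class, "length",
                                                     MethodType.methodType(int.class));
            int n = (int) length.invoke("speculation"); // receiver passed as first argument
            System.out.println(n);                      // 11
        }
    }
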
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/execution/TypeConflict.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package execution;
+
+import hierarchies.TypeHierarchy;
+import scenarios.Scenario;
+
+/**
+ * Type profiling conflict execution scenario. The main goal is
+ * to make compiler profile and compile methods with different types.
+ * Scenario tests guards by passing conflicting types (incompatible
+ * for the profiled data).
+ */
+public class TypeConflict<T extends TypeHierarchy.I, R> implements Execution<T, R> {
+ /** Test methods execution number to make profile */
+ private final static int POLLUTION_THRESHOLD = 5000;
+ /** Test methods execution number to make it profiled and compiled*/
+ private final static int PROFILE_THRESHOLD = 20000;
+
+ @Override
+ public void execute(Scenario<T, R> scenario) {
+ T base = scenario.getProfiled();
+ T incompatible = scenario.getConflict();
+
+ // pollute profile by passing different types
+ R baseResult = null;
+ R incResult = null;
+ for (int i = 0; i < POLLUTION_THRESHOLD; i++) {
+ baseResult = methodNotToCompile(scenario, base);
+ incResult = methodNotToCompile(scenario, incompatible);
+ }
+ scenario.check(baseResult, base);
+ scenario.check(incResult, incompatible);
+
+ // profile and compile
+ R result = null;
+ for (int i = 0; i < PROFILE_THRESHOLD; i++) {
+ result = methodNotToCompile(scenario, base);
+ }
+ scenario.check(result, base);
+
+ // pass another type to make guard work and recompile
+ for (int i = 0; i < PROFILE_THRESHOLD; i++) {
+ result = methodNotToCompile(scenario, incompatible);
+ }
+ scenario.check(result, incompatible);
+ }
+
+ private R methodNotToCompile(Scenario<T, R> scenario, T t) {
+ return scenario.run(t);
+ }
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/execution/TypeProfile.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package execution;
+
+import hierarchies.TypeHierarchy;
+import scenarios.Scenario;
+
+/**
+ * Profile type execution scenario. Executes tester method
+ * in a loop without any manipulation with types or instances.
+ */
+public class TypeProfile<T extends TypeHierarchy.I, R> implements Execution<T, R> {
+ /** Number of test method execution to make it profiled and compiled */
+ private final static int PROFILE_THRESHOLD = 100000;
+
+ /**
+ * Makes scenario code be profiled and compiled
+ * @param scenario Test scenario
+ */
+ @Override
+ public void execute(Scenario<T, R> scenario) {
+ R result = null;
+ T prof = scenario.getProfiled();
+ T confl = scenario.getConflict();
+
+ for (int i = 0; i < PROFILE_THRESHOLD; i++) {
+ result = methodNotToCompile(scenario, prof);
+ }
+ scenario.check(result, prof);
+
+ result = methodNotToCompile(scenario, confl);
+ scenario.check(result, confl);
+ }
+
+ protected R methodNotToCompile(Scenario<T, R> scenario, T t) {
+ return scenario.run(t);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/DefaultMethodInterface.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+public class DefaultMethodInterface {
+ private DefaultMethodInterface() {
+ }
+
+ public static class Hierarchy
+ extends TypeHierarchy<DefaultMethodInterface.A, DefaultMethodInterface.B> {
+ public Hierarchy() {
+ super(new DefaultMethodInterface.A(), new DefaultMethodInterface.B(),
+ DefaultMethodInterface.A.class, DefaultMethodInterface.B.class);
+ }
+ }
+
+ public static interface I2 extends TypeHierarchy.I {
+ default int m() {
+ return TypeHierarchy.ANSWER;
+ }
+ }
+
+ public static class A implements I2 {
+ // use default method from I2
+ }
+
+ public static class B extends A {
+ @Override
+ public int m() {
+ return TypeHierarchy.YEAR;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/DefaultMethodInterface2.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+public class DefaultMethodInterface2 {
+ private DefaultMethodInterface2() {
+ }
+
+ public static class Hierarchy
+ extends TypeHierarchy<TypeHierarchy.A, DefaultMethodInterface2.B> {
+ public Hierarchy() {
+ super(new TypeHierarchy.A(), new DefaultMethodInterface2.B(),
+ TypeHierarchy.A.class, DefaultMethodInterface2.B.class);
+ }
+ }
+
+ public static interface I2 extends TypeHierarchy.I {
+ default int m() {
+ return TypeHierarchy.ANSWER;
+ }
+ }
+
+ public static class B implements I2 {
+ // default method I2.m()
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/Linear.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+public class Linear {
+ private Linear() {
+ }
+
+ public static class Hierarchy extends TypeHierarchy<TypeHierarchy.A, Linear.B> {
+ public Hierarchy() {
+ super(new TypeHierarchy.A(), new Linear.B(),
+ TypeHierarchy.A.class, Linear.B.class);
+ }
+ }
+
+ public static class B extends TypeHierarchy.A {
+ @Override
+ public int m() {
+ return TypeHierarchy.YEAR;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/Linear2.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+public class Linear2 {
+ private Linear2() {
+ }
+
+ public static class Hierarchy extends TypeHierarchy<TypeHierarchy.A, Linear2.B> {
+ public Hierarchy() {
+ super(new A(), new Linear2.B(),
+ A.class, Linear2.B.class);
+ }
+ }
+
+ public static interface I2 {
+ int m();
+ }
+
+ public static class B extends TypeHierarchy.A implements Linear2.I2 {
+ @Override
+ public int m() {
+ return TypeHierarchy.YEAR;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/NullableType.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+public class NullableType<M extends TypeHierarchy.I, N extends TypeHierarchy.I>
+ extends TypeHierarchy<M, N> {
+
+ public NullableType(TypeHierarchy<M, N> delegate) {
+ super(delegate.getM(), null,
+ delegate.getClassM(), delegate.getClassN());
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/OneRank.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+public class OneRank {
+ private OneRank() {
+ }
+
+ public static class Hierarchy extends TypeHierarchy<TypeHierarchy.A, OneRank.B> {
+ public Hierarchy() {
+ super(new TypeHierarchy.A(), new OneRank.B(),
+ TypeHierarchy.A.class, OneRank.B.class);
+ }
+ }
+
+ public static class B implements TypeHierarchy.I {
+ @Override
+ public int m() {
+ return TypeHierarchy.YEAR;
+ }
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/hierarchies/TypeHierarchy.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package hierarchies;
+
+/**
+ * Type hierarchy contains classes the type profiling and speculation are tested with
+ */
+public abstract class TypeHierarchy<M extends TypeHierarchy.I, N extends TypeHierarchy.I> {
+ // Magic numbers
+ public static final int ANSWER = 42;
+ public static final int TEMP = 451;
+ public static final int YEAR = 1984;
+
+ private final M m;
+ private final N n;
+ private final Class<M> classM;
+ private final Class<N> classN;
+
+ protected TypeHierarchy(M m, N n, Class<M> classM, Class<N> classN) {
+ this.m = m;
+ this.n = n;
+ this.classM = classM;
+ this.classN = classN;
+ }
+
+ public final M getM() {
+ return m;
+ }
+
+ public final N getN() {
+ return n;
+ }
+
+ public final Class<M> getClassM() {
+ return classM;
+ }
+
+ public final Class<N> getClassN() {
+ return classN;
+ }
+
+ public interface I {
+ int m();
+ }
+
+ public static class A implements I {
+ @Override
+ public int m() {
+ return TypeHierarchy.ANSWER;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ArrayCopy.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import hierarchies.TypeHierarchy;
+
+import java.util.Arrays;
+
+/**
+ * Tests System.arraycopy()
+ */
+public class ArrayCopy extends ArrayScenario {
+ public ArrayCopy(ProfilingType profilingType,
+ TypeHierarchy<? extends TypeHierarchy.I, ? extends TypeHierarchy.I> hierarchy) {
+ super("ArrayCopy", profilingType, hierarchy);
+ }
+
+ /**
+ * @param obj is used to fill arrays
+ * @return the same obj
+ */
+ @Override
+ public TypeHierarchy.I run(TypeHierarchy.I obj) {
+ switch (profilingType) {
+ case RETURN:
+ TypeHierarchy.I t = collectReturnType(obj);
+ Arrays.fill(array, t);
+ System.arraycopy(array, 0, matrix[0], 0, array.length);
+ return array[0];
+ case ARGUMENTS:
+ field = obj;
+ Arrays.fill(array, field);
+ System.arraycopy(array, 0, matrix[0], 0, array.length);
+ return array[0];
+ case PARAMETERS:
+ Arrays.fill(array, obj);
+ System.arraycopy(array, 0, matrix[0], 0, array.length);
+ return array[0];
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ArrayReferenceStore.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import hierarchies.TypeHierarchy;
+
+import java.util.Arrays;
+
+/**
+ * Tests the {@code aastore} bytecode
+ */
+public class ArrayReferenceStore extends ArrayScenario {
+ public ArrayReferenceStore(ProfilingType profilingType,
+ TypeHierarchy<? extends TypeHierarchy.I, ? extends TypeHierarchy.I> hierarchy) {
+ super("ArrayReferenceStore", profilingType, hierarchy);
+ }
+
+ /**
+ * @param obj is used to fill arrays
+ * @return obj
+ */
+ @Override
+ public TypeHierarchy.I run(TypeHierarchy.I obj) {
+ switch (profilingType) {
+ case RETURN:
+ TypeHierarchy.I t = collectReturnType(obj);
+ Arrays.fill(array, t);
+ matrix[0] = array;
+ return matrix[0][0];
+ case ARGUMENTS:
+ field = obj;
+ Arrays.fill(array, field);
+ matrix[0] = array;
+ return matrix[0][0];
+ case PARAMETERS:
+ Arrays.fill(array, obj);
+ matrix[0] = array;
+ return matrix[0][0];
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ArrayScenario.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import com.oracle.java.testlibrary.Asserts;
+import hierarchies.TypeHierarchy;
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+
+/**
+ * Base class for array scenarios
+ */
+public abstract class ArrayScenario extends Scenario<TypeHierarchy.I, TypeHierarchy.I> {
+ protected final TypeHierarchy.I[] array;
+ protected final TypeHierarchy.I[][] matrix;
+
+ protected ArrayScenario(String name, ProfilingType profilingType,
+ TypeHierarchy<? extends TypeHierarchy.I, ? extends TypeHierarchy.I> hierarchy) {
+ super(name, profilingType, hierarchy);
+ final int x = 20;
+ final int y = 10;
+
+ TypeHierarchy.I prof = hierarchy.getM();
+ TypeHierarchy.I confl = hierarchy.getN();
+
+ this.array = (TypeHierarchy.I[]) Array.newInstance(hierarchy.getClassM(), y);
+ Arrays.fill(array, prof);
+
+ this.matrix = (TypeHierarchy.I[][]) Array.newInstance(hierarchy.getClassM(), x, y);
+ for (int i = 0; i < x; i++) {
+            this.matrix[i] = this.array; // every row refers to the same array instance
+ }
+
+ Asserts.assertEquals(array.length, matrix[0].length, "Invariant");
+ }
+
+ @Override
+ public boolean isApplicable() {
+ return hierarchy.getClassM().isAssignableFrom(hierarchy.getClassN());
+ }
+
+ @Override
+ public void check(TypeHierarchy.I res, TypeHierarchy.I orig) {
+ Asserts.assertEquals(res, orig, "Check failed");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/CheckCast.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import com.oracle.java.testlibrary.Asserts;
+import hierarchies.TypeHierarchy;
+
+import java.util.Objects;
+
+/**
+ * Checkcast scenario
+ * @param <T> profiling parameter
+ */
+public class CheckCast<T extends TypeHierarchy.I> extends Scenario<T, Integer> {
+ public CheckCast(ProfilingType profilingType, TypeHierarchy<? extends T, ? extends T> hierarchy) {
+ super("CheckCast", profilingType, hierarchy);
+ }
+
+ /**
+     * Runs the checkcast scenario under the tested profiling type.
+     * @param obj is a profiled parameter for the test
+     * @return result of the parameter's {@code m()} call, or null
+ */
+ @Override
+ public Integer run(T obj) {
+ switch (profilingType) {
+ case RETURN:
+ T t = collectReturnType(obj);
+ if (t != null) {
+ return t.m();
+ }
+ return null;
+ case ARGUMENTS:
+ field = obj;
+ if (field != null) {
+ return field.m();
+ }
+ return null;
+ case PARAMETERS:
+ if (obj != null) {
+ return obj.m();
+ }
+ return null;
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+
+ @Override
+ public void check(Integer result, T orig) {
+ if (result != null || orig != null) {
+ Objects.requireNonNull(result);
+ Objects.requireNonNull(orig);
+ Asserts.assertEquals(result, orig.m(), "Results mismatch");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ClassIdentity.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import com.oracle.java.testlibrary.Asserts;
+import hierarchies.TypeHierarchy;
+
+/**
+ * Tests pattern: if (a.getClass() == D.class)
+ */
+public class ClassIdentity<T extends TypeHierarchy.I> extends Scenario<T, Integer> {
+ public ClassIdentity(ProfilingType profilingType,
+ TypeHierarchy<? extends T, ? extends T> hierarchy) {
+ super("ClassIdentity", profilingType, hierarchy);
+ }
+
+ @Override
+ public boolean isApplicable() {
+ return hierarchy.getM() != null && hierarchy.getN() != null;
+ }
+
+ @Override
+ public Integer run(T obj) {
+ switch (profilingType) {
+ case RETURN:
+ T t = collectReturnType(obj);
+ if (t.getClass() == TypeHierarchy.A.class) {
+ return inlinee(t);
+ }
+ return TypeHierarchy.TEMP;
+ case ARGUMENTS:
+ field = obj;
+ if (field.getClass() == TypeHierarchy.A.class) {
+ return inlinee(field);
+ }
+ return TypeHierarchy.TEMP;
+ case PARAMETERS:
+ if (obj.getClass() == TypeHierarchy.A.class) {
+ return inlinee(obj);
+ }
+ return TypeHierarchy.TEMP;
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+
+ public int inlinee(T obj) {
+ return obj.m();
+ }
+
+ @Override
+ public void check(Integer result, T orig) {
+ if (orig.getClass() == TypeHierarchy.A.class) {
+ Asserts.assertEquals(result, orig.m(),
+ "Results are not equal for TypeHierarchy.A.class");
+ } else {
+ Asserts.assertEquals(result, TypeHierarchy.TEMP, "Result differs from expected");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ClassInstanceOf.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import com.oracle.java.testlibrary.Asserts;
+import hierarchies.TypeHierarchy;
+
+/**
+ * Tests instanceof
+ */
+public class ClassInstanceOf<T extends TypeHierarchy.I> extends Scenario<T, Integer> {
+ public ClassInstanceOf(ProfilingType profilingType,
+ TypeHierarchy<? extends T, ? extends T> hierarchy) {
+ super("ClassInstanceOf", profilingType, hierarchy);
+ }
+
+ @Override
+ public Integer run(T obj) {
+ switch (profilingType) {
+ case RETURN:
+ T t = collectReturnType(obj);
+ if (t instanceof TypeHierarchy.A) {
+ return inlinee(t);
+ }
+ return TypeHierarchy.TEMP;
+ case ARGUMENTS:
+ field = obj;
+ if (field instanceof TypeHierarchy.A) {
+ return inlinee(field);
+ }
+ return TypeHierarchy.TEMP;
+ case PARAMETERS:
+ if (obj instanceof TypeHierarchy.A) {
+ return inlinee(obj);
+ }
+ return TypeHierarchy.TEMP;
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+
+ public int inlinee(T obj) {
+ return obj.m();
+ }
+
+ @Override
+ public void check(Integer result, T orig) {
+ if (orig instanceof TypeHierarchy.A) {
+ Asserts.assertEquals(result, orig.m(), "Results are not equal for TypeHierarchy.A");
+ } else {
+ Asserts.assertEquals(result, TypeHierarchy.TEMP, "Result differs from expected");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ClassIsInstance.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import com.oracle.java.testlibrary.Asserts;
+import hierarchies.TypeHierarchy;
+
+/**
+ * Tests {@link Class#isInstance(Object)}
+ */
+public class ClassIsInstance<T extends TypeHierarchy.I> extends Scenario<T, Integer> {
+ private final Class<?> baseClass;
+
+ public ClassIsInstance(ProfilingType profilingType,
+ TypeHierarchy<? extends T, ? extends T> hierarchy) {
+ super("ClassIsInstance", profilingType, hierarchy);
+ this.baseClass = hierarchy.getClassM();
+ }
+
+ @Override
+ public Integer run(T obj) {
+ switch (profilingType) {
+ case RETURN:
+ T t = collectReturnType(obj);
+ if (baseClass.isInstance(t)) {
+ return inlinee(t);
+ }
+ return TypeHierarchy.TEMP;
+ case ARGUMENTS:
+ field = obj;
+ if (baseClass.isInstance(field)) {
+ return inlinee(field);
+ }
+ return TypeHierarchy.TEMP;
+ case PARAMETERS:
+ if (baseClass.isInstance(obj)) {
+ return inlinee(obj);
+ }
+ return TypeHierarchy.TEMP;
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+
+ public int inlinee(T obj) {
+ return obj.m();
+ }
+
+ @Override
+ public void check(Integer result, T orig) {
+ if (baseClass.isInstance(orig)) {
+ Asserts.assertEquals(result, orig.m(), "Results are not equal for base class");
+ } else {
+ Asserts.assertEquals(result, TypeHierarchy.TEMP, "Result differs from expected");
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ProfilingType.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+public enum ProfilingType {
+ /** type profiling of return values of reference types from an invoke */
+ RETURN,
+ /** type profiling for reference parameters on method entries */
+ PARAMETERS,
+ /** type profiling for reference arguments at an invoke */
+ ARGUMENTS,
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/ReceiverAtInvokes.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import com.oracle.java.testlibrary.Asserts;
+import hierarchies.TypeHierarchy;
+
+/**
+ * Tests profiling and speculation of receiver types at invokes
+ *
+ * @param <T> profiled parameter type
+ */
+public class ReceiverAtInvokes<T extends TypeHierarchy.I> extends Scenario<T, Integer> {
+ public ReceiverAtInvokes(ProfilingType profilingType,
+ TypeHierarchy<? extends T, ? extends T> hierarchy) {
+ super("ReceiverAtInvokes", profilingType, hierarchy);
+ }
+
+ @Override
+ public boolean isApplicable() {
+ return hierarchy.getM() != null && hierarchy.getN() != null;
+ }
+
+ /**
+ * Receiver profiling
+ *
+ * @param obj is a profiled parameter for the test
+     * @return result of the receiver's {@code m()} call
+ */
+ @Override
+ public Integer run(T obj) {
+ switch (profilingType) {
+ case RETURN:
+ T t = collectReturnType(obj);
+ return inlinee(t);
+ case ARGUMENTS:
+ field = obj;
+ return inlinee(field);
+ case PARAMETERS:
+ return inlinee(obj);
+ }
+ throw new RuntimeException("Should not reach here");
+ }
+
+ private Integer inlinee(T obj) {
+ return obj.m(); // should be inlined
+ }
+
+ @Override
+ public void check(Integer result, T orig) {
+ Asserts.assertEquals(result, orig.m(), "Results mismatch");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/types/correctness/scenarios/Scenario.java Sun Apr 13 23:39:22 2014 -0700
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package scenarios;
+
+import hierarchies.TypeHierarchy;
+
+/**
+ * Test scenario
+ *
+ * @param <T> parameter type
+ * @param <R> result type
+ */
+public abstract class Scenario<T extends TypeHierarchy.I, R> {
+
+ private final String name;
+ public final ProfilingType profilingType;
+    public final TypeHierarchy<? extends T, ? extends T> hierarchy;
+ protected volatile T field;
+
+ /**
+ * Constructor
+ *
+ * @param name scenario name
+ * @param profilingType tested profiling type
+ * @param hierarchy type hierarchy
+ */
+ protected Scenario(String name, ProfilingType profilingType,
+ TypeHierarchy<? extends T, ? extends T> hierarchy) {
+ this.profilingType = profilingType;
+ this.name = name + " # " + profilingType.name();
+ this.hierarchy = hierarchy;
+ }
+
+     * Returns the object which should be passed to the tested methods
+     * so that profile data is collected for its type
+ * for the methods used for profile data
+ *
+ * @return profiled type object
+ */
+ public T getProfiled() {
+ return hierarchy.getM();
+ }
+
+ /**
+     * Returns the object which conflicts with the collected profile data
+     * when passed instead of {@linkplain Scenario#getProfiled}
+     *
+     * @return an object incompatible with the profiled one
+ */
+ public T getConflict() {
+ return hierarchy.getN();
+ }
+
+ /**
+ * @return scenario name
+ */
+ public String getName() {
+ return name;
+ }
+
+    /** Returns whether this scenario is applicable to the hierarchy it was constructed with */
+ public boolean isApplicable() {
+ return true;
+ }
+
+ /**
+ * Runs test scenario
+ *
+ * @param t subject of the test
+ * @return result of the test invocation
+ */
+ public abstract R run(T t);
+
+    /** Used for return type profiling */
+ protected final T collectReturnType(T t) {
+ return t;
+ }
+
+ /**
+     * Checks that the result is consistent with the original parameter
+ *
+ * @param r result
+ * @param t original
+ * @throws java.lang.RuntimeException on result mismatch
+ */
+ public abstract void check(R r, T t);
+}
--- a/hotspot/test/gc/g1/TestStringDeduplicationTools.java Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/test/gc/g1/TestStringDeduplicationTools.java Sun Apr 13 23:39:22 2014 -0700
@@ -310,7 +310,9 @@
}
System.gc();
+
System.out.println("Heap Memory Usage: " + ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getUsed());
+ System.out.println("Array Header Size: " + unsafe.ARRAY_CHAR_BASE_OFFSET);
System.out.println("End: MemoryUsageTest");
}
@@ -482,31 +484,40 @@
public static void testMemoryUsage() throws Exception {
// Test that memory usage is reduced after deduplication
OutputAnalyzer output;
- final String usagePattern = "Heap Memory Usage: (\\d+)";
+ final String heapMemoryUsagePattern = "Heap Memory Usage: (\\d+)";
+ final String arrayHeaderSizePattern = "Array Header Size: (\\d+)";
// Run without deduplication
output = MemoryUsageTest.run(false);
output.shouldHaveExitValue(0);
- final long memoryUsageWithoutDedup = Long.parseLong(output.firstMatch(usagePattern, 1));
+ final long heapMemoryUsageWithoutDedup = Long.parseLong(output.firstMatch(heapMemoryUsagePattern, 1));
+ final long arrayHeaderSizeWithoutDedup = Long.parseLong(output.firstMatch(arrayHeaderSizePattern, 1));
// Run with deduplication
output = MemoryUsageTest.run(true);
output.shouldHaveExitValue(0);
- final long memoryUsageWithDedup = Long.parseLong(output.firstMatch(usagePattern, 1));
+ final long heapMemoryUsageWithDedup = Long.parseLong(output.firstMatch(heapMemoryUsagePattern, 1));
+ final long arrayHeaderSizeWithDedup = Long.parseLong(output.firstMatch(arrayHeaderSizePattern, 1));
+
+        // Sanity check to make sure one instance isn't using compressed class pointers while the other is
+ if (arrayHeaderSizeWithoutDedup != arrayHeaderSizeWithDedup) {
+ throw new Exception("Unexpected difference between array header sizes");
+ }
// Calculate expected memory usage with deduplication enabled. This calculation does
// not take alignment and padding into account, so it's a conservative estimate.
- final long sizeOfChar = 2; // bytes
- final long bytesSaved = (LargeNumberOfStrings - 1) * (StringLength * sizeOfChar + unsafe.ARRAY_CHAR_BASE_OFFSET);
- final long memoryUsageWithDedupExpected = memoryUsageWithoutDedup - bytesSaved;
+ final long sizeOfChar = unsafe.ARRAY_CHAR_INDEX_SCALE;
+ final long sizeOfCharArray = StringLength * sizeOfChar + arrayHeaderSizeWithoutDedup;
+ final long bytesSaved = (LargeNumberOfStrings - 1) * sizeOfCharArray;
+ final long heapMemoryUsageWithDedupExpected = heapMemoryUsageWithoutDedup - bytesSaved;
System.out.println("Memory usage summary:");
- System.out.println(" memoryUsageWithoutDedup: " + memoryUsageWithoutDedup);
- System.out.println(" memoryUsageWithDedup: " + memoryUsageWithDedup);
- System.out.println(" memoryUsageWithDedupExpected: " + memoryUsageWithDedupExpected);
+ System.out.println(" heapMemoryUsageWithoutDedup: " + heapMemoryUsageWithoutDedup);
+ System.out.println(" heapMemoryUsageWithDedup: " + heapMemoryUsageWithDedup);
+ System.out.println(" heapMemoryUsageWithDedupExpected: " + heapMemoryUsageWithDedupExpected);
- if (memoryUsageWithDedup > memoryUsageWithDedupExpected) {
- throw new Exception("Unexpected memory usage, memoryUsageWithDedup should less or equal to memoryUsageWithDedupExpected");
+ if (heapMemoryUsageWithDedup > heapMemoryUsageWithDedupExpected) {
+ throw new Exception("Unexpected memory usage, heapMemoryUsageWithDedup should be less or equal to heapMemoryUsageWithDedupExpected");
}
}
}
--- a/hotspot/test/runtime/6626217/Test6626217.sh Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/test/runtime/6626217/Test6626217.sh Sun Apr 13 23:39:22 2014 -0700
@@ -22,7 +22,6 @@
#
-# @ignore 8028733
# @test @(#)Test6626217.sh
# @bug 6626217
# @summary Loader-constraint table allows arrays instead of only the base-classes
--- a/hotspot/test/runtime/6888954/vmerrors.sh Mon Apr 14 08:24:28 2014 +0200
+++ b/hotspot/test/runtime/6888954/vmerrors.sh Sun Apr 13 23:39:22 2014 -0700
@@ -85,7 +85,7 @@
[ $i -lt 10 ] && i2=0$i
"$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
- -XX:-TransmitErrorReport \
+ -XX:-TransmitErrorReport -XX:-CreateMinidumpOnCrash \
-XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
# If ErrorHandlerTest is ignored (product build), stop.
--- a/jaxp/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/jaxp/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -248,3 +248,4 @@
fb92ed0399424193f444489ad49a16748816dc12 jdk9-b03
2846d8fc31490897817a122a668af4f44fc913d0 jdk9-b04
b92a20e303d24c74078888cd7084b14d7626d48f jdk9-b05
+46e4951b2a267e98341613a3b796f2c7554eb831 jdk9-b06
--- a/jaxws/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/jaxws/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -251,3 +251,4 @@
1cd9786257ed4f82a3371fd606b162e5bb6fcd81 jdk9-b03
da44a8bdf1f3fdd518e7d785d60cc1b15983b176 jdk9-b04
eae966c8133fec0a8bf9e16d1274a4ede3c0fb52 jdk9-b05
+cf0a6e41670f990414cd337000ad5f3bd1908073 jdk9-b06
--- a/jdk/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/jdk/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -248,3 +248,4 @@
4111af6151ed8ca8e3f5603c69729a68427e1d5b jdk9-b03
627deed79b595a4789fc9151455b663a47381257 jdk9-b04
263198a1d8f1f4cb97d35f40c61704b08ebd3686 jdk9-b05
+cac7b28b8b1e0e11d7a8e1ac1fe75a03b3749eab jdk9-b06
--- a/langtools/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/langtools/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -248,3 +248,4 @@
151222468d1d04ce6613d33efa3d45bfaf53e3e5 jdk9-b03
fa2ec6b6b1697ae4a78b03b609664dc6b47dee86 jdk9-b04
1d5e6fc88a4cca287090c16b0530a0d5849a5603 jdk9-b05
+31946c0a3f4dc2c78f6f09a0524aaa2a0dad1c78 jdk9-b06
--- a/nashorn/.hgtags Mon Apr 14 08:24:28 2014 +0200
+++ b/nashorn/.hgtags Sun Apr 13 23:39:22 2014 -0700
@@ -239,3 +239,4 @@
832f89ff25d903c45cfc994553f1ade8821a4398 jdk9-b03
3f6ef92cd7823372c45e79125adba4cbf1c9f7b2 jdk9-b04
2a1cac93c33317d828d4a5b81239204a9927cc4a jdk9-b05
+1f75bcbe74e315470dc0b75b7d5bcd209e287c39 jdk9-b06