--- a/hotspot/src/os/linux/vm/os_linux.cpp Tue Mar 15 12:27:15 2016 +0900
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Tue Mar 15 10:56:32 2016 +0000
@@ -881,6 +881,13 @@
assert(osthread != NULL, "osthread not set");
if (Thread::current()->osthread() == osthread) {
+#ifdef ASSERT
+ sigset_t current;
+ sigemptyset(&current);
+ pthread_sigmask(SIG_SETMASK, NULL, &current);
+ assert(!sigismember(&current, SR_signum), "SR signal should not be blocked!");
+#endif
+
// Restore caller's signal mask
sigset_t sigmask = osthread->caller_sigmask();
pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
@@ -3903,7 +3910,8 @@
// after sigsuspend.
int old_errno = errno;
- Thread* thread = Thread::current();
+ Thread* thread = Thread::current_or_null_safe();
+ assert(thread != NULL, "Missing current thread in SR_handler");
OSThread* osthread = thread->osthread();
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
@@ -3915,7 +3923,7 @@
os::SuspendResume::State state = osthread->sr.suspended();
if (state == os::SuspendResume::SR_SUSPENDED) {
sigset_t suspend_set; // signals for sigsuspend()
-
+ sigemptyset(&suspend_set);
// get current set of blocked signals and unblock resume signal
pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
sigdelset(&suspend_set, SR_signum);
@@ -4169,6 +4177,7 @@
// try to honor the signal mask
sigset_t oset;
+ sigemptyset(&oset);
pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
// call into the chained handler
@@ -4179,7 +4188,7 @@
}
// restore the signal mask
- pthread_sigmask(SIG_SETMASK, &oset, 0);
+ pthread_sigmask(SIG_SETMASK, &oset, NULL);
}
// Tell jvm's signal handler the signal is taken care of.
return true;
@@ -5716,6 +5725,7 @@
// Don't catch signals while blocked; let the running threads have the signals.
// (This allows a debugger to break into the running thread.)
sigset_t oldsigs;
+ sigemptyset(&oldsigs);
sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif
--- a/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp Tue Mar 15 12:27:15 2016 +0900
+++ b/hotspot/src/share/vm/gc/g1/g1HeapTransition.cpp Tue Mar 15 10:56:32 2016 +0000
@@ -82,8 +82,8 @@
void G1HeapTransition::print() {
Data after(_g1_heap);
- size_t eden_capacity_bytes_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
- size_t survivor_capacity_bytes_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
+ size_t eden_capacity_length_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
+ size_t survivor_capacity_length_after_gc = _g1_heap->g1_policy()->max_survivor_regions();
DetailedUsage usage;
if (log_is_enabled(Trace, gc, heap)) {
@@ -100,11 +100,11 @@
}
log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
- _before._eden_length, after._eden_length, eden_capacity_bytes_after_gc);
+ _before._eden_length, after._eden_length, eden_capacity_length_after_gc);
log_trace(gc, heap)(" Used: 0K, Waste: 0K");
log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "(" SIZE_FORMAT ")",
- _before._survivor_length, after._survivor_length, survivor_capacity_bytes_after_gc);
+ _before._survivor_length, after._survivor_length, survivor_capacity_length_after_gc);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);
--- a/hotspot/src/share/vm/memory/allocation.hpp Tue Mar 15 12:27:15 2016 +0900
+++ b/hotspot/src/share/vm/memory/allocation.hpp Tue Mar 15 10:56:32 2016 +0000
@@ -716,9 +716,6 @@
private:
static bool should_use_malloc(size_t length);
- static size_t size_for_malloc(size_t length);
- static size_t size_for_mmap(size_t length);
-
static E* allocate_malloc(size_t length);
static E* allocate_mmap(size_t length);
@@ -731,4 +728,26 @@
static void free(E* addr, size_t length);
};
+// Uses mmaped memory for all allocations. All allocations are initially
+// zero-filled. No pre-touching.
+template <class E, MEMFLAGS F>
+class MmapArrayAllocator : public AllStatic {
+ private:
+ static size_t size_for(size_t length);
+
+ public:
+ static E* allocate(size_t length);
+ static void free(E* addr, size_t length);
+};
+
+// Uses malloc:ed memory for all allocations.
+template <class E, MEMFLAGS F>
+class MallocArrayAllocator : public AllStatic {
+ public:
+ static size_t size_for(size_t length);
+
+ static E* allocate(size_t length);
+ static void free(E* addr, size_t length);
+};
+
#endif // SHARE_VM_MEMORY_ALLOCATION_HPP
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp Tue Mar 15 12:27:15 2016 +0900
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp Tue Mar 15 10:56:32 2016 +0000
@@ -151,30 +151,15 @@
}
template <class E, MEMFLAGS F>
-size_t ArrayAllocator<E, F>::size_for_malloc(size_t length) {
- return length * sizeof(E);
-}
-
-template <class E, MEMFLAGS F>
-size_t ArrayAllocator<E, F>::size_for_mmap(size_t length) {
+size_t MmapArrayAllocator<E, F>::size_for(size_t length) {
size_t size = length * sizeof(E);
int alignment = os::vm_allocation_granularity();
return align_size_up(size, alignment);
}
template <class E, MEMFLAGS F>
-bool ArrayAllocator<E, F>::should_use_malloc(size_t length) {
- return size_for_malloc(length) < ArrayAllocatorMallocLimit;
-}
-
-template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::allocate_malloc(size_t length) {
- return (E*)AllocateHeap(size_for_malloc(length), F);
-}
-
-template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::allocate_mmap(size_t length) {
- size_t size = size_for_mmap(length);
+E* MmapArrayAllocator<E, F>::allocate(size_t length) {
+ size_t size = size_for(length);
int alignment = os::vm_allocation_granularity();
char* addr = os::reserve_memory(size, NULL, alignment, F);
@@ -188,6 +173,42 @@
}
template <class E, MEMFLAGS F>
+void MmapArrayAllocator<E, F>::free(E* addr, size_t length) {
+ bool result = os::release_memory((char*)addr, size_for(length));
+ assert(result, "Failed to release memory");
+}
+
+template <class E, MEMFLAGS F>
+size_t MallocArrayAllocator<E, F>::size_for(size_t length) {
+ return length * sizeof(E);
+}
+
+template <class E, MEMFLAGS F>
+E* MallocArrayAllocator<E, F>::allocate(size_t length) {
+ return (E*)AllocateHeap(size_for(length), F);
+}
+
+template<class E, MEMFLAGS F>
+void MallocArrayAllocator<E, F>::free(E* addr, size_t /*length*/) {
+ FreeHeap(addr);
+}
+
+template <class E, MEMFLAGS F>
+bool ArrayAllocator<E, F>::should_use_malloc(size_t length) {
+ return MallocArrayAllocator<E, F>::size_for(length) < ArrayAllocatorMallocLimit;
+}
+
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::allocate_malloc(size_t length) {
+ return MallocArrayAllocator<E, F>::allocate(length);
+}
+
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::allocate_mmap(size_t length) {
+ return MmapArrayAllocator<E, F>::allocate(length);
+}
+
+template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
if (should_use_malloc(length)) {
return allocate_malloc(length);
@@ -214,14 +235,13 @@
}
template<class E, MEMFLAGS F>
-void ArrayAllocator<E, F>::free_malloc(E* addr, size_t /*length*/) {
- FreeHeap(addr);
+void ArrayAllocator<E, F>::free_malloc(E* addr, size_t length) {
+ MallocArrayAllocator<E, F>::free(addr, length);
}
template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free_mmap(E* addr, size_t length) {
- bool result = os::release_memory((char*)addr, size_for_mmap(length));
- assert(result, "Failed to release memory");
+ MmapArrayAllocator<E, F>::free(addr, length);
}
template<class E, MEMFLAGS F>