--- a/hotspot/src/os/posix/vm/os_posix.cpp Mon Mar 07 14:41:31 2016 -0500
+++ b/hotspot/src/os/posix/vm/os_posix.cpp Mon Mar 07 23:06:34 2016 +0000
@@ -336,13 +336,13 @@
const char *start;
if (lib_name != NULL) {
- len = name_len = strlen(lib_name);
+ name_len = strlen(lib_name);
if (is_absolute_path) {
// Need to strip path, prefix and suffix
if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
lib_name = ++start;
}
- if (len <= (prefix_len + suffix_len)) {
+ if (strlen(lib_name) <= (prefix_len + suffix_len)) {
return NULL;
}
lib_name += prefix_len;
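
The os_posix.cpp hunk above fixes a length check: 'len' was taken from the full path before the directory component was stripped, so a short library basename inside a long path could pass the prefix/suffix length check and the code would then index past the end of the basename. A minimal standalone sketch (not HotSpot code; the helper name and the example path are made up) of why the check has to use the length of the stripped name:

    // Sketch only: mirrors the basename-stripping logic from the hunk above.
    #include <cstring>
    #include <cstdio>

    static const char* strip_lib_name(const char* lib_name, size_t prefix_len, size_t suffix_len) {
      // Strip the directory component first, e.g. "/usr/lib/libfoo.so" -> "libfoo.so".
      const char* start = strrchr(lib_name, '/');
      if (start != NULL) {
        lib_name = start + 1;
      }
      // The check must use the length of the stripped basename. The length of the full
      // path (20 characters in the example below) would pass even though the basename
      // is only "x.so".
      if (strlen(lib_name) <= (prefix_len + suffix_len)) {
        return NULL;
      }
      return lib_name + prefix_len;
    }

    int main() {
      const char* base = strip_lib_name("/very/long/path/x.so", 3 /* "lib" */, 3 /* ".so" */);
      printf("%s\n", base == NULL ? "(rejected)" : base);
      return 0;
    }
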
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Mon Mar 07 14:41:31 2016 -0500
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Mon Mar 07 23:06:34 2016 +0000
@@ -2329,9 +2329,13 @@
GCIdMarkAndRestore conc_gc_id_mark(_cmThread->gc_id());
if (_cm->has_aborted()) {
_gc_tracer_cm->report_concurrent_mode_failure();
+
+ // The call below ends the ConcurrentGCTimer as well as the open concurrent phase.
+ _cm->register_concurrent_gc_end_and_stop_timer();
+ } else {
+ _gc_timer_cm->register_gc_end();
}
- _gc_timer_cm->register_gc_end();
_gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
// Clear state variables to prepare for the next concurrent cycle.
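
The g1CollectedHeap.cpp hunk stops ending the concurrent GC timer unconditionally: on a concurrent mode failure the abort path now closes the still-open concurrent phase and ends the timer in one step, while the normal completion path only ends the timer. The toy model below (not the real GCTimer API; class and method names are made up) illustrates the invariant being protected: a timer must not be ended while a concurrent sub-phase is still open.

    #include <cassert>

    class ToyConcurrentTimer {
      bool _phase_open = false;
     public:
      void concurrent_start() { assert(!_phase_open); _phase_open = true; }
      void concurrent_end()   { assert(_phase_open);  _phase_open = false; }
      // Ending the timer while a concurrent phase is still open is the problem being fixed.
      void gc_end()           { assert(!_phase_open); }
    };

    int main() {
      ToyConcurrentTimer t;
      t.concurrent_start();
      // Aborted cycle: close the open phase before ending the timer, analogous to what
      // register_concurrent_gc_end_and_stop_timer() does for the real timer.
      t.concurrent_end();
      t.gc_end();
      return 0;
    }
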
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Mon Mar 07 14:41:31 2016 -0500
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Mon Mar 07 23:06:34 2016 +0000
@@ -441,7 +441,7 @@
_has_aborted(false),
_restart_for_overflow(false),
_concurrent_marking_in_progress(false),
- _concurrent_phase_started(false),
+ _concurrent_phase_status(ConcPhaseNotStarted),
// _verbose_level set below
@@ -1008,16 +1008,43 @@
}
void G1ConcurrentMark::register_concurrent_phase_start(const char* title) {
- assert(!_concurrent_phase_started, "Sanity");
- _concurrent_phase_started = true;
+ uint old_val = 0;
+ do {
+ old_val = Atomic::cmpxchg(ConcPhaseStarted, &_concurrent_phase_status, ConcPhaseNotStarted);
+ } while (old_val != ConcPhaseNotStarted);
_g1h->gc_timer_cm()->register_gc_concurrent_start(title);
}
+void G1ConcurrentMark::register_concurrent_phase_end_common(bool end_timer) {
+ if (_concurrent_phase_status == ConcPhaseNotStarted) {
+ return;
+ }
+
+ uint old_val = Atomic::cmpxchg(ConcPhaseStopping, &_concurrent_phase_status, ConcPhaseStarted);
+ if (old_val == ConcPhaseStarted) {
+ _g1h->gc_timer_cm()->register_gc_concurrent_end();
+ // If 'end_timer' is true, the caller also wants the GC timer ended, which requires
+ // the concurrent phase to be ended first. End the timer before changing the status
+ // back to 'ConcPhaseNotStarted', so that 'ConcurrentMarkThread' cannot start a new
+ // concurrent phase in the meantime.
+ if (end_timer) {
+ _g1h->gc_timer_cm()->register_gc_end();
+ }
+ old_val = Atomic::cmpxchg(ConcPhaseNotStarted, &_concurrent_phase_status, ConcPhaseStopping);
+ assert(old_val == ConcPhaseStopping, "Should not have changed since we entered this scope.");
+ } else {
+ do {
+ // Wait for the other thread to finish changing '_concurrent_phase_status' to 'ConcPhaseNotStarted'.
+ os::naked_short_sleep(1);
+ } while (_concurrent_phase_status != ConcPhaseNotStarted);
+ }
+}
+
void G1ConcurrentMark::register_concurrent_phase_end() {
- if (_concurrent_phase_started) {
- _concurrent_phase_started = false;
- _g1h->gc_timer_cm()->register_gc_concurrent_end();
- }
+ register_concurrent_phase_end_common(false);
+}
+
+void G1ConcurrentMark::register_concurrent_gc_end_and_stop_timer() {
+ register_concurrent_phase_end_common(true);
}
void G1ConcurrentMark::markFromRoots() {
@@ -2605,9 +2632,6 @@
_g1h->trace_heap_after_concurrent_cycle();
- // Close any open concurrent phase timing
- register_concurrent_phase_end();
-
_g1h->register_concurrent_cycle_end();
}
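
The new register_concurrent_phase_end_common() replaces the old boolean flag with a small three-state hand-off (ConcPhaseNotStarted / ConcPhaseStarted / ConcPhaseStopping): whichever thread wins the CAS to ConcPhaseStopping ends the phase (and optionally the timer), and the losing thread waits until the status is back to ConcPhaseNotStarted. A minimal standalone sketch of the same protocol, using std::atomic instead of HotSpot's Atomic::cmpxchg (all names below are illustrative):

    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <thread>

    enum Status : unsigned { NotStarted = 0, Started = 1, Stopping = 2 };
    static std::atomic<unsigned> status{NotStarted};

    static void phase_start() {
      unsigned expected;
      do {
        expected = NotStarted;             // only a NotStarted phase may be (re)started
      } while (!status.compare_exchange_strong(expected, Started));
    }

    static void phase_end() {
      if (status.load() == NotStarted) {
        return;                            // nothing to end
      }
      unsigned expected = Started;
      if (status.compare_exchange_strong(expected, Stopping)) {
        // Winner: do the "end the phase / end the timer" work while holding Stopping,
        // so no thread can start a new phase underneath us, then publish NotStarted.
        std::puts("ending concurrent phase");
        status.store(NotStarted);
      } else {
        // Loser: another thread is already stopping the phase; wait for it to finish.
        while (status.load() != NotStarted) {
          std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
      }
    }

    int main() {
      phase_start();
      std::thread t1(phase_end), t2(phase_end);   // racing "end" calls, as in the patch
      t1.join();
      t2.join();
      return 0;
    }
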
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp Mon Mar 07 14:41:31 2016 -0500
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp Mon Mar 07 23:06:34 2016 +0000
@@ -352,8 +352,17 @@
// time of remark.
volatile bool _concurrent_marking_in_progress;
- // Keep track of whether we have started concurrent phase or not.
- bool _concurrent_phase_started;
+ // There is a race between ConcurrentMarkThread and VMThread (ConcurrentMark::abort())
+ // both calling ConcurrentGCTimer::register_gc_concurrent_end().
+ // This variable keeps track of the concurrent phase so that the race can be resolved.
+ volatile uint _concurrent_phase_status;
+ // Concurrent phase is not yet started.
+ static const uint ConcPhaseNotStarted = 0;
+ // Concurrent phase is started.
+ static const uint ConcPhaseStarted = 1;
+ // The thread calling ConcurrentGCTimer::register_gc_concurrent_end() is ending the concurrent phase.
+ // Other threads should wait until the status has changed back to ConcPhaseNotStarted.
+ static const uint ConcPhaseStopping = 2;
// All of these times are in ms
NumberSeq _init_times;
@@ -485,6 +494,9 @@
// Set to true when initialization is complete
bool _completed_initialization;
+ // If 'end_timer' is true, the GC timer is also ended after the concurrent phase ends.
+ void register_concurrent_phase_end_common(bool end_timer);
+
public:
// Manipulation of the global mark stack.
// The push and pop operations are used by tasks for transfers
@@ -520,6 +532,8 @@
void register_concurrent_phase_start(const char* title);
void register_concurrent_phase_end();
+ // Ends both the concurrent phase and the GC timer.
+ void register_concurrent_gc_end_and_stop_timer();
void update_accum_task_vtime(int i, double vtime) {
_accum_task_vtime[i] += vtime;
--- a/hotspot/src/share/vm/runtime/globals.hpp Mon Mar 07 14:41:31 2016 -0500
+++ b/hotspot/src/share/vm/runtime/globals.hpp Mon Mar 07 23:06:34 2016 +0000
@@ -891,9 +891,6 @@
notproduct(bool, VerifyLastFrame, false, \
"Verify oops on last frame on entry to VM") \
\
- develop(bool, TraceHandleAllocation, false, \
- "Print out warnings when suspiciously many handles are allocated")\
- \
product(bool, FailOverToOldVerifier, true, \
"Fail over to old verifier when split verifier fails") \
\
@@ -3024,14 +3021,6 @@
notproduct(ccstrlist, SuppressErrorAt, "", \
"List of assertions (file:line) to muzzle") \
\
- notproduct(size_t, HandleAllocationLimit, 1024, \
- "Threshold for HandleMark allocation when +TraceHandleAllocation "\
- "is used") \
- \
- develop(size_t, TotalHandleAllocationLimit, 1024, \
- "Threshold for total handle allocation when " \
- "+TraceHandleAllocation is used") \
- \
develop(intx, StackPrintLimit, 100, \
"number of stack frames to print in VM-level stack dump") \
\
--- a/hotspot/src/share/vm/runtime/handles.cpp Mon Mar 07 14:41:31 2016 -0500
+++ b/hotspot/src/share/vm/runtime/handles.cpp Mon Mar 07 23:06:34 2016 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -129,14 +129,6 @@
k = k->next();
}
- // The thread local handle areas should not get very large
- if (TraceHandleAllocation && (size_t)handles_visited > TotalHandleAllocationLimit) {
-#ifdef ASSERT
- warning("%d: Visited in HandleMark : " SIZE_FORMAT, _nof_handlemarks, handles_visited);
-#else
- warning("Visited in HandleMark : " SIZE_FORMAT, handles_visited);
-#endif
- }
if (_prev != NULL) _prev->oops_do(f);
}
@@ -165,31 +157,6 @@
assert(area->_handle_mark_nesting > 0, "must stack allocate HandleMarks" );
debug_only(area->_handle_mark_nesting--);
- // Debug code to trace the number of handles allocated per mark/
-#ifdef ASSERT
- if (TraceHandleAllocation) {
- size_t handles = 0;
- Chunk *c = _chunk->next();
- if (c == NULL) {
- handles = area->_hwm - _hwm; // no new chunk allocated
- } else {
- handles = _max - _hwm; // add rest in first chunk
- while(c != NULL) {
- handles += c->length();
- c = c->next();
- }
- handles -= area->_max - area->_hwm; // adjust for last trunk not full
- }
- handles /= sizeof(void *); // Adjust for size of a handle
- if (handles > HandleAllocationLimit) {
- // Note: _nof_handlemarks is only set in debug mode
- warning("%d: Allocated in HandleMark : " SIZE_FORMAT, _nof_handlemarks, handles);
- }
-
- tty->print_cr("Handles " SIZE_FORMAT, handles);
- }
-#endif
-
// Delete later chunks
if( _chunk->next() ) {
// reset arena size before delete chunks. Otherwise, the total