hotspot/src/share/vm/runtime/synchronizer.cpp
changeset 29070 b0a5fc9c59c8
parent 28621 37cc414b6491
child 30244 d4e471395ff5
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Feb 13 13:17:13 2015 +0100
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Feb 13 10:01:42 2015 -0800
@@ -122,6 +122,70 @@
 static volatile int MonitorPopulation = 0;  // # Extant -- in circulation
 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 
+
+// =====================> Quick functions
+
+// The quick_* forms are special fast-path variants used to improve
+// performance.  In the simplest case, a "quick_*" implementation could
+// simply return false, in which case the caller will perform the necessary
+// state transitions and call the slow-path form.
+// The fast-path is designed to handle frequently arising cases efficiently
+// and is just a degenerate "optimistic" variant of the slow-path.
+// Returns true  -- the call was satisfied on the fast path.
+// Returns false -- the call needs the services of the slow-path.
+// A no-loitering ordinance is in effect for the quick_* family of
+// operators: safepoints and indefinite blocking (blocking that might span a
+// safepoint) are forbidden.  Generally the thread_state() is _in_Java upon
+// entry.  (An illustrative caller sketch follows quick_enter() below.)
+
+// The LockNode emitted directly at the synchronization site would have
+// been too big had it included support for inflated recursive enter and
+// exit, so those cases are handled here instead.
+// Note that we can't safely call AsyncPrintJavaStack() from within
+// quick_enter() as our thread state remains _in_Java.
+
+bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
+                                     BasicLock * Lock) {
+  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+  assert(Self->is_Java_thread(), "invariant");
+  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
+  No_Safepoint_Verifier nsv;
+  if (obj == NULL) return false;       // Need to throw NPE
+  const markOop mark = obj->mark();
+
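+  // If the object's lock is already inflated (its mark word points at an
+  // ObjectMonitor), we may be able to enter the monitor right here, without
+  // a thread-state transition: recursively, or via a single uncontended CAS.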
+  if (mark->has_monitor()) {
+    ObjectMonitor * const m = mark->monitor();
+    assert(m->object() == obj, "invariant");
+    Thread * const owner = (Thread *) m->_owner;
+
+    // Lock contention and Transactional Lock Elision (TLE) diagnostics
+    // and observability
+    // Case: light contention possibly amenable to TLE
+    // Case: TLE-inimical operations such as nested/recursive synchronization
+
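+    // The current thread already owns the monitor: treat this as a
+    // recursive enter and simply bump the recursion count.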
+    if (owner == Self) {
+      m->_recursions++;
+      return true;
+    }
+
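+    // The monitor appears unowned: try to claim it with a single CAS on
+    // _owner.  Success means we hold the inflated monitor without blocking
+    // or reaching a safepoint.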
+    if (owner == NULL &&
+        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
+      assert(m->_recursions == 0, "invariant");
+      assert(m->_owner == Self, "invariant");
+      return true;
+    }
+  }
+
+  // Note that we could inflate in quick_enter(); that is likely a useful
+  // optimization.
+  // Critically, in quick_enter() we must not:
+  // -- perform bias revocation, or
+  // -- block indefinitely, or
+  // -- reach a safepoint
+
+  return false;        // revert to slow-path
+}
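+
+// Illustrative caller pattern: a minimal sketch of how a monitor-enter
+// entry point might use quick_enter() -- try the quick path while still
+// _in_Java, and fall back to the slow path only when it returns false.
+// The helper name monitor_enter_from_compiled_code is hypothetical.
+//
+//   void monitor_enter_from_compiled_code(oop obj, BasicLock* lock,
+//                                         JavaThread* thread) {
+//     if (ObjectSynchronizer::quick_enter(obj, thread, lock)) {
+//       return;   // fast path: lock acquired, no safepoint, no blocking
+//     }
+//     // Slow path: transition to _thread_in_vm, handlize obj, and invoke
+//     // the full monitor-enter machinery, which may block or safepoint.
+//   }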
+
 // -----------------------------------------------------------------------------
 //  Fast Monitor Enter/Exit
 // This is the fast monitor enter. The interpreter and compiler use