src/hotspot/share/runtime/biasedLocking.hpp
changeset 55625 f7e8dbb77156
parent 55479 80b27dc96ca3
child 57777 90ead0febf56
--- src/hotspot/share/runtime/biasedLocking.hpp	55624:cb90a20eb99a
+++ src/hotspot/share/runtime/biasedLocking.hpp	55625:f7e8dbb77156
@@ -74,21 +74,21 @@
 // to occur quickly in the situations where the bias has been revoked.
 //
 // Revocation of the lock's bias is fairly straightforward. We want to
 // restore the object's header and stack-based BasicObjectLocks and
 // BasicLocks to the state they would have been in had the object been
-// locked by HotSpot's usual fast locking scheme. To do this, we bring
-// the system to a safepoint and walk the stack of the thread toward
-// which the lock is biased. We find all of the lock records on the
-// stack corresponding to this object, in particular the first /
-// "highest" record. We fill in the highest lock record with the
-// object's displaced header (which is a well-known value given that
-// we don't maintain an identity hash nor age bits for the object
-// while it's in the biased state) and all other lock records with 0,
-// the value for recursive locks. When the safepoint is released, the
-// formerly-biased thread and all other threads revert back to
-// HotSpot's CAS-based locking.
+// locked by HotSpot's usual fast locking scheme. To do this, we execute
+// a handshake with the JavaThread that biased the lock. Inside the
+// handshake we walk the biaser stack searching for all of the lock
+// records corresponding to this object, in particular the first / "highest"
+// record. We fill in the highest lock record with the object's displaced
+// header (which is a well-known value given that we don't maintain an
+// identity hash nor age bits for the object while it's in the biased
+// state) and all other lock records with 0, the value for recursive locks.
+// Alternatively, we can revoke the bias of an object inside a safepoint
+// if we are already in one and we detect that we need to perform a
+// revocation.
 //
 // This scheme can not handle transfers of biases of single objects
 // from thread to thread efficiently, but it can handle bulk transfers
 // of such biases, which is a usage pattern showing up in some
 // applications and benchmarks. We implement "bulk rebias" and "bulk
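The fill-in rule in the new comment above (the first / "highest" lock record gets the object's displaced, unbiased header; every other record for the same object gets 0, the recursive-lock value) can be pictured with a small stand-alone sketch. Everything below is illustrative: LockRecord, unlocked_prototype and revoke_bias_sketch are names invented for this sketch, not HotSpot types, and the real walk_stack_and_revoke operates on the biaser's stack frames inside a handshake or safepoint rather than on a vector.

#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for a stack lock record on the biaser's stack (illustration only).
struct LockRecord {
  const void* obj;        // object this stack lock refers to
  uintptr_t   displaced;  // saved mark word, or 0 for a recursive lock
};

// While biased, the object keeps no identity hash and no age bits, so the
// unlocked header is a well-known constant (here just the "unlocked" low bit).
static uintptr_t unlocked_prototype() { return 0x1; }

// Walk the records from the most recent frame to the oldest one and apply the
// rule from the comment: the first/"highest" (oldest) record gets the displaced
// unlocked header, all other records for this object get 0.
static void revoke_bias_sketch(const void* obj, std::vector<LockRecord>& records) {
  LockRecord* highest = nullptr;
  for (LockRecord& lr : records) {
    if (lr.obj != obj) continue;
    lr.displaced = 0;      // assume recursive until an older match is seen
    highest = &lr;         // the last match found while walking is the oldest
  }
  if (highest != nullptr) {
    highest->displaced = unlocked_prototype();
  }
}

int main() {
  int dummy;                                   // pretend object identity
  std::vector<LockRecord> stack = {            // newest frame first
    { &dummy, 0xdead }, { nullptr, 0 }, { &dummy, 0xbeef }
  };
  revoke_bias_sketch(&dummy, stack);
  std::printf("%lx %lx\n", (unsigned long)stack[0].displaced,
                           (unsigned long)stack[2].displaced);  // 0 1
  return 0;
}

The walk goes from the newest frame to the oldest, so the last matching record seen is the "highest" one; that ordering is the only subtlety the sketch tries to capture.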
@@ -113,30 +113,33 @@
   int _total_entry_count;
   int _biased_lock_entry_count;
   int _anonymously_biased_lock_entry_count;
   int _rebiased_lock_entry_count;
   int _revoked_lock_entry_count;
+  int _handshakes_count;
   int _fast_path_entry_count;
   int _slow_path_entry_count;
 
  public:
   BiasedLockingCounters() :
     _total_entry_count(0),
     _biased_lock_entry_count(0),
     _anonymously_biased_lock_entry_count(0),
     _rebiased_lock_entry_count(0),
     _revoked_lock_entry_count(0),
+    _handshakes_count(0),
     _fast_path_entry_count(0),
     _slow_path_entry_count(0) {}
 
   int slow_path_entry_count() const; // Compute this field if necessary
 
   int* total_entry_count_addr()                   { return &_total_entry_count; }
   int* biased_lock_entry_count_addr()             { return &_biased_lock_entry_count; }
   int* anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
   int* rebiased_lock_entry_count_addr()           { return &_rebiased_lock_entry_count; }
   int* revoked_lock_entry_count_addr()            { return &_revoked_lock_entry_count; }
+  int* handshakes_count_addr()                    { return &_handshakes_count; }
   int* fast_path_entry_count_addr()               { return &_fast_path_entry_count; }
   int* slow_path_entry_count_addr()               { return &_slow_path_entry_count; }
 
   bool nonzero() { return _total_entry_count > 0; }
 
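The new handshakes counter follows the same pattern as the existing ones: each field is paired with an *_addr() accessor so the counter can be bumped through a raw int*, for example from generated stub code, without going through member functions. A trimmed-down, stand-alone illustration of that pattern (MiniCounters is invented for this sketch and is not BiasedLockingCounters):

#include <cstdio>

// Trimmed-down stand-in for BiasedLockingCounters: only the two counters
// needed for the illustration, each exposed through an address accessor.
struct MiniCounters {
  int _total_entry_count;
  int _handshakes_count;
  int* total_entry_count_addr() { return &_total_entry_count; }
  int* handshakes_count_addr()  { return &_handshakes_count; }
};

int main() {
  MiniCounters c = { 0, 0 };
  // The addresses would normally be captured once, e.g. when code is emitted;
  // here we just grab them and increment through them like a stub would.
  int* total      = c.total_entry_count_addr();
  int* handshakes = c.handshakes_count_addr();
  for (int i = 0; i < 5; i++) {
    (*total)++;                     // one per monitor-enter attempt
  }
  (*handshakes)++;                  // one per revocation done via handshake
  std::printf("total=%d handshakes=%d\n",
              c._total_entry_count, c._handshakes_count);  // total=5 handshakes=1
  return 0;
}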
@@ -144,28 +147,40 @@
   void print() const;
 };
 
 
 class BiasedLocking : AllStatic {
+friend class VM_BulkRevokeBias;
+friend class RevokeOneBias;
+
 private:
   static BiasedLockingCounters _counters;
 
 public:
   static int* total_entry_count_addr();
   static int* biased_lock_entry_count_addr();
   static int* anonymously_biased_lock_entry_count_addr();
   static int* rebiased_lock_entry_count_addr();
   static int* revoked_lock_entry_count_addr();
+  static int* handshakes_count_addr();
   static int* fast_path_entry_count_addr();
   static int* slow_path_entry_count_addr();
 
   enum Condition {
     NOT_BIASED = 1,
     BIAS_REVOKED = 2,
-    BIAS_REVOKED_AND_REBIASED = 3
+    BIAS_REVOKED_AND_REBIASED = 3,
+    NOT_REVOKED = 4
   };
 
+private:
+  static Condition single_revoke_at_safepoint(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requester, JavaThread** biaser);
+  static Condition bulk_revoke_or_rebias_at_safepoint(oop o, bool bulk_rebias, bool attempt_rebias, JavaThread* requester);
+  static Condition single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser);
+  static void walk_stack_and_revoke(oop obj, JavaThread* biased_locker);
+
+public:
   // This initialization routine should only be called once and
   // schedules a PeriodicTask to turn on biased locking a few seconds
   // into the VM run to avoid startup time regressions
   static void init();
 
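Callers of the revocation entry points above branch on the returned Condition, and the changeset adds NOT_REVOKED for requests that did not result in a revocation. The sketch below is a stand-alone illustration of such a dispatch with a stubbed-out revocation call; the exact semantics of each value live in biasedLocking.cpp, so NOT_REVOKED is simply treated here as "nothing changed, retry or take the slow path".

#include <cassert>

// Mirror of the Condition enum from the header above.
enum Condition {
  NOT_BIASED                = 1,
  BIAS_REVOKED              = 2,
  BIAS_REVOKED_AND_REBIASED = 3,
  NOT_REVOKED               = 4
};

// Stub standing in for a revocation entry point such as
// single_revoke_with_handshake(); it is not the HotSpot implementation.
static Condition revoke_stub(bool attempt_rebias, bool revocation_completed) {
  if (!revocation_completed) return NOT_REVOKED;
  return attempt_rebias ? BIAS_REVOKED_AND_REBIASED : BIAS_REVOKED;
}

// How a caller might interpret the result before falling back to the
// regular CAS-based locking path.
static bool owns_lock_after_revoke(Condition cond) {
  switch (cond) {
    case BIAS_REVOKED_AND_REBIASED:
      return true;    // object is now biased toward the requesting thread
    case NOT_BIASED:
    case BIAS_REVOKED:
      return false;   // continue with stack locking / monitor inflation
    case NOT_REVOKED:
      return false;   // nothing changed; the caller retries or goes slow path
  }
  return false;
}

int main() {
  assert(owns_lock_after_revoke(revoke_stub(true, true)));
  assert(!owns_lock_after_revoke(revoke_stub(false, true)));
  assert(!owns_lock_after_revoke(revoke_stub(true, false)));
  return 0;
}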
@@ -176,11 +191,11 @@
   // This should be called by JavaThreads to revoke the bias of an object
   static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);
 
   // These do not allow rebiasing; they are used by deoptimization to
   // ensure that monitors on the stack can be migrated
-  static void revoke(GrowableArray<Handle>* objs);
+  static void revoke(GrowableArray<Handle>* objs, JavaThread *biaser);
   static void revoke_at_safepoint(Handle obj);
   static void revoke_at_safepoint(GrowableArray<Handle>* objs);
 
   static void print_counters() { _counters.print(); }
   static BiasedLockingCounters* counters() { return &_counters; }
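The public API keeps two flavours of revocation, matching the policy in the big comment at the top of the file: revoke the bias via a handshake with the biasing JavaThread in the normal case, or do it directly when the VM is already at a safepoint (revoke_at_safepoint). A stand-alone sketch of that decision follows; at_safepoint, revoke_directly and revoke_via_handshake are stand-ins invented for the sketch, not HotSpot calls.

#include <cstdio>

// Stubbed predicate and actions; in HotSpot these would be the safepoint
// check and the revocation entry points declared in this header.
static bool at_safepoint = false;

static void revoke_directly()      { std::puts("revoked inside the current safepoint"); }
static void revoke_via_handshake() { std::puts("revoked via a handshake with the biaser"); }

// Policy sketch: if we are already inside a safepoint, every thread is stopped
// anyway, so the bias can be revoked on the spot; otherwise only the biasing
// JavaThread needs to be brought to a well-defined state, which a handshake
// achieves without a global safepoint.
static void revoke_bias() {
  if (at_safepoint) {
    revoke_directly();
  } else {
    revoke_via_handshake();
  }
}

int main() {
  revoke_bias();            // handshake path
  at_safepoint = true;
  revoke_bias();            // safepoint path
  return 0;
}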