hotspot/src/share/vm/runtime/thread.hpp
changeset 26684 d1221849ea3d
parent 26683 a02753d5a0b2
child 26834 41332d860d6a
26683:a02753d5a0b2 26684:d1221849ea3d
   176   // osThread.hpp).
   176   // osThread.hpp).
   177   //
   177   //
   178   // 2. It would be more natural if set_external_suspend() is private and
   178   // 2. It would be more natural if set_external_suspend() is private and
   179   // part of java_suspend(), but that probably would affect the suspend/query
   179   // part of java_suspend(), but that probably would affect the suspend/query
   180   // performance. Need more investigation on this.
   180   // performance. Need more investigation on this.
   181   //
       
   182 
   181 
   183   // suspend/resume lock: used for self-suspend
   182   // suspend/resume lock: used for self-suspend
   184   Monitor* _SR_lock;
   183   Monitor* _SR_lock;
   185 
   184 
   186  protected:
   185  protected:
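The comment above refers to the two-step external suspend protocol: a suspender first sets the request flag under the target's SR_lock, then calls java_suspend() to drive the target to a suspended state. A minimal caller-side sketch of that pattern (the function and the `target` name are illustrative, not part of this file):

    // Illustrative caller-side pattern for the external suspend request
    // described above; duplicate-request checks and error handling omitted.
    void request_external_suspend(JavaThread* target) {
      {
        // Set the request flag under the target's suspend/resume lock.
        MutexLockerEx ml(target->SR_lock(), Mutex::_no_safepoint_check_flag);
        target->set_external_suspend();
      }
      // Drive the target thread toward a suspended state.
      target->java_suspend();
    }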
   512   size_t  stack_size() const           { return _stack_size; }
   511   size_t  stack_size() const           { return _stack_size; }
   513   void    set_stack_size(size_t size)  { _stack_size = size; }
   512   void    set_stack_size(size_t size)  { _stack_size = size; }
   514   void    record_stack_base_and_size();
   513   void    record_stack_base_and_size();
   515 
   514 
   516   bool    on_local_stack(address adr) const {
   515   bool    on_local_stack(address adr) const {
   517     /* QQQ this has knowledge of direction, ought to be a stack method */
   516     // QQQ this has knowledge of direction, ought to be a stack method
   518     return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
   517     return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
   519   }
   518   }
   520 
   519 
   521   uintptr_t self_raw_id()                    { return _self_raw_id; }
   520   uintptr_t self_raw_id()                    { return _self_raw_id; }
   522   void      set_self_raw_id(uintptr_t value) { _self_raw_id = value; }
   521   void      set_self_raw_id(uintptr_t value) { _self_raw_id = value; }
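on_local_stack() encodes a downward-growing stack: a thread's stack occupies the closed interval [_stack_base - _stack_size, _stack_base]. A worked check with arbitrary example values:

    // Hypothetical values, just to show the containment test:
    address base = (address)0x70100000;        // high end of the stack
    size_t  size = 0x100000;                   // 1 MB stack
    address adr  = (address)0x700ff000;        // an address to classify
    bool on_stack = (base >= adr) && (adr >= base - size);   // true here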
   622 // ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
   621 // ThreadLocalStorage::thread is warm -- it's called > 16K times in the same
   623 // period.   This is inlined in thread_<os_family>.inline.hpp.
   622 // period.   This is inlined in thread_<os_family>.inline.hpp.
   624 
   623 
   625 inline Thread* Thread::current() {
   624 inline Thread* Thread::current() {
   626 #ifdef ASSERT
   625 #ifdef ASSERT
   627 // This function is very high traffic. Define PARANOID to enable expensive
   626   // This function is very high traffic. Define PARANOID to enable expensive
   628 // asserts.
   627   // asserts.
   629 #ifdef PARANOID
   628 #ifdef PARANOID
   630   // Signal handler should call ThreadLocalStorage::get_thread_slow()
   629   // Signal handler should call ThreadLocalStorage::get_thread_slow()
   631   Thread* t = ThreadLocalStorage::get_thread_slow();
   630   Thread* t = ThreadLocalStorage::get_thread_slow();
   632   assert(t != NULL && !t->is_inside_signal_handler(),
   631   assert(t != NULL && !t->is_inside_signal_handler(),
   633          "Don't use Thread::current() inside signal handler");
   632          "Don't use Thread::current() inside signal handler");
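The PARANOID-guarded assert enforces the rule stated in its message: signal-handling code must not use the fast TLS path behind Thread::current(). A sketch of a compliant handler (the handler itself is hypothetical):

    #include <signal.h>

    // Hypothetical signal handler honoring the rule checked above:
    static void crash_reporting_handler(int sig, siginfo_t* info, void* uc) {
      // Do NOT call Thread::current() here; use the slow, handler-safe lookup.
      Thread* t = ThreadLocalStorage::get_thread_slow();
      if (t != NULL) {
        // ... inspect thread state for error reporting ...
      }
    }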
   841   // suspend/resume support
   840   // suspend/resume support
   842   volatile bool         _suspend_equivalent;     // Suspend equivalent condition
   841   volatile bool         _suspend_equivalent;     // Suspend equivalent condition
   843   jint                  _in_deopt_handler;       // count of deoptimization
   842   jint                  _in_deopt_handler;       // count of deoptimization
   844                                                  // handlers thread is in
   843                                                  // handlers thread is in
   845   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
   844   volatile bool         _doing_unsafe_access;    // Thread may fault due to unsafe access
   846   bool                  _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
   845   bool                  _do_not_unlock_if_synchronized;  // Do not unlock the receiver of a synchronized method (since it was
   847   // never locked) when throwing an exception. Used by interpreter only.
   846                                                          // never locked) when throwing an exception. Used by interpreter only.
   848 
   847 
   849   // JNI attach states:
   848   // JNI attach states:
   850   enum JNIAttachStates {
   849   enum JNIAttachStates {
   851     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
   850     _not_attaching_via_jni = 1,  // thread is not attaching via JNI
   852     _attaching_via_jni,          // thread is attaching via JNI
   851     _attaching_via_jni,          // thread is attaching via JNI
   902     intptr_t _target;
   901     intptr_t _target;
   903     intptr_t _instruction;
   902     intptr_t _instruction;
   904     const char*  _file;
   903     const char*  _file;
   905     int _line;
   904     int _line;
   906   }   _jmp_ring[jump_ring_buffer_size];
   905   }   _jmp_ring[jump_ring_buffer_size];
   907 #endif /* PRODUCT */
   906 #endif // PRODUCT
   908 
   907 
   909 #if INCLUDE_ALL_GCS
   908 #if INCLUDE_ALL_GCS
   910   // Support for G1 barriers
   909   // Support for G1 barriers
   911 
   910 
   912   ObjPtrQueue _satb_mark_queue;          // Thread-local log for SATB barrier.
   911   ObjPtrQueue _satb_mark_queue;          // Thread-local log for SATB barrier.
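The _jmp_ring array above is a fixed-size ring of recently recorded jumps, kept only in non-product builds. A standalone illustration of the ring-buffer idea (the capacity of 16 and the update logic are assumptions for the example, not taken from thread.cpp):

    // Standalone ring-buffer sketch mirroring the entry layout above.
    struct JumpEntry {
      intptr_t    _target;
      intptr_t    _instruction;
      const char* _file;
      int         _line;
    };
    static JumpEntry ring[16];       // example capacity
    static int       ring_index = 0;
    static void record(intptr_t target, intptr_t instr, const char* file, int line) {
      JumpEntry& e = ring[ring_index];
      e._target = target; e._instruction = instr; e._file = file; e._line = line;
      ring_index = (ring_index + 1) % 16;   // newest entry overwrites the oldest
    }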
  1069   bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
  1068   bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
  1070     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
  1069     MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
  1071     // Warning: is_ext_suspend_completed() may temporarily drop the
  1070     // Warning: is_ext_suspend_completed() may temporarily drop the
  1072     // SR_lock to allow the thread to reach a stable thread state if
  1071     // SR_lock to allow the thread to reach a stable thread state if
  1073     // it is currently in a transient thread state.
  1072     // it is currently in a transient thread state.
  1074     return is_ext_suspend_completed(false /*!called_by_wait */,
  1073     return is_ext_suspend_completed(false /* !called_by_wait */,
  1075                                     SuspendRetryDelay, bits);
  1074                                     SuspendRetryDelay, bits);
  1076   }
  1075   }
  1077 
  1076 
  1078   // We cannot allow wait_for_ext_suspend_completion() to run forever or
  1077   // We cannot allow wait_for_ext_suspend_completion() to run forever or
  1079   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
  1078   // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
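The comment above motivates a bounded wait: the suspender polls the completion check and gives up after SuspendRetryCount attempts rather than hanging. A sketch of that shape (not the actual wait_for_ext_suspend_completion() body):

    // Bounded-retry sketch; the real implementation waits on SR_lock and keeps
    // more careful track of the returned debug bits.
    bool wait_for_suspend_sketch(JavaThread* jt, uint32_t* bits) {
      for (intx i = 0; i < SuspendRetryCount; i++) {
        if (jt->is_ext_suspend_completed_with_lock(bits)) {
          return true;                               // target is safely suspended
        }
        os::sleep(Thread::current(), SuspendRetryDelay, false);  // back off, retry
      }
      return false;                                  // give up instead of hanging
    }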
  1094     return (_suspend_flags & _external_suspend) != 0;
  1093     return (_suspend_flags & _external_suspend) != 0;
  1095   }
  1094   }
  1096   // Whenever a thread transitions from native to vm/java it must suspend
  1095   // Whenever a thread transitions from native to vm/java it must suspend
  1097   // if external|deopt suspend is present.
  1096   // if external|deopt suspend is present.
  1098   bool is_suspend_after_native() const {
  1097   bool is_suspend_after_native() const {
  1099     return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
  1098     return (_suspend_flags & (_external_suspend | _deopt_suspend)) != 0;
  1100   }
  1099   }
  1101 
  1100 
  1102   // external suspend request is completed
  1101   // external suspend request is completed
  1103   bool is_ext_suspended() const {
  1102   bool is_ext_suspended() const {
  1104     return (_suspend_flags & _ext_suspended) != 0;
  1103     return (_suspend_flags & _ext_suspended) != 0;
  1135     return is_ext_suspended() || is_external_suspend();
  1134     return is_ext_suspended() || is_external_suspend();
  1136   }
  1135   }
  1137 
  1136 
  1138   bool is_suspend_equivalent() const             { return _suspend_equivalent; }
  1137   bool is_suspend_equivalent() const             { return _suspend_equivalent; }
  1139 
  1138 
  1140   void set_suspend_equivalent()                  { _suspend_equivalent = true; };
  1139   void set_suspend_equivalent()                  { _suspend_equivalent = true; }
  1141   void clear_suspend_equivalent()                { _suspend_equivalent = false; };
  1140   void clear_suspend_equivalent()                { _suspend_equivalent = false; }
  1142 
  1141 
  1143   // Thread.stop support
  1142   // Thread.stop support
  1144   void send_thread_stop(oop throwable);
  1143   void send_thread_stop(oop throwable);
  1145   AsyncRequests clear_special_runtime_exit_condition() {
  1144   AsyncRequests clear_special_runtime_exit_condition() {
  1146     AsyncRequests x = _special_runtime_exit_condition;
  1145     AsyncRequests x = _special_runtime_exit_condition;
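is_suspend_after_native() packages the flag test performed when a thread returns from native code: if either request bit is set, the thread must park itself before running VM or Java code. A hedged sketch of such a call site (the function name is illustrative; the real logic lives in the thread-state transition code and also folds in safepoint polling):

    void on_return_from_native(JavaThread* thread) {
      if (thread->is_suspend_after_native()) {
        // An external or deopt suspend request is pending: self-suspend now,
        // before any Java or VM work is done on this thread.
        thread->java_suspend_self();
      }
    }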
  1236     set_exception_pc(NULL);
  1235     set_exception_pc(NULL);
  1237   }
  1236   }
  1238 
  1237 
  1239   // Stack overflow support
  1238   // Stack overflow support
  1240   inline size_t stack_available(address cur_sp);
  1239   inline size_t stack_available(address cur_sp);
  1241   address stack_yellow_zone_base()
  1240   address stack_yellow_zone_base() {
  1242     { return (address)(stack_base() - (stack_size() - (stack_red_zone_size() + stack_yellow_zone_size()))); }
  1241     return (address)(stack_base() -
  1243   size_t  stack_yellow_zone_size()
  1242                      (stack_size() -
  1244     { return StackYellowPages * os::vm_page_size(); }
  1243                      (stack_red_zone_size() + stack_yellow_zone_size())));
  1245   address stack_red_zone_base()
  1244   }
  1246     { return (address)(stack_base() - (stack_size() - stack_red_zone_size())); }
  1245   size_t  stack_yellow_zone_size() {
  1247   size_t stack_red_zone_size()
  1246     return StackYellowPages * os::vm_page_size();
  1248     { return StackRedPages * os::vm_page_size(); }
  1247   }
  1249   bool in_stack_yellow_zone(address a)
  1248   address stack_red_zone_base() {
  1250     { return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base()); }
  1249     return (address)(stack_base() - (stack_size() - stack_red_zone_size()));
  1251   bool in_stack_red_zone(address a)
  1250   }
  1252     { return (a <= stack_red_zone_base()) && (a >= (address)((intptr_t)stack_base() - stack_size())); }
  1251   size_t stack_red_zone_size() { return StackRedPages * os::vm_page_size(); }
       
  1252   bool in_stack_yellow_zone(address a) {
       
  1253     return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base());
       
  1254   }
       
  1255   bool in_stack_red_zone(address a) {
       
  1256     return (a <= stack_red_zone_base()) &&
       
  1257            (a >= (address)((intptr_t)stack_base() - stack_size()));
       
  1258   }
  1253 
  1259 
  1254   void create_stack_guard_pages();
  1260   void create_stack_guard_pages();
  1255   void remove_stack_guard_pages();
  1261   void remove_stack_guard_pages();
  1256 
  1262 
  1257   void enable_stack_yellow_zone();
  1263   void enable_stack_yellow_zone();
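The guard zones computed above sit at the low end of a downward-growing stack: the red zone at the very bottom, the yellow zone just above it. A worked example with illustrative values (4 KB pages, StackRedPages = 1, StackYellowPages = 2; the numbers are made up):

    const size_t    page        = 4096;
    const uintptr_t stack_base  = 0x70100000;                 // high end
    const size_t    stack_size  = 0x100000;                   // 1 MB
    const size_t    red_size    = 1 * page;                   // 0x1000
    const size_t    yellow_size = 2 * page;                   // 0x2000
    const uintptr_t stack_low   = stack_base - stack_size;                              // 0x70000000
    const uintptr_t red_base    = stack_base - (stack_size - red_size);                 // 0x70001000
    const uintptr_t yellow_base = stack_base - (stack_size - (red_size + yellow_size)); // 0x70003000
    // in_stack_red_zone(a):    0x70000000 <= a <= 0x70001000
    // in_stack_yellow_zone(a): 0x70001000 <= a <= 0x70003000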
  1287   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
  1293   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
  1288   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
  1294   bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }
  1289 
  1295 
  1290 #ifndef PRODUCT
  1296 #ifndef PRODUCT
  1291   void record_jump(address target, address instr, const char* file, int line);
  1297   void record_jump(address target, address instr, const char* file, int line);
  1292 #endif /* PRODUCT */
  1298 #endif // PRODUCT
  1293 
  1299 
  1294   // For assembly stub generation
  1300   // For assembly stub generation
  1295   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
  1301   static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj); }
  1296 #ifndef PRODUCT
  1302 #ifndef PRODUCT
  1297   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
  1303   static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index); }
  1298   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
  1304   static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring); }
  1299 #endif /* PRODUCT */
  1305 #endif // PRODUCT
  1300   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
  1306   static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment); }
  1301   static ByteSize last_Java_sp_offset() {
  1307   static ByteSize last_Java_sp_offset() {
  1302     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  1308     return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  1303   }
  1309   }
  1304   static ByteSize last_Java_pc_offset() {
  1310   static ByteSize last_Java_pc_offset() {
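The ByteSize accessors above exist so generated stubs and compiled code can address JavaThread fields by displacement from the thread pointer. A hypothetical C++ equivalent of what such a stub does with threadObj_offset():

    // Hypothetical: read the field through its byte offset rather than the
    // accessor, the way generated code addresses it from the thread pointer.
    oop read_threadObj_by_offset(JavaThread* jt) {
      char* raw = (char*)jt + in_bytes(JavaThread::threadObj_offset());
      return *(oop*)raw;     // the same slot the threadObj() getter returns
    }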
  1347   }
  1353   }
  1348 
  1354 
  1349   // JNI critical regions. These can nest.
  1355   // JNI critical regions. These can nest.
  1350   bool in_critical()    { return _jni_active_critical > 0; }
  1356   bool in_critical()    { return _jni_active_critical > 0; }
  1351   bool in_last_critical()  { return _jni_active_critical == 1; }
  1357   bool in_last_critical()  { return _jni_active_critical == 1; }
  1352   void enter_critical() { assert(Thread::current() == this ||
  1358   void enter_critical() {
  1353                                  Thread::current()->is_VM_thread() && SafepointSynchronize::is_synchronizing(),
  1359     assert(Thread::current() == this ||
  1354                                  "this must be current thread or synchronizing");
  1360            (Thread::current()->is_VM_thread() &&
  1355   _jni_active_critical++; }
  1361            SafepointSynchronize::is_synchronizing()),
  1356   void exit_critical()  { assert(Thread::current() == this,
  1362            "this must be current thread or synchronizing");
  1357                                  "this must be current thread");
  1363     _jni_active_critical++;
  1358   _jni_active_critical--;
  1364   }
  1359   assert(_jni_active_critical >= 0,
  1365   void exit_critical() {
  1360          "JNI critical nesting problem?"); }
  1366     assert(Thread::current() == this, "this must be current thread");
       
  1367     _jni_active_critical--;
       
  1368     assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
       
  1369   }
  1361 
  1370 
  1362   // Checked JNI, is the programmer required to check for exceptions, specify which function name
  1371   // Checked JNI, is the programmer required to check for exceptions, specify which function name
  1363   bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
  1372   bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
  1364   void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
  1373   void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
  1365   const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
  1374   const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
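The _jni_active_critical counter tracks nesting of JNI critical regions; the critical-array JNI entry points end up calling these in balanced pairs. A sketch of the nesting the counter models (counts shown as comments):

    thread->enter_critical();   // outer region:  0 -> 1, in_critical() becomes true
    thread->enter_critical();   // nested region: 1 -> 2
    // ... raw access to array/string contents while critical ...
    thread->exit_critical();    // 2 -> 1, in_last_critical() is now true
    thread->exit_critical();    // 1 -> 0, thread has left its last critical region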
  1404 
  1413 
  1405   // Misc. operations
  1414   // Misc. operations
  1406   char* name() const { return (char*)get_thread_name(); }
  1415   char* name() const { return (char*)get_thread_name(); }
  1407   void print_on(outputStream* st) const;
  1416   void print_on(outputStream* st) const;
  1408   void print_value();
  1417   void print_value();
  1409   void print_thread_state_on(outputStream* ) const      PRODUCT_RETURN;
  1418   void print_thread_state_on(outputStream*) const      PRODUCT_RETURN;
  1410   void print_thread_state() const                       PRODUCT_RETURN;
  1419   void print_thread_state() const                      PRODUCT_RETURN;
  1411   void print_on_error(outputStream* st, char* buf, int buflen) const;
  1420   void print_on_error(outputStream* st, char* buf, int buflen) const;
  1412   void verify();
  1421   void verify();
  1413   const char* get_thread_name() const;
  1422   const char* get_thread_name() const;
  1414  private:
  1423  private:
  1415   // factor out low-level mechanics for use in both normal and error cases
  1424   // factor out low-level mechanics for use in both normal and error cases
  1764   // Get/set the thread's compilation environment.
  1773   // Get/set the thread's compilation environment.
  1765   ciEnv*        env()                            { return _env; }
  1774   ciEnv*        env()                            { return _env; }
  1766   void          set_env(ciEnv* env)              { _env = env; }
  1775   void          set_env(ciEnv* env)              { _env = env; }
  1767 
  1776 
  1768   BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
  1777   BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
  1769   void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
  1778   void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; }
  1770 
  1779 
  1771   // Get/set the thread's logging information
  1780   // Get/set the thread's logging information
  1772   CompileLog*   log()                            { return _log; }
  1781   CompileLog*   log()                            { return _log; }
  1773   void          init_log(CompileLog* log) {
  1782   void          init_log(CompileLog* log) {
  1774     // Set once, for good.
  1783     // Set once, for good.
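init_log() is documented as set-once ("Set once, for good."); the rest of its body falls outside this hunk. The convention, sketched as a standalone helper (the assert message is a guess, not copied from thread.hpp):

    // Set-once convention, illustrated independently of CompilerThread:
    static void set_once(CompileLog*& slot, CompileLog* log) {
      assert(slot == NULL, "compile log should only be set once");
      slot = log;
    }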