30 #include "oops/instanceKlass.hpp" |
30 #include "oops/instanceKlass.hpp" |
31 #include "prims/jvmtiAgentThread.hpp" |
31 #include "prims/jvmtiAgentThread.hpp" |
32 #include "prims/jvmtiEventController.inline.hpp" |
32 #include "prims/jvmtiEventController.inline.hpp" |
33 #include "prims/jvmtiImpl.hpp" |
33 #include "prims/jvmtiImpl.hpp" |
34 #include "prims/jvmtiRedefineClasses.hpp" |
34 #include "prims/jvmtiRedefineClasses.hpp" |
|
35 #include "runtime/atomic.hpp" |
35 #include "runtime/deoptimization.hpp" |
36 #include "runtime/deoptimization.hpp" |
36 #include "runtime/handles.hpp" |
37 #include "runtime/handles.hpp" |
37 #include "runtime/handles.inline.hpp" |
38 #include "runtime/handles.inline.hpp" |
38 #include "runtime/interfaceSupport.hpp" |
39 #include "runtime/interfaceSupport.hpp" |
39 #include "runtime/javaCalls.hpp" |
40 #include "runtime/javaCalls.hpp" |
|
41 #include "runtime/serviceThread.hpp" |
40 #include "runtime/signature.hpp" |
42 #include "runtime/signature.hpp" |
41 #include "runtime/vframe.hpp" |
43 #include "runtime/vframe.hpp" |
42 #include "runtime/vframe_hp.hpp" |
44 #include "runtime/vframe_hp.hpp" |
43 #include "runtime/vm_operations.hpp" |
45 #include "runtime/vm_operations.hpp" |
44 #include "utilities/exceptions.hpp" |
46 #include "utilities/exceptions.hpp" |
908 tty->print(") "); |
910 tty->print(") "); |
909 } |
911 } |
910 tty->print_cr("]"); |
912 tty->print_cr("]"); |
911 #endif |
913 #endif |
912 } |
914 } |
|
915 |
|
916 #ifndef KERNEL |
|
917 |
|
918 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event( |
|
919 nmethod* nm) { |
|
920 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD); |
|
921 event.set_compiled_method_load(nm); |
|
922 nmethodLocker::lock_nmethod(nm); // will be unlocked when posted |
|
923 return event; |
|
924 } |
|
925 |
|
926 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event( |
|
927 jmethodID id, const void* code) { |
|
928 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD); |
|
929 event.set_compiled_method_unload(id, code); |
|
930 return event; |
|
931 } |
|
932 |
|
933 void JvmtiDeferredEvent::post() { |
|
934 switch(_type) { |
|
935 case TYPE_COMPILED_METHOD_LOAD: |
|
936 JvmtiExport::post_compiled_method_load(compiled_method_load()); |
|
937 nmethodLocker::unlock_nmethod(compiled_method_load()); |
|
938 break; |
|
939 case TYPE_COMPILED_METHOD_UNLOAD: |
|
940 JvmtiExport::post_compiled_method_unload( |
|
941 compiled_method_unload_method_id(), |
|
942 compiled_method_unload_code_begin()); |
|
943 break; |
|
944 case TYPE_FLUSH: |
|
945 JvmtiDeferredEventQueue::flush_complete(flush_state_addr()); |
|
946 break; |
|
947 default: |
|
948 ShouldNotReachHere(); |
|
949 } |
|
950 } |
|
951 |
|
952 JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL; |
|
953 JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL; |
|
954 |
|
955 volatile JvmtiDeferredEventQueue::QueueNode* |
|
956 JvmtiDeferredEventQueue::_pending_list = NULL; |
|
957 |
|
958 bool JvmtiDeferredEventQueue::has_events() { |
|
959 assert(Service_lock->owned_by_self(), "Must own Service_lock"); |
|
960 return _queue_head != NULL || _pending_list != NULL; |
|
961 } |
|
962 |
|
963 void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) { |
|
964 assert(Service_lock->owned_by_self(), "Must own Service_lock"); |
|
965 |
|
966 process_pending_events(); |
|
967 |
|
968 // Events get added to the end of the queue (and are pulled off the front). |
|
969 QueueNode* node = new QueueNode(event); |
|
970 if (_queue_tail == NULL) { |
|
971 _queue_tail = _queue_head = node; |
|
972 } else { |
|
973 assert(_queue_tail->next() == NULL, "Must be the last element in the list"); |
|
974 _queue_tail->set_next(node); |
|
975 _queue_tail = node; |
|
976 } |
|
977 |
|
978 Service_lock->notify_all(); |
|
979 assert((_queue_head == NULL) == (_queue_tail == NULL), |
|
980 "Inconsistent queue markers"); |
|
981 } |
|
982 |
|
983 JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() { |
|
984 assert(Service_lock->owned_by_self(), "Must own Service_lock"); |
|
985 |
|
986 process_pending_events(); |
|
987 |
|
988 assert(_queue_head != NULL, "Nothing to dequeue"); |
|
989 |
|
990 if (_queue_head == NULL) { |
|
991 // Just in case this happens in product; it shouldn't but let's not crash |
|
992 return JvmtiDeferredEvent(); |
|
993 } |
|
994 |
|
995 QueueNode* node = _queue_head; |
|
996 _queue_head = _queue_head->next(); |
|
997 if (_queue_head == NULL) { |
|
998 _queue_tail = NULL; |
|
999 } |
|
1000 |
|
1001 assert((_queue_head == NULL) == (_queue_tail == NULL), |
|
1002 "Inconsistent queue markers"); |
|
1003 |
|
1004 JvmtiDeferredEvent event = node->event(); |
|
1005 delete node; |
|
1006 return event; |
|
1007 } |
|
1008 |
|
1009 void JvmtiDeferredEventQueue::add_pending_event( |
|
1010 const JvmtiDeferredEvent& event) { |
|
1011 |
|
1012 QueueNode* node = new QueueNode(event); |
|
1013 |
|
1014 bool success = false; |
|
1015 QueueNode* prev_value = (QueueNode*)_pending_list; |
|
1016 do { |
|
1017 node->set_next(prev_value); |
|
1018 prev_value = (QueueNode*)Atomic::cmpxchg_ptr( |
|
1019 (void*)node, (volatile void*)&_pending_list, (void*)node->next()); |
|
1020 } while (prev_value != node->next()); |
|
1021 } |
|
1022 |
|
1023 // This method transfers any events that were added by someone NOT holding |
|
1024 // the lock into the mainline queue. |
|
1025 void JvmtiDeferredEventQueue::process_pending_events() { |
|
1026 assert(Service_lock->owned_by_self(), "Must own Service_lock"); |
|
1027 |
|
1028 if (_pending_list != NULL) { |
|
1029 QueueNode* head = |
|
1030 (QueueNode*)Atomic::xchg_ptr(NULL, (volatile void*)&_pending_list); |
|
1031 |
|
1032 assert((_queue_head == NULL) == (_queue_tail == NULL), |
|
1033 "Inconsistent queue markers"); |
|
1034 |
|
1035 if (head != NULL) { |
|
1036 // Since we've treated the pending list as a stack (with newer |
|
1037 // events at the beginning), we need to join the bottom of the stack |
|
1038 // with the 'tail' of the queue in order to get the events in the |
|
1039 // right order. We do this by reversing the pending list and appending |
|
1040 // it to the queue. |
|
1041 |
|
1042 QueueNode* new_tail = head; |
|
1043 QueueNode* new_head = NULL; |
|
1044 |
|
1045 // This reverses the list |
|
1046 QueueNode* prev = new_tail; |
|
1047 QueueNode* node = new_tail->next(); |
|
1048 new_tail->set_next(NULL); |
|
1049 while (node != NULL) { |
|
1050 QueueNode* next = node->next(); |
|
1051 node->set_next(prev); |
|
1052 prev = node; |
|
1053 node = next; |
|
1054 } |
|
1055 new_head = prev; |
|
1056 |
|
1057 // Now append the new list to the queue |
|
1058 if (_queue_tail != NULL) { |
|
1059 _queue_tail->set_next(new_head); |
|
1060 } else { // _queue_head == NULL |
|
1061 _queue_head = new_head; |
|
1062 } |
|
1063 _queue_tail = new_tail; |
|
1064 } |
|
1065 } |
|
1066 } |
|
1067 |
|
// States of the per-flush 'flush_state' word.  The values are arbitrary
// magic numbers chosen to be recognizable while debugging.
enum {
  FLUSHING = 0x50403020,  // flush event queued, not yet posted
  FLUSHED  = 0x09080706   // service thread has posted the flush event
};
|
1073 |
|
1074 void JvmtiDeferredEventQueue::flush_queue(Thread* thread) { |
|
1075 |
|
1076 volatile int flush_state = FLUSHING; |
|
1077 |
|
1078 JvmtiDeferredEvent flush(JvmtiDeferredEvent::TYPE_FLUSH); |
|
1079 flush.set_flush_state_addr((int*)&flush_state); |
|
1080 |
|
1081 if (ServiceThread::is_service_thread(thread)) { |
|
1082 // If we are the service thread we have to post all preceding events |
|
1083 // Use the flush event as a token to indicate when we can stop |
|
1084 JvmtiDeferredEvent event; |
|
1085 { |
|
1086 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); |
|
1087 enqueue(flush); |
|
1088 event = dequeue(); |
|
1089 } |
|
1090 while (!event.is_flush_event() || |
|
1091 event.flush_state_addr() != &flush_state) { |
|
1092 event.post(); |
|
1093 { |
|
1094 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); |
|
1095 event = dequeue(); |
|
1096 } |
|
1097 } |
|
1098 } else { |
|
1099 // Wake up the service thread so it will process events. When it gets |
|
1100 // to the flush event it will set 'flush_complete' and notify us. |
|
1101 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); |
|
1102 enqueue(flush); |
|
1103 while (flush_state != FLUSHED) { |
|
1104 assert(flush_state == FLUSHING || flush_state == FLUSHED, |
|
1105 "only valid values for this"); |
|
1106 Service_lock->wait(Mutex::_no_safepoint_check_flag); |
|
1107 } |
|
1108 } |
|
1109 } |
|
1110 |
|
1111 void JvmtiDeferredEventQueue::flush_complete(int* state_addr) { |
|
1112 assert(state_addr != NULL && *state_addr == FLUSHING, "must be"); |
|
1113 MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); |
|
1114 *state_addr = FLUSHED; |
|
1115 Service_lock->notify_all(); |
|
1116 } |
|
1117 |
|
1118 #endif // ndef KERNEL |