hotspot/src/share/vm/prims/jvmtiImpl.hpp
changeset 46438 b093c3f1ab3d
parent 38091 d7e51f40ba2d
child 46630 75aa3e39d02c
comparing 46436:755e01cd0b9d with 46438:b093c3f1ab3d
--- a/hotspot/src/share/vm/prims/jvmtiImpl.hpp
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.hpp
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -495,11 +495,11 @@
 };
 
 /**
  * Events enqueued on this queue wake up the Service thread which dequeues
  * and posts the events.  The Service_lock is required to be held
- * when operating on the queue (except for the "pending" events).
+ * when operating on the queue.
  */
 class JvmtiDeferredEventQueue : AllStatic {
   friend class JvmtiDeferredEvent;
  private:
   class QueueNode : public CHeapObj<mtInternal> {
@@ -517,28 +517,16 @@
     void set_next(QueueNode* next) { _next = next; }
   };
 
   static QueueNode* _queue_head;             // Hold Service_lock to access
   static QueueNode* _queue_tail;             // Hold Service_lock to access
-  static volatile QueueNode* _pending_list;  // Uses CAS for read/update
-
-  // Transfers events from the _pending_list to the _queue.
-  static void process_pending_events() NOT_JVMTI_RETURN;
 
  public:
   // Must be holding Service_lock when calling these
   static bool has_events() NOT_JVMTI_RETURN_(false);
   static void enqueue(const JvmtiDeferredEvent& event) NOT_JVMTI_RETURN;
   static JvmtiDeferredEvent dequeue() NOT_JVMTI_RETURN_(JvmtiDeferredEvent());
-
-  // Used to enqueue events without using a lock, for times (such as during
-  // safepoint) when we can't or don't want to lock the Service_lock.
-  //
-  // Events will be held off to the side until there's a call to
-  // dequeue(), enqueue(), or process_pending_events() (all of which require
-  // the holding of the Service_lock), and will be enqueued at that time.
-  static void add_pending_event(const JvmtiDeferredEvent&) NOT_JVMTI_RETURN;
 };
 
 // Utility macro that checks for NULL pointers:
 #define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); }
 
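The class comment and the surviving declarations imply a simple consumer protocol: take Service_lock, wait until has_events() reports work, dequeue while still holding the lock, and post the event only after the lock is released. Below is a minimal sketch of that drain loop, assuming HotSpot's MutexLockerEx and Service_lock primitives and a JvmtiDeferredEvent::post() member, none of which appear in this hunk; it is illustrative only, not the actual ServiceThread code.

// Sketch only: a hypothetical consumer of JvmtiDeferredEventQueue after this
// change. MutexLockerEx, Service_lock and JvmtiDeferredEvent::post() are
// assumed from the rest of the code base, not shown in this diff.
#include "prims/jvmtiImpl.hpp"
#include "runtime/mutexLocker.hpp"

static void drain_one_deferred_event() {
  JvmtiDeferredEvent event;
  {
    // All queue operations happen under Service_lock, per the class comment.
    MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
    while (!JvmtiDeferredEventQueue::has_events()) {
      Service_lock->wait(Mutex::_no_safepoint_check_flag);
    }
    event = JvmtiDeferredEventQueue::dequeue();
  }
  // Posting may run arbitrary agent callbacks, so it is done after
  // Service_lock has been released.
  event.post();
}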