src/hotspot/share/gc/shared/ptrQueue.hpp
changeset 52582 6df094be7f58
parent 51441 2e91d927e00c
child 52637 0877040ec224
--- a/src/hotspot/share/gc/shared/ptrQueue.hpp	(52581:d402a406bbc3)
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp	(52582:6df094be7f58)
@@ -25,10 +25,12 @@
 #ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
 #define SHARE_GC_SHARED_PTRQUEUE_HPP
 
 #include "utilities/align.hpp"
 #include "utilities/sizes.hpp"
+
+class Mutex;
 
 // There are various techniques that require threads to be able to log
 // addresses.  For example, a generational write barrier might log
 // the addresses of modified old-generation objects.  This type supports
 // this operation.
@@ -221,21 +223,22 @@
 
   static size_t buffer_offset() {
     return offset_of(BufferNode, _buffer);
   }
 
+AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
+  // Allocate a new BufferNode with the "buffer" having size elements.
+  static BufferNode* allocate(size_t size);
+
+  // Free a BufferNode.
+  static void deallocate(BufferNode* node);
+
 public:
   BufferNode* next() const     { return _next;  }
   void set_next(BufferNode* n) { _next = n;     }
   size_t index() const         { return _index; }
   void set_index(size_t i)     { _index = i; }
-
-  // Allocate a new BufferNode with the "buffer" having size elements.
-  static BufferNode* allocate(size_t size);
-
-  // Free a BufferNode.
-  static void deallocate(BufferNode* node);
 
   // Return the BufferNode containing the buffer, after setting its index.
   static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
     BufferNode* node =
       reinterpret_cast<BufferNode*>(
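The conversions in this hunk rely on the buffer storage starting at a fixed offset inside the BufferNode, so a node and its buffer can be recovered from one another by pointer arithmetic. The following standalone sketch illustrates that layout trick; the names are hypothetical and this is not the HotSpot implementation:

#include <cstddef>
#include <cstdlib>

// Standalone illustration of the header-plus-trailing-buffer layout;
// DemoNode is a made-up name, not the HotSpot BufferNode.
struct DemoNode {
  DemoNode* _next;
  size_t    _index;
  void*     _buffer[1];   // really "size" elements, allocated past the header

  static size_t buffer_offset() { return offsetof(DemoNode, _buffer); }

  // One allocation holds the header followed by "size" payload slots.
  static DemoNode* allocate(size_t size) {
    return static_cast<DemoNode*>(::malloc(buffer_offset() + size * sizeof(void*)));
  }

  // Mirror of make_buffer_from_node(): the payload starts at buffer_offset().
  void** buffer() {
    return reinterpret_cast<void**>(reinterpret_cast<char*>(this) + buffer_offset());
  }

  // Mirror of make_node_from_buffer(): step back from the payload to the header.
  static DemoNode* from_buffer(void** buffer) {
    return reinterpret_cast<DemoNode*>(reinterpret_cast<char*>(buffer) - buffer_offset());
  }
};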
@@ -248,37 +251,45 @@
   static void** make_buffer_from_node(BufferNode *node) {
     // &_buffer[0] might lead to index out of bounds warnings.
     return reinterpret_cast<void**>(
       reinterpret_cast<char*>(node) + buffer_offset());
   }
+
+  // Free-list based allocator.
+  class Allocator {
+    size_t _buffer_size;
+    Mutex* _lock;
+    BufferNode* _free_list;
+    volatile size_t _free_count;
+
+  public:
+    Allocator(size_t buffer_size, Mutex* lock);
+    ~Allocator();
+
+    size_t buffer_size() const { return _buffer_size; }
+    size_t free_count() const;
+    BufferNode* allocate();
+    void release(BufferNode* node);
+    void reduce_free_list();
+  };
 };
 
 // A PtrQueueSet represents resources common to a set of pointer queues.
 // In particular, the individual queues allocate buffers from this shared
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet {
-  // The size of all buffers in the set.
-  size_t _buffer_size;
+  BufferNode::Allocator* _allocator;
 
 protected:
   Monitor* _cbl_mon;  // Protects the fields below.
   BufferNode* _completed_buffers_head;
   BufferNode* _completed_buffers_tail;
   size_t _n_completed_buffers;
   int _process_completed_threshold;
   volatile bool _process_completed;
 
-  // This (and the interpretation of the first element as a "next"
-  // pointer) are protected by the TLOQ_FL_lock.
-  Mutex* _fl_lock;
-  BufferNode* _buf_free_list;
-  size_t _buf_free_list_sz;
-  // Queue set can share a freelist. The _fl_owner variable
-  // specifies the owner. It is set to "this" by default.
-  PtrQueueSet* _fl_owner;
-
   bool _all_active;
 
   // If true, notify_all on _cbl_mon when the threshold is reached.
   bool _notify_when_complete;
 
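The new BufferNode::Allocator gathers the free-list state that the removed PtrQueueSet fields (_fl_lock, _buf_free_list, _buf_free_list_sz, _fl_owner) used to carry. A rough usage sketch against the declarations above; the surrounding function and the way the Mutex is obtained are illustrative only, not part of the changeset:

// Illustrative only: exercising the Allocator API declared above.
void allocator_example(Mutex* lock) {
  BufferNode::Allocator alloc(/* buffer_size */ 256, lock);

  BufferNode* node = alloc.allocate();          // reuse from the free list or allocate fresh
  void** buf = BufferNode::make_buffer_from_node(node);
  // ... fill buf[0 .. alloc.buffer_size() - 1] ...
  alloc.release(node);                          // return the node to the free list

  // Typically invoked at a safepoint when the free list should shrink.
  alloc.reduce_free_list();
}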
@@ -305,14 +316,13 @@
   ~PtrQueueSet();
 
   // Because of init-order concerns, we can't pass these as constructor
   // arguments.
   void initialize(Monitor* cbl_mon,
-                  Mutex* fl_lock,
+                  BufferNode::Allocator* allocator,
                   int process_completed_threshold,
-                  int max_completed_queue,
-                  PtrQueueSet *fl_owner = NULL);
+                  int max_completed_queue);
 
 public:
 
   // Return the buffer for a BufferNode of size buffer_size().
   void** allocate_buffer();
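With the free-list fields gone, initialize() now takes the shared BufferNode::Allocator in place of the fl_lock and fl_owner parameters. A hedged sketch of how a concrete queue set might forward to it; the subclass name and wrapper are hypothetical, not taken from the changeset:

// Hypothetical subclass, shown only to illustrate the new initialize() signature.
class DemoQueueSet : public PtrQueueSet {
public:
  void initialize(Monitor* cbl_mon,
                  BufferNode::Allocator* allocator,
                  int process_completed_threshold,
                  int max_completed_queue) {
    // The protected base-class initialize() is reachable from the subclass.
    PtrQueueSet::initialize(cbl_mon, allocator,
                            process_completed_threshold,
                            max_completed_queue);
  }
};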
@@ -334,28 +344,18 @@
   bool process_completed_buffers() { return _process_completed; }
   void set_process_completed(bool x) { _process_completed = x; }
 
   bool is_active() { return _all_active; }
 
-  // Set the buffer size.  Should be called before any "enqueue" operation
-  // can be called.  And should only be called once.
-  void set_buffer_size(size_t sz);
-
-  // Get the buffer size.  Must have been set.
   size_t buffer_size() const {
-    assert(_buffer_size > 0, "buffer size not set");
-    return _buffer_size;
+    return _allocator->buffer_size();
   }
 
   // Get/Set the number of completed buffers that triggers log processing.
   void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
   int process_completed_threshold() const { return _process_completed_threshold; }
 
-  // Must only be called at a safe point.  Indicates that the buffer free
-  // list size may be reduced, if that is deemed desirable.
-  void reduce_free_list();
-
   size_t completed_buffers_num() { return _n_completed_buffers; }
 
   void merge_bufferlists(PtrQueueSet* src);
 
   void set_max_completed_queue(int m) { _max_completed_queue = m; }
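Since buffer_size() now delegates to the allocator, set_buffer_size() and the _buffer_size field disappear from the queue set, and buffer allocation can route through the shared allocator as well. One plausible shape for the matching allocate_buffer() definition, sketched here for orientation and not copied from this changeset's ptrQueue.cpp:

// Sketch only; the actual ptrQueue.cpp change may differ in detail.
void** PtrQueueSet::allocate_buffer() {
  BufferNode* node = _allocator->allocate();
  return BufferNode::make_buffer_from_node(node);
}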