hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
changeset 28831 454224c7e3ba
parent 27880 afb974a04396
child 29203 5024f7b3322c
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
...
 void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
     _use_cache = true;

-    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
+    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
     _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);

-    _n_hot = 0;
-    _hot_cache_idx = 0;
+    reset_hot_cache_internal();

     // For refining the cards in the hot cache in parallel
     _hot_cache_par_chunk_size = ClaimChunkSize;
     _hot_cache_par_claimed_idx = 0;
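A note on the cast introduced above: the literal 1 is an int, so (1 << G1ConcRSLogCacheSize) is computed at int width and only then widened to size_t, which overflows for shift counts of 31 or more; casting first performs the shift at size_t width and matches the type of _hot_cache_size. A minimal standalone sketch of the difference (the constant below is illustrative, not the HotSpot flag):

#include <cstddef>
#include <cstdio>

int main() {
  const unsigned log_cache_size = 33;          // hypothetical value, too large for an int shift

  // size_t bad = 1 << log_cache_size;         // shift done in int: overflows before widening
  size_t good = (size_t)1 << log_cache_size;   // shift done in size_t: 2^33 on an LP64 platform

  printf("cache entries: %zu\n", good);
  return 0;
}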
   if (!_card_counts.is_hot(count)) {
     // The card is not hot so do not store it in the cache;
     // return it for immediate refining.
     return card_ptr;
   }
-
   // Otherwise, the card is hot.
-  jbyte* res = NULL;
-  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
-  if (_n_hot == _hot_cache_size) {
-    res = _hot_cache[_hot_cache_idx];
-    _n_hot--;
-  }
-
-  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
-  _hot_cache[_hot_cache_idx] = card_ptr;
-  _hot_cache_idx++;
-
-  if (_hot_cache_idx == _hot_cache_size) {
-    // Wrap around
-    _hot_cache_idx = 0;
-  }
-  _n_hot++;
-
-  return res;
+  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
+  size_t masked_index = index & (_hot_cache_size - 1);
+  jbyte* current_ptr = _hot_cache[masked_index];
+
+  // Try to store the new card pointer into the cache. Compare-and-swap to guard
+  // against the unlikely event of a race resulting in another card pointer to
+  // have already been written to the cache. In this case we will return
+  // card_ptr in favor of the other option, which would be starting over. This
+  // should be OK since card_ptr will likely be the older card already when/if
+  // this ever happens.
+  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
+                                                    &_hot_cache[masked_index],
+                                                    current_ptr);
+  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
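The rewritten insert_card path above drops the HotCardCache_lock entirely: a thread claims a slot by atomically bumping _hot_cache_idx, masks the result into the power-of-two array, and publishes its card with a compare-and-swap, returning either the evicted previous entry or, if it lost the race, the new card itself for immediate refinement. A rough standalone sketch of the same idea, using std::atomic in place of HotSpot's Atomic class (all names below are illustrative, not HotSpot APIs):

#include <atomic>
#include <cstddef>

// Sketch of a lock-free "hot cache": a fixed, power-of-two sized ring of
// pointers. insert() returns the entry the caller should process right away:
// either the evicted previous occupant of the slot (possibly null), or the
// new entry itself if a racing insert reached the slot first.
template <typename T, size_t N>
class HotRing {
  static_assert(N != 0 && (N & (N - 1)) == 0, "size must be a power of two");
  std::atomic<T*>     _slots[N];
  std::atomic<size_t> _claim_idx;

public:
  HotRing() : _claim_idx(0) {
    for (size_t i = 0; i < N; i++) {
      _slots[i].store(nullptr, std::memory_order_relaxed);
    }
  }

  T* insert(T* item) {
    // Claim a slot; masking wraps the monotonically growing index into the ring.
    size_t index  = _claim_idx.fetch_add(1, std::memory_order_relaxed);
    size_t masked = index & (N - 1);

    T* current = _slots[masked].load(std::memory_order_relaxed);
    // Publish with CAS. If another thread already overwrote the slot, give up
    // and return the caller's own item instead of retrying.
    if (_slots[masked].compare_exchange_strong(current, item)) {
      return current;
    }
    return item;
  }
};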

 void G1HotCardCache::drain(uint worker_i,
                            G1RemSet* g1rs,
                            DirtyCardQueue* into_cset_dcq) {
...
     return;
   }

   assert(_hot_cache != NULL, "Logic");
   assert(!use_cache(), "cache should be disabled");
-  int start_idx;

-  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + _hot_cache_par_chunk_size;
-
-    if (start_idx ==
-        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
-      // The current worker has successfully claimed the chunk [start_idx..end_idx)
-      end_idx = MIN2(end_idx, _n_hot);
-      for (int i = start_idx; i < end_idx; i++) {
-        jbyte* card_ptr = _hot_cache[i];
-        if (card_ptr != NULL) {
-          if (g1rs->refine_card(card_ptr, worker_i, true)) {
-            // The part of the heap spanned by the card contains references
-            // that point into the current collection set.
-            // We need to record the card pointer in the DirtyCardQueueSet
-            // that we use for such cards.
-            //
-            // The only time we care about recording cards that contain
-            // references that point into the collection set is during
-            // RSet updating while within an evacuation pause.
-            // In this case worker_i should be the id of a GC worker thread
-            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-            assert(worker_i < ParallelGCThreads,
-                   err_msg("incorrect worker id: %u", worker_i));
-
-            into_cset_dcq->enqueue(card_ptr);
-          }
-        }
-      }
-    }
-  }
+  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
+    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
+                                 &_hot_cache_par_claimed_idx);
+    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
+    // The current worker has successfully claimed the chunk [start_idx..end_idx)
+    end_idx = MIN2(end_idx, _hot_cache_size);
+    for (size_t i = start_idx; i < end_idx; i++) {
+      jbyte* card_ptr = _hot_cache[i];
+      if (card_ptr != NULL) {
+        if (g1rs->refine_card(card_ptr, worker_i, true)) {
+          // The part of the heap spanned by the card contains references
+          // that point into the current collection set.
+          // We need to record the card pointer in the DirtyCardQueueSet
+          // that we use for such cards.
+          //
+          // The only time we care about recording cards that contain
+          // references that point into the collection set is during
+          // RSet updating while within an evacuation pause.
+          // In this case worker_i should be the id of a GC worker thread
+          assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+          assert(worker_i < ParallelGCThreads,
+                 err_msg("incorrect worker id: %u", worker_i));
+
+          into_cset_dcq->enqueue(card_ptr);
+        }
+      } else {
+        break;
+      }
+    }
+  }
+
   // The existing entries in the hot card cache, which were just refined
   // above, are discarded prior to re-enabling the cache near the end of the GC.
 }

 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
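The reworked drain loop above changes in the same spirit: instead of a cmpxchg retry per chunk, each worker unconditionally bumps the shared claim index by a whole chunk with an atomic add and processes the half-open range [start_idx, end_idx), clamped to the cache size, breaking out of a chunk at the first NULL slot since the cache is filled in index order. A standalone sketch of that claiming pattern under the same caveats (std::atomic and illustrative names, not HotSpot code):

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <vector>

// Each worker repeatedly claims a fixed-size chunk of indices by bumping a
// shared counter, then drains its half-open range [start, end).
void drain_worker(std::atomic<size_t>& claimed_idx,
                  const std::vector<void*>& cache,
                  size_t chunk_size,
                  void (*process)(void*)) {
  while (claimed_idx.load(std::memory_order_relaxed) < cache.size()) {
    size_t end   = claimed_idx.fetch_add(chunk_size) + chunk_size;
    size_t start = end - chunk_size;
    end = std::min(end, cache.size());   // the last chunk may be short
    for (size_t i = start; i < end; i++) {
      if (cache[i] == nullptr) {
        break;   // slots are filled in order, so the rest of this chunk is empty too
      }
      process(cache[i]);
    }
  }
}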