Merge
author jmasa
Thu, 18 Jun 2009 12:40:53 -0700
changeset 2999 d494b40e4a41
parent 2902 dbb955b1ee59 (current diff)
parent 2998 b501bd305780 (diff)
child 3001 9b4390b8c6f7
Merge
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -1535,6 +1535,15 @@
   guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
   guarantee(_cur_alloc_region == NULL, "from constructor");
 
+  // 6843694 - ensure that the maximum region index can fit
+  // in the remembered set structures.
+  const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
+  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
+
+  const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
+  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
+  guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
+
   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                              heap_word_size(init_byte_size));
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -59,6 +59,9 @@
 typedef GenericTaskQueue<oop*>    RefToScanQueue;
 typedef GenericTaskQueueSet<oop*> RefToScanQueueSet;
 
+typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
+typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
+
 enum G1GCThreadGroups {
   G1CRGroup = 0,
   G1ZFGroup = 1,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -109,7 +109,7 @@
     return new PerRegionTable(hr);
   }
 
-  void add_card_work(short from_card, bool par) {
+  void add_card_work(CardIdx_t from_card, bool par) {
     if (!_bm.at(from_card)) {
       if (par) {
         if (_bm.par_at_put(from_card, 1)) {
@@ -141,11 +141,11 @@
     // and adding a bit to the new table is never incorrect.
     if (loc_hr->is_in_reserved(from)) {
       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
-      size_t from_card =
-        hw_offset >>
-        (CardTableModRefBS::card_shift - LogHeapWordSize);
+      CardIdx_t from_card = (CardIdx_t)
+          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 
-      add_card_work((short) from_card, par);
+      assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range.");
+      add_card_work(from_card, par);
     }
   }
 
@@ -190,11 +190,11 @@
 #endif
   }
 
-  void add_card(short from_card_index) {
+  void add_card(CardIdx_t from_card_index) {
     add_card_work(from_card_index, /*parallel*/ true);
   }
 
-  void seq_add_card(short from_card_index) {
+  void seq_add_card(CardIdx_t from_card_index) {
     add_card_work(from_card_index, /*parallel*/ false);
   }
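
add_card_work above takes the parallel path through _bm.par_at_put, whose implementation is not part of this diff; presumably it is an atomic test-and-set that returns true only to the thread that actually flipped the bit, so the occupancy counters are bumped exactly once per newly added card. A minimal sketch of that contract, using std::atomic rather than the HotSpot BitMap API (all names here are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Returns true only for the thread that actually set the bit, so a
    // caller can maintain an exact count of newly recorded cards.
    static bool par_set_bit(std::atomic<uintptr_t>* words, size_t bit) {
      std::atomic<uintptr_t>& w = words[bit / (8 * sizeof(uintptr_t))];
      const uintptr_t mask = uintptr_t(1) << (bit % (8 * sizeof(uintptr_t)));
      uintptr_t old = w.load(std::memory_order_relaxed);
      while (!(old & mask)) {
        if (w.compare_exchange_weak(old, old | mask,
                                    std::memory_order_relaxed)) {
          return true;   // this thread flipped the bit
        }
        // CAS failure reloads 'old'; loop re-checks whether the bit is set.
      }
      return false;      // bit was already set by someone else
    }
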
 
@@ -604,7 +604,7 @@
 
   // Note that this may be a continued H region.
   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
-  size_t from_hrs_ind = (size_t)from_hr->hrs_index();
+  RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
 
   // If the region is already coarsened, return.
   if (_coarse_map.at(from_hrs_ind)) {
@@ -627,11 +627,11 @@
       uintptr_t from_hr_bot_card_index =
         uintptr_t(from_hr->bottom())
           >> CardTableModRefBS::card_shift;
-      int card_index = from_card - from_hr_bot_card_index;
+      CardIdx_t card_index = from_card - from_hr_bot_card_index;
       assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion,
              "Must be in range.");
       if (G1HRRSUseSparseTable &&
-          _sparse_table.add_card((short) from_hrs_ind, card_index)) {
+          _sparse_table.add_card(from_hrs_ind, card_index)) {
         if (G1RecordHRRSOops) {
           HeapRegionRemSet::record(hr(), from);
 #if HRRS_VERBOSE
@@ -656,9 +656,9 @@
       }
 
       // Otherwise, transfer from sparse to fine-grain.
-      short cards[SparsePRTEntry::CardsPerEntry];
+      CardIdx_t cards[SparsePRTEntry::CardsPerEntry];
       if (G1HRRSUseSparseTable) {
-        bool res = _sparse_table.get_cards((short) from_hrs_ind, &cards[0]);
+        bool res = _sparse_table.get_cards(from_hrs_ind, &cards[0]);
         assert(res, "There should have been an entry");
       }
 
@@ -679,13 +679,13 @@
       // Add in the cards from the sparse table.
       if (G1HRRSUseSparseTable) {
         for (int i = 0; i < SparsePRTEntry::CardsPerEntry; i++) {
-          short c = cards[i];
+          CardIdx_t c = cards[i];
           if (c != SparsePRTEntry::NullEntry) {
             prt->add_card(c);
           }
         }
         // Now we can delete the sparse entry.
-        bool res = _sparse_table.delete_entry((short) from_hrs_ind);
+        bool res = _sparse_table.delete_entry(from_hrs_ind);
         assert(res, "It should have been there.");
       }
     }
@@ -1030,7 +1030,7 @@
 bool OtherRegionsTable::contains_reference_locked(oop* from) const {
   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
   if (hr == NULL) return false;
-  size_t hr_ind = hr->hrs_index();
+  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   // Is this region in the coarse map?
   if (_coarse_map.at(hr_ind)) return true;
 
@@ -1045,8 +1045,9 @@
     uintptr_t hr_bot_card_index =
       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
     assert(from_card >= hr_bot_card_index, "Inv");
-    int card_index = from_card - hr_bot_card_index;
-    return _sparse_table.contains_card((short)hr_ind, card_index);
+    CardIdx_t card_index = from_card - hr_bot_card_index;
+    assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
+    return _sparse_table.contains_card(hr_ind, card_index);
   }
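
Both call sites in this file recover a card index the same way: shift the from address and the region bottom right by CardTableModRefBS::card_shift and subtract, which yields the card's offset within its region — exactly the range the new asserts pin to [0, CardsPerRegion). A small worked example with hypothetical addresses and a 512-byte card size (an assumption; the real value comes from CardTableModRefBS):

    #include <cassert>
    #include <cstdint>

    typedef int CardIdx_t;

    int main() {
      const unsigned card_shift = 9;                 // 512-byte cards (assumption)
      uintptr_t region_bottom = 0x40000000;          // hypothetical region start
      uintptr_t from = region_bottom + 5 * 512 + 17; // somewhere inside card 5

      uintptr_t from_card       = from >> card_shift;
      uintptr_t region_bot_card = region_bottom >> card_shift;
      CardIdx_t card_index = (CardIdx_t)(from_card - region_bot_card);
      assert(card_index == 5);  // offset within the region, in card units
      return 0;
    }
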
 
 
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -33,7 +33,7 @@
     sprt_iter->init(this);
 }
 
-void SparsePRTEntry::init(short region_ind) {
+void SparsePRTEntry::init(RegionIdx_t region_ind) {
   _region_ind = region_ind;
   _next_index = NullEntry;
 #if UNROLL_CARD_LOOPS
@@ -43,11 +43,12 @@
   _cards[2] = NullEntry;
   _cards[3] = NullEntry;
 #else
-  for (int i = 0; i < CardsPerEntry; i++) _cards[i] = NullEntry;
+  for (int i = 0; i < CardsPerEntry; i++)
+    _cards[i] = NullEntry;
 #endif
 }
 
-bool SparsePRTEntry::contains_card(short card_index) const {
+bool SparsePRTEntry::contains_card(CardIdx_t card_index) const {
 #if UNROLL_CARD_LOOPS
   assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
   if (_cards[0] == card_index) return true;
@@ -80,10 +81,10 @@
   return sum;
 }
 
-SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(short card_index) {
+SparsePRTEntry::AddCardResult SparsePRTEntry::add_card(CardIdx_t card_index) {
 #if UNROLL_CARD_LOOPS
   assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
-  short c = _cards[0];
+  CardIdx_t c = _cards[0];
   if (c == card_index) return found;
   if (c == NullEntry) { _cards[0] = card_index; return added; }
   c = _cards[1];
@@ -97,16 +98,19 @@
   if (c == NullEntry) { _cards[3] = card_index; return added; }
 #else
   for (int i = 0; i < CardsPerEntry; i++) {
-    short c = _cards[i];
+    CardIdx_t c = _cards[i];
     if (c == card_index) return found;
-    if (c == NullEntry) { _cards[i] = card_index; return added; }
+    if (c == NullEntry) {
+      _cards[i] = card_index;
+      return added;
+    }
   }
 #endif
   // Otherwise, we're full.
   return overflow;
 }
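
The loop form of SparsePRTEntry::add_card above is the whole sparse-entry protocol in miniature: scan a tiny fixed card array, report found for a duplicate, added if a free (NullEntry) slot could be claimed, and overflow when the entry is full, at which point the caller promotes the region to a fine-grain table (see heapRegionRemSet.cpp above). A self-contained restatement, runnable outside HotSpot:

    #include <cassert>

    enum { CardsPerEntry = 4, NullEntry = -1 };
    enum AddCardResult { overflow, found, added };

    struct Entry {
      int cards[CardsPerEntry];

      AddCardResult add_card(int card_index) {
        for (int i = 0; i < CardsPerEntry; i++) {
          if (cards[i] == card_index) return found;  // already recorded
          if (cards[i] == NullEntry) {               // claim first free slot
            cards[i] = card_index;
            return added;
          }
        }
        return overflow;  // caller must move this region to a fine-grain table
      }
    };

    int main() {
      Entry e = {{NullEntry, NullEntry, NullEntry, NullEntry}};
      assert(e.add_card(7) == added);
      assert(e.add_card(7) == found);
      assert(e.add_card(1) == added && e.add_card(2) == added &&
             e.add_card(3) == added);
      assert(e.add_card(4) == overflow);  // entry is now full
      return 0;
    }
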
 
-void SparsePRTEntry::copy_cards(short* cards) const {
+void SparsePRTEntry::copy_cards(CardIdx_t* cards) const {
 #if UNROLL_CARD_LOOPS
   assert(CardsPerEntry == 4, "Assumption.  If changes, un-unroll.");
   cards[0] = _cards[0];
@@ -130,7 +134,7 @@
   _capacity(capacity), _capacity_mask(capacity-1),
   _occupied_entries(0), _occupied_cards(0),
   _entries(NEW_C_HEAP_ARRAY(SparsePRTEntry, capacity)),
-  _buckets(NEW_C_HEAP_ARRAY(short, capacity)),
+  _buckets(NEW_C_HEAP_ARRAY(int, capacity)),
   _next_deleted(NULL), _deleted(false),
   _free_list(NullEntry), _free_region(0)
 {
@@ -143,7 +147,7 @@
     _entries = NULL;
   }
   if (_buckets != NULL) {
-    FREE_C_HEAP_ARRAY(short, _buckets);
+    FREE_C_HEAP_ARRAY(int, _buckets);
     _buckets = NULL;
   }
 }
@@ -153,14 +157,18 @@
   _occupied_cards = 0;
   guarantee(_entries != NULL, "INV");
   guarantee(_buckets != NULL, "INV");
+
+  guarantee(_capacity <= ((size_t)1 << (sizeof(int)*BitsPerByte-1)) - 1,
+                "_capacity too large");
+
   // This will put -1 == NullEntry in the key field of all entries.
   memset(_entries, -1, _capacity * sizeof(SparsePRTEntry));
-  memset(_buckets, -1, _capacity * sizeof(short));
+  memset(_buckets, -1, _capacity * sizeof(int));
   _free_list = NullEntry;
   _free_region = 0;
 }
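
clear() can keep using memset(-1) after the widening because memset writes the byte 0xFF into every byte of the array, and an all-ones pattern reads back as -1 (NullEntry) for any two's-complement integer width — short before this change, int after it. A quick demonstration of the idiom:

    #include <cassert>
    #include <cstring>

    int main() {
      int buckets[8];
      memset(buckets, -1, sizeof(buckets));  // every byte becomes 0xFF
      for (int i = 0; i < 8; i++)
        assert(buckets[i] == -1);            // reads back as NullEntry
      return 0;
    }
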
 
-bool RSHashTable::add_card(short region_ind, short card_index) {
+bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
   SparsePRTEntry* e = entry_for_region_ind_create(region_ind);
   assert(e != NULL && e->r_ind() == region_ind,
          "Postcondition of call above.");
@@ -175,9 +183,9 @@
   return res != SparsePRTEntry::overflow;
 }
 
-bool RSHashTable::get_cards(short region_ind, short* cards) {
-  short ind = (short) (region_ind & capacity_mask());
-  short cur_ind = _buckets[ind];
+bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
+  int ind = (int) (region_ind & capacity_mask());
+  int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
   while (cur_ind != NullEntry &&
          (cur = entry(cur_ind))->r_ind() != region_ind) {
@@ -192,10 +200,10 @@
   return true;
 }
 
-bool RSHashTable::delete_entry(short region_ind) {
-  short ind = (short) (region_ind & capacity_mask());
-  short* prev_loc = &_buckets[ind];
-  short cur_ind = *prev_loc;
+bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
+  int ind = (int) (region_ind & capacity_mask());
+  int* prev_loc = &_buckets[ind];
+  int cur_ind = *prev_loc;
   SparsePRTEntry* cur;
   while (cur_ind != NullEntry &&
          (cur = entry(cur_ind))->r_ind() != region_ind) {
@@ -212,10 +220,11 @@
   return true;
 }
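
delete_entry above walks a bucket chain linked by array indices rather than pointers: prev_loc always points at the slot (bucket head or a predecessor's next-index field) that stores the current index, so unlinking is a single store through prev_loc. A standalone sketch of that index-chained unlink, with illustrative names:

    #include <cassert>

    enum { NullEntry = -1 };

    struct Node { int key; int next; };

    // Unlink the first node with the given key from an index-linked chain.
    // 'head' is the bucket slot; returns the freed index or NullEntry.
    static int unlink(int* head, Node* nodes, int key) {
      int* prev_loc = head;          // slot that stores the current index
      int cur = *prev_loc;
      while (cur != NullEntry && nodes[cur].key != key) {
        prev_loc = &nodes[cur].next;
        cur = *prev_loc;
      }
      if (cur == NullEntry) return NullEntry;
      *prev_loc = nodes[cur].next;   // splice the node out
      return cur;
    }

    int main() {
      Node nodes[3] = {{10, 1}, {20, 2}, {30, NullEntry}};
      int bucket = 0;                // chain: 10 -> 20 -> 30
      assert(unlink(&bucket, nodes, 20) == 1);
      assert(nodes[0].next == 2);    // 10 now links straight to 30
      return 0;
    }
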
 
-SparsePRTEntry* RSHashTable::entry_for_region_ind(short region_ind) const {
+SparsePRTEntry*
+RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
   assert(occupied_entries() < capacity(), "Precondition");
-  short ind = (short) (region_ind & capacity_mask());
-  short cur_ind = _buckets[ind];
+  int ind = (int) (region_ind & capacity_mask());
+  int cur_ind = _buckets[ind];
   SparsePRTEntry* cur;
   // XXX
   // int k = 0;
@@ -242,15 +251,16 @@
   }
 }
 
-SparsePRTEntry* RSHashTable::entry_for_region_ind_create(short region_ind) {
+SparsePRTEntry*
+RSHashTable::entry_for_region_ind_create(RegionIdx_t region_ind) {
   SparsePRTEntry* res = entry_for_region_ind(region_ind);
   if (res == NULL) {
-    short new_ind = alloc_entry();
+    int new_ind = alloc_entry();
     assert(0 <= new_ind && (size_t)new_ind < capacity(), "There should be room.");
     res = entry(new_ind);
     res->init(region_ind);
     // Insert at front.
-    short ind = (short) (region_ind & capacity_mask());
+    int ind = (int) (region_ind & capacity_mask());
     res->set_next_index(_buckets[ind]);
     _buckets[ind] = new_ind;
     _occupied_entries++;
@@ -258,8 +268,8 @@
   return res;
 }
 
-short RSHashTable::alloc_entry() {
-  short res;
+int RSHashTable::alloc_entry() {
+  int res;
   if (_free_list != NullEntry) {
     res = _free_list;
     _free_list = entry(res)->next_index();
@@ -273,13 +283,11 @@
   }
 }
 
-
-void RSHashTable::free_entry(short fi) {
+void RSHashTable::free_entry(int fi) {
   entry(fi)->set_next_index(_free_list);
   _free_list = fi;
 }
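
alloc_entry and free_entry together form a trivial slab allocator over _entries: free_entry pushes an index onto a list threaded through the entries' next-index fields, and alloc_entry pops from that list before falling back to bumping the _free_region high-water mark. A sketch of the pair under those assumptions (the real alloc_entry also reports exhaustion, elided by this hunk and omitted here):

    #include <cassert>

    enum { NullEntry = -1 };

    struct Entry { int next; };

    struct Table {
      Entry entries[16];
      int free_list   = NullEntry; // freed entries, chained through 'next'
      int free_region = 0;         // high-water mark of never-used entries

      int alloc_entry() {
        if (free_list != NullEntry) {   // reuse a freed slot first
          int res = free_list;
          free_list = entries[res].next;
          return res;
        }
        return free_region++;           // otherwise take a fresh slot
      }

      void free_entry(int fi) {         // push onto the free list
        entries[fi].next = free_list;
        free_list = fi;
      }
    };

    int main() {
      Table t;
      int a = t.alloc_entry();          // 0, from the high-water mark
      int b = t.alloc_entry();          // 1
      t.free_entry(a);
      assert(t.alloc_entry() == a);     // freed slot reused before slot 2
      (void)b;
      return 0;
    }
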
 
-
 void RSHashTable::add_entry(SparsePRTEntry* e) {
   assert(e->num_valid_cards() > 0, "Precondition.");
   SparsePRTEntry* e2 = entry_for_region_ind_create(e->r_ind());
@@ -322,8 +330,8 @@
   return NULL;
 }
 
-short /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
-  short res;
+CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
+  CardIdx_t res;
   while (_bl_ind != RSHashTable::NullEntry) {
     res = _rsht->entry(_bl_ind)->card(0);
     if (res != SparsePRTEntry::NullEntry) {
@@ -336,7 +344,7 @@
   return SparsePRTEntry::NullEntry;
 }
 
-size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(short ci) {
+size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   return
     _heap_bot_card_ind
     + (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion)
@@ -345,7 +353,7 @@
 
 bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
   _card_ind++;
-  short ci;
+  CardIdx_t ci;
   if (_card_ind < SparsePRTEntry::CardsPerEntry &&
       ((ci = _rsht->entry(_bl_ind)->card(_card_ind)) !=
        SparsePRTEntry::NullEntry)) {
@@ -379,16 +387,16 @@
   return false;
 }
 
-bool RSHashTable::contains_card(short region_index, short card_index) const {
+bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index) const {
   SparsePRTEntry* e = entry_for_region_ind(region_index);
   return (e != NULL && e->contains_card(card_index));
 }
 
 size_t RSHashTable::mem_size() const {
-  return sizeof(this) + capacity() * (sizeof(SparsePRTEntry) + sizeof(short));
+  return sizeof(this) +
+    capacity() * (sizeof(SparsePRTEntry) + sizeof(int));
 }
 
-
 // ----------------------------------------------------------------------
 
 SparsePRT* SparsePRT::_head_expanded_list = NULL;
@@ -408,6 +416,7 @@
   }
 }
 
+
 SparsePRT* SparsePRT::get_from_expanded_list() {
   SparsePRT* hd = _head_expanded_list;
   while (hd != NULL) {
@@ -452,6 +461,7 @@
   _next = _cur;
 }
 
+
 SparsePRT::~SparsePRT() {
   assert(_next != NULL && _cur != NULL, "Inv");
   if (_cur != _next) { delete _cur; }
@@ -465,7 +475,7 @@
   return sizeof(this) + _next->mem_size();
 }
 
-bool SparsePRT::add_card(short region_id, short card_index) {
+bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
   gclog_or_tty->print_cr("  Adding card %d from region %d to region %d sparse.",
                 card_index, region_id, _hr->hrs_index());
@@ -476,11 +486,11 @@
   return _next->add_card(region_id, card_index);
 }
 
-bool SparsePRT::get_cards(short region_id, short* cards) {
+bool SparsePRT::get_cards(RegionIdx_t region_id, CardIdx_t* cards) {
   return _next->get_cards(region_id, cards);
 }
 
-bool SparsePRT::delete_entry(short region_id) {
+bool SparsePRT::delete_entry(RegionIdx_t region_id) {
   return _next->delete_entry(region_id);
 }
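
SparsePRT keeps two table pointers: mutations such as add_card go to _next, while _cur is what an in-flight iterator may still be reading; expansion installs a larger _next, and the expanded-list machinery above defers reclaiming the old table to a safepoint (cleanup_all). A loose sketch of that double-table shape — the copy-on-expand and safepoint details are assumptions, since they are not shown in these hunks:

    // Loose sketch only: entry copying, locking, and the expanded-list
    // bookkeeping of the real SparsePRT are omitted.
    struct TableSketch {
      int capacity;
      explicit TableSketch(int c) : capacity(c) {}
    };

    struct SparsePRTSketch {
      TableSketch* _cur;   // table a concurrent iterator may be reading
      TableSketch* _next;  // table that receives new additions

      explicit SparsePRTSketch(int cap) { _cur = _next = new TableSketch(cap); }

      void expand() {
        // Grow into a fresh table; leave _cur alone for in-flight readers.
        _next = new TableSketch(_next->capacity * 2);
      }

      void cleanup() {
        // At a safepoint no iterator can still hold _cur; reclaim it.
        if (_cur != _next) { delete _cur; _cur = _next; }
      }
    };
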
 
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -35,32 +35,32 @@
 
 class SparsePRTEntry: public CHeapObj {
 public:
+
   enum SomePublicConstants {
-    CardsPerEntry = (short)4,
-    NullEntry = (short)-1,
-    DeletedEntry = (short)-2
+    CardsPerEntry =  4,
+    NullEntry     = -1
   };
 
 private:
-  short _region_ind;
-  short _next_index;
-  short _cards[CardsPerEntry];
+  RegionIdx_t _region_ind;
+  int         _next_index;
+  CardIdx_t   _cards[CardsPerEntry];
 
 public:
 
   // Set the region_ind to the given value, and delete all cards.
-  inline void init(short region_ind);
+  inline void init(RegionIdx_t region_ind);
 
-  short r_ind() const { return _region_ind; }
+  RegionIdx_t r_ind() const { return _region_ind; }
   bool valid_entry() const { return r_ind() >= 0; }
-  void set_r_ind(short rind) { _region_ind = rind; }
+  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }
 
-  short next_index() const { return _next_index; }
-  short* next_index_addr() { return &_next_index; }
-  void set_next_index(short ni) { _next_index = ni; }
+  int next_index() const { return _next_index; }
+  int* next_index_addr() { return &_next_index; }
+  void set_next_index(int ni) { _next_index = ni; }
 
   // Returns "true" iff the entry contains the given card index.
-  inline bool contains_card(short card_index) const;
+  inline bool contains_card(CardIdx_t card_index) const;
 
   // Returns the number of non-NULL card entries.
   inline int num_valid_cards() const;
@@ -73,14 +73,14 @@
     found,
     added
   };
-  inline AddCardResult add_card(short card_index);
+  inline AddCardResult add_card(CardIdx_t card_index);
 
   // Copy the current entry's cards into "cards".
-  inline void copy_cards(short* cards) const;
+  inline void copy_cards(CardIdx_t* cards) const;
   // Copy the current entry's cards into the "_card" array of "e."
   inline void copy_cards(SparsePRTEntry* e) const;
 
-  inline short card(int i) const { return _cards[i]; }
+  inline CardIdx_t card(int i) const { return _cards[i]; }
 };
 
 
@@ -98,9 +98,9 @@
   size_t _occupied_cards;
 
   SparsePRTEntry* _entries;
-  short* _buckets;
-  short  _free_region;
-  short  _free_list;
+  int* _buckets;
+  int  _free_region;
+  int  _free_list;
 
   static RSHashTable* _head_deleted_list;
   RSHashTable* _next_deleted;
@@ -113,20 +113,20 @@
   // operations, and that the table be less than completely full.  If
   // an entry for "region_ind" is already in the table, finds it and
   // returns its address; otherwise returns "NULL."
-  SparsePRTEntry* entry_for_region_ind(short region_ind) const;
+  SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;
 
   // Requires that the caller hold a lock preventing parallel modifying
   // operations, and that the table be less than completely full.  If
   // an entry for "region_ind" is already in the table, finds it and
   // returns its address; otherwise allocates, initializes, inserts and
   // returns a new entry for "region_ind".
-  SparsePRTEntry* entry_for_region_ind_create(short region_ind);
+  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);
 
   // Returns the index of the next free entry in "_entries".
-  short alloc_entry();
+  int alloc_entry();
   // Declares the entry "fi" to be free.  (It must have already been
   // deleted from any bucket lists.)
-  void free_entry(short fi);
+  void free_entry(int fi);
 
 public:
   RSHashTable(size_t capacity);
@@ -138,12 +138,12 @@
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
-  bool add_card(short region_id, short card_index);
+  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
 
-  bool get_cards(short region_id, short* cards);
-  bool delete_entry(short region_id);
+  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);
+  bool delete_entry(RegionIdx_t region_id);
 
-  bool contains_card(short region_id, short card_index) const;
+  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;
 
   void add_entry(SparsePRTEntry* e);
 
@@ -162,51 +162,49 @@
 
   static void add_to_deleted_list(RSHashTable* rsht);
   static RSHashTable* get_from_deleted_list();
-
-
 };
 
-  // ValueObj because will be embedded in HRRS iterator.
+// ValueObj because it will be embedded in HRRS iterator.
 class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
-    short _tbl_ind;
-    short _bl_ind;
-    short _card_ind;
-    RSHashTable* _rsht;
-    size_t _heap_bot_card_ind;
+  int _tbl_ind;         // [-1, 0.._rsht->_capacity)
+  int _bl_ind;          // [-1, 0.._rsht->_capacity)
+  short _card_ind;      // [0..CardsPerEntry)
+  RSHashTable* _rsht;
+  size_t _heap_bot_card_ind;
 
-    enum SomePrivateConstants {
-      CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
-    };
+  enum SomePrivateConstants {
+    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
+  };
+
+  // If the bucket list pointed to by _bl_ind contains a card, sets
+  // _bl_ind to the index of that entry, and returns the card.
+  // Otherwise, returns SparseEntry::NullEntry.
+  CardIdx_t find_first_card_in_list();
 
-    // If the bucket list pointed to by _bl_ind contains a card, sets
-    // _bl_ind to the index of that entry, and returns the card.
-    // Otherwise, returns SparseEntry::NullEnty.
-    short find_first_card_in_list();
-    // Computes the proper card index for the card whose offset in the
-    // current region (as indicated by _bl_ind) is "ci".
-    // This is subject to errors when there is iteration concurrent with
-    // modification, but these errors should be benign.
-    size_t compute_card_ind(short ci);
+  // Computes the proper card index for the card whose offset in the
+  // current region (as indicated by _bl_ind) is "ci".
+  // This is subject to errors when there is iteration concurrent with
+  // modification, but these errors should be benign.
+  size_t compute_card_ind(CardIdx_t ci);
 
-  public:
-    RSHashTableIter(size_t heap_bot_card_ind) :
-      _tbl_ind(RSHashTable::NullEntry),
-      _bl_ind(RSHashTable::NullEntry),
-      _card_ind((SparsePRTEntry::CardsPerEntry-1)),
-      _rsht(NULL),
-      _heap_bot_card_ind(heap_bot_card_ind)
-    {}
+public:
+  RSHashTableIter(size_t heap_bot_card_ind) :
+    _tbl_ind(RSHashTable::NullEntry),
+    _bl_ind(RSHashTable::NullEntry),
+    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
+    _rsht(NULL),
+    _heap_bot_card_ind(heap_bot_card_ind)
+  {}
 
-    void init(RSHashTable* rsht) {
-      _rsht = rsht;
-      _tbl_ind = -1; // So that first increment gets to 0.
-      _bl_ind = RSHashTable::NullEntry;
-      _card_ind = (SparsePRTEntry::CardsPerEntry-1);
-    }
+  void init(RSHashTable* rsht) {
+    _rsht = rsht;
+    _tbl_ind = -1; // So that first increment gets to 0.
+    _bl_ind = RSHashTable::NullEntry;
+    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
+  }
 
-    bool has_next(size_t& card_index);
-
-  };
+  bool has_next(size_t& card_index);
+};
 
 // Concurrent access to a SparsePRT must be serialized by some external
 // mutex.
@@ -238,7 +236,6 @@
   SparsePRT* next_expanded() { return _next_expanded; }
   void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
 
-
   static SparsePRT* _head_expanded_list;
 
 public:
@@ -255,16 +252,16 @@
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
-  bool add_card(short region_id, short card_index);
+  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
 
   // If the table holds an entry for "region_ind", copies its
   // cards into "cards", which must be an array of length at least
   // "CardsPerEntry", and returns "true"; otherwise, returns "false".
-  bool get_cards(short region_ind, short* cards);
+  bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);
 
   // If there is an entry for "region_ind", removes it and returns "true";
   // otherwise returns "false."
-  bool delete_entry(short region_ind);
+  bool delete_entry(RegionIdx_t region_ind);
 
   // Clear the table, and reinitialize to initial capacity.
   void clear();
@@ -276,13 +273,12 @@
   static void cleanup_all();
   RSHashTable* cur() const { return _cur; }
 
-
   void init_iterator(SparsePRTIter* sprt_iter);
 
   static void add_to_expanded_list(SparsePRT* sprt);
   static SparsePRT* get_from_expanded_list();
 
-  bool contains_card(short region_id, short card_index) const {
+  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
     return _next->contains_card(region_id, card_index);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Thu Jun 18 12:40:53 2009 -0700
@@ -51,7 +51,6 @@
 concurrentG1Refine.hpp			allocation.hpp
 concurrentG1Refine.hpp			thread.hpp
 
-
 concurrentG1RefineThread.cpp		concurrentG1Refine.hpp
 concurrentG1RefineThread.cpp		concurrentG1RefineThread.hpp
 concurrentG1RefineThread.cpp		g1CollectedHeap.inline.hpp
@@ -334,6 +333,7 @@
 sparsePRT.hpp				allocation.hpp
 sparsePRT.hpp				cardTableModRefBS.hpp
 sparsePRT.hpp				globalDefinitions.hpp
+sparsePRT.hpp                           g1CollectedHeap.inline.hpp
 sparsePRT.hpp				heapRegion.hpp
 sparsePRT.hpp				mutex.hpp
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -177,6 +177,7 @@
   // are double-word aligned in 32-bit VMs, but not in 64-bit VMs, so the 32-bit
   // granularity is 2, 64-bit is 1.
   static inline size_t obj_granularity() { return size_t(MinObjAlignment); }
+  static inline int obj_granularity_shift() { return LogMinObjAlignment; }
 
   HeapWord*       _region_start;
   size_t          _region_size;
@@ -299,13 +300,13 @@
 inline size_t
 ParMarkBitMap::bits_to_words(idx_t bits)
 {
-  return bits * obj_granularity();
+  return bits << obj_granularity_shift();
 }
 
 inline ParMarkBitMap::idx_t
 ParMarkBitMap::words_to_bits(size_t words)
 {
-  return words / obj_granularity();
+  return words >> obj_granularity_shift();
 }
 
 inline size_t ParMarkBitMap::obj_size(idx_t beg_bit, idx_t end_bit) const
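
bits_to_words and words_to_bits can swap their multiply and divide for shifts because obj_granularity() is a power of two (it is MinObjAlignment), and the new obj_granularity_shift() exposes its log2; pre-shifting presumably matters here because the optimizer cannot always fold a division by a value hidden behind a function call. The equivalence the change relies on, checked over a small range (granularity 2 is the 32-bit-VM case named in the comment above):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t granularity = 2;   // 32-bit VM case (assumption)
      const int    shift       = 1;   // log2(granularity)

      for (size_t bits = 0; bits < 1000; bits++) {
        assert(bits * granularity == bits << shift);    // bits_to_words
      }
      for (size_t words = 0; words < 1000; words++) {
        assert(words / granularity == words >> shift);  // words_to_bits
      }
      return 0;
    }
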
--- a/hotspot/src/share/vm/includeDB_compiler1	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_compiler1	Thu Jun 18 12:40:53 2009 -0700
@@ -387,7 +387,7 @@
 c1_ValueSet.cpp                         c1_ValueSet.hpp
 
 c1_ValueSet.hpp                         allocation.hpp
-c1_ValueSet.hpp                         bitMap.hpp
+c1_ValueSet.hpp                         bitMap.inline.hpp
 c1_ValueSet.hpp                         c1_Instruction.hpp
 
 c1_ValueStack.cpp                       c1_IR.hpp
--- a/hotspot/src/share/vm/memory/gcLocker.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/memory/gcLocker.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -242,6 +242,31 @@
 #endif
 };
 
+// A SkipGCALot object is used to elide the usual effect of gc-a-lot
+// over a section of execution by a thread. Currently, it's used only to
+// prevent re-entrant calls to GC.
+class SkipGCALot : public StackObj {
+  private:
+   bool _saved;
+   Thread* _t;
+
+  public:
+#ifdef ASSERT
+    SkipGCALot(Thread* t) : _t(t) {
+      _saved = _t->skip_gcalot();
+      _t->set_skip_gcalot(true);
+    }
+
+    ~SkipGCALot() {
+      assert(_t->skip_gcalot(), "Save-restore protocol invariant");
+      _t->set_skip_gcalot(_saved);
+    }
+#else
+    SkipGCALot(Thread* t) { }
+    ~SkipGCALot() { }
+#endif
+};
+
 // JRT_LEAF currently can be called from either _thread_in_Java or
 // _thread_in_native mode. In _thread_in_native, it is ok
 // for another thread to trigger GC. The rest of the JRT_LEAF
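
SkipGCALot is a save/restore RAII guard: the constructor records the flag's previous value before setting it, and the destructor restores that value, so the guard composes under nesting and is safe across early returns. A standalone version of the same pattern, with the debug-only conditional compilation dropped for brevity:

    #include <cassert>

    struct Thread {
      bool skip_gcalot = false;
    };

    // Save/restore guard in the style of SkipGCALot: safe to nest, and the
    // flag is restored on every exit path, including early returns.
    class SkipGuard {
      bool _saved;
      Thread* _t;
    public:
      explicit SkipGuard(Thread* t) : _saved(t->skip_gcalot), _t(t) {
        _t->skip_gcalot = true;
      }
      ~SkipGuard() {
        assert(_t->skip_gcalot);   // save-restore protocol invariant
        _t->skip_gcalot = _saved;
      }
    };

    int main() {
      Thread t;
      {
        SkipGuard outer(&t);
        { SkipGuard inner(&t); assert(t.skip_gcalot); }  // nesting is fine
        assert(t.skip_gcalot);     // outer guard still active
      }
      assert(!t.skip_gcalot);      // fully restored on scope exit
      return 0;
    }
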
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -66,11 +66,14 @@
 
 void InterfaceSupport::gc_alot() {
   Thread *thread = Thread::current();
-  if (thread->is_VM_thread()) return; // Avoid concurrent calls
+  if (!thread->is_Java_thread()) return; // Avoid concurrent calls
   // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
   JavaThread *current_thread = (JavaThread *)thread;
   if (current_thread->active_handles() == NULL) return;
 
+  // Short-circuit any possible re-entrant gc-a-lot attempt
+  if (thread->skip_gcalot()) return;
+
   if (is_init_completed()) {
 
     if (++_fullgc_alot_invocation < FullGCALotStart) {
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -127,6 +127,7 @@
   debug_only(_owned_locks = NULL;)
   debug_only(_allow_allocation_count = 0;)
   NOT_PRODUCT(_allow_safepoint_count = 0;)
+  NOT_PRODUCT(_skip_gcalot = false;)
   CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
   _jvmti_env_iteration_count = 0;
   _vm_operation_started_count = 0;
@@ -784,7 +785,6 @@
       // We could enter a safepoint here and thus have a gc
       InterfaceSupport::check_gc_alot();
     }
-
 #endif
 }
 #endif
--- a/hotspot/src/share/vm/runtime/thread.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -191,6 +191,9 @@
   NOT_PRODUCT(int _allow_safepoint_count;)       // If 0, thread allow a safepoint to happen
   debug_only (int _allow_allocation_count;)      // If 0, the thread is allowed to allocate oops.
 
+  // Used by SkipGCALot class.
+  NOT_PRODUCT(bool _skip_gcalot;)                // Should we elide gc-a-lot?
+
   // Record when GC is locked out via the GC_locker mechanism
   CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;)
 
@@ -308,6 +311,11 @@
   bool is_gc_locked_out() { return _gc_locked_out_count > 0; }
 #endif // CHECK_UNHANDLED_OOPS
 
+#ifndef PRODUCT
+  bool skip_gcalot()           { return _skip_gcalot; }
+  void set_skip_gcalot(bool v) { _skip_gcalot = v;    }
+#endif
+
  public:
   // Installs a pending exception to be inserted later
   static void send_async_exception(oop thread_oop, oop java_throwable);
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -531,6 +531,7 @@
   Thread* t = Thread::current();
 
   if (!t->is_VM_thread()) {
+    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
     // JavaThread or WatcherThread
     t->check_for_valid_safepoint_state(true);
 
--- a/hotspot/src/share/vm/utilities/bitMap.cpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp	Thu Jun 18 12:40:53 2009 -0700
@@ -41,19 +41,6 @@
   resize(size_in_bits, in_resource_area);
 }
 
-
-void BitMap::verify_index(idx_t index) const {
-    assert(index < _size, "BitMap index out of bounds");
-}
-
-void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
-#ifdef ASSERT
-    assert(beg_index <= end_index, "BitMap range error");
-    // Note that [0,0) and [size,size) are both valid ranges.
-    if (end_index != _size)  verify_index(end_index);
-#endif
-}
-
 void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
   assert(size_in_bits >= 0, "just checking");
   idx_t old_size_in_words = size_in_words();
--- a/hotspot/src/share/vm/utilities/bitMap.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -93,10 +93,12 @@
   // The index of the first full word in a range.
   idx_t word_index_round_up(idx_t bit) const;
 
-  // Verification, statistics.
-  void verify_index(idx_t index) const;
-  void verify_range(idx_t beg_index, idx_t end_index) const;
+  // Verification.
+  inline void verify_index(idx_t index) const NOT_DEBUG_RETURN;
+  inline void verify_range(idx_t beg_index, idx_t end_index) const
+    NOT_DEBUG_RETURN;
 
+  // Statistics.
   static idx_t* _pop_count_table;
   static void init_pop_count_table();
   static idx_t num_set_bits(bm_word_t w);
@@ -287,7 +289,6 @@
 #endif
 };
 
-
 // Convenience class wrapping BitMap which provides multiple bits per slot.
 class BitMap2D VALUE_OBJ_CLASS_SPEC {
  public:
--- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -22,6 +22,17 @@
  *
  */
 
+#ifdef ASSERT
+inline void BitMap::verify_index(idx_t index) const {
+  assert(index < _size, "BitMap index out of bounds");
+}
+
+inline void BitMap::verify_range(idx_t beg_index, idx_t end_index) const {
+  assert(beg_index <= end_index, "BitMap range error");
+  // Note that [0,0) and [size,size) are both valid ranges.
+  if (end_index != _size) verify_index(end_index);
+}
+#endif // #ifdef ASSERT
 
 inline void BitMap::set_bit(idx_t bit) {
   verify_index(bit);
--- a/hotspot/src/share/vm/utilities/macros.hpp	Wed Jul 05 16:54:26 2017 +0200
+++ b/hotspot/src/share/vm/utilities/macros.hpp	Thu Jun 18 12:40:53 2009 -0700
@@ -106,11 +106,13 @@
 #ifdef ASSERT
 #define DEBUG_ONLY(code) code
 #define NOT_DEBUG(code)
+#define NOT_DEBUG_RETURN  /*next token must be ;*/
 // Historical.
 #define debug_only(code) code
 #else // ASSERT
 #define DEBUG_ONLY(code)
 #define NOT_DEBUG(code) code
+#define NOT_DEBUG_RETURN {}
 #define debug_only(code)
 #endif // ASSERT
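
NOT_DEBUG_RETURN is what lets bitMap.hpp above turn verify_index and verify_range into inline functions without paying for them in product builds: placed after a declaration, it expands to nothing under ASSERT (the trailing semicolon in the source then ends a plain declaration, whose body lives in bitMap.inline.hpp) and to an empty {} body otherwise. A standalone sketch of the idiom, with illustrative names:

    #include <cassert>

    #ifdef ASSERT
    #define NOT_DEBUG_RETURN    /* next token must be ; */
    #else
    #define NOT_DEBUG_RETURN {}
    #endif

    struct BitMapSketch {
      unsigned size;
      // Declaration in debug builds; empty inline body in product builds.
      void verify_index(unsigned index) const NOT_DEBUG_RETURN;
    };

    #ifdef ASSERT
    // Debug-only definition, as bitMap.inline.hpp does for the real BitMap.
    inline void BitMapSketch::verify_index(unsigned index) const {
      assert(index < size && "BitMap index out of bounds");
    }
    #endif

    int main() {
      BitMapSketch bm = {8};
      bm.verify_index(3);   // checked only when ASSERT is defined
      return 0;
    }
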