Improve comments

author      stuefe
date        Mon, 25 Nov 2019 16:35:14 +0100
branch      stuefe-new-metaspace-branch
changeset   59257:990b1fed3b47
parent      59238:6ce12ce00d3e
child       59271:1558266946de
Improve comments
src/hotspot/share/memory/metaspace/blockListArray.hpp
src/hotspot/share/memory/metaspace/blockListArray.inline.hpp
src/hotspot/share/memory/metaspace/commitLimiter.hpp
src/hotspot/share/memory/metaspace/commitMask.hpp
src/hotspot/share/memory/metaspace/counter.hpp
src/hotspot/share/memory/metaspace/internStat.hpp
src/hotspot/share/memory/metaspace/rootChunkArea.hpp
src/hotspot/share/memory/metaspace/spaceManager.hpp
--- a/src/hotspot/share/memory/metaspace/blockListArray.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/blockListArray.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -47,7 +47,7 @@
 
 // A bitmap keeping track of which lists are occupied. The smallest list corresponds
 // to the lowest-order bit. 1 means the list is not empty, 0 means it is empty.
-class BlockListFreeMap {
+class BlockListArrayMask {
 
   typedef uint32_t mask_type;
   mask_type _mask;
@@ -56,7 +56,7 @@
 
 public:
 
-  BlockListFreeMap() : _mask(0) {}
+  BlockListArrayMask() : _mask(0) {}
 
   bool all_zero() const          { return _mask == 0; }
 
@@ -83,7 +83,7 @@
 
   block_t* _bins[num_bins];
 
-  BlockListFreeMap _map;
+  BlockListArrayMask _map;
 
   // e.g. spread = 4
   //
@@ -126,7 +126,7 @@
 public:
 
   BlockListArray() : _map() {
-    assert(BlockListFreeMap::size() >= num_bins, "Map too small.");
+    assert(BlockListArrayMask::size() >= num_bins, "Map too small.");
     ::memset(_bins, 0, sizeof(_bins));
   }
 
--- a/src/hotspot/share/memory/metaspace/blockListArray.inline.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/blockListArray.inline.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -37,7 +37,7 @@
 
 // Starting at pos (inclusive), find the position of the next set bit.
 // Returns -1 if not found.
-int BlockListFreeMap::find_next_set_bit(int pos) const {
+int BlockListArrayMask::find_next_set_bit(int pos) const {
 
   if (get_bit(pos)) {
     return pos;
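
To illustrate the idea behind BlockListArrayMask, here is a minimal standalone sketch (hypothetical names and a plain scan loop; the real implementation in blockListArray.hpp/.inline.hpp may differ):

    #include <cstdint>

    // Sketch: a 32-bit occupancy mask over an array of free lists. Bit n set
    // means list n is non-empty; the smallest list maps to the lowest-order bit.
    // All positions are expected to be within [0, 31].
    class OccupancyMaskSketch {
      uint32_t _mask;
    public:
      OccupancyMaskSketch() : _mask(0) {}
      static int size()            { return 32; }
      bool all_zero() const        { return _mask == 0; }
      bool get_bit(int pos) const  { return (_mask >> pos) & 1; }
      void set_bit(int pos)        { _mask |= ((uint32_t)1 << pos); }
      void clear_bit(int pos)      { _mask &= ~((uint32_t)1 << pos); }
      // Starting at pos (inclusive), return the position of the next set bit,
      // or -1 if no list at or above pos is occupied.
      int find_next_set_bit(int pos) const {
        for (uint32_t m = _mask >> pos; m != 0; m >>= 1, pos++) {
          if (m & 1) {
            return pos;
          }
        }
        return -1;
      }
    };

With such a mask, an allocator can find the first non-empty bin at or above the needed size class with a single find_next_set_bit() call instead of walking a row of empty lists.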
--- a/src/hotspot/share/memory/metaspace/commitLimiter.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/commitLimiter.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -31,6 +31,16 @@
 
 namespace metaspace {
 
+// The CommitLimiter encapsulates the question "is it okay to commit
+//  n more words of memory?". It exists to separate this control logic
+//  from the low-level Metaspace code.
+//
+// The default variant of the CommitLimiter denies committing if doing so
+//  would exceed MaxMetaspaceSize; it likewise denies committing if we
+//  would hit the GC threshold.
+//
+// Other variants of this limiter can be implemented for tests (see the
+//  metaspace gtests).
 class CommitLimiter : public CHeapObj<mtInternal> {
 
   // Counts total words committed for metaspace
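
As a concrete illustration of the "other variants for tests" remark, a capped limiter could look roughly like this (a sketch with hypothetical names, not the actual gtest code):

    #include <cstddef>

    // Sketch: a limiter with a fixed cap, as a test might use to provoke
    // commit failures deterministically.
    class CappedLimiterSketch {
      const size_t _cap_words;   // hard cap, in words
      size_t _committed_words;   // running total of granted words
    public:
      explicit CappedLimiterSketch(size_t cap_words)
        : _cap_words(cap_words), _committed_words(0) {}
      // How many more words could be committed right now?
      size_t possible_expansion_words() const {
        return _cap_words - _committed_words;
      }
      // Returns true and books the words if the commit is allowed.
      bool try_commit(size_t words) {
        if (words > possible_expansion_words()) {
          return false;  // would exceed the cap: deny
        }
        _committed_words += words;
        return true;
      }
      void uncommit(size_t words) { _committed_words -= words; }
    };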
--- a/src/hotspot/share/memory/metaspace/commitMask.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/commitMask.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -34,8 +34,9 @@
 
 namespace metaspace {
 
-// A bitmap covering a range of metaspace; each bit in this mask corresponds to
-//
+// The CommitMask describes the commit state of a metaspace range:
+//  one bit corresponds to one commit granule.
+//  1 means the granule is committed; 0 means it is uncommitted.
 class CommitMask : public CHeapBitMap {
 
   const MetaWord* const _base;
@@ -117,6 +118,7 @@
 
 public:
 
+  // Create a commit mask covering a range [start, start + word_size).
   CommitMask(const MetaWord* start, size_t word_size);
 
   const MetaWord* base() const  { return _base; }
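
The bit-per-granule mapping can be sketched as follows (standalone and illustrative: the word type, granule size, and bool array are stand-ins for the real CHeapBitMap-based implementation):

    #include <cstddef>
    #include <cstdint>

    typedef uint64_t MetaWord;                         // stand-in word type
    static const size_t words_per_granule = 64 * 1024; // hypothetical granule size

    // Index of the granule bit covering address p, for a mask over [base, ...).
    static size_t bit_for(const MetaWord* base, const MetaWord* p) {
      return (size_t)(p - base) / words_per_granule;
    }

    // Mark all granules overlapping [start, start + word_size) as committed.
    static void mark_committed(bool* mask, const MetaWord* base,
                               const MetaWord* start, size_t word_size) {
      if (word_size == 0) {
        return;
      }
      const size_t from = bit_for(base, start);
      const size_t to   = bit_for(base, start + word_size - 1);
      for (size_t i = from; i <= to; i++) {
        mask[i] = true;  // 1 = committed
      }
    }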
--- a/src/hotspot/share/memory/metaspace/counter.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/counter.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -35,10 +35,10 @@
 namespace metaspace {
 
 // A very simple helper class which counts something, offers decrement/increment
-// methods and checks for overflow/underflow on increment/decrement.
+// methods and, in debug builds, checks for overflow/underflow on increment/decrement.
 //
-// (since we seem to do that alot....)
-
+// Two variants exist: a thread-safe one using atomic counting, and a normal
+//  one which does not.
 template <class T>
 class AbstractCounter {
 
--- a/src/hotspot/share/memory/metaspace/internStat.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/internStat.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -36,6 +36,8 @@
 
 namespace metaspace {
 
+
+// A number of counters for internal metaspace statistics; only active in debug builds.
 class InternalStats : public AllStatic {
 
   // Note: all counters which are modified on the classloader local allocation path
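
The "only active in debug" aspect could be realized with conditional compilation, roughly like this (a sketch with a made-up counter name, not the actual class body; ASSERT is HotSpot's debug-build macro):

    #include <cstdint>

    // Sketch: a statistics counter that only exists, and only costs anything,
    // in debug builds.
    class InternalStatsSketch {
    #ifdef ASSERT
      static uint64_t _num_allocs;   // hypothetical example counter
    #endif
    public:
      static void inc_num_allocs() {
    #ifdef ASSERT
        _num_allocs++;
    #endif
      }
    };

    #ifdef ASSERT
    uint64_t InternalStatsSketch::_num_allocs = 0;
    #endif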
--- a/src/hotspot/share/memory/metaspace/rootChunkArea.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/rootChunkArea.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -41,7 +41,7 @@
 class VirtualSpaceNode;
 
 
-// RootChunkArea describes the chunk composition of a root-chunk-sized areal.
+// RootChunkArea describes the chunk composition of a root-chunk-sized area.
 //
 
 class RootChunkArea {
--- a/src/hotspot/share/memory/metaspace/spaceManager.hpp	Sat Nov 23 11:05:16 2019 +0100
+++ b/src/hotspot/share/memory/metaspace/spaceManager.hpp	Mon Nov 25 16:35:14 2019 +0100
@@ -72,7 +72,11 @@
   Metachunk* current_chunk()              { return _chunks.first(); }
   const Metachunk* current_chunk() const  { return _chunks.first(); }
 
-  // Prematurely released metablocks.
+  // These structures take care of 1) prematurely deallocated Metaspace blocks
+  //  and 2) leftover space from retired chunks.
+  // Only one of them is active at a time; the other will eventually be removed.
+  //  We are still testing which implementation is better suited to the task.
+  //  _lom is the default; change with -XX:+-MetaspaceUseLOM.
   BlockFreelist* _block_freelist;
   LeftOverManager* _lom;
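
To make the "only one is active" remark concrete, the deallocation path presumably routes returned blocks to whichever manager the switch selects. A standalone sketch (all names hypothetical except the flag mentioned above; the actual SpaceManager code may differ):

    #include <cstddef>
    #include <cstdint>

    typedef uint64_t MetaWord;  // stand-in word type

    // Hypothetical minimal interfaces for the two competing managers.
    struct BlockFreelistSketch   { void return_block(MetaWord* p, size_t words) { /* ... */ } };
    struct LeftOverManagerSketch { void add_block(MetaWord* p, size_t words)    { /* ... */ } };

    static bool MetaspaceUseLOM = true;  // stands in for -XX:+-MetaspaceUseLOM (default: on)

    // Only one of the two managers is in use; route returned blocks accordingly.
    static void deallocate(BlockFreelistSketch* bf, LeftOverManagerSketch* lom,
                           MetaWord* p, size_t word_size) {
      if (MetaspaceUseLOM) {
        lom->add_block(p, word_size);      // the default path in this branch
      } else {
        bf->return_block(p, word_size);
      }
    }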