8058354: SPECjvm2008-Derby -2.7% performance regression on Solaris-X64 starting with 9-b29
author tschatzl
date Tue, 07 Apr 2015 10:53:51 +0200
changeset 30158 bd6094906ef8
parent 30157 e36165b16dde
child 30159 5d1c43b3b9b3
8058354: SPECjvm2008-Derby -2.7% performance regression on Solaris-X64 starting with 9-b29 Summary: Allow use of large pages for auxiliary data structures in G1. Clean up existing interfaces. Reviewed-by: jmasa, pliden, stefank
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp
hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp
hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
hotspot/src/share/vm/prims/whitebox.cpp
hotspot/src/share/vm/runtime/virtualspace.cpp
hotspot/src/share/vm/runtime/virtualspace.hpp
hotspot/test/gc/g1/TestLargePageUseForAuxMemory.java
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -116,7 +116,7 @@
 }
 
 size_t CMBitMap::compute_size(size_t heap_size) {
-  return heap_size / mark_distance();
+  return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
 }
 
 size_t CMBitMap::mark_distance() {
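
The change to compute_size() rounds the bitmap size up to the OS allocation granularity so that the subsequent reservation can be aligned as required. A minimal standalone sketch of the arithmetic, using assumed constants (ObjectAlignmentInBytes = 8, a 4K allocation granularity) rather than the HotSpot definitions:

    #include <cstddef>
    #include <cstdio>

    // One bitmap bit covers one ObjectAlignmentInBytes-sized chunk of heap,
    // so one bitmap byte covers mark_distance() = ObjectAlignmentInBytes *
    // BitsPerByte bytes of heap. Values assumed for illustration.
    static const size_t kObjectAlignment = 8;
    static const size_t kBitsPerByte     = 8;
    static const size_t kMarkDistance    = kObjectAlignment * kBitsPerByte; // 64

    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t heap_size   = 128 * 1024 * 1024; // 128M heap
      const size_t granularity = 4096;              // assumed granularity
      // 128M / 64 = 2M: the bitmap spans exactly one 2M large page on x64.
      printf("bitmap size: %zu bytes\n",
             align_up(heap_size / kMarkDistance, granularity));
      return 0;
    }
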
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -1802,6 +1802,25 @@
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
 
+G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
+                                                                 size_t size,
+                                                                 size_t translation_factor) {
+  // Allocate a new reserved space, preferring to use large pages.
+  ReservedSpace rs(size, true);
+  G1RegionToSpaceMapper* result  =
+    G1RegionToSpaceMapper::create_mapper(rs,
+                                         size,
+                                         rs.alignment(),
+                                         HeapRegion::GrainBytes,
+                                         translation_factor,
+                                         mtGC);
+  if (TracePageSizes) {
+    gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
+                           description, rs.alignment(), p2i(rs.base()), rs.size(), rs.alignment(), size);
+  }
+  return result;
+}
+
 jint G1CollectedHeap::initialize() {
   CollectedHeap::pre_initialize();
   os::enable_vtime();
@@ -1869,57 +1888,35 @@
   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
   G1RegionToSpaceMapper* heap_storage =
     G1RegionToSpaceMapper::create_mapper(g1_rs,
+                                         g1_rs.size(),
                                          UseLargePages ? os::large_page_size() : os::vm_page_size(),
                                          HeapRegion::GrainBytes,
                                          1,
                                          mtJavaHeap);
   heap_storage->set_mapping_changed_listener(&_listener);
 
-  // Reserve space for the block offset table. We do not support automatic uncommit
-  // for the card table at this time. BOT only.
-  ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+  // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
   G1RegionToSpaceMapper* bot_storage =
-    G1RegionToSpaceMapper::create_mapper(bot_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         G1BlockOffsetSharedArray::N_bytes,
-                                         mtGC);
+    create_aux_memory_mapper("Block offset table",
+                             G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+                             G1BlockOffsetSharedArray::N_bytes);
 
-  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
   G1RegionToSpaceMapper* cardtable_storage =
-    G1RegionToSpaceMapper::create_mapper(cardtable_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         G1BlockOffsetSharedArray::N_bytes,
-                                         mtGC);
-
-  // Reserve space for the card counts table.
-  ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
+    create_aux_memory_mapper("Card table",
+                             G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
+                             G1BlockOffsetSharedArray::N_bytes);
+
   G1RegionToSpaceMapper* card_counts_storage =
-    G1RegionToSpaceMapper::create_mapper(card_counts_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         G1BlockOffsetSharedArray::N_bytes,
-                                         mtGC);
-
-  // Reserve space for prev and next bitmap.
+    create_aux_memory_mapper("Card counts table",
+                             G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize),
+                             G1BlockOffsetSharedArray::N_bytes);
+
   size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
-
-  ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
   G1RegionToSpaceMapper* prev_bitmap_storage =
-    G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         CMBitMap::mark_distance(),
-                                         mtGC);
-
-  ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
+    create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance());
   G1RegionToSpaceMapper* next_bitmap_storage =
-    G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
-                                         os::vm_page_size(),
-                                         HeapRegion::GrainBytes,
-                                         CMBitMap::mark_distance(),
-                                         mtGC);
+    create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance());
 
   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
   g1_barrier_set()->initialize(cardtable_storage);
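
For context on the sizes create_aux_memory_mapper() ends up reserving: each auxiliary structure needs one byte per fixed-size chunk of heap, given by its translation factor. A rough standalone model with illustrative constants (512-byte cards and BOT entries, 64-byte mark distance), not the HotSpot definitions:

    #include <cstddef>
    #include <cstdio>

    // Each structure's size is heap_size / translation_factor bytes.
    int main() {
      const size_t heap_size     = 1024UL * 1024 * 1024; // assumed 1G heap
      const size_t card_size     = 512; // heap bytes per card table byte
      const size_t bot_factor    = 512; // heap bytes per BOT byte (N_bytes)
      const size_t mark_distance = 64;  // heap bytes per bitmap byte

      printf("card table: %zu bytes\n", heap_size / card_size);  // 2M: one large page
      printf("BOT:        %zu bytes\n", heap_size / bot_factor); // 2M
      printf("bitmaps:    %zu bytes each\n", heap_size / mark_distance); // 16M
      return 0;
    }
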
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Apr 07 10:53:51 2015 +0200
@@ -354,6 +354,12 @@
   // heap after a compaction.
   void print_hrm_post_compaction();
 
+  // Create a memory mapper for auxiliary data structures of the given size and
+  // translation factor.
+  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
+                                                         size_t size,
+                                                         size_t translation_factor);
+
   double verify(bool guard, const char* msg);
   void verify_before_gc();
   void verify_after_gc();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,37 +44,45 @@
 #endif
 #include "utilities/bitMap.inline.hpp"
 
-G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
-  _high_boundary(NULL), _committed(), _page_size(0), _special(false),
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
+  _low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
   _dirty(), _executable(false) {
+  initialize_with_page_size(rs, used_size, page_size);
 }
 
-bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) {
-  if (!rs.is_reserved()) {
-    return false;  // Allocation failed.
-  }
-  assert(_low_boundary == NULL, "VirtualSpace already initialized");
-  assert(page_size > 0, "Granularity must be non-zero.");
+void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
+  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
+
+  vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
+  vmassert(page_size > 0, "Page size must be non-zero.");
+
+  guarantee(is_ptr_aligned(rs.base(), page_size),
+            err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
+  guarantee(is_size_aligned(used_size, os::vm_page_size()),
+            err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
+  guarantee(used_size <= rs.size(),
+            err_msg("Used size of reserved space " SIZE_FORMAT " bytes must not be larger than the reservation of " SIZE_FORMAT " bytes", used_size, rs.size()));
+  guarantee(is_size_aligned(rs.size(), page_size),
+            err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
 
   _low_boundary  = rs.base();
-  _high_boundary = _low_boundary + rs.size();
+  _high_boundary = _low_boundary + used_size;
 
   _special = rs.special();
   _executable = rs.executable();
 
   _page_size = page_size;
 
-  assert(_committed.size() == 0, "virtual space initialized more than once");
-  uintx size_in_bits = rs.size() / page_size;
-  _committed.resize(size_in_bits, /* in_resource_area */ false);
+  vmassert(_committed.size() == 0, "virtual space initialized more than once");
+  BitMap::idx_t size_in_pages = rs.size() / page_size;
+  _committed.resize(size_in_pages, /* in_resource_area */ false);
   if (_special) {
-    _dirty.resize(size_in_bits, /* in_resource_area */ false);
+    _dirty.resize(size_in_pages, /* in_resource_area */ false);
   }
 
-  return true;
+  _tail_size = used_size % _page_size;
 }
 
-
 G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
   release();
 }
@@ -87,12 +95,18 @@
   _special                = false;
   _executable             = false;
   _page_size              = 0;
+  _tail_size              = 0;
   _committed.resize(0, false);
   _dirty.resize(0, false);
 }
 
 size_t G1PageBasedVirtualSpace::committed_size() const {
-  return _committed.count_one_bits() * _page_size;
+  size_t result = _committed.count_one_bits() * _page_size;
+  // The last page might not be committed in full.
+  if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
+    result -= _page_size - _tail_size;
+  }
+  return result;
 }
 
 size_t G1PageBasedVirtualSpace::reserved_size() const {
@@ -103,65 +117,134 @@
   return reserved_size() - committed_size();
 }
 
-uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
   return (addr - _low_boundary) / _page_size;
 }
 
-bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
-  uintptr_t end = start + size_in_pages;
-  return _committed.get_next_zero_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_committed(size_t start_page, size_t size_in_pages) const {
+  size_t end_page = start_page + size_in_pages;
+  return _committed.get_next_zero_offset(start_page, end_page) >= end_page;
 }
 
-bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
-  uintptr_t end = start + size_in_pages;
-  return _committed.get_next_one_offset(start, end) >= end;
+bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start_page, size_t size_in_pages) const {
+  size_t end_page = start_page + size_in_pages;
+  return _committed.get_next_one_offset(start_page, end_page) >= end_page;
 }
 
-char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
+char* G1PageBasedVirtualSpace::page_start(size_t index) const {
   return _low_boundary + index * _page_size;
 }
 
-size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) {
-  return num * _page_size;
+bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
+  guarantee(index <= _committed.size(),
+            err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
+  return index == _committed.size();
+}
+
+void G1PageBasedVirtualSpace::commit_preferred_pages(size_t start, size_t num_pages) {
+  vmassert(num_pages > 0, "No full pages to commit");
+  vmassert(start + num_pages <= _committed.size(),
+           err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
+                   "that is outside of managed space of " SIZE_FORMAT " pages",
+                   start, start + num_pages, _committed.size()));
+
+  char* start_addr = page_start(start);
+  size_t size = num_pages * _page_size;
+
+  os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
+                            err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+                                    p2i(start_addr), p2i(start_addr + size), size));
+}
+
+void G1PageBasedVirtualSpace::commit_tail() {
+  vmassert(_tail_size > 0, "The size of the tail area must be > 0 when reaching here");
+
+  char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
+  os::commit_memory_or_exit(aligned_end_address, _tail_size, os::vm_page_size(), _executable,
+                            err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+                                    p2i(aligned_end_address), p2i(_high_boundary), _tail_size));
 }
 
-bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page));
+  guarantee(end_page <= _committed.size(),
+            err_msg("Given end page " SIZE_FORMAT " is beyond the end of the managed page count of " SIZE_FORMAT, end_page, _committed.size()));
+
+  size_t pages = end_page - start_page;
+  bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
+
+  // If we have to commit some (partial) tail area, decrease the number of pages to avoid
+  // committing that in the full-page commit code.
+  if (need_to_commit_tail) {
+    pages--;
+  }
+
+  if (pages > 0) {
+    commit_preferred_pages(start_page, pages);
+  }
+
+  if (need_to_commit_tail) {
+    commit_tail();
+  }
+}
+
+char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
+  return MIN2(_high_boundary, page_start(end_page));
+}
+
+void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page));
+
+  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
+}
+
+bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
   // We need to make sure to commit all pages covered by the given area.
-  guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
+  guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
 
   bool zero_filled = true;
-  uintptr_t end = start + size_in_pages;
+  size_t end_page = start_page + size_in_pages;
 
   if (_special) {
     // Check for dirty pages and update zero_filled if any found.
-    if (_dirty.get_next_one_offset(start,end) < end) {
+    if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
       zero_filled = false;
-      _dirty.clear_range(start, end);
+      _dirty.clear_range(start_page, end_page);
     }
   } else {
-    os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable,
-                              err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages));
+    commit_internal(start_page, end_page);
   }
-  _committed.set_range(start, end);
+  _committed.set_range(start_page, end_page);
 
   if (AlwaysPreTouch) {
-    os::pretouch_memory(page_start(start), page_start(end));
+    pretouch_internal(start_page, end_page);
   }
   return zero_filled;
 }
 
-void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
-  guarantee(is_area_committed(start, size_in_pages), "checking");
+void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger than or equal to end page " SIZE_FORMAT, start_page, end_page));
 
+  char* start_addr = page_start(start_page);
+  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
+}
+
+void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
+  guarantee(is_area_committed(start_page, size_in_pages), "checking");
+
+  size_t end_page = start_page + size_in_pages;
   if (_special) {
     // Mark that memory is dirty. If committed again the memory might
     // need to be cleared explicitly.
-    _dirty.set_range(start, start + size_in_pages);
+    _dirty.set_range(start_page, end_page);
   } else {
-    os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages));
+    uncommit_internal(start_page, end_page);
   }
 
-  _committed.clear_range(start, start + size_in_pages);
+  _committed.clear_range(start_page, end_page);
 }
 
 bool G1PageBasedVirtualSpace::contains(const void* p) const {
@@ -175,7 +258,8 @@
   out->cr();
   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
-  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(_low_boundary), p2i(_high_boundary));
+  out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
+  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]",  p2i(_low_boundary), p2i(_high_boundary));
 }
 
 void G1PageBasedVirtualSpace::print() {
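
The core of the new commit path is the split in commit_internal(): commit as many full preferred-size pages as possible, then commit any partial tail with small OS pages via commit_tail(). A standalone model of that split, assuming 2M preferred pages and 4K small pages:

    #include <cstddef>
    #include <cstdio>

    static const size_t kPreferredPageSize = 2 * 1024 * 1024; // assumed large page
    static const size_t kSmallPageSize     = 4 * 1024;        // assumed OS page

    // Mirrors the splitting done by commit_internal()/commit_tail() above:
    // full preferred pages first, the remainder with small pages.
    static void model_commit(size_t used_size) {
      size_t full_pages = used_size / kPreferredPageSize;
      size_t tail_size  = used_size % kPreferredPageSize;
      printf("commit %zu preferred pages", full_pages);
      if (tail_size > 0) {
        printf(" + %zu tail bytes with %zu-byte pages", tail_size, kSmallPageSize);
      }
      printf("\n");
    }

    int main() {
      model_commit(5 * 1024 * 1024); // 2 preferred pages + 1M tail
      model_commit(4 * 1024 * 1024); // 2 preferred pages, no tail
      return 0;
    }
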
--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Tue Apr 07 10:53:51 2015 +0200
@@ -34,6 +34,12 @@
 // granularity.
 // (De-)Allocation requests are always OS page aligned by passing a page index
 // and multiples of pages.
+// For systems that only commit memory in chunks of a given size (always greater
+// than the OS page size), the base address is required to be aligned to that
+// commit size. The actual size used need not be aligned to that size, but the
+// size of the reservation passed in may be rounded up to it. Any fragment
+// (smaller than that size) at the tail of the used size will be committed
+// using OS small pages.
 // The implementation gives an error when trying to commit or uncommit pages that
 // have already been committed or uncommitted.
 class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC {
@@ -43,7 +49,11 @@
   char* _low_boundary;
   char* _high_boundary;
 
-  // The commit/uncommit granularity in bytes.
+  // The size of the tail in bytes of the handled space that needs to be committed
+  // using small pages.
+  size_t _tail_size;
+
+  // The preferred page size used for commit/uncommit in bytes.
   size_t _page_size;
 
   // Bitmap used for verification of commit/uncommit operations.
@@ -62,30 +72,55 @@
   // Indicates whether the committed space should be executable.
   bool _executable;
 
+  // Helper function for committing memory. Commit the given memory range by using
+  // _page_size pages as much as possible and the remainder with small sized pages.
+  void commit_internal(size_t start_page, size_t end_page);
+  // Commit num_pages pages of _page_size size starting from start_page. All argument
+  // checking has been performed.
+  void commit_preferred_pages(size_t start_page, size_t num_pages);
+  // Commit space at the high end of the space that needs to be committed with small
+  // sized pages.
+  void commit_tail();
+
+  // Uncommit the given memory range.
+  void uncommit_internal(size_t start_page, size_t end_page);
+
+  // Pretouch the given memory range.
+  void pretouch_internal(size_t start_page, size_t end_page);
+
   // Returns the index of the page which contains the given address.
   uintptr_t  addr_to_page_index(char* addr) const;
   // Returns the address of the given page index.
-  char*  page_start(uintptr_t index);
-  // Returns the byte size of the given number of pages.
-  size_t byte_size_for_pages(size_t num);
+  char*  page_start(size_t index) const;
+
+  // Is the given page index the last page?
+  bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
+  // Is the given page index the first one after the last page?
+  bool is_after_last_page(size_t index) const;
+  // Is the last page only partially covered by this space?
+  bool is_last_page_partial() const { return !is_ptr_aligned(_high_boundary, _page_size); }
+  // Returns the end address of the given page bounded by the reserved space.
+  char* bounded_end_addr(size_t end_page) const;
 
   // Returns true if the entire area is backed by committed memory.
-  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  bool is_area_committed(size_t start_page, size_t size_in_pages) const;
   // Returns true if the entire area is not backed by committed memory.
-  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+  bool is_area_uncommitted(size_t start_page, size_t size_in_pages) const;
 
+  void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
  public:
 
   // Commit the given area of pages starting at start being size_in_pages large.
   // Returns true if the given area is zero filled upon completion.
-  bool commit(uintptr_t start, size_t size_in_pages);
+  bool commit(size_t start_page, size_t size_in_pages);
 
   // Uncommit the given area of pages starting at start being size_in_pages large.
-  void uncommit(uintptr_t start, size_t size_in_pages);
+  void uncommit(size_t start_page, size_t size_in_pages);
 
-  // Initialization
-  G1PageBasedVirtualSpace();
-  bool initialize_with_granularity(ReservedSpace rs, size_t page_size);
+  // Initialize this virtual space with the given reserved space and the size of
+  // it that is actually used.
+  // Prefer to commit in page_size chunks.
+  G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);
 
   // Destruction
   ~G1PageBasedVirtualSpace();
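
The _tail_size field added above also feeds the committed_size() accounting in the .cpp file: a committed partial last page contributes only its used tail. A minimal sketch with assumed parameters, not the HotSpot API:

    #include <cstddef>
    #include <cstdio>

    // A committed partial last page counts only for its used tail, not for a
    // full preferred-size page.
    static size_t committed_size(size_t committed_pages, size_t page_size,
                                 size_t tail_size, bool last_page_committed) {
      size_t result = committed_pages * page_size;
      if (tail_size > 0 && last_page_committed) {
        result -= page_size - tail_size;
      }
      return result;
    }

    int main() {
      const size_t page = 2 * 1024 * 1024;
      // Three committed pages, the last one used only up to a 1M tail: 5M total.
      printf("%zu bytes committed\n", committed_size(3, page, 1024 * 1024, true));
      return 0;
    }
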
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,17 +31,16 @@
 #include "utilities/bitMap.inline.hpp"
 
 G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
-                                             size_t commit_granularity,
+                                             size_t used_size,
+                                             size_t page_size,
                                              size_t region_granularity,
                                              MemoryType type) :
-  _storage(),
-  _commit_granularity(commit_granularity),
+  _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
   _listener(NULL),
   _commit_map() {
-  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
-  _storage.initialize_with_granularity(rs, commit_granularity);
 
   MemTracker::record_virtual_memory_type((address)rs.base(), type);
 }
@@ -55,25 +54,26 @@
 
  public:
   G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
-                                      size_t os_commit_granularity,
+                                      size_t actual_size,
+                                      size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
-     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
-    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
 
-    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
     _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
   }
 
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
-    bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void commit_regions(uint start_idx, size_t num_regions) {
+    bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
     _commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
   }
 
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
-    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+    _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
     _commit_map.clear_range(start_idx, start_idx + num_regions);
   }
 };
@@ -98,22 +98,23 @@
 
  public:
   G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
-                                       size_t os_commit_granularity,
+                                       size_t actual_size,
+                                       size_t page_size,
                                        size_t alloc_granularity,
                                        size_t commit_factor,
                                        MemoryType type) :
-     G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type),
-    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
 
-    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity);
+    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
     _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
   }
 
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
-    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
-      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
-      uintptr_t idx = region_idx_to_page_idx(i);
+  virtual void commit_regions(uint start_idx, size_t num_regions) {
+    for (uint i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
+      size_t idx = region_idx_to_page_idx(i);
       uint old_refcount = _refcounts.get_by_index(idx);
       bool zero_filled = false;
       if (old_refcount == 0) {
@@ -125,10 +126,10 @@
     }
   }
 
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
-    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
-      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
-      uintptr_t idx = region_idx_to_page_idx(i);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+    for (uint i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
+      size_t idx = region_idx_to_page_idx(i);
       uint old_refcount = _refcounts.get_by_index(idx);
       assert(old_refcount > 0, "must be");
       if (old_refcount == 1) {
@@ -147,14 +148,15 @@
 }
 
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
-                                                            size_t os_commit_granularity,
+                                                            size_t actual_size,
+                                                            size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
                                                             MemoryType type) {
 
-  if (region_granularity >= (os_commit_granularity * commit_factor)) {
-    return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+  if (region_granularity >= (page_size * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   } else {
-    return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type);
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   }
 }
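
The dispatch in create_mapper() compares the region size against page_size * commit_factor, i.e. the number of heap bytes covered by one backing page of the data structure. A small sketch of that rule with illustrative values (1M regions, 2M pages, a card-table-like factor of 512):

    #include <cstddef>
    #include <cstdio>

    // If one region spans at least one full backing page, whole pages can be
    // committed per region; otherwise several regions share one page and the
    // mapper must reference-count page usage.
    int main() {
      const size_t region_size   = 1024 * 1024;     // assumed region size
      const size_t page_size     = 2 * 1024 * 1024; // assumed large page
      const size_t commit_factor = 512;             // assumed translation factor

      if (region_size >= page_size * commit_factor) {
        printf("pages per region: %zu\n", region_size / (page_size * commit_factor));
      } else {
        printf("regions per page: %zu\n", (page_size * commit_factor) / region_size);
      }
      return 0;
    }
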
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	Tue Apr 07 10:53:51 2015 +0200
@@ -46,12 +46,12 @@
  protected:
   // Backing storage.
   G1PageBasedVirtualSpace _storage;
-  size_t _commit_granularity;
+
   size_t _region_granularity;
   // Mapping management
   BitMap _commit_map;
 
-  G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type);
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
 
   void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
  public:
@@ -70,16 +70,20 @@
     return _commit_map.at(idx);
   }
 
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
 
   // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
+  // The actual space to be used within the given reservation is given by actual_size.
+  // This is because some OSes need to round up the reservation size to a multiple of
+  // page_size to guarantee the requested alignment.
   // The byte_translation_factor defines how many bytes in a region correspond to
   // a single byte in the data structure this mapper is for.
   // Eg. in the card table, this value corresponds to the size a single card
-  // table entry corresponds to.
+  // table entry corresponds to in the heap.
   static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
-                                              size_t os_commit_granularity,
+                                              size_t actual_size,
+                                              size_t page_size,
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
                                               MemoryType type);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -419,6 +419,7 @@
   ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size()));
   G1RegionToSpaceMapper* bot_storage =
     G1RegionToSpaceMapper::create_mapper(bot_rs,
+                                         bot_rs.size(),
                                          os::vm_page_size(),
                                          HeapRegion::GrainBytes,
                                          G1BlockOffsetSharedArray::N_bytes,
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -89,6 +89,10 @@
   return os::vm_page_size();
 WB_END
 
+WB_ENTRY(jlong, WB_GetVMLargePageSize(JNIEnv* env, jobject o))
+  return os::large_page_size();
+WB_END
+
 class WBIsKlassAliveClosure : public KlassClosure {
     Symbol* _name;
     bool _found;
@@ -1301,6 +1305,7 @@
   {CC"isObjectInOldGen0",   CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen  },
   {CC"getHeapOopSize",     CC"()I",                   (void*)&WB_GetHeapOopSize    },
   {CC"getVMPageSize",      CC"()I",                   (void*)&WB_GetVMPageSize     },
+  {CC"getVMLargePageSize", CC"()J",                   (void*)&WB_GetVMLargePageSize},
   {CC"isClassAlive0",      CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive      },
   {CC"parseCommandLine0",
       CC"(Ljava/lang/String;C[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Tue Apr 07 10:53:51 2015 +0200
@@ -37,13 +37,21 @@
     _alignment(0), _special(false), _executable(false) {
 }
 
-ReservedSpace::ReservedSpace(size_t size) {
+ReservedSpace::ReservedSpace(size_t size, bool prefer_large_pages) {
   // Want to use large pages where possible and pad with small pages.
   size_t page_size = os::page_size_for_region_unaligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
-  // Don't force the alignment to be large page aligned,
-  // since that will waste memory.
-  size_t alignment = os::vm_allocation_granularity();
+  size_t alignment;
+  if (large_pages && prefer_large_pages) {
+    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+    // ReservedSpace initialization requires size to be aligned to the given
+    // alignment. Align the size up.
+    size = align_size_up(size, alignment);
+  } else {
+    // Don't force the alignment to be large page aligned,
+    // since that will waste memory.
+    alignment = os::vm_allocation_granularity();
+  }
   initialize(size, alignment, large_pages, NULL, false);
 }
 
--- a/hotspot/src/share/vm/runtime/virtualspace.hpp	Thu Apr 02 16:42:24 2015 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.hpp	Tue Apr 07 10:53:51 2015 +0200
@@ -51,7 +51,12 @@
  public:
   // Constructor
   ReservedSpace();
-  ReservedSpace(size_t size);
+  // Initialize the reserved space with the given size. If prefer_large_pages is
+  // set and the given size warrants the use of large pages, try to force their
+  // use by passing an alignment restriction further down. This may waste some
+  // space if the given size is not aligned to large pages, as the reservation
+  // will be aligned up to large page alignment.
+  ReservedSpace(size_t size, bool prefer_large_pages = false);
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL);
   ReservedSpace(size_t size, size_t alignment, bool large, bool executable);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestLargePageUseForAuxMemory.java	Tue Apr 07 10:53:51 2015 +0200
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestLargePageUseForAuxMemory.java
+ * @bug 8058354
+ * @key gc
+ * @library /testlibrary /../../test/lib
+ * @requires (vm.gc=="G1" | vm.gc=="null")
+ * @build TestLargePageUseForAuxMemory
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @summary Test that auxiliary data structures are allocated using large pages if available.
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UseG1GC -XX:+WhiteBoxAPI -XX:+IgnoreUnrecognizedVMOptions -XX:+UseLargePages TestLargePageUseForAuxMemory
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class TestLargePageUseForAuxMemory {
+    static final int HEAP_REGION_SIZE = 4 * 1024 * 1024;
+    static long largePageSize;
+    static long smallPageSize;
+
+    static void checkSmallTables(OutputAnalyzer output, long expectedPageSize) throws Exception {
+        output.shouldContain("G1 'Block offset table': pg_sz=" + expectedPageSize);
+        output.shouldContain("G1 'Card counts table': pg_sz=" + expectedPageSize);
+    }
+
+    static void checkBitmaps(OutputAnalyzer output, long expectedPageSize) throws Exception {
+        output.shouldContain("G1 'Prev Bitmap': pg_sz=" + expectedPageSize);
+        output.shouldContain("G1 'Next Bitmap': pg_sz=" + expectedPageSize);
+    }
+
+    static void testVM(long heapsize, boolean cardsShouldUseLargePages, boolean bitmapShouldUseLargePages) throws Exception {
+        ProcessBuilder pb;
+        // Test with large pages enabled.
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                   "-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE,
+                                                   "-Xms" + 10 * HEAP_REGION_SIZE,
+                                                   "-Xmx" + heapsize,
+                                                   "-XX:+TracePageSizes",
+                                                   "-XX:+UseLargePages",
+                                                   "-XX:+IgnoreUnrecognizedVMOptions",  // there is no ObjectAlignmentInBytes in 32 bit builds
+                                                   "-XX:ObjectAlignmentInBytes=8",
+                                                   "-version");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        checkSmallTables(output, (cardsShouldUseLargePages ? largePageSize : smallPageSize));
+        checkBitmaps(output, (bitmapShouldUseLargePages ? largePageSize : smallPageSize));
+        output.shouldHaveExitValue(0);
+
+        // Test with large pages disabled.
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                   "-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE,
+                                                   "-Xms" + 10 * HEAP_REGION_SIZE,
+                                                   "-Xmx" + heapsize,
+                                                   "-XX:+TracePageSizes",
+                                                   "-XX:-UseLargePages",
+                                                   "-XX:+IgnoreUnrecognizedVMOptions",  // there is no ObjectAlignmentInBytes in 32 bit builds
+                                                   "-XX:ObjectAlignmentInBytes=8",
+                                                   "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        checkSmallTables(output, smallPageSize);
+        checkBitmaps(output, smallPageSize);
+        output.shouldHaveExitValue(0);
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (!Platform.isDebugBuild()) {
+            System.out.println("Skip tests on non-debug builds because the required TracePageSizes option is debug-only.");
+            return;
+        }
+
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        smallPageSize = wb.getVMPageSize();
+        largePageSize = wb.getVMLargePageSize();
+
+        if (largePageSize == 0) {
+            System.out.println("Skip tests because large page support does not seem to be available on this platform.");
+            return;
+        }
+
+        // To get large pages for the card table etc. we need at least a 1G heap (with 4k page size).
+        // 32 bit systems will have problems reserving such an amount of contiguous space, so skip the
+        // test there.
+        if (!Platform.is32bit()) {
+            // Size that a single card covers.
+            final int cardSize = 512;
+
+            final long heapSizeForCardTableUsingLargePages = largePageSize * cardSize;
+
+            testVM(heapSizeForCardTableUsingLargePages, true, true);
+            testVM(heapSizeForCardTableUsingLargePages + HEAP_REGION_SIZE, true, true);
+            testVM(heapSizeForCardTableUsingLargePages - HEAP_REGION_SIZE, false, true);
+        }
+
+        // The minimum heap size needed to get large pages for the bitmaps is a 128M heap.
+        // This seems okay to test everywhere.
+        final int bitmapTranslationFactor = 8 * 8; // ObjectAlignmentInBytes * BitsPerByte
+        final long heapSizeForBitmapUsingLargePages = largePageSize * bitmapTranslationFactor;
+
+        testVM(heapSizeForBitmapUsingLargePages, false, true);
+        testVM(heapSizeForBitmapUsingLargePages + HEAP_REGION_SIZE, false, true);
+        testVM(heapSizeForBitmapUsingLargePages - HEAP_REGION_SIZE, false, false);
+    }
+}
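
The heap-size thresholds used by the test follow directly from the translation factors: an auxiliary structure first fills a whole large page once the heap reaches large_page_size * translation_factor bytes. A worked check of the two thresholds, assuming 2M large pages:

    #include <cstdio>

    int main() {
      const long large_page = 2L * 1024 * 1024; // assumed large page size
      const long card_size  = 512;              // heap bytes per card table byte
      const long bitmap_tf  = 8 * 8;            // ObjectAlignmentInBytes * BitsPerByte

      // Card table: 2M * 512 = 1G heap; bitmaps: 2M * 64 = 128M heap.
      printf("card table needs >= %ld M heap\n", (large_page * card_size) >> 20);
      printf("bitmaps    need  >= %ld M heap\n", (large_page * bitmap_tf) >> 20);
      return 0;
    }
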
+