hotspot/src/share/vm/runtime/virtualspace.cpp
changeset 1 489c9b5090e2
child 823 9a5271881bc0
child 1374 4c24294029a9
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,704 @@
+/*
+ * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_virtualspace.cpp.incl"
+
+
+// ReservedSpace
+ReservedSpace::ReservedSpace(size_t size) {
+  initialize(size, 0, false, NULL);
+}
+
+ReservedSpace::ReservedSpace(size_t size, size_t alignment,
+                             bool large, char* requested_address) {
+  initialize(size, alignment, large, requested_address);
+}
+
+char *
+ReservedSpace::align_reserved_region(char* addr, const size_t len,
+                                     const size_t prefix_size,
+                                     const size_t prefix_align,
+                                     const size_t suffix_size,
+                                     const size_t suffix_align)
+{
+  assert(addr != NULL, "sanity");
+  const size_t required_size = prefix_size + suffix_size;
+  assert(len >= required_size, "len too small");
+
+  const size_t s = size_t(addr);
+  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
+  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
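+  // Illustrative example (hypothetical values): with prefix_size = 1M,
+  // suffix_align = 4M and addr = 0x10200000, beg_ofs = 0x300000 and
+  // beg_delta = 0x100000; advancing the region start by beg_delta makes the
+  // prefix/suffix boundary 4M aligned.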
+
+  if (len < beg_delta + required_size) {
+     return NULL; // Cannot do proper alignment.
+  }
+  const size_t end_delta = len - (beg_delta + required_size);
+
+  if (beg_delta != 0) {
+    os::release_memory(addr, beg_delta);
+  }
+
+  if (end_delta != 0) {
+    char* release_addr = (char*) (s + beg_delta + required_size);
+    os::release_memory(release_addr, end_delta);
+  }
+
+  return (char*) (s + beg_delta);
+}
+
+char* ReservedSpace::reserve_and_align(const size_t reserve_size,
+                                       const size_t prefix_size,
+                                       const size_t prefix_align,
+                                       const size_t suffix_size,
+                                       const size_t suffix_align)
+{
+  assert(reserve_size > prefix_size + suffix_size, "should not be here");
+
+  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
+  if (raw_addr == NULL) return NULL;
+
+  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
+                                       prefix_align, suffix_size,
+                                       suffix_align);
+  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
+    fatal("os::release_memory failed");
+  }
+
+#ifdef ASSERT
+  if (result != NULL) {
+    const size_t raw = size_t(raw_addr);
+    const size_t res = size_t(result);
+    assert(res >= raw, "alignment decreased start addr");
+    assert(res + prefix_size + suffix_size <= raw + reserve_size,
+           "alignment increased end addr");
+    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
+    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
+           "bad alignment of suffix");
+  }
+#endif
+
+  return result;
+}
+
+ReservedSpace::ReservedSpace(const size_t prefix_size,
+                             const size_t prefix_align,
+                             const size_t suffix_size,
+                             const size_t suffix_align)
+{
+  assert(prefix_size != 0, "sanity");
+  assert(prefix_align != 0, "sanity");
+  assert(suffix_size != 0, "sanity");
+  assert(suffix_align != 0, "sanity");
+  assert((prefix_size & (prefix_align - 1)) == 0,
+    "prefix_size not divisible by prefix_align");
+  assert((suffix_size & (suffix_align - 1)) == 0,
+    "suffix_size not divisible by suffix_align");
+  assert((suffix_align & (prefix_align - 1)) == 0,
+    "suffix_align not divisible by prefix_align");
+
+  // On systems where the entire region has to be reserved and committed up
+  // front, the compound alignment normally done by this method is unnecessary.
+  const bool try_reserve_special = UseLargePages &&
+    prefix_align == os::large_page_size();
+  if (!os::can_commit_large_page_memory() && try_reserve_special) {
+    initialize(prefix_size + suffix_size, prefix_align, true);
+    return;
+  }
+
+  _base = NULL;
+  _size = 0;
+  _alignment = 0;
+  _special = false;
+
+  // Optimistically try to reserve the exact size needed.
+  const size_t size = prefix_size + suffix_size;
+  char* addr = os::reserve_memory(size, NULL, prefix_align);
+  if (addr == NULL) return;
+
+  // Check whether the result has the needed alignment (unlikely unless
+  // prefix_align == suffix_align).
+  const size_t ofs = (size_t(addr) + prefix_size) & (suffix_align - 1);
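+  // ofs is the distance of (addr + prefix_size) above the previous
+  // suffix_align boundary; it is zero only if the prefix/suffix boundary is
+  // already suitably aligned.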
+  if (ofs != 0) {
+    // Wrong alignment.  Release, allocate more space and do manual alignment.
+    //
+    // On most operating systems, another allocation with a somewhat larger size
+    // will return an address "close to" that of the previous allocation.  The
+    // result is often the same address (if the kernel hands out virtual
+    // addresses from low to high), or an address that is offset by the increase
+    // in size.  Exploit that to minimize the amount of extra space requested.
+    if (!os::release_memory(addr, size)) {
+      fatal("os::release_memory failed");
+    }
+
+    const size_t extra = MAX2(ofs, suffix_align - ofs);
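+    // Reserving ofs extra bytes is enough if the second reservation comes
+    // back lower by the increase in size, and suffix_align - ofs extra bytes
+    // is enough if it comes back at the same address; taking the maximum
+    // covers either of the common outcomes described above.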
+    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
+                             suffix_size, suffix_align);
+    if (addr == NULL) {
+      // Try an even larger region.  If this fails, address space is exhausted.
+      addr = reserve_and_align(size + suffix_align, prefix_size,
+                               prefix_align, suffix_size, suffix_align);
+    }
+  }
+
+  _base = addr;
+  _size = size;
+  _alignment = prefix_align;
+}
+
+void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
+                               char* requested_address) {
+  const size_t granularity = os::vm_allocation_granularity();
+  assert((size & (granularity - 1)) == 0,
+         "size not aligned to os::vm_allocation_granularity()");
+  assert((alignment & (granularity - 1)) == 0,
+         "alignment not aligned to os::vm_allocation_granularity()");
+  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
+         "not a power of 2");
+
+  _base = NULL;
+  _size = 0;
+  _special = false;
+  _alignment = 0;
+  if (size == 0) {
+    return;
+  }
+
+  // If the OS doesn't support demand paging for large page memory, we need
+  // to use reserve_memory_special() to reserve and pin the entire region.
+  bool special = large && !os::can_commit_large_page_memory();
+  char* base = NULL;
+
+  if (special) {
+    // It's not hard to implement reserve_memory_special() such that it can
+    // allocate at a fixed address, but there seems to be no use for this
+    // feature for now, so it's not implemented.
+    assert(requested_address == NULL, "not implemented");
+
+    base = os::reserve_memory_special(size);
+
+    if (base != NULL) {
+      // Check alignment constraints
+      if (alignment > 0) {
+        assert((uintptr_t) base % alignment == 0,
+               "Large pages returned a non-aligned address");
+      }
+      _special = true;
+    } else {
+      // failed; try to reserve regular memory below
+    }
+  }
+
+  if (base == NULL) {
+    // Optimistically assume that the OS returns an aligned base pointer.
+    // When reserving a large address range, most OSes seem to align to at
+    // least 64K.
+
+    // If the memory was requested at a particular address, use
+    // os::attempt_reserve_memory_at() to avoid mapping over something
+    // important.  If the requested range is not available, NULL is returned.
+
+    if (requested_address != 0) {
+      base = os::attempt_reserve_memory_at(size, requested_address);
+    } else {
+      base = os::reserve_memory(size, NULL, alignment);
+    }
+
+    if (base == NULL) return;
+
+    // Check alignment constraints
+    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
+      // Base not aligned, retry
+      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+      // Reserve size large enough to do manual alignment and
+      // increase size to a multiple of the desired alignment
+      size = align_size_up(size, alignment);
+      size_t extra_size = size + alignment;
+      char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+      if (extra_base == NULL) return;
+      // Do manual alignment
+      base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+      assert(base >= extra_base, "just checking");
+      // Release unused areas
+      size_t unused_bottom_size = base - extra_base;
+      size_t unused_top_size = extra_size - size - unused_bottom_size;
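+      // Illustrative example (hypothetical addresses): with alignment = 4M
+      // and extra_base = 0x10180000, base rounds up to 0x10400000, giving
+      // unused_bottom_size = 0x280000 and unused_top_size = 0x180000; both
+      // leftover slices are released back to the OS below.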
+      assert(unused_bottom_size % os::vm_allocation_granularity() == 0,
+             "size not allocation aligned");
+      assert(unused_top_size % os::vm_allocation_granularity() == 0,
+             "size not allocation aligned");
+      if (unused_bottom_size > 0) {
+        os::release_memory(extra_base, unused_bottom_size);
+      }
+      if (unused_top_size > 0) {
+        os::release_memory(base + size, unused_top_size);
+      }
+    }
+  }
+  // Done
+  _base = base;
+  _size = size;
+  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+
+  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
+         "area must be distinguishable from marks for mark-sweep");
+  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
+         "area must be distinguishable from marks for mark-sweep");
+}
+
+
+ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
+                             bool special) {
+  assert((size % os::vm_allocation_granularity()) == 0,
+         "size not allocation aligned");
+  _base = base;
+  _size = size;
+  _alignment = alignment;
+  _special = special;
+}
+
+
+ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
+                                        bool split, bool realloc) {
+  assert(partition_size <= size(), "partition failed");
+  if (split) {
+    os::split_reserved_memory(_base, _size, partition_size, realloc);
+  }
+  ReservedSpace result(base(), partition_size, alignment, special());
+  return result;
+}
+
+
+ReservedSpace
+ReservedSpace::last_part(size_t partition_size, size_t alignment) {
+  assert(partition_size <= size(), "partition failed");
+  ReservedSpace result(base() + partition_size, size() - partition_size,
+                       alignment, special());
+  return result;
+}
+
+
+size_t ReservedSpace::page_align_size_up(size_t size) {
+  return align_size_up(size, os::vm_page_size());
+}
+
+
+size_t ReservedSpace::page_align_size_down(size_t size) {
+  return align_size_down(size, os::vm_page_size());
+}
+
+
+size_t ReservedSpace::allocation_align_size_up(size_t size) {
+  return align_size_up(size, os::vm_allocation_granularity());
+}
+
+
+size_t ReservedSpace::allocation_align_size_down(size_t size) {
+  return align_size_down(size, os::vm_allocation_granularity());
+}
+
+
+void ReservedSpace::release() {
+  if (is_reserved()) {
+    if (special()) {
+      os::release_memory_special(_base, _size);
+    } else {
+      os::release_memory(_base, _size);
+    }
+    _base = NULL;
+    _size = 0;
+    _special = false;
+  }
+}
+
+
+// VirtualSpace
+
+VirtualSpace::VirtualSpace() {
+  _low_boundary           = NULL;
+  _high_boundary          = NULL;
+  _low                    = NULL;
+  _high                   = NULL;
+  _lower_high             = NULL;
+  _middle_high            = NULL;
+  _upper_high             = NULL;
+  _lower_high_boundary    = NULL;
+  _middle_high_boundary   = NULL;
+  _upper_high_boundary    = NULL;
+  _lower_alignment        = 0;
+  _middle_alignment       = 0;
+  _upper_alignment        = 0;
+}
+
+
+bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
+  if (!rs.is_reserved()) return false;  // allocation failed.
+  assert(_low_boundary == NULL, "VirtualSpace already initialized");
+  _low_boundary  = rs.base();
+  _high_boundary = low_boundary() + rs.size();
+
+  _low = low_boundary();
+  _high = low();
+
+  _special = rs.special();
+
+  // When a VirtualSpace begins life at a large size, make all future expansion
+  // and shrinking occur aligned to a granularity of large pages.  This avoids
+  // fragmentation of physical addresses that inhibits the use of large pages
+  // by the OS virtual memory system.  Empirically, we see that with a 4MB
+  // page size, the only spaces that get handled this way are codecache and
+  // the heap itself, both of which provide a substantial performance
+  // boost in many benchmarks when covered by large pages.
+  //
+  // No attempt is made to force large page alignment at the very top and
+  // bottom of the space if they are not already aligned that way.
+  _lower_alignment  = os::vm_page_size();
+  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
+  _upper_alignment  = os::vm_page_size();
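+  // middle_alignment comes out as the large page size when the space is big
+  // enough to hold at least one large page (see the comment above expand_by);
+  // otherwise it falls back to the default page size.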
+
+  // End of each region
+  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
+  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
+  _upper_high_boundary = high_boundary();
+
+  // High address of each region
+  _lower_high = low_boundary();
+  _middle_high = lower_high_boundary();
+  _upper_high = middle_high_boundary();
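+  // The reserved space is thus split into three contiguous regions:
+  //   [low_boundary, lower_high_boundary)          default pages
+  //   [lower_high_boundary, middle_high_boundary)  middle_alignment pages
+  //   [middle_high_boundary, high_boundary)        default pages
+  // Each _*_high pointer records how far its region has been committed.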
+
+  // commit to initial size
+  if (committed_size > 0) {
+    if (!expand_by(committed_size)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+VirtualSpace::~VirtualSpace() {
+  release();
+}
+
+
+void VirtualSpace::release() {
+  (void)os::release_memory(low_boundary(), reserved_size());
+  _low_boundary           = NULL;
+  _high_boundary          = NULL;
+  _low                    = NULL;
+  _high                   = NULL;
+  _lower_high             = NULL;
+  _middle_high            = NULL;
+  _upper_high             = NULL;
+  _lower_high_boundary    = NULL;
+  _middle_high_boundary   = NULL;
+  _upper_high_boundary    = NULL;
+  _lower_alignment        = 0;
+  _middle_alignment       = 0;
+  _upper_alignment        = 0;
+  _special                = false;
+}
+
+
+size_t VirtualSpace::committed_size() const {
+  return pointer_delta(high(), low(), sizeof(char));
+}
+
+
+size_t VirtualSpace::reserved_size() const {
+  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
+}
+
+
+size_t VirtualSpace::uncommitted_size()  const {
+  return reserved_size() - committed_size();
+}
+
+
+bool VirtualSpace::contains(const void* p) const {
+  return low() <= (const char*) p && (const char*) p < high();
+}
+
+/*
+   First we need to determine whether a particular virtual space is using
+   large pages.  This is done in the initialize function; only virtual spaces
+   that are larger than LargePageSizeInBytes use large pages.  Once that is
+   determined, all expand_by and shrink_by calls must grow and shrink by
+   large-page-sized chunks.  If a particular request falls within the current
+   large page, the calls to commit and uncommit memory can be skipped.  If
+   the low and high boundaries of this space are not large page aligned, the
+   pages leading up to the first large page address and the pages after the
+   last large page address must be allocated with default pages.
+*/
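+// Illustrative example (sizes hypothetical): with a 4M middle alignment, an
+// expand_by request that ends inside the middle region is committed up to
+// the next 4M boundary, so a later request that still falls inside that last
+// committed large page needs no further commit; _high itself always advances
+// by exactly the number of bytes requested.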
+bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
+  if (uncommitted_size() < bytes) return false;
+
+  if (special()) {
+    // don't commit memory if the entire space is pinned in memory
+    _high += bytes;
+    return true;
+  }
+
+  char* previous_high = high();
+  char* unaligned_new_high = high() + bytes;
+  assert(unaligned_new_high <= high_boundary(),
+         "cannot expand by more than upper boundary");
+
+  // Calculate where the new high for each of the regions should be.  If
+  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
+  // then the unaligned lower and upper new highs would be the
+  // lower_high() and upper_high() respectively.
+  char* unaligned_lower_new_high =
+    MIN2(unaligned_new_high, lower_high_boundary());
+  char* unaligned_middle_new_high =
+    MIN2(unaligned_new_high, middle_high_boundary());
+  char* unaligned_upper_new_high =
+    MIN2(unaligned_new_high, upper_high_boundary());
+
+  // Align the new highs based on each region's alignment.  The lower and
+  // upper alignments are always the default page size.  The middle alignment
+  // is LargePageSizeInBytes if the actual size of the virtual space is
+  // larger than LargePageSizeInBytes.
+  char* aligned_lower_new_high =
+    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+  char* aligned_middle_new_high =
+    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+  char* aligned_upper_new_high =
+    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+
+  // Determine which regions need to grow in this expand_by call.
+  // If we are growing in the lower region, high() must be in that
+  // region, so calculate the size based on high().  For the middle and
+  // upper regions, determine the starting point of growth based on the
+  // location of high().  By taking the MAX of the region's low address
+  // (or the previous region's high address) and high(), we can tell whether
+  // the growth is intra- or inter-region.
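+  // For instance, once high() has moved past lower_high_boundary(), prior
+  // expansions have already pushed lower_high() up to lower_high_boundary(),
+  // so aligned_lower_new_high cannot exceed it and lower_needs stays zero;
+  // only the middle (and possibly upper) region grows.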
+  size_t lower_needs = 0;
+  if (aligned_lower_new_high > lower_high()) {
+    lower_needs =
+      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
+  }
+  size_t middle_needs = 0;
+  if (aligned_middle_new_high > middle_high()) {
+    middle_needs =
+      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
+  }
+  size_t upper_needs = 0;
+  if (aligned_upper_new_high > upper_high()) {
+    upper_needs =
+      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
+  }
+
+  // Check contiguity.
+  assert(low_boundary() <= lower_high() &&
+         lower_high() <= lower_high_boundary(),
+         "high address must be contained within the region");
+  assert(lower_high_boundary() <= middle_high() &&
+         middle_high() <= middle_high_boundary(),
+         "high address must be contained within the region");
+  assert(middle_high_boundary() <= upper_high() &&
+         upper_high() <= upper_high_boundary(),
+         "high address must be contained within the region");
+
+  // Commit regions
+  if (lower_needs > 0) {
+    assert(low_boundary() <= lower_high() &&
+           lower_high() + lower_needs <= lower_high_boundary(),
+           "must not expand beyond region");
+    if (!os::commit_memory(lower_high(), lower_needs)) {
+      debug_only(warning("os::commit_memory failed"));
+      return false;
+    } else {
+      _lower_high += lower_needs;
+    }
+  }
+  if (middle_needs > 0) {
+    assert(lower_high_boundary() <= middle_high() &&
+           middle_high() + middle_needs <= middle_high_boundary(),
+           "must not expand beyond region");
+    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
+      debug_only(warning("os::commit_memory failed"));
+      return false;
+    }
+    _middle_high += middle_needs;
+  }
+  if (upper_needs > 0) {
+    assert(middle_high_boundary() <= upper_high() &&
+           upper_high() + upper_needs <= upper_high_boundary(),
+           "must not expand beyond region");
+    if (!os::commit_memory(upper_high(), upper_needs)) {
+      debug_only(warning("os::commit_memory failed"));
+      return false;
+    } else {
+      _upper_high += upper_needs;
+    }
+  }
+
+  if (pre_touch || AlwaysPreTouch) {
+    int vm_ps = os::vm_page_size();
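+    // Touch one byte in every page of the newly committed range so the OS
+    // backs each page with memory now rather than at first use.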
+    for (char* curr = previous_high;
+         curr < unaligned_new_high;
+         curr += vm_ps) {
+      // Note the use of a write here; originally we tried just a read, but
+      // since the value read was unused, the optimizer removed the read.
+      // If we ever have a concurrent touchahead thread, we'll want to use
+      // a read, to avoid the potential of overwriting data (if a mutator
+      // thread beats the touchahead thread to a page).  There are various
+      // ways of making sure this read is not optimized away: for example,
+      // generating the code for a read procedure at runtime.
+      *curr = 0;
+    }
+  }
+
+  _high += bytes;
+  return true;
+}
+
+// A page is uncommitted only when the entire contents of the page are deemed
+// unusable.  The high() pointer keeps moving down; once it crosses a page
+// boundary, the page above that boundary can be uncommitted.
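+//
+// Illustrative example (sizes hypothetical): with a 4M middle alignment and
+// high() inside the middle region, a small shrink that stays within the last
+// committed large page uncommits nothing; only when high() drops past a 4M
+// boundary is the large page above that boundary handed back to the OS.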
+void VirtualSpace::shrink_by(size_t size) {
+  if (committed_size() < size)
+    fatal("Cannot shrink virtual space to negative size");
+
+  if (special()) {
+    // don't uncommit if the entire space is pinned in memory
+    _high -= size;
+    return;
+  }
+
+  char* unaligned_new_high = high() - size;
+  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
+
+  // Calculate new unaligned address
+  char* unaligned_upper_new_high =
+    MAX2(unaligned_new_high, middle_high_boundary());
+  char* unaligned_middle_new_high =
+    MAX2(unaligned_new_high, lower_high_boundary());
+  char* unaligned_lower_new_high =
+    MAX2(unaligned_new_high, low_boundary());
+
+  // Align address to region's alignment
+  char* aligned_upper_new_high =
+    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
+  char* aligned_middle_new_high =
+    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
+  char* aligned_lower_new_high =
+    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
+
+  // Determine which regions need to shrink
+  size_t upper_needs = 0;
+  if (aligned_upper_new_high < upper_high()) {
+    upper_needs =
+      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
+  }
+  size_t middle_needs = 0;
+  if (aligned_middle_new_high < middle_high()) {
+    middle_needs =
+      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
+  }
+  size_t lower_needs = 0;
+  if (aligned_lower_new_high < lower_high()) {
+    lower_needs =
+      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
+  }
+
+  // Check contiguity.
+  assert(middle_high_boundary() <= upper_high() &&
+         upper_high() <= upper_high_boundary(),
+         "high address must be contained within the region");
+  assert(lower_high_boundary() <= middle_high() &&
+         middle_high() <= middle_high_boundary(),
+         "high address must be contained within the region");
+  assert(low_boundary() <= lower_high() &&
+         lower_high() <= lower_high_boundary(),
+         "high address must be contained within the region");
+
+  // Uncommit
+  if (upper_needs > 0) {
+    assert(middle_high_boundary() <= aligned_upper_new_high &&
+           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
+           "must not shrink beyond region");
+    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
+      debug_only(warning("os::uncommit_memory failed"));
+      return;
+    } else {
+      _upper_high -= upper_needs;
+    }
+  }
+  if (middle_needs > 0) {
+    assert(lower_high_boundary() <= aligned_middle_new_high &&
+           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
+           "must not shrink beyond region");
+    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
+      debug_only(warning("os::uncommit_memory failed"));
+      return;
+    } else {
+      _middle_high -= middle_needs;
+    }
+  }
+  if (lower_needs > 0) {
+    assert(low_boundary() <= aligned_lower_new_high &&
+           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
+           "must not shrink beyond region");
+    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
+      debug_only(warning("os::uncommit_memory failed"));
+      return;
+    } else {
+      _lower_high -= lower_needs;
+    }
+  }
+
+  _high -= size;
+}
+
+#ifndef PRODUCT
+void VirtualSpace::check_for_contiguity() {
+  // Check contiguity.
+  assert(low_boundary() <= lower_high() &&
+         lower_high() <= lower_high_boundary(),
+         "high address must be contained within the region");
+  assert(lower_high_boundary() <= middle_high() &&
+         middle_high() <= middle_high_boundary(),
+         "high address must be contained within the region");
+  assert(middle_high_boundary() <= upper_high() &&
+         upper_high() <= upper_high_boundary(),
+         "high address must be contained within the region");
+  assert(low() >= low_boundary(), "low");
+  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
+  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
+  assert(high() <= upper_high(), "upper high");
+}
+
+void VirtualSpace::print() {
+  tty->print   ("Virtual space:");
+  if (special()) tty->print(" (pinned in memory)");
+  tty->cr();
+  tty->print_cr(" - committed: %ld", committed_size());
+  tty->print_cr(" - reserved:  %ld", reserved_size());
+  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
+  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
+}
+
+#endif