Merge
author stefank
Fri, 15 Mar 2013 04:39:05 -0700
changeset 15961 77dc1c6c5456
parent 15948 a131c55f286b
parent 15960 674d0586183f
child 15962 57159db35c41
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -2945,7 +2945,7 @@
   while (p < (uint64_t)end) {
     addrs[0] = p;
     size_t addrs_count = 1;
-    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
+    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
       addrs_count++;
     }
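
The fix above is an off-by-one in the loop that batches page addresses for the meminfo(2) query: under the old guard, when the previous entry was exactly one page below end, an address equal to end itself was appended, asking the kernel about a page outside the scanned range. A standalone sketch of the difference between the two guards, with made-up sizes (an illustration, not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t page_size = 4096;
      const uint64_t end = 4 * page_size;   // valid page starts: 0, 4096, 8192, 12288
      uint64_t addrs[16] = {0};             // addrs[0] = start of the range
      size_t addrs_count = 1;
      // new guard: only append addresses that still lie below end
      while (addrs_count < 16 && addrs[addrs_count - 1] + page_size < end) {
        addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
        addrs_count++;
      }
      // the old guard (addrs[addrs_count - 1] < end) would have appended a
      // fifth entry equal to end, one page past the last valid page start
      printf("%zu pages, last = %llu\n", addrs_count,
             (unsigned long long)addrs[addrs_count - 1]);  // 4 pages, last = 12288
      return 0;
    }
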
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -2063,11 +2063,6 @@
       // required.
       _collectorState = FinalMarking;
   }
-  if (PrintGCDetails &&
-      (_collectorState > Idling ||
-       !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
-    gclog_or_tty->print(" (concurrent mode failure)");
-  }
   collect_in_foreground(clear_all_soft_refs);
 
   // For a mark-sweep, compute_new_size() will be called
@@ -3400,10 +3395,10 @@
   if (PrintCMSStatistics != 0) {
     _collector->resetYields();
   }
-  if (PrintGCDetails && PrintGCTimeStamps) {
+  if (PrintGCDetails) {
     gclog_or_tty->date_stamp(PrintGCDateStamps);
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
+    gclog_or_tty->stamp(PrintGCTimeStamps);
+    gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
       _collector->cmsGen()->short_name(), _phase);
   }
   _collector->resetTimer();
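
Two things change in this hunk: the start-of-phase line now prints whenever PrintGCDetails is on (previously it also required PrintGCTimeStamps), and the timestamp moves behind the guarded stamp(PrintGCTimeStamps) overload, which presumably emits the elapsed-time prefix together with its ": " separator only when the guard is true — hence the literal ": " vanishing from the format string. A minimal standalone mock of the guarded-stamp idea (not the real outputStream API):

    #include <cstdio>

    // stand-in for outputStream::stamp(bool): print "<secs>: " only when guarded on
    static void stamp(bool guard, double elapsed_secs) {
      if (guard) printf("%.3f: ", elapsed_secs);
    }

    int main() {
      bool PrintGCDetails = true, PrintGCTimeStamps = false;
      if (PrintGCDetails) {
        stamp(PrintGCTimeStamps, 12.345);   // silent here: timestamps are off
        printf("[%s-concurrent-%s-start]\n", "CMS", "mark");
      }
      return 0;  // prints: [CMS-concurrent-mark-start]
    }
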
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -4095,7 +4095,7 @@
         // bitmap knows by how much we need to move it as it knows its
         // granularity).
         assert(_finger < _region_limit, "invariant");
-        HeapWord* new_finger = _nextMarkBitMap->nextWord(_finger);
+        HeapWord* new_finger = _nextMarkBitMap->nextObject(_finger);
         // Check if bitmap iteration was aborted while scanning the last object
         if (new_finger >= _region_limit) {
           giveup_current_region();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Mar 15 04:39:05 2013 -0700
@@ -97,7 +97,6 @@
                                        HeapWord* limit = NULL) const;
 
   // conversion utilities
-  // XXX Fix these so that offsets are size_t's...
   HeapWord* offsetToHeapWord(size_t offset) const {
     return _bmStartWord + (offset << _shifter);
   }
@@ -105,8 +104,13 @@
     return pointer_delta(addr, _bmStartWord) >> _shifter;
   }
   int heapWordDiffToOffsetDiff(size_t diff) const;
-  HeapWord* nextWord(HeapWord* addr) {
-    return offsetToHeapWord(heapWordToOffset(addr) + 1);
+
+  // The argument addr should be the start address of a valid object
+  HeapWord* nextObject(HeapWord* addr) {
+    oop obj = (oop) addr;
+    HeapWord* res = addr + obj->size();
+    assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
+    return res;
   }
 
   // debugging
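
The rename captures a change in semantics: nextWord() advanced by a single bitmap granule (one unit of 1 << _shifter heap words), while nextObject() steps over the entire object starting at addr, so callers such as the finger update in concurrentMark.cpp above resume after the last object scanned instead of inside it. The assert checks that the resulting address still round-trips through the bitmap's granularity. A toy model of the skip-by-size idea (illustrative only):

    #include <cstddef>
    #include <cstdio>

    // toy heap of word slots; the first word of each object holds its size in
    // words, standing in for oop->size()
    int main() {
      const size_t heap[] = {3, 0, 0, 2, 0, 1};   // objects of 3, 2 and 1 words
      size_t finger = 0;
      while (finger < sizeof(heap) / sizeof(heap[0])) {
        printf("object at word %zu, size %zu\n", finger, heap[finger]);
        finger += heap[finger];   // nextObject: jump past the object, not one word
      }
      return 0;
    }
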
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Mar 15 04:39:05 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -252,12 +252,10 @@
 
     start_offset = _bm.get_next_one_offset(start_offset, end_offset);
     while (start_offset < end_offset) {
-      HeapWord* obj_addr = offsetToHeapWord(start_offset);
-      oop obj = (oop) obj_addr;
       if (!cl->do_bit(start_offset)) {
         return false;
       }
-      HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
+      HeapWord* next_addr = MIN2(nextObject(offsetToHeapWord(start_offset)), end_addr);
       BitMap::idx_t next_offset = heapWordToOffset(next_addr);
       start_offset = _bm.get_next_one_offset(next_offset, end_offset);
     }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -409,7 +409,7 @@
         // heap remains parsable.
         const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
         const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
-        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+
         if (limit_exceeded && softrefs_clear) {
           *gc_overhead_limit_was_exceeded = true;
           size_policy()->set_gc_overhead_limit_exceeded(false);
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -948,6 +948,8 @@
       break;
     }
     if (e != scan_end) {
+      assert(e < scan_end, err_msg("e: " PTR_FORMAT " scan_end: " PTR_FORMAT, e, scan_end));
+
       if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
           && page_expected.size != 0) {
         os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
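
The surrounding branch is taken when e stopped short of scan_end, and the new assert pins down that e can only lie below it, using err_msg to embed the actual pointer values in the failure message. A standalone stand-in for that formatted-assert pattern (HotSpot's real err_msg lives in debug.hpp; this is just the shape of it):

    #include <cassert>
    #include <cstdio>

    // stand-in for assert(cond, err_msg(...)): report the values before failing
    #define assert_msg(cond, ...) \
      do { if (!(cond)) { fprintf(stderr, __VA_ARGS__); assert(cond); } } while (0)

    int main() {
      const char* e        = (const char*) 0x1000;
      const char* scan_end = (const char*) 0x2000;
      assert_msg(e < scan_end, "e: %p scan_end: %p\n", (void*)e, (void*)scan_end);
      return 0;
    }
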
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -620,7 +620,7 @@
 
       const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
       const bool softrefs_clear = all_soft_refs_clear();
-      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
+
       if (limit_exceeded && softrefs_clear) {
         *gc_overhead_limit_was_exceeded = true;
         size_policy()->set_gc_overhead_limit_exceeded(false);
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -2439,7 +2439,8 @@
              free_chunks_capacity_bytes / K,
              used_and_free / K,
              capacity_bytes / K);
-  assert(used_and_free == capacity_bytes, "Accounting is wrong");
+  // Accounting can only be correct if we got the values during a safepoint
+  assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
 }
 
 // Print total fragmentation for class and data metaspaces separately
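
The weakened assert uses the implication form !P || Q: the accounting identity is only required to hold when the values were sampled at a safepoint, because while mutators run, used, free and capacity can be momentarily out of sync. The same guarded-invariant shape in miniature:

    #include <cassert>
    #include <cstddef>

    int main() {
      bool at_safepoint = false;            // values read while the world is running
      size_t used_and_free = 100 * 1024;    // may transiently lag behind capacity
      size_t capacity_bytes = 112 * 1024;
      // reads as: "if at a safepoint, then the accounting must balance"
      assert(!at_safepoint || used_and_free == capacity_bytes);
      return 0;
    }
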
--- a/hotspot/src/share/vm/memory/universe.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -885,6 +885,8 @@
   // the actual alignment depends on its size.
   Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
   size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
+  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
+      "heap size is too big for compressed oops");
   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
   ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
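
The new assert makes the compressed-oops ceiling explicit: a narrow oop is a 32-bit value scaled by the minimum object alignment, so with the default 8-byte alignment OopEncodingHeapMax comes out at 32 GiB, and one VM page is subtracted, presumably for the protection page placed at the base of the reserved space. Back-of-the-envelope check (assumes the default LogMinObjAlignmentInBytes of 3):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t max_juint = 0xffffffffULL;   // largest 32-bit narrow oop
      const int LogMinObjAlignmentInBytes = 3;    // 8-byte object alignment
      const uint64_t OopEncodingHeapMax =
          (max_juint + 1) << LogMinObjAlignmentInBytes;
      printf("compressed-oops heap ceiling: %llu GiB\n",
             (unsigned long long)(OopEncodingHeapMax >> 30));  // prints 32
      return 0;
    }
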
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Mar 15 04:39:05 2013 -0700
@@ -1381,6 +1381,40 @@
   return false;
 }
 
+void Arguments::set_use_compressed_oops() {
+#ifndef ZERO
+#ifdef _LP64
+  // MaxHeapSize is not set up properly at this point, but
+  // the only value that can override MaxHeapSize if we are
+  // to use UseCompressedOops is InitialHeapSize.
+  size_t max_heap_size = MAX2(MaxHeapSize, InitialHeapSize);
+
+  if (max_heap_size <= max_heap_for_compressed_oops()) {
+#if !defined(COMPILER1) || defined(TIERED)
+    if (FLAG_IS_DEFAULT(UseCompressedOops)) {
+      FLAG_SET_ERGO(bool, UseCompressedOops, true);
+    }
+#endif
+#ifdef _WIN64
+    if (UseLargePages && UseCompressedOops) {
+      // Cannot allocate guard pages for implicit checks in indexed addressing
+      // mode, when large pages are specified on windows.
+      // This flag could be switched ON if narrow oop base address is set to 0,
+      // see code in Universe::initialize_heap().
+      Universe::set_narrow_oop_use_implicit_null_checks(false);
+    }
+#endif //  _WIN64
+  } else {
+    if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
+      warning("Max heap size too large for Compressed Oops");
+      FLAG_SET_DEFAULT(UseCompressedOops, false);
+      FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
+    }
+  }
+#endif // _LP64
+#endif // ZERO
+}
+
 void Arguments::set_ergonomics_flags() {
 
   if (os::is_server_class_machine()) {
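
The extracted set_use_compressed_oops() differs from the inline code it replaces (next hunk) in one key respect: it sizes the decision on MAX2(MaxHeapSize, InitialHeapSize) rather than MaxHeapSize alone, since a -Xms larger than the still-default -Xmx will later force the heap at least that big. A sketch of the interplay, with made-up sizes:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // made-up values: ergonomic max heap of 30g, user-specified -Xms of 40g
      const uint64_t MaxHeapSize     = 30ULL << 30;
      const uint64_t InitialHeapSize = 40ULL << 30;
      const uint64_t ceiling         = 32ULL << 30;  // ~max_heap_for_compressed_oops()
      const uint64_t max_heap = std::max(MaxHeapSize, InitialHeapSize);
      printf("UseCompressedOops: %s\n", max_heap <= ceiling ? "on" : "off");
      return 0;  // prints: UseCompressedOops: off
    }
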
@@ -1410,30 +1444,7 @@
 
 #ifndef ZERO
 #ifdef _LP64
-  // Check that UseCompressedOops can be set with the max heap size allocated
-  // by ergonomics.
-  if (MaxHeapSize <= max_heap_for_compressed_oops()) {
-#if !defined(COMPILER1) || defined(TIERED)
-    if (FLAG_IS_DEFAULT(UseCompressedOops)) {
-      FLAG_SET_ERGO(bool, UseCompressedOops, true);
-    }
-#endif
-#ifdef _WIN64
-    if (UseLargePages && UseCompressedOops) {
-      // Cannot allocate guard pages for implicit checks in indexed addressing
-      // mode, when large pages are specified on windows.
-      // This flag could be switched ON if narrow oop base address is set to 0,
-      // see code in Universe::initialize_heap().
-      Universe::set_narrow_oop_use_implicit_null_checks(false);
-    }
-#endif //  _WIN64
-  } else {
-    if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
-      warning("Max heap size too large for Compressed Oops");
-      FLAG_SET_DEFAULT(UseCompressedOops, false);
-      FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
-    }
-  }
+  set_use_compressed_oops();
   // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
   if (!UseCompressedOops) {
     if (UseCompressedKlassPointers) {
@@ -1813,6 +1824,13 @@
   }
 }
 
+void Arguments::check_deprecated_gc_flags() {
+  if (FLAG_IS_CMDLINE(MaxGCMinorPauseMillis)) {
+    warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
+            "and will likely be removed in future release");
+  }
+}
+
 // Check stack pages settings
 bool Arguments::check_stack_pages()
 {
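
check_deprecated_gc_flags() keys the warning off FLAG_IS_CMDLINE, so only users who explicitly set MaxGCMinorPauseMillis on the command line see it; default or ergonomically chosen values stay silent. A simplified model of that flag-origin check (not the real flags machinery):

    #include <cstdio>

    enum FlagOrigin { DEFAULT, ERGONOMIC, COMMAND_LINE };  // simplified origins

    // stand-in for FLAG_IS_CMDLINE(flag): warn only on explicit user settings
    static void maybe_warn(FlagOrigin origin) {
      if (origin == COMMAND_LINE)
        fprintf(stderr, "warning: MaxGCMinorPauseMillis is deprecated\n");
    }

    int main() {
      maybe_warn(ERGONOMIC);     // silent
      maybe_warn(COMMAND_LINE);  // warns
      return 0;
    }
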
@@ -3305,6 +3323,7 @@
     set_g1_gc_flags();
   }
   check_deprecated_gcs();
+  check_deprecated_gc_flags();
 #else // INCLUDE_ALL_GCS
   assert(verify_serial_gc_flags(), "SerialGC unset");
 #endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Thu Mar 14 16:16:05 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Fri Mar 15 04:39:05 2013 -0700
@@ -309,6 +309,7 @@
   // Garbage-First (UseG1GC)
   static void set_g1_gc_flags();
   // GC ergonomics
+  static void set_use_compressed_oops();
   static void set_ergonomics_flags();
   static void set_shared_spaces_flags();
   // Setup heap size
@@ -414,6 +415,7 @@
   // Check for consistency in the selection of the garbage collector.
   static bool check_gc_consistency();
   static void check_deprecated_gcs();
+  static void check_deprecated_gc_flags();
   // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
   // Check stack pages settings