changeset   26833:fa1e0d1c960f
parent      26710:e9c76272cd40 (current diff)
parent      26832:75bb9cfe53e9 (diff)
child       26834:41332d860d6a
author      jwilhelm
date        Mon, 22 Sep 2014 16:22:21 +0200
summary     Merge
--- a/hotspot/make/bsd/makefiles/vm.make	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/make/bsd/makefiles/vm.make	Mon Sep 22 16:22:21 2014 +0200
@@ -234,10 +234,10 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
-                 { system ("cat vm.def"); }		\
+                 { system ("cat mapfile_ext"); system ("cat vm.def"); } \
                else					\
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
@@ -249,6 +249,13 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
+	fi
+
 STATIC_CXX = false
 
 ifeq ($(LINK_INTO),AOUT)
--- a/hotspot/make/linux/makefiles/vm.make	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/make/linux/makefiles/vm.make	Mon Sep 22 16:22:21 2014 +0200
@@ -227,10 +227,10 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
 	rm -f $@
 	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
-                 { system ("cat vm.def"); }		\
+                 { system ("cat mapfile_ext"); system ("cat vm.def"); } \
                else					\
                  { print $$0 }				\
              }' > $@ < $(MAPFILE)
@@ -242,6 +242,13 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   STATIC_CXX = false
 else
--- a/hotspot/make/solaris/makefiles/buildtree.make	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/make/solaris/makefiles/buildtree.make	Mon Sep 22 16:22:21 2014 +0200
@@ -258,6 +258,8 @@
 	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
 	[ -n "$(ZIPEXE)" ] && \
 	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
+	[ -n "$(HS_ALT_MAKE)" ] && \
+	    echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
 	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
 	    echo && \
 	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
--- a/hotspot/make/solaris/makefiles/vm.make	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/make/solaris/makefiles/vm.make	Mon Sep 22 16:22:21 2014 +0200
@@ -249,11 +249,12 @@
 
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
 
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
 	rm -f $@
 	cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
 	    | $(NAWK) '{                                         \
 	              if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") {  \
+	                  system ("cat mapfile_ext");            \
 	                  system ("cat vm.def");                 \
 	              } else {                                   \
 	                  print $$0;                             \
@@ -267,6 +268,13 @@
 vm.def: $(Obj_Files)
 	sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
 
+mapfile_ext:
+	rm -f $@
+	touch $@
+	if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
+	  cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
+	fi
+
 ifeq ($(LINK_INTO),AOUT)
   LIBJVM.o                 =
   LIBJVM_MAPFILE           =
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -3129,8 +3129,7 @@
   return true;
 }
 
-char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
-                                 bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -4167,7 +4167,7 @@
 // been published), so we do not need to check for
 // uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -7226,7 +7226,7 @@
 // isMarked() query is "safe".
 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
   // Ignore mark word because we are running concurrent with mutators
-  assert(p->is_oop_or_null(true), "expected an oop or null");
+  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
   HeapWord* addr = (HeapWord*)p;
   assert(_span.contains(addr), "we are scanning the CMS generation");
   bool is_obj_array = false;
@@ -7666,7 +7666,7 @@
 }
 
 void PushAndMarkVerifyClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7764,7 +7764,7 @@
 
 void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7802,7 +7802,7 @@
 
 void Par_PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7879,7 +7879,7 @@
   // path and may be at the end of the global overflow list (so
   // the mark word may be NULL).
   assert(obj->is_oop_or_null(true /* ignore mark word */),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -7959,7 +7959,7 @@
   // the debugger, is_oop_or_null(false) may subsequently start
   // to hold.
   assert(obj->is_oop_or_null(true),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
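
The assertion changes in this file replace fixed strings like "expected an oop or NULL" with err_msg(...) so the failing pointer value is included in the message, using HotSpot's PTR_FORMAT and p2i() exactly as the hunks show. Below is a minimal, self-contained C++ sketch of that "embed the offending pointer in the assert message" idea; CHECK_OOP_OR_NULL and the toy is_oop_or_null() are invented for this example and only stand in for HotSpot's assert/err_msg machinery.

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Hypothetical validity check; the real is_oop_or_null() inspects the object
// header, which is out of scope here. NULL and word-aligned pointers pass.
static bool is_oop_or_null(const void* p) {
  return p == NULL || ((uintptr_t)p % sizeof(void*)) == 0;
}

// Toy stand-in for assert(cond, err_msg("... " PTR_FORMAT, p2i(p))): on
// failure, print a message that contains the offending pointer and abort.
#define CHECK_OOP_OR_NULL(p)                                          \
  do {                                                                \
    if (!is_oop_or_null(p)) {                                         \
      fprintf(stderr, "Expected an oop or NULL at 0x%" PRIxPTR "\n",  \
              (uintptr_t)(p));                                        \
      abort();                                                        \
    }                                                                 \
  } while (0)

int main() {
  alignas(sizeof(void*)) int fake_obj = 0;
  CHECK_OOP_OR_NULL((void*)NULL);   // passes: NULL is explicitly allowed
  CHECK_OOP_OR_NULL(&fake_obj);     // passes: looks like a word-aligned oop
  return 0;
}

The benefit over a constant string is that a crash log immediately shows which address failed the check, which is what the hunks above add throughout the CMS closures.
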
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -73,7 +73,7 @@
     } else {
       res = (PromotedObject*)(_next & next_mask);
     }
-    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
     return res;
   }
   inline void setNext(PromotedObject* x) {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -277,7 +277,7 @@
   ++_refs_reached;
 
   HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+  assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   if (_g1h->is_in_g1_reserved(objAddr)) {
     assert(obj != NULL, "null check is implicit");
     if (!_nextMarkBitMap->isMarked(objAddr)) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -1960,15 +1960,10 @@
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                  heap_alignment);
 
-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap.  (I've actually seen this
-  // happen in asserts: DLD.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -2052,7 +2047,7 @@
 
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
+  _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
 
   _g1h = this;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -43,8 +43,8 @@
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
-                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
-  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+                 p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
+  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -43,9 +43,7 @@
     _hot_cache_idx = 0;
 
     // For refining the cards in the hot cache in parallel
-    uint n_workers = (ParallelGCThreads > 0 ?
-                        _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
+    _hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
     _hot_cache_par_claimed_idx = 0;
 
     _card_counts.initialize(card_counts_storage);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -70,6 +70,9 @@
 
   G1CardCounts _card_counts;
 
+  // The number of cached cards a thread claims when flushing the cache
+  static const int ClaimChunkSize = 32;
+
   bool default_use_cache() const {
     return (G1ConcRSLogCacheSize > 0);
   }
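
The G1HotCardCache change drops the per-thread share computed from total_workers() in favor of a fixed ClaimChunkSize (32): each worker repeatedly claims the next 32 cached cards by bumping a shared claim index. A small, self-contained sketch of that claiming scheme using std::atomic follows; the names (kClaimChunkSize, drain, cards) are illustrative and not HotSpot code, which uses its own atomics and card table types.

#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Fixed chunk size, mirroring the patch's ClaimChunkSize = 32.
static const int kClaimChunkSize = 32;

// Each worker claims [start, start + chunk) ranges until the cache is drained.
static void drain(std::vector<int>* cards, std::atomic<int>* claimed_idx) {
  const int limit = (int)cards->size();
  while (true) {
    int start = claimed_idx->fetch_add(kClaimChunkSize);
    if (start >= limit) break;                       // nothing left to claim
    int end = std::min(start + kClaimChunkSize, limit);
    for (int i = start; i < end; i++) {
      (*cards)[i] += 1;                              // "refine" the claimed cards
    }
  }
}

int main() {
  std::vector<int> cards(1000, 0);                   // stand-in for the hot card cache
  std::atomic<int> claimed_idx(0);
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) {
    workers.emplace_back(drain, &cards, &claimed_idx);
  }
  for (size_t i = 0; i < workers.size(); i++) workers[i].join();
  printf("drained %zu cached cards\n", cards.size());
  return 0;
}

A fixed chunk keeps the work granularity independent of the number of workers, which is the point of replacing the total_workers()-based division.
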
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -213,7 +213,7 @@
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
-  assert(_end == _orig_end,
+  assert(_end == orig_end(),
          "we should have already filtered out humongous regions");
 
   _in_collection_set = false;
@@ -266,7 +266,7 @@
 
 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
@@ -280,7 +280,7 @@
 
 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
   assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(first_hr->startsHumongous(), "pre-condition");
@@ -294,14 +294,14 @@
 
   if (startsHumongous()) {
     assert(top() <= end(), "pre-condition");
-    set_end(_orig_end);
+    set_end(orig_end());
     if (top() > end()) {
       // at least one "continues humongous" region after it
       set_top(end());
     }
   } else {
     // continues humongous
-    assert(end() == _orig_end, "sanity");
+    assert(end() == orig_end(), "sanity");
   }
 
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
@@ -326,7 +326,7 @@
     _hrm_index(hrm_index),
     _humongous_start_region(NULL),
     _in_collection_set(false),
-    _next_in_special_set(NULL), _orig_end(NULL),
+    _next_in_special_set(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _next_young_region(NULL),
@@ -349,10 +349,14 @@
 
   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
 
-  _orig_end = mr.end();
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
+
+  assert(mr.end() == orig_end(),
+         err_msg("Given region end address " PTR_FORMAT " should match exactly "
+                 "bottom plus one region size, i.e. " PTR_FORMAT,
+                 p2i(mr.end()), p2i(orig_end())));
 }
 
 CompactibleSpace* HeapRegion::next_compaction_space() const {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -226,9 +226,6 @@
 
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
-  // For the start region of a humongous sequence, it's original end().
-  HeapWord* _orig_end;
-
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
@@ -452,7 +449,7 @@
   // their _end set up to be the end of the last continues region of the
   // corresponding humongous object.
   bool is_in_reserved_raw(const void* p) const {
-    return _bottom <= p && p < _orig_end;
+    return _bottom <= p && p < orig_end();
   }
 
   // Makes the current region be a "starts humongous" region, i.e.,
@@ -556,7 +553,8 @@
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  HeapWord* orig_end() const { return _orig_end; }
+  // For the start region of a humongous sequence, its original end().
+  HeapWord* orig_end() const { return _bottom + GrainWords; }
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
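
With _orig_end removed, heapRegion.hpp derives the original end purely from geometry: every region is exactly GrainWords words, so orig_end() is just _bottom + GrainWords even when _end has been stretched for a "starts humongous" region. A tiny sketch of that derived-accessor idea; Region, HeapWordStandIn and kGrainWords are placeholders invented for the example, not the real HeapRegion types.

#include <cassert>
#include <cstddef>

typedef size_t HeapWordStandIn;           // stand-in for HotSpot's HeapWord
static const size_t kGrainWords = 512;    // hypothetical fixed region size in words

// The original end is recomputed from the fixed region size instead of being
// cached in a field, mirroring orig_end() { return _bottom + GrainWords; }.
class Region {
  HeapWordStandIn* _bottom;
  HeapWordStandIn* _end;   // may be stretched past orig_end() for humongous starts
 public:
  explicit Region(HeapWordStandIn* bottom)
      : _bottom(bottom), _end(bottom + kGrainWords) {}
  HeapWordStandIn* bottom() const   { return _bottom; }
  HeapWordStandIn* end() const      { return _end; }
  HeapWordStandIn* orig_end() const { return _bottom + kGrainWords; }  // derived
  void stretch_end(HeapWordStandIn* new_end) { _end = new_end; }
};

int main() {
  static HeapWordStandIn backing[2 * kGrainWords];
  Region r(backing);
  assert(r.end() == r.orig_end());
  r.stretch_end(backing + 2 * kGrainWords);          // humongous object spans two regions
  assert(r.orig_end() == r.bottom() + kGrainWords);  // original end is still recoverable
  return 0;
}

Dropping the field removes one pointer per region and any risk of the cached value going stale, which the added assert against mr.end() in heapRegion.cpp double-checks.
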
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -288,7 +288,7 @@
           while (p < to) {
             Prefetch::write(p, interval);
             oop m = oop(p);
-            assert(m->is_oop_or_null(), "check for header");
+            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
             m->push_contents(pm);
             p += m->size();
           }
@@ -296,7 +296,7 @@
         } else {
           while (p < to) {
             oop m = oop(p);
-            assert(m->is_oop_or_null(), "check for header");
+            assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
             m->push_contents(pm);
             p += m->size();
           }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -74,10 +74,9 @@
     return JNI_ENOMEM;
   }
 
-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -2882,7 +2882,7 @@
         start_array->allocate_block(addr);
       }
       oop(addr)->update_contents(cm);
-      assert(oop(addr)->is_oop_or_null(), "should be an oop now");
+      assert(oop(addr)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr))));
     }
   }
 }
@@ -3366,7 +3366,7 @@
 
   oop moved_oop = (oop) destination();
   moved_oop->update_contents(compaction_manager());
-  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
+  assert(moved_oop->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop)));
 
   update_state(words);
   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -582,6 +582,14 @@
   }
 }
 
+void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+  // It is important to do this in a way such that concurrent readers can't
+  // temporarily think something is in the heap.  (Seen this happen in asserts.)
+  _reserved.set_word_size(0);
+  _reserved.set_start(start);
+  _reserved.set_end(end);
+}
+
 /////////////// Unit tests ///////////////
 
 #ifndef PRODUCT
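
The new CollectedHeap::initialize_reserved_region() centralizes the ordering the deleted per-collector code used: shrink the region to zero words first, then move its start, then publish the end, so a concurrent reader never sees a half-updated _reserved that appears to cover memory outside the heap. A minimal sketch of that ordering with a mock region type; MiniRegion and its fields are invented for the example, and the plain stores below ignore the memory-ordering details the real VM relies on.

#include <cassert>
#include <cstddef>

// Mock (start, word_size) region; contains() is what a racing reader would call.
struct MiniRegion {
  const char* _start;
  size_t      _word_size;
  bool contains(const void* p) const {
    return p >= _start && p < _start + _word_size * sizeof(void*);
  }
};

// Same shape as the patch: empty the region, move the start, publish the size.
static void initialize_reserved_region(MiniRegion& r, const char* start, size_t word_size) {
  r._word_size = 0;          // 1) region is now empty: contains() is false for everything
  r._start     = start;      // 2) move the start while the region is empty
  r._word_size = word_size;  // 3) publish the real extent last
}

int main() {
  static char heap[4096];
  MiniRegion reserved = { NULL, 0 };
  initialize_reserved_region(reserved, heap, sizeof(heap) / sizeof(void*));
  assert(reserved.contains(heap));
  assert(!reserved.contains(heap + sizeof(heap)));
  return 0;
}

g1CollectedHeap.cpp, parallelScavengeHeap.cpp and genCollectedHeap.cpp above all switch to this helper and read reserved_region() instead of touching _reserved directly.
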
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -85,6 +85,7 @@
   friend class VMStructs;
   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
 
+ private:
 #ifdef ASSERT
   static int       _fire_out_of_memory_count;
 #endif
@@ -97,8 +98,9 @@
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
   bool _defer_initial_card_mark;
 
+  MemRegion _reserved;
+
  protected:
-  MemRegion _reserved;
   BarrierSet* _barrier_set;
   bool _is_gc_active;
   uint _n_par_threads;
@@ -211,6 +213,7 @@
   // Stop any ongoing concurrent work and prepare for exit.
   virtual void stop() {}
 
+  void initialize_reserved_region(HeapWord *start, HeapWord *end);
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }
 
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -35,7 +35,7 @@
 #ifdef ASSERT
 #define VERIFY_OOP(o_) \
       if (VerifyOops) { \
-        assert((oop(o_))->is_oop_or_null(), "Not an oop!"); \
+        assert((oop(o_))->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(o_)))); \
         StubRoutines::_verify_oop_count++;  \
       }
 #else
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -123,17 +123,9 @@
     return JNI_ENOMEM;
   }
 
-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap.  (Seen this happen in asserts.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  size_t actual_heap_size = heap_rs.size();
-  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
-
-  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -473,7 +473,7 @@
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   oop discovered = java_lang_ref_Reference::discovered(_ref);
   assert(_discovered_addr && discovered->is_oop_or_null(),
-         "discovered field is bad");
+         err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
   _next = discovered;
   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
   _referent = java_lang_ref_Reference::referent(_ref);
@@ -482,7 +482,9 @@
   assert(allow_null_referent ?
              _referent->is_oop_or_null()
            : _referent->is_oop(),
-         "bad referent");
+         err_msg("Expected an oop%s for referent field at " PTR_FORMAT,
+                 (allow_null_referent ? " or NULL" : ""),
+                 p2i(_referent)));
 }
 
 void DiscoveredListIterator::remove() {
@@ -630,7 +632,7 @@
     oop next = java_lang_ref_Reference::next(iter.obj());
     if ((iter.referent() == NULL || iter.is_referent_alive() ||
          next != NULL)) {
-      assert(next->is_oop_or_null(), "bad next field");
+      assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
       // Remove Reference object from list
       iter.remove();
       // Trace the cohorts
@@ -979,7 +981,7 @@
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
-    assert(next->is_oop_or_null(), "bad next field");
+    assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
     // If referent has been cleared or Reference is not active,
     // drop it.
     if (iter.referent() == NULL || next != NULL) {
@@ -1172,7 +1174,7 @@
 
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop  discovered = java_lang_ref_Reference::discovered(obj);
-  assert(discovered->is_oop_or_null(), "bad discovered field");
+  assert(discovered->is_oop_or_null(), err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
   if (discovered != NULL) {
     // The reference has already been discovered...
     if (TraceReferenceGC) {
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -88,6 +88,8 @@
 bool   Arguments::_has_profile                  = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx  Arguments::_min_heap_size                = 0;
+uintx  Arguments::_min_heap_free_ratio          = 0;
+uintx  Arguments::_max_heap_free_ratio          = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
 bool   Arguments::_java_compiler                = false;
 bool   Arguments::_xdebug_mode                  = false;
@@ -1630,9 +1632,11 @@
     // unless the user actually sets these flags.
     if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
       FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+      _min_heap_free_ratio = MinHeapFreeRatio;
     }
     if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
       FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+      _max_heap_free_ratio = MaxHeapFreeRatio;
     }
   }
 
@@ -2025,6 +2029,8 @@
                   MaxHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _min_heap_free_ratio = min_heap_free_ratio;
   return true;
 }
 
@@ -2039,6 +2045,8 @@
                   MinHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _max_heap_free_ratio = max_heap_free_ratio;
   return true;
 }
 
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -285,7 +285,11 @@
   // Value of the conservative maximum heap alignment needed
   static size_t  _conservative_max_heap_alignment;
 
-  static uintx  _min_heap_size;
+  static uintx _min_heap_size;
+
+  // Used to store original flag values
+  static uintx _min_heap_free_ratio;
+  static uintx _max_heap_free_ratio;
 
   // -Xrun arguments
   static AgentLibraryList _libraryList;
@@ -516,6 +520,10 @@
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
+  // Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio
+  static uintx min_heap_free_ratio()        { return _min_heap_free_ratio; }
+  static uintx max_heap_free_ratio()        { return _max_heap_free_ratio; }
+
   // -Xrun
   static AgentLibrary* libraries()          { return _libraryList.first(); }
   static bool init_libraries_at_startup()   { return !_libraryList.is_empty(); }
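
The new Arguments::_min_heap_free_ratio/_max_heap_free_ratio fields record the values the argument parser accepted for -XX:MinHeapFreeRatio/-XX:MaxHeapFreeRatio, separately from the live flags that later ergonomics may rewrite. A small, self-contained sketch of that "remember the parsed value next to the mutable flag" pattern; the Args class, the flag variable and its default value are invented for illustration.

#include <cstdio>

// Live, mutable "flag" that other VM code is free to overwrite later.
static unsigned int MinHeapFreeRatioFlag = 40;   // hypothetical default

class Args {
  // Stores the originally requested value, independent of later flag changes.
  static unsigned int _min_heap_free_ratio;
 public:
  static bool set_min_heap_free_ratio(unsigned int v) {
    if (v > 100) return false;   // range check, like the real parser
    // As in the patch: this does not set the flag itself, it only stores the
    // requested value so it can be consulted later.
    _min_heap_free_ratio = v;
    return true;
  }
  static unsigned int min_heap_free_ratio() { return _min_heap_free_ratio; }
};
unsigned int Args::_min_heap_free_ratio = 0;

int main() {
  Args::set_min_heap_free_ratio(10);   // e.g. -XX:MinHeapFreeRatio=10
  MinHeapFreeRatioFlag = 0;            // the flag itself is managed (and changed) elsewhere
  printf("flag=%u original=%u\n", MinHeapFreeRatioFlag, Args::min_heap_free_ratio());
  return 0;
}

This is why the setters in arguments.cpp note that they only store the value "in a safe place for later usage" rather than setting the flag at that point.
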
--- a/hotspot/src/share/vm/services/heapDumper.cpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/services/heapDumper.cpp	Mon Sep 22 16:22:21 2014 +0200
@@ -722,7 +722,7 @@
 
       // reflection and sun.misc.Unsafe classes may have a reference to a
       // Klass* so filter it out.
-      assert(o->is_oop_or_null(), "should always be an oop");
+      assert(o->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(o)));
       writer->write_objectID(o);
       break;
     }
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Fri Sep 19 01:59:59 2014 -0700
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Mon Sep 22 16:22:21 2014 +0200
@@ -331,7 +331,7 @@
     //            index, &_elems[index], _elems[index]);
     E* t = (E*)&_elems[index];      // cast away volatility
     oop* p = (oop*)t;
-    assert((*t)->is_oop_or_null(), "Not an oop or null");
+    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
     f->do_oop(p);
   }
   // tty->print_cr("END OopTaskQueue::oops_do");