Merge
author brutisso
Wed, 18 May 2011 13:19:32 +0200
changeset 9937 4f843e4c0cf6
parent 9726 4a05062d8c4d (current diff)
parent 9936 59dac2950cc7 (diff)
child 9940 a9287f2151fb
Merge
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed May 18 13:19:32 2011 +0200
@@ -1810,7 +1810,7 @@
   void maybe_print(oop* p) {
     if (_print_nm == NULL)  return;
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
+    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
                   (intptr_t)(*p), (intptr_t)p);
     (*p)->print();
@@ -2311,7 +2311,7 @@
       _nm->print_nmethod(true);
       _ok = false;
     }
-    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
+    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
                   (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
@@ -2324,7 +2324,7 @@
     DebugScavengeRoot debug_scavenge_root(this);
     oops_do(&debug_scavenge_root);
     if (!debug_scavenge_root.ok())
-      fatal("found an unadvertised bad non-perm oop in the code cache");
+      fatal("found an unadvertised bad scavengable oop in the code cache");
   }
   assert(scavenge_root_not_marked(), "");
 }
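
The print sites patched above belong to debug closures that share a detect-and-report shape: remember whether anything unexpected was seen, print each offender, and let the caller decide whether to call fatal(). A minimal standalone sketch of that shape, with hypothetical names (DebugScavengeRootSketch is not the VM class):

    #include <cstdio>

    // Hypothetical stand-in for the DebugScavengeRoot closure above: remember
    // whether anything unexpected was seen, report each offender, and let the
    // caller decide whether to call fatal().
    struct DebugScavengeRootSketch {
      bool _ok;
      DebugScavengeRootSketch() : _ok(true) {}
      bool ok() const { return _ok; }
      void do_oop(void** p, bool scavengable) {
        if (!scavengable) return;   // only scavengable oops are unexpected here
        _ok = false;
        printf("*** scavengable oop %p found at %p\n", *p, (void*)p);
      }
    };

    int main() {
      DebugScavengeRootSketch dbg;
      int obj = 0;
      void* slot = &obj;
      dbg.do_oop(&slot, /*scavengable=*/ true);   // simulate a bad root
      if (!dbg.ok()) {
        // the VM would call fatal("found an unadvertised bad scavengable oop ...")
        printf("verification failed\n");
      }
      return 0;
    }
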
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Wed May 18 13:19:32 2011 +0200
@@ -109,7 +109,7 @@
 class nmethod : public CodeBlob {
   friend class VMStructs;
   friend class NMethodSweeper;
-  friend class CodeCache;  // non-perm oops
+  friend class CodeCache;  // scavengable oops
  private:
   // Shared fields for all nmethod's
   methodOop _method;
@@ -466,17 +466,17 @@
   bool is_at_poll_return(address pc);
   bool is_at_poll_or_poll_return(address pc);
 
-  // Non-perm oop support
+  // Scavengable oop support
   bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
  protected:
-  enum { npl_on_list = 0x01, npl_marked = 0x10 };
-  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
+  enum { sl_on_list = 0x01, sl_marked = 0x10 };
+  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
   void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
   // assertion-checking and pruning logic uses the bits of _scavenge_root_state
 #ifndef PRODUCT
-  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
-  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
-  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
+  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
+  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
+  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
   // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
 #endif //PRODUCT
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
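
For illustration, the renamed two-bit state above can be exercised in isolation. This is a hedged standalone sketch, not the nmethod code: the class is hypothetical, but the bit values and the absence of a positive "marked" query mirror the patch.

    #include <cassert>

    // Standalone sketch of the renamed two-bit scavenge-root state above;
    // the class is hypothetical, the bit values mirror the patch.
    class ScavengeRootStateSketch {
      unsigned char _state;
      enum { sl_on_list = 0x01, sl_marked = 0x10 };
     public:
      ScavengeRootStateSketch() : _state(0) {}
      bool on_list() const    { return (_state & sl_on_list) != 0; }
      void set_on_list()      { _state = sl_on_list; }
      void clear_on_list()    { _state = 0; }
      void set_marked()       { _state |= sl_marked; }
      void clear_marked()     { _state &= (unsigned char)~sl_marked; }
      // As in the patch, there is no positive "marked" query; asserts only
      // check that no bits other than sl_on_list remain set.
      bool not_marked() const { return (_state & ~sl_on_list) == 0; }
    };

    int main() {
      ScavengeRootStateSketch s;
      s.set_on_list();
      assert(s.on_list() && s.not_marked());
      s.set_marked();     // marked while walking the scavenge-root list
      assert(!s.not_marked());
      s.clear_marked();
      assert(s.not_marked());
      return 0;
    }
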
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed May 18 13:19:32 2011 +0200
@@ -3054,6 +3054,28 @@
     _should_gray_objects = true;
 }
 
+// Resets the region fields of active CMTasks whose values point
+// into the collection set.
+void ConcurrentMark::reset_active_task_region_fields_in_cset() {
+  assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
+  assert(parallel_marking_threads() <= _max_task_num, "sanity");
+
+  for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
+    CMTask* task = _tasks[i];
+    HeapWord* task_finger = task->finger();
+    if (task_finger != NULL) {
+      assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
+      HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
+      if (finger_region->in_collection_set()) {
+        // The task's current region is in the collection set.
+        // This region will be evacuated in the current GC and
+        // the region fields in the task will be stale.
+        task->giveup_current_region();
+      }
+    }
+  }
+}
+
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
   // Clear all marks to force marking thread to do nothing
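
The shape of reset_active_task_region_fields_in_cset() can be shown with toy types. The sketch below uses hypothetical stand-ins (Region, Task) for HeapRegion and CMTask; it is illustrative only.

    #include <cstddef>
    #include <cassert>
    #include <vector>

    // Toy stand-ins for HeapRegion and CMTask, to show the shape of
    // reset_active_task_region_fields_in_cset() above.
    struct Region { bool in_cset; };

    struct Task {
      Region* claimed;                         // region being scanned, or NULL
      void giveup_current_region() { claimed = NULL; }
    };

    // Any task whose claimed region was chosen for evacuation must drop its
    // (soon to be stale) region fields and claim a fresh region later.
    void reset_tasks_in_cset(std::vector<Task>& tasks) {
      for (size_t i = 0; i < tasks.size(); i++) {
        Task& t = tasks[i];
        if (t.claimed != NULL && t.claimed->in_cset) {
          t.giveup_current_region();
        }
      }
    }

    int main() {
      Region evacuated = { true };
      Region untouched = { false };
      Task tasks_arr[] = { { &evacuated }, { &untouched }, { NULL } };
      std::vector<Task> tasks(tasks_arr, tasks_arr + 3);
      reset_tasks_in_cset(tasks);
      assert(tasks[0].claimed == NULL);        // stale claim dropped
      assert(tasks[1].claimed == &untouched);  // unaffected
      return 0;
    }
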
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed May 18 13:19:32 2011 +0200
@@ -809,10 +809,19 @@
 
   // It indicates that a new collection set is being chosen.
   void newCSet();
+
   // It registers a collection set heap region with CM. This is used
   // to determine whether any heap regions are located above the finger.
   void registerCSetRegion(HeapRegion* hr);
 
+  // Resets the region fields of any active CMTask whose region fields
+  // point into the collection set (i.e. the region currently claimed
+  // by the CMTask will be evacuated and may subsequently be reused as
+  // an alloc region). When this happens the region fields in the CMTask
+  // are stale and, hence, should be cleared, causing the worker thread
+  // to claim a new region.
+  void reset_active_task_region_fields_in_cset();
+
   // Registers the maximum region-end associated with a set of
   // regions with CM. Again this is used to determine whether any
   // heap regions are located above the finger.
@@ -1039,9 +1048,6 @@
   void setup_for_region(HeapRegion* hr);
   // it brings up-to-date the limit of the region
   void update_region_limit();
-  // it resets the local fields after a task has finished scanning a
-  // region
-  void giveup_current_region();
 
   // called when either the words scanned or the refs visited limit
   // has been reached
@@ -1094,6 +1100,11 @@
   // exit the termination protocol after it's entered it.
   virtual bool should_exit_termination();
 
+  // Resets the local region fields after a task has finished scanning a
+  // region; or when they have become stale as a result of the region
+  // being evacuated.
+  void giveup_current_region();
+
   HeapWord* finger()            { return _finger; }
 
   bool has_aborted()            { return _has_aborted; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 18 13:19:32 2011 +0200
@@ -428,6 +428,37 @@
   _cmThread->stop();
 }
 
+#ifdef ASSERT
+// A region is added to the collection set as it is retired,
+// so an address p can point to a region which will be in the
+// collection set but has not yet been retired.  This method
+// is therefore only accurate during a GC pause after all
+// regions have been retired.  It is used for debugging,
+// to check whether an nmethod has references to objects that
+// can be moved during a partial collection.  Though it can be
+// inaccurate, it is sufficient for G1 because the conservative
+// implementation of is_scavengable() for G1 will indicate that
+// all nmethods must be scanned during a partial collection.
+bool G1CollectedHeap::is_in_partial_collection(const void* p) {
+  HeapRegion* hr = heap_region_containing(p);
+  return hr != NULL && hr->in_collection_set();
+}
+#endif
+
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(const void* p) {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  HeapRegion* hr = heap_region_containing(p);
+  if (hr == NULL) {
+     // perm gen (or null)
+     return false;
+  } else {
+    return !hr->isHumongous();
+  }
+}
+
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
@@ -3292,8 +3323,9 @@
       // progress, this will be zero.
       _cm->set_oops_do_bound();
 
-      if (mark_in_progress())
+      if (mark_in_progress()) {
         concurrent_mark()->newCSet();
+      }
 
 #if YOUNG_LIST_VERBOSE
       gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
@@ -3303,6 +3335,16 @@
 
       g1_policy()->choose_collection_set(target_pause_time_ms);
 
+      // We have chosen the complete collection set. If marking is
+      // active, then we clear the region fields of any of the
+      // concurrent marking tasks whose region fields point into
+      // the collection set as these values will become stale. This
+      // will cause the owning marking threads to claim a new region
+      // when marking restarts.
+      if (mark_in_progress()) {
+        concurrent_mark()->reset_active_task_region_fields_in_cset();
+      }
+
       // Nothing to do if we were unable to choose a collection set.
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("\nAfter pause, heap:");
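
As an aside, the conservative G1 predicate added above reduces to a small decision: NULL or perm means not scavengable, and otherwise only humongous regions are exempt from movement. A standalone sketch with stand-in types (not the VM's HeapRegion):

    #include <cassert>
    #include <cstddef>

    // Sketch of the conservative G1 answer above: an oop outside perm is
    // treated as movable unless its region is humongous. Types are stand-ins.
    struct HeapRegion { bool humongous; };

    bool g1_is_scavengable(const HeapRegion* hr) {
      if (hr == NULL) {
        return false;              // perm gen (or null), never moves in a pause
      }
      return !hr->humongous;       // humongous objects are not moved by G1
    }

    int main() {
      HeapRegion normal    = { false };
      HeapRegion humongous = { true };
      assert(g1_is_scavengable(&normal));
      assert(!g1_is_scavengable(&humongous));
      assert(!g1_is_scavengable(NULL));
      return 0;
    }
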
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 18 13:19:32 2011 +0200
@@ -1254,6 +1254,12 @@
     return hr != NULL && hr->is_young();
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr);
+
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed May 18 13:19:32 2011 +0200
@@ -339,6 +339,21 @@
   return false;
 }
 
+bool ParallelScavengeHeap::is_scavengable(const void* addr) {
+  return is_in_young((oop)addr);
+}
+
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is perm (low addr), old, young (high addr)
+  return p >= old_gen()->reserved().end();
+}
+#endif
+
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed May 18 13:19:32 2011 +0200
@@ -127,6 +127,12 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
+  // Return true if the reference points to an object that
+  // can be moved in a partial collection.  For currently implemented
+  // generational collectors that means during a collection of
+  // the young gen.
+  virtual bool is_scavengable(const void* addr);
+
   // Does this heap support heap inspection? (+PrintClassHistogram)
   bool supports_heap_inspection() const { return true; }
 
@@ -143,6 +149,10 @@
     return perm_gen()->reserved().contains(p);
   }
 
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void *p);
+#endif
+
   bool is_permanent(const void *p) const {    // committed part
     return perm_gen()->is_in(p);
   }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed May 18 13:19:32 2011 +0200
@@ -51,7 +51,12 @@
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
-  return young_gen()->is_in_reserved(p);
+  // Assumes that the old gen address range is lower than that of the young gen.
+  const void* loc = (void*) p;
+  bool result = ((HeapWord*)loc) >= young_gen()->reserved().start();
+  assert(result == young_gen()->is_in_reserved(p),
+        err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
 }
 
 inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
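
The single-compare fast path above relies entirely on the generation layout. The following standalone sketch, with made-up addresses, shows why one boundary compare suffices when the young gen sits at the top of the reserved range:

    #include <cassert>

    // Illustrative address layout matching the comment above: perm at the
    // bottom, old in the middle, young at the top. The bounds are made up.
    typedef unsigned long addr_t;
    const addr_t perm_start  = 0x100000;
    const addr_t old_start   = 0x200000;
    const addr_t young_start = 0x300000;   // == old_gen()->reserved().end()
    const addr_t heap_end    = 0x400000;

    // With this ordering a single compare against the old/young boundary
    // replaces the two-sided range check that is_in_reserved() performs.
    bool is_in_young_sketch(addr_t p) { return p >= young_start; }

    int main() {
      assert(!is_in_young_sketch(perm_start + 0x10));   // perm object
      assert(!is_in_young_sketch(old_start + 0x10));    // old object
      assert(is_in_young_sketch(young_start));          // first young address
      assert(is_in_young_sketch(heap_end - 0x10));      // young object
      return 0;
    }
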
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed May 18 13:19:32 2011 +0200
@@ -269,6 +269,13 @@
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;
 
+
+#ifdef ASSERT
+  // Returns true if "p" is in the part of the
+  // heap being collected.
+  virtual bool is_in_partial_collection(const void *p) = 0;
+#endif
+
   bool is_in_permanent_or_null(const void *p) const {
     return p == NULL || is_in_permanent(p);
   }
@@ -284,11 +291,7 @@
 
   // An object is scavengable if its location may move during a scavenge.
   // (A scavenge is a GC which is not a full GC.)
-  // Currently, this just means it is not perm (and not null).
-  // This could change if we rethink what's in perm-gen.
-  bool is_scavengable(const void *p) const {
-    return !is_in_permanent_or_null(p);
-  }
+  virtual bool is_scavengable(const void *p) = 0;
 
   // Returns "TRUE" if "p" is a method oop in the
   // current heap, with high probability. This predicate
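
The effect of making is_scavengable() pure virtual is that each heap answers from its own address layout rather than from a shared perm-gen test. A hedged sketch of the dispatch, with illustrative class names (not the HotSpot hierarchy):

    // Sketch of the refactoring above: is_scavengable() stops being a single
    // perm-gen test in the base class and becomes a pure virtual that each
    // heap answers from its own layout. Class names here are illustrative.
    struct CollectedHeapSketch {
      virtual ~CollectedHeapSketch() {}
      virtual bool is_scavengable(const void* p) = 0;
    };

    // ParallelScavenge-style: young gen sits at the top of the address range.
    struct YoungAtTopHeap : CollectedHeapSketch {
      const void* young_start;
      virtual bool is_scavengable(const void* p) { return p >= young_start; }
    };

    // GenCollectedHeap-style: young gen sits at the bottom of the range.
    struct YoungAtBottomHeap : CollectedHeapSketch {
      const void* old_start;
      virtual bool is_scavengable(const void* p) {
        return p != 0 && p < old_start;
      }
    };

    int main() {
      char heap[4];                     // pretend 4-byte "heap"
      YoungAtTopHeap pt;
      pt.young_start = &heap[2];
      YoungAtBottomHeap gt;
      gt.old_start = &heap[2];
      bool ok = pt.is_scavengable(&heap[3]) && gt.is_scavengable(&heap[1]);
      return ok ? 0 : 1;
    }
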
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed May 18 13:19:32 2011 +0200
@@ -711,15 +711,6 @@
   _gen_process_strong_tasks->set_n_threads(t);
 }
 
-class AssertIsPermClosure: public OopClosure {
-public:
-  void do_oop(oop* p) {
-    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
-  }
-  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-};
-static AssertIsPermClosure assert_is_perm_closure;
-
 void GenCollectedHeap::
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
@@ -962,6 +953,13 @@
   }
 }
 
+bool GenCollectedHeap::is_in_young(oop p) {
+  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
+  assert(result == _gens[0]->is_in_reserved(p),
+         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
+  return result;
+}
+
 // Returns "TRUE" iff "p" points into the allocated area of the heap.
 bool GenCollectedHeap::is_in(const void* p) const {
   #ifndef ASSERT
@@ -984,10 +982,16 @@
   return false;
 }
 
-// Returns "TRUE" iff "p" points into the allocated area of the heap.
-bool GenCollectedHeap::is_in_youngest(void* p) {
-  return _gens[0]->is_in(p);
+#ifdef ASSERT
+// Don't implement this by using is_in_young().  This method is used
+// in some cases to check that is_in_young() is correct.
+bool GenCollectedHeap::is_in_partial_collection(const void* p) {
+  assert(is_in_reserved(p) || p == NULL,
+    "Does not work if address is non-null and outside of the heap");
+  // The order of the generations is young (low addr), old, perm (high addr)
+  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
 }
+#endif
 
 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
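
GenCollectedHeap::is_in_young() above also illustrates a pattern this change uses in several places: compute the cheap boundary compare, then assert that it agrees with the authoritative (slower) range check. A standalone sketch with made-up bounds:

    #include <cassert>
    #include <cstdio>

    // Sketch of the fast-path-checked-against-slow-path pattern used by
    // is_in_young() above. The addresses are made up; the fast compare is
    // only meaningful for pointers that are already known to be in the heap.
    typedef unsigned long addr_t;
    const addr_t young_start = 0x100000;   // GenCollectedHeap: young is lowest
    const addr_t old_start   = 0x200000;

    bool is_in_young_slow(addr_t p) {      // stand-in for is_in_reserved()
      return p >= young_start && p < old_start;
    }

    bool is_in_young_fast(addr_t p) {
      bool result = p < old_start;         // one compare, assumes p is in heap
      assert(result == is_in_young_slow(p) && "incorrect test");
      return result;
    }

    int main() {
      printf("%d\n", is_in_young_fast(young_start + 0x10));  // prints 1
      printf("%d\n", is_in_young_fast(old_start + 0x10));    // prints 0
      return 0;
    }
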
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed May 18 13:19:32 2011 +0200
@@ -216,8 +216,18 @@
     }
   }
 
-  // Returns "TRUE" iff "p" points into the youngest generation.
-  bool is_in_youngest(void* p);
+  // Returns true if the reference is to an object in the reserved space
+  // for the young generation.
+  // Assumes that the young gen address range is lower than that of the old gen.
+  bool is_in_young(oop p);
+
+#ifdef ASSERT
+  virtual bool is_in_partial_collection(const void* p);
+#endif
+
+  virtual bool is_scavengable(const void* addr) {
+    return is_in_young((oop)addr);
+  }
 
   // Iteration functions.
   void oop_iterate(OopClosure* cl);
@@ -283,7 +293,7 @@
     //       "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
-    return is_in_youngest((void*)new_obj);
+    return is_in_young(new_obj);
   }
 
   // Can a compiler elide a store barrier when it writes
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed May 18 13:19:32 2011 +0200
@@ -102,6 +102,17 @@
 };
 static AssertIsPermClosure assert_is_perm_closure;
 
+#ifdef ASSERT
+class AssertNonScavengableClosure: public OopClosure {
+public:
+  virtual void do_oop(oop* p) {
+    assert(!Universe::heap()->is_in_partial_collection(*p),
+      "Referent should not be scavengable.");  }
+  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
+};
+static AssertNonScavengableClosure assert_is_non_scavengable_closure;
+#endif
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -196,9 +207,10 @@
         CodeCache::scavenge_root_nmethods_do(code_roots);
       }
     }
-    // Verify if the code cache contents are in the perm gen
-    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
-    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
+    // Verify that the code cache contents are not subject to
+    // movement by a scavenging collection.
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   }
 
   if (!collecting_perm_gen) {
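
The closure added above follows the standard OopClosure visitor shape, restricted to debug builds. A minimal compilable sketch with hypothetical stand-ins for OopClosure and the heap query:

    #include <cassert>
    #include <cstddef>

    // Sketch of the debug-only closure pattern above; the types are
    // hypothetical stand-ins for the HotSpot OopClosure and heap.
    struct HeapSketch {
      bool is_in_partial_collection(const void* p) const {
        (void)p;
        return false;            // toy heap: nothing is subject to a scavenge
      }
    };

    static HeapSketch g_heap;

    struct OopClosureSketch {
      virtual ~OopClosureSketch() {}
      virtual void do_oop(void** p) = 0;
    };

    struct AssertNonScavengableClosureSketch : OopClosureSketch {
      virtual void do_oop(void** p) {
        // Fires if a root advertised as non-scavengable actually points into
        // the part of the heap that a scavenge may move.
        assert(!g_heap.is_in_partial_collection(*p) &&
               "Referent should not be scavengable.");
      }
    };

    int main() {
      int referent = 0;
      void* slot = &referent;
      AssertNonScavengableClosureSketch cl;
      cl.do_oop(&slot);          // passes: the toy heap says "not scavengable"
      return 0;
    }
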
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Wed Jul 05 17:45:01 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Wed May 18 13:19:32 2011 +0200
@@ -397,7 +397,7 @@
 
   if (referent != NULL) {
     guarantee(referent->is_oop(), "referent field heap failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the referent
       // field is not part of the oop mask and therefore skipped by the
       // regular verify code.
@@ -415,7 +415,7 @@
   if (next != NULL) {
     guarantee(next->is_oop(), "next field verify failed");
     guarantee(next->is_instanceRef(), "next field verify failed");
-    if (gch != NULL && !gch->is_in_youngest(obj)) {
+    if (gch != NULL && !gch->is_in_young(obj)) {
       // We do a specific remembered set check here since the next field is
       // not part of the oop mask and therefore skipped by the regular
       // verify code.