Merge
author coleenp
Fri, 13 Mar 2015 18:59:41 +0000
changeset 29577 bb06d25e302d
parent 29575 437742bd5a1b (current diff)
parent 29576 c223b0a9872e (diff)
child 29579 9ff6780f640a
Merge
hotspot/src/share/vm/classfile/classLoaderData.cpp
hotspot/src/share/vm/classfile/classLoaderData.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -467,6 +467,12 @@
       } else {
         ShouldNotReachHere();
       }
+    } else {
+      // Metadata is alive.
+      // If scratch_class is on stack then it shouldn't be on this list!
+      assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
+             "scratch classes on this list should be dead");
+      // Also should assert that other metadata on the list was found in handles.
     }
   }
 }
@@ -737,11 +743,22 @@
 
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) {
+bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure,
+                                        bool clean_previous_versions) {
+
   ClassLoaderData* data = _head;
   ClassLoaderData* prev = NULL;
   bool seen_dead_loader = false;
 
+  // Mark metadata seen on the stack only so we can delete unneeded entries.
+  // Only walk all metadata, including the expensive code cache walk, for Full GC
+  // and only if class redefinition has occurred and there are previous versions
+  // of Klasses to delete.
+  bool walk_all_metadata = clean_previous_versions &&
+                           JvmtiExport::has_redefined_a_class() &&
+                           InstanceKlass::has_previous_versions();
+  MetadataOnStackMark md_on_stack(walk_all_metadata);
+
   // Save previous _unloading pointer for CMS which may add to unloading list before
   // purging and we don't want to rewalk the previously unloaded class loader data.
   _saved_unloading = _unloading;
@@ -749,6 +766,11 @@
   data = _head;
   while (data != NULL) {
     if (data->is_alive(is_alive_closure)) {
+      // clean metaspace
+      if (walk_all_metadata) {
+        data->classes_do(InstanceKlass::purge_previous_versions);
+      }
+      data->free_deallocate_list();
       prev = data;
       data = data->next();
       continue;
@@ -770,11 +792,6 @@
     _unloading = dead;
   }
 
-  if (clean_alive) {
-    // Clean previous versions and the deallocate list.
-    ClassLoaderDataGraph::clean_metaspaces();
-  }
-
   if (seen_dead_loader) {
     post_class_unload_events();
   }
@@ -782,21 +799,6 @@
   return seen_dead_loader;
 }
 
-void ClassLoaderDataGraph::clean_metaspaces() {
-  // mark metadata seen on the stack and code cache so we can delete unneeded entries.
-  bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
-  MetadataOnStackMark md_on_stack(has_redefined_a_class);
-
-  if (has_redefined_a_class) {
-    for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
-      data->classes_do(InstanceKlass::purge_previous_versions);
-    }
-  }
-
-  // Should purge the previous version before deallocating.
-  free_deallocate_lists();
-}
-
 void ClassLoaderDataGraph::purge() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* list = _unloading;
@@ -829,12 +831,6 @@
 #endif
 }
 
-void ClassLoaderDataGraph::free_deallocate_lists() {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    cld->free_deallocate_list();
-  }
-}
-
 // CDS support
 
 // Global metaspaces for writing information to the shared archive.  When
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
   static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
   static void post_class_unload_events(void);
-  static void clean_metaspaces();
  public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
@@ -95,7 +94,7 @@
   static void methods_do(void f(Method*));
   static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
-  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive);
+  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_previous_versions);
 
   // CMS support.
   static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
@@ -114,8 +113,6 @@
   static bool has_metaspace_oom()           { return _metaspace_oom; }
   static void set_metaspace_oom(bool value) { _metaspace_oom = value; }
 
-  static void free_deallocate_lists();
-
   static void dump_on(outputStream * const out) PRODUCT_RETURN;
   static void dump() { dump_on(tty); }
   static void verify();
--- a/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/classfile/metadataOnStackMark.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,26 +33,31 @@
 #include "services/threadService.hpp"
 #include "utilities/chunkedList.hpp"
 
-volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
-volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;
+MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
+MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;
 
+MetadataOnStackBuffer* MetadataOnStackMark::_current_buffer = NULL;
 NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)
 
 // Walk metadata on the stack and mark it so that redefinition doesn't delete
-// it.  Class unloading also walks the previous versions and might try to
-// delete it, so this class is used by class unloading also.
-MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) {
+// it.  Class unloading only deletes in-error class files, methods created by
+// the relocator, and dummy constant pools.  None of these appear anywhere except
+// in metadata Handles.
+MetadataOnStackMark::MetadataOnStackMark(bool redefinition_walk) {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
   assert(_used_buffers == NULL, "sanity check");
+  assert(!_is_active, "MetadataOnStackMarks do not nest");
   NOT_PRODUCT(_is_active = true;)
 
-  Threads::metadata_do(Metadata::mark_on_stack);
-  if (visit_code_cache) {
+  Threads::metadata_handles_do(Metadata::mark_on_stack);
+
+  if (redefinition_walk) {
+    Threads::metadata_do(Metadata::mark_on_stack);
     CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
+    CompileBroker::mark_on_stack();
+    JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
+    ThreadService::metadata_do(Metadata::mark_on_stack);
   }
-  CompileBroker::mark_on_stack();
-  JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
-  ThreadService::metadata_do(Metadata::mark_on_stack);
 }
 
 MetadataOnStackMark::~MetadataOnStackMark() {
@@ -60,10 +65,9 @@
   // Unmark everything that was marked.   Can't do the same walk because
   // redefine classes messes up the code cache so the set of methods
   // might not be the same.
+  retire_current_buffer();
 
-  retire_buffer_for_thread(Thread::current());
-
-  MetadataOnStackBuffer* buffer = const_cast<MetadataOnStackBuffer* >(_used_buffers);
+  MetadataOnStackBuffer* buffer = _used_buffers;
   while (buffer != NULL) {
     // Clear on stack state for all metadata.
     size_t size = buffer->size();
@@ -77,7 +81,7 @@
     // Move the buffer to the free list.
     buffer->clear();
     buffer->set_next_used(NULL);
-    buffer->set_next_free(const_cast<MetadataOnStackBuffer*>(_free_buffers));
+    buffer->set_next_free(_free_buffers);
     _free_buffers = buffer;
 
     // Step to next used buffer.
@@ -93,35 +97,23 @@
   if (buffer == NULL) {
     return;
   }
-
-  MetadataOnStackBuffer* old_head;
-
-  do {
-    old_head = const_cast<MetadataOnStackBuffer*>(_used_buffers);
-    buffer->set_next_used(old_head);
-  } while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head);
-}
-
-void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) {
-  retire_buffer(thread->metadata_on_stack_buffer());
-  thread->set_metadata_on_stack_buffer(NULL);
+  buffer->set_next_used(_used_buffers);
+  _used_buffers = buffer;
 }
 
-bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) {
-  return thread->metadata_on_stack_buffer() != NULL;
+// Current buffer is full or we're ready to walk them; add it to the used list.
+void MetadataOnStackMark::retire_current_buffer() {
+  retire_buffer(_current_buffer);
+  _current_buffer = NULL;
 }
 
+// Get buffer off free list.
 MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
-  MetadataOnStackBuffer* allocated;
-  MetadataOnStackBuffer* new_head;
+  MetadataOnStackBuffer* allocated = _free_buffers;
 
-  do {
-    allocated = const_cast<MetadataOnStackBuffer*>(_free_buffers);
-    if (allocated == NULL) {
-      break;
-    }
-    new_head = allocated->next_free();
-  } while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated);
+  if (allocated != NULL) {
+    _free_buffers = allocated->next_free();
+  }
 
   if (allocated == NULL) {
     allocated = new MetadataOnStackBuffer();
@@ -133,10 +125,10 @@
 }
 
 // Record which objects are marked so we can unmark the same objects.
-void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
+void MetadataOnStackMark::record(Metadata* m) {
   assert(_is_active, "metadata on stack marking is active");
 
-  MetadataOnStackBuffer* buffer =  thread->metadata_on_stack_buffer();
+  MetadataOnStackBuffer* buffer = _current_buffer;
 
   if (buffer != NULL && buffer->is_full()) {
     retire_buffer(buffer);
@@ -145,7 +137,7 @@
 
   if (buffer == NULL) {
     buffer = allocate_buffer();
-    thread->set_metadata_on_stack_buffer(buffer);
+    _current_buffer = buffer;
   }
 
   buffer->push(m);
--- a/hotspot/src/share/vm/classfile/metadataOnStackMark.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/classfile/metadataOnStackMark.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,23 +36,23 @@
 // or executing methods, so that it can't be deleted during class redefinition
 // and class unloading.
 // This is also used for other things that can be deallocated, like class
-// metadata during parsing, relocated methods, and methods in backtraces.
+// metadata during parsing if errors occur, relocated methods, and temporary
+// constant pools.
 class MetadataOnStackMark : public StackObj {
   NOT_PRODUCT(static bool _is_active;)
-
-  static volatile MetadataOnStackBuffer* _used_buffers;
-  static volatile MetadataOnStackBuffer* _free_buffers;
+  static MetadataOnStackBuffer* _used_buffers;
+  static MetadataOnStackBuffer* _free_buffers;
+  static MetadataOnStackBuffer* _current_buffer;
 
   static MetadataOnStackBuffer* allocate_buffer();
   static void retire_buffer(MetadataOnStackBuffer* buffer);
 
  public:
-  MetadataOnStackMark(bool visit_code_cache);
+  MetadataOnStackMark(bool redefinition_walk);
    ~MetadataOnStackMark();
 
-  static void record(Metadata* m, Thread* thread);
-  static void retire_buffer_for_thread(Thread* thread);
-  static bool has_buffer_for_thread(Thread* thread);
+  static void record(Metadata* m);
+  static void retire_current_buffer();
 };
 
 #endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1690,9 +1690,11 @@
 
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
-bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) {
+bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive,
+                                    bool clean_previous_versions) {
   // First, mark for unload all ClassLoaderData referencing a dead class loader.
-  bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive);
+  bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive,
+                                                               clean_previous_versions);
   if (unloading_occurred) {
     dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -335,7 +335,8 @@
 
   // Unload (that is, break root links to) all unmarked classes and
   // loaders.  Returns "true" iff something was unloaded.
-  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true);
+  static bool do_unloading(BoolObjectClosure* is_alive,
+                           bool clean_previous_versions = true);
 
   // Used by DumpSharedSpaces only to remove classes that failed verification
   static void remove_classes_in_error_state();
--- a/hotspot/src/share/vm/code/nmethod.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1572,17 +1572,12 @@
   set_unload_reported();
 }
 
-void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
+void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
   if (ic->is_icholder_call()) {
     // The only exception is compiledICHolder oops which may
     // yet be marked below. (We check this further below).
     CompiledICHolder* cichk_oop = ic->cached_icholder();
 
-    if (mark_on_stack) {
-      Metadata::mark_on_stack(cichk_oop->holder_method());
-      Metadata::mark_on_stack(cichk_oop->holder_klass());
-    }
-
     if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
         cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
       return;
@@ -1590,10 +1585,6 @@
   } else {
     Metadata* ic_oop = ic->cached_metadata();
     if (ic_oop != NULL) {
-      if (mark_on_stack) {
-        Metadata::mark_on_stack(ic_oop);
-      }
-
       if (ic_oop->is_klass()) {
         if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
           return;
@@ -1634,8 +1625,7 @@
   // The RedefineClasses() API can cause the class unloading invariant
   // to no longer be true. See jvmtiExport.hpp for details.
   // Also, leave a debugging breadcrumb in local flag.
-  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
-  if (a_class_was_redefined) {
+  if (JvmtiExport::has_redefined_a_class()) {
     // This set of the unloading_occurred flag is done before the
     // call to post_compiled_method_unload() so that the unloading
     // of this nmethod is reported.
@@ -1654,7 +1644,7 @@
     while(iter.next()) {
       if (iter.type() == relocInfo::virtual_call_type) {
         CompiledIC *ic = CompiledIC_at(&iter);
-        clean_ic_if_metadata_is_dead(ic, is_alive, false);
+        clean_ic_if_metadata_is_dead(ic, is_alive);
       }
     }
   }
@@ -1741,33 +1731,6 @@
   return false;
 }
 
-void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
-  assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
-
-  metadata_Relocation* r = iter_at_metadata->metadata_reloc();
-  // In this metadata, we must only follow those metadatas directly embedded in
-  // the code.  Other metadatas (oop_index>0) are seen as part of
-  // the metadata section below.
-  assert(1 == (r->metadata_is_immediate()) +
-         (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
-         "metadata must be found in exactly one place");
-  if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
-    Metadata* md = r->metadata_value();
-    if (md != _method) Metadata::mark_on_stack(md);
-  }
-}
-
-void nmethod::mark_metadata_on_stack_non_relocs() {
-    // Visit the metadata section
-    for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
-      if (*p == Universe::non_oop_word() || *p == NULL)  continue;  // skip non-oops
-      Metadata* md = *p;
-      Metadata::mark_on_stack(md);
-    }
-
-    // Visit metadata not embedded in the other places.
-    if (_method != NULL) Metadata::mark_on_stack(_method);
-}
 
 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
   ResourceMark rm;
@@ -1790,19 +1753,13 @@
   // The RedefineClasses() API can cause the class unloading invariant
   // to no longer be true. See jvmtiExport.hpp for details.
   // Also, leave a debugging breadcrumb in local flag.
-  bool a_class_was_redefined = JvmtiExport::has_redefined_a_class();
-  if (a_class_was_redefined) {
+  if (JvmtiExport::has_redefined_a_class()) {
     // This set of the unloading_occurred flag is done before the
     // call to post_compiled_method_unload() so that the unloading
     // of this nmethod is reported.
     unloading_occurred = true;
   }
 
-  // When class redefinition is used all metadata in the CodeCache has to be recorded,
-  // so that unused "previous versions" can be purged. Since walking the CodeCache can
-  // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
-  bool mark_metadata_on_stack = a_class_was_redefined;
-
   // Exception cache
   clean_exception_cache(is_alive);
 
@@ -1818,7 +1775,7 @@
       if (unloading_occurred) {
         // If class unloading occurred we first iterate over all inline caches and
         // clear ICs where the cached oop is referring to an unloaded klass or method.
-        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
+        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
       }
 
       postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
@@ -1839,16 +1796,10 @@
       break;
 
     case relocInfo::metadata_type:
-      if (mark_metadata_on_stack) {
-        mark_metadata_on_stack_at(&iter);
-      }
+      break; // nothing to do.
     }
   }
 
-  if (mark_metadata_on_stack) {
-    mark_metadata_on_stack_non_relocs();
-  }
-
   if (is_unloaded) {
     return postponed;
   }
--- a/hotspot/src/share/vm/code/nmethod.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -593,9 +593,6 @@
   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
   bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
 
-  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
-  void mark_metadata_on_stack_non_relocs();
-
  public:
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -2532,11 +2532,6 @@
     G1CMTraceTime trace("Unloading", G1Log::finer());
 
     if (ClassUnloadingWithConcurrentMark) {
-      // Cleaning of klasses depends on correct information from MetadataMarkOnStack. The CodeCache::mark_on_stack
-      // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
-      // Defer the cleaning until we have complete on_stack data.
-      MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
-
       bool purged_classes;
 
       {
@@ -2548,11 +2543,6 @@
         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
       }
-
-      {
-        G1CMTraceTime trace("Deallocate Metadata", G1Log::finest());
-        ClassLoaderDataGraph::free_deallocate_lists();
-      }
     }
 
     if (G1StringDedup::is_enabled()) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -4909,10 +4909,6 @@
         clean_nmethod(claimed_nmethods[i]);
       }
     }
-
-    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
-    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
-    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
   }
 
   void work_second_pass(uint worker_id) {
@@ -4965,9 +4961,6 @@
     // G1 specific cleanup work that has
     // been moved here to be done in parallel.
     ik->clean_dependent_nmethods();
-    if (JvmtiExport::has_redefined_a_class()) {
-      InstanceKlass::purge_previous_versions(ik);
-    }
   }
 
   void work() {
@@ -5002,18 +4995,8 @@
       _klass_cleaning_task(is_alive) {
   }
 
-  void pre_work_verification() {
-    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
-  }
-
-  void post_work_verification() {
-    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
-  }
-
   // The parallel work done by all worker threads.
   void work(uint worker_id) {
-    pre_work_verification();
-
     // Do first pass of code cache cleaning.
     _code_cache_task.work_first_pass(worker_id);
 
@@ -5032,8 +5015,6 @@
 
     // Clean all klasses that were not unloaded.
     _klass_cleaning_task.work();
-
-    post_work_verification();
   }
 };
 
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1790,17 +1790,10 @@
 
 void ConstantPool::set_on_stack(const bool value) {
   if (value) {
-    int old_flags = *const_cast<volatile int *>(&_flags);
-    while ((old_flags & _on_stack) == 0) {
-      int new_flags = old_flags | _on_stack;
-      int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
-
-      if (result == old_flags) {
-        // Succeeded.
-        MetadataOnStackMark::record(this, Thread::current());
-        return;
-      }
-      old_flags = result;
+    // Only record if it's not already set.
+    if (!on_stack()) {
+      _flags |= _on_stack;
+      MetadataOnStackMark::record(this);
     }
   } else {
     // Clearing is done single-threadedly.
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -3492,9 +3492,11 @@
 #endif
 
 
+
 // RedefineClasses() support for previous versions:
-
-// Purge previous versions
+int InstanceKlass::_previous_version_count = 0;
+
+// Purge previous versions before adding new previous versions of the class.
 void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
   if (ik->previous_versions() != NULL) {
     // This klass has previous versions so see what we can cleanup
@@ -3524,6 +3526,11 @@
         // are executing.  Unlink this previous_version.
         // The previous version InstanceKlass is on the ClassLoaderData deallocate list
         // so will be deallocated during the next phase of class unloading.
+        RC_TRACE(0x00000200, ("purge: previous version " INTPTR_FORMAT " is dead",
+                              pv_node));
+        // For debugging purposes.
+        pv_node->set_is_scratch_class();
+        pv_node->class_loader_data()->add_to_deallocate_list(pv_node);
         pv_node = pv_node->previous_versions();
         last->link_previous_versions(pv_node);
         deleted_count++;
@@ -3537,7 +3544,7 @@
         live_count++;
       }
 
-      // At least one method is live in this previous version so clean its MethodData.
+      // At least one method is live in this previous version.
       // Reset dead EMCP methods not to get breakpoints.
       // All methods are deallocated when all of the methods for this class are no
       // longer running.
@@ -3561,12 +3568,6 @@
               ("purge: %s(%s): prev method @%d in version @%d is alive",
               method->name()->as_C_string(),
               method->signature()->as_C_string(), j, version));
-#ifdef ASSERT
-            if (method->method_data() != NULL) {
-              // Verify MethodData for running methods don't refer to old methods.
-              method->method_data()->verify_clean_weak_method_links();
-            }
-#endif // ASSERT
           }
         }
       }
@@ -3579,18 +3580,6 @@
       ("purge: previous version stats: live=%d, deleted=%d", live_count,
       deleted_count));
   }
-
-#ifdef ASSERT
-  // Verify clean MethodData for this class's methods, e.g. they don't refer to
-  // old methods that are no longer running.
-  Array<Method*>* methods = ik->methods();
-  int num_methods = methods->length();
-  for (int index = 0; index < num_methods; ++index) {
-    if (methods->at(index)->method_data() != NULL) {
-      methods->at(index)->method_data()->verify_clean_weak_method_links();
-    }
-  }
-#endif // ASSERT
 }
 
 void InstanceKlass::mark_newly_obsolete_methods(Array<Method*>* old_methods,
@@ -3677,6 +3666,11 @@
   ConstantPool* cp_ref = scratch_class->constants();
   if (!cp_ref->on_stack()) {
     RC_TRACE(0x00000400, ("add: scratch class not added; no methods are running"));
+    // For debugging purposes.
+    scratch_class->set_is_scratch_class();
+    scratch_class->class_loader_data()->add_to_deallocate_list(scratch_class());
+    // Update count for class unloading.
+    _previous_version_count--;
     return;
   }
 
@@ -3688,8 +3682,8 @@
         // if EMCP method (not obsolete) is on the stack, mark as EMCP so that
         // we can add breakpoints for it.
 
-        // We set the method->on_stack bit during safepoints for class redefinition and
-        // class unloading and use this bit to set the is_running_emcp bit.
+        // We set the method->on_stack bit during safepoints for class redefinition
+        // and use this bit to set the is_running_emcp bit.
         // After the safepoint, the on_stack bit is cleared and the running emcp
         // method may exit.   If so, we would set a breakpoint in a method that
         // is never reached, but this won't be noticeable to the programmer.
@@ -3708,6 +3702,8 @@
   assert(scratch_class->previous_versions() == NULL, "shouldn't have a previous version");
   scratch_class->link_previous_versions(previous_versions());
   link_previous_versions(scratch_class());
+  // Update count for class unloading.
+  _previous_version_count++;
 } // end add_previous_version()
 
 
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -206,7 +206,8 @@
     _misc_is_contended             = 1 << 4, // marked with contended annotation
     _misc_has_default_methods      = 1 << 5, // class/superclass/implemented interfaces has default methods
     _misc_declares_default_methods = 1 << 6, // directly declares default methods (any access)
-    _misc_has_been_redefined       = 1 << 7  // class has been redefined
+    _misc_has_been_redefined       = 1 << 7, // class has been redefined
+    _misc_is_scratch_class         = 1 << 8  // class is the redefined scratch class
   };
   u2              _misc_flags;
   u2              _minor_version;        // minor version number of class file
@@ -626,11 +627,23 @@
     _misc_flags |= _misc_has_been_redefined;
   }
 
+  bool is_scratch_class() const {
+    return (_misc_flags & _misc_is_scratch_class) != 0;
+  }
+
+  void set_is_scratch_class() {
+    _misc_flags |= _misc_is_scratch_class;
+  }
+
   void init_previous_versions() {
     _previous_versions = NULL;
   }
 
+ private:
+  static int  _previous_version_count;
+ public:
   static void purge_previous_versions(InstanceKlass* ik);
+  static bool has_previous_versions() { return _previous_version_count > 0; }
 
   // JVMTI: Support for caching a class file before it is modified by an agent that can do retransformation
   void set_cached_class_file(JvmtiCachedClassFileData *data) {
--- a/hotspot/src/share/vm/oops/method.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1970,9 +1970,10 @@
   // on stack means some method referring to it is also on the stack.
   constants()->set_on_stack(value);
 
-  bool succeeded = _access_flags.set_on_stack(value);
-  if (value && succeeded) {
-    MetadataOnStackMark::record(this, Thread::current());
+  bool already_set = on_stack();
+  _access_flags.set_on_stack(value);
+  if (value && !already_set) {
+    MetadataOnStackMark::record(this);
   }
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -142,14 +142,11 @@
 
   for (int i = 0; i < _class_count; i++) {
     redefine_single_class(_class_defs[i].klass, _scratch_classes[i], thread);
-    ClassLoaderData* cld = _scratch_classes[i]->class_loader_data();
-    // Free the memory for this class at class unloading time.  Not before
-    // because CMS might think this is still live.
-    cld->add_to_deallocate_list((InstanceKlass*)_scratch_classes[i]);
-    _scratch_classes[i] = NULL;
   }
 
   // Clean out MethodData pointing to old Method*
+  // Have to do this after all classes are redefined and all methods that
+  // are redefined are marked as old.
   MethodDataCleaner clean_weak_method_links;
   ClassLoaderDataGraph::classes_do(&clean_weak_method_links);
 
--- a/hotspot/src/share/vm/runtime/thread.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -203,8 +203,6 @@
   // This initial value ==> never claimed.
   _oops_do_parity = 0;
 
-  _metadata_on_stack_buffer = NULL;
-
   // the handle mark links itself to last_handle_mark
   new HandleMark(this);
 
@@ -776,7 +774,8 @@
   // no nmethods in a generic thread...
 }
 
-void Thread::metadata_do(void f(Metadata*)) {
+void Thread::metadata_handles_do(void f(Metadata*)) {
+  // Only walk the Handles in Thread.
   if (metadata_handles() != NULL) {
     for (int i = 0; i< metadata_handles()->length(); i++) {
       f(metadata_handles()->at(i));
@@ -2713,7 +2712,6 @@
 }
 
 void JavaThread::metadata_do(void f(Metadata*)) {
-  Thread::metadata_do(f);
   if (has_last_Java_frame()) {
     // Traverse the execution stack to call f() on the methods in the stack
     for (StackFrameStream fst(this); !fst.is_done(); fst.next()) {
@@ -4104,6 +4102,21 @@
   }
 }
 
+class ThreadHandlesClosure : public ThreadClosure {
+  void (*_f)(Metadata*);
+ public:
+  ThreadHandlesClosure(void f(Metadata*)) : _f(f) {}
+  virtual void do_thread(Thread* thread) {
+    thread->metadata_handles_do(_f);
+  }
+};
+
+void Threads::metadata_handles_do(void f(Metadata*)) {
+  // Only walk the Handles in Thread.
+  ThreadHandlesClosure handles_closure(f);
+  threads_do(&handles_closure);
+}
+
 void Threads::deoptimized_wrt_marked_nmethods() {
   ALL_JAVA_THREADS(p) {
     p->deoptimized_wrt_marked_nmethods();
--- a/hotspot/src/share/vm/runtime/thread.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -255,9 +255,6 @@
   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                 // the Java heap
 
-  // Thread-local buffer used by MetadataOnStackMark.
-  MetadataOnStackBuffer* _metadata_on_stack_buffer;
-
   TRACE_DATA _trace_data;                       // Thread-local data for tracing
 
   ThreadExt _ext;
@@ -478,7 +475,7 @@
   void nmethods_do(CodeBlobClosure* cf);
 
   // jvmtiRedefineClasses support
-  void metadata_do(void f(Metadata*));
+  void metadata_handles_do(void f(Metadata*));
 
   // Used by fast lock support
   virtual bool is_lock_owned(address adr) const;
@@ -494,9 +491,6 @@
   // creation fails due to lack of memory, too many threads etc.
   bool set_as_starting_thread();
 
-  void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; }
-  MetadataOnStackBuffer* metadata_on_stack_buffer() const          { return _metadata_on_stack_buffer; }
-
 protected:
   // OS data associated with the thread
   OSThread* _osthread;  // Platform-specific thread information
@@ -1915,6 +1909,7 @@
 
   // RedefineClasses support
   static void metadata_do(void f(Metadata*));
+  static void metadata_handles_do(void f(Metadata*));
 
 #ifdef ASSERT
   static bool is_vm_complete() { return _vm_complete; }
--- a/hotspot/src/share/vm/utilities/accessFlags.cpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/utilities/accessFlags.cpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,20 +47,6 @@
   } while(f != old_flags);
 }
 
-// Returns true iff this thread succeeded setting the bit.
-bool AccessFlags::atomic_set_one_bit(jint bit) {
-  // Atomically update the flags with the bit given
-  jint old_flags, new_flags, f;
-  bool is_setting_bit = false;
-  do {
-    old_flags = _flags;
-    new_flags = old_flags | bit;
-    is_setting_bit = old_flags != new_flags;
-    f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
-  } while(f != old_flags);
-
-  return is_setting_bit;
-}
 
 #if !defined(PRODUCT) || INCLUDE_JVMTI
 
--- a/hotspot/src/share/vm/utilities/accessFlags.hpp	Fri Mar 13 09:18:28 2015 -0700
+++ b/hotspot/src/share/vm/utilities/accessFlags.hpp	Fri Mar 13 18:59:41 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -172,7 +172,6 @@
 
   // Atomic update of flags
   void atomic_set_bits(jint bits);
-  bool atomic_set_one_bit(jint bit);
   void atomic_clear_bits(jint bits);
 
  private:
@@ -234,13 +233,12 @@
                                          atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
                                        }
 
-  bool set_on_stack(const bool value)
+  void set_on_stack(const bool value)
                                        {
                                          if (value) {
-                                           return atomic_set_one_bit(JVM_ACC_ON_STACK);
+                                           atomic_set_bits(JVM_ACC_ON_STACK);
                                          } else {
                                            atomic_clear_bits(JVM_ACC_ON_STACK);
-                                           return true; // Ignored
                                          }
                                        }
   // Conversion