8046809: vm/mlvm/meth/stress/compiler/deoptimize CodeCache is full.
author anoll
Fri, 24 Oct 2014 14:25:46 +0200
changeset 27420 04e6f914cce1
parent 27419 a934f24b4dcf
child 27421 df2b6ff5c959
8046809: vm/mlvm/meth/stress/compiler/deoptimize CodeCache is full. Summary: Use separate sweeper thread; enables more aggressive sweeping. Reviewed-by: kvn, jrose
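
In outline, this changeset moves nmethod sweeping from the compiler threads into a dedicated sweeper thread and makes CodeCache::allocate() the single place where a full code cache is detected and handled. A rough sketch of the resulting control flow, using only names that appear in the diffs below:

    // Allocation path:
    //   CodeCache::allocate(size, code_blob_type)
    //     -> NMethodSweeper::notify(code_blob_type)  // wake the sweeper if the heap is filling up
    //     -> heap->allocate(size); if this fails and expansion fails:
    //          CompileBroker::handle_full_code_cache(code_blob_type)
    //
    // Sweeper thread (CodeCacheSweeperThread, started by the CompileBroker):
    //   NMethodSweeper::sweeper_loop()
    //     -> timed wait on CodeCache_lock (a Monitor after this change)
    //     -> possibly_sweep() when notified
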
hotspot/src/share/vm/ci/ciEnv.cpp
hotspot/src/share/vm/code/codeBlob.cpp
hotspot/src/share/vm/code/codeBlob.hpp
hotspot/src/share/vm/code/codeCache.cpp
hotspot/src/share/vm/code/codeCache.hpp
hotspot/src/share/vm/code/nmethod.cpp
hotspot/src/share/vm/code/vtableStubs.cpp
hotspot/src/share/vm/compiler/compileBroker.cpp
hotspot/src/share/vm/compiler/compileBroker.hpp
hotspot/src/share/vm/interpreter/interpreterRuntime.cpp
hotspot/src/share/vm/memory/heap.cpp
hotspot/src/share/vm/memory/heap.hpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/output.cpp
hotspot/src/share/vm/prims/methodHandles.cpp
hotspot/src/share/vm/prims/methodHandles.hpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/mutexLocker.cpp
hotspot/src/share/vm/runtime/mutexLocker.hpp
hotspot/src/share/vm/runtime/sharedRuntime.cpp
hotspot/src/share/vm/runtime/sweeper.cpp
hotspot/src/share/vm/runtime/sweeper.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/runtime/vm_operations.cpp
hotspot/src/share/vm/runtime/vm_operations.hpp
hotspot/src/share/vm/trace/trace.xml
hotspot/test/compiler/startup/SmallCodeCacheStartup.java
hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -1093,9 +1093,8 @@
     // JVMTI -- compiled method notification (must be done outside lock)
     nm->post_compiled_method_load_event();
   } else {
-    // The CodeCache is full. Print out warning and disable compilation.
+    // The CodeCache is full.
     record_failure("code cache is full");
-    CompileBroker::handle_full_code_cache(CodeCache::get_code_blob_type(comp_level));
   }
 }
 
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -229,8 +229,8 @@
   return blob;
 }
 
-void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
-  return CodeCache::allocate(size, CodeBlobType::NonNMethod, is_critical);
+void* BufferBlob::operator new(size_t s, unsigned size) throw() {
+  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
 }
 
 void BufferBlob::free(BufferBlob *blob) {
@@ -260,10 +260,7 @@
   unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    // The parameter 'true' indicates a critical memory allocation.
-    // This means that CodeCacheMinimumFreeSpace is used, if necessary
-    const bool is_critical = true;
-    blob = new (size, is_critical) AdapterBlob(size, cb);
+    blob = new (size) AdapterBlob(size, cb);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
@@ -285,10 +282,7 @@
   size += round_to(buffer_size, oopSize);
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    // The parameter 'true' indicates a critical memory allocation.
-    // This means that CodeCacheMinimumFreeSpace is used, if necessary
-    const bool is_critical = true;
-    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
+    blob = new (size) MethodHandlesAdapterBlob(size);
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
@@ -336,14 +330,14 @@
 
 
 void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
 
 // operator new shared by all singletons:
 void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
-  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod, true);
+  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
   if (!p) fatal("Initial size of CodeCache is too small");
   return p;
 }
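
The BufferBlob changes above use C++ class-specific placement allocation: new (size) AdapterBlob(size, cb) calls the two-argument operator new declared in codeBlob.hpp, which carves the memory out of the code cache. A minimal standalone sketch of the pattern, with hypothetical Arena/Blob names standing in for the HotSpot types:

    #include <cstddef>
    #include <cstdlib>

    struct Arena {
      // Stand-in for CodeCache::allocate(); returns NULL on failure.
      static void* allocate(size_t bytes) { return std::malloc(bytes); }
    };

    struct Blob {
      // 's' is sizeof(Blob); 'size' is the full allocation including payload.
      // Because this allocation function is non-throwing, a NULL result makes
      // the whole new-expression evaluate to NULL without running the constructor.
      void* operator new(size_t s, unsigned size) throw() {
        return Arena::allocate(size);
      }
      explicit Blob(unsigned size) {}
    };

    // Usage mirrors the diff; callers must check the result for NULL:
    //   Blob* b = new (total_size) Blob(total_size);
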
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -221,7 +221,7 @@
   BufferBlob(const char* name, int size);
   BufferBlob(const char* name, int size, CodeBuffer* cb);
 
-  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
+  void* operator new(size_t s, unsigned size) throw();
 
  public:
   // Creation
--- a/hotspot/src/share/vm/code/codeCache.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -44,6 +44,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/sweeper.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"
@@ -192,16 +193,16 @@
   }
 
   // Make sure we have enough space for VM internal code
-  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
   }
   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
 
   // Align reserved sizes of CodeHeaps
-  size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
-  size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
-  size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
+  size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
+  size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
+  size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
 
   // Compute initial sizes of CodeHeaps
   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
@@ -348,14 +349,18 @@
   return next_blob(get_code_heap(cb), cb);
 }
 
-CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
-  // Do not seize the CodeCache lock here--if the caller has not
-  // already done so, we are going to lose bigtime, since the code
-  // cache will contain a garbage CodeBlob until the caller can
-  // run the constructor for the CodeBlob subclass he is busy
-  // instantiating.
+/**
+ * Do not seize the CodeCache lock here--if the caller has not
+ * already done so, we are going to lose bigtime, since the code
+ * cache will contain a garbage CodeBlob until the caller can
+ * run the constructor for the CodeBlob subclass it is busy
+ * instantiating.
+ */
+CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
+  // Possibly wakes up the sweeper thread.
+  NMethodSweeper::notify(code_blob_type);
   assert_locked_or_safepoint(CodeCache_lock);
-  assert(size > 0, "allocation request must be reasonable");
+  assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
   if (size <= 0) {
     return NULL;
   }
@@ -366,14 +371,18 @@
   assert(heap != NULL, "heap is null");
 
   while (true) {
-    cb = (CodeBlob*)heap->allocate(size, is_critical);
+    cb = (CodeBlob*)heap->allocate(size);
     if (cb != NULL) break;
     if (!heap->expand_by(CodeCacheExpansionSize)) {
       // Expansion failed
       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
-        // Fallback solution: Store non-nmethod code in the non-profiled code heap
-        return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
+        // Fallback solution: Store non-nmethod code in the non-profiled code heap.
+        // Note that in the sweeper we check the reverse_free_ratio of the non-profiled
+        // code heap and force stack scanning if less than 10% of the code heap is free.
+        return allocate(size, CodeBlobType::MethodNonProfiled);
       }
+      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      CompileBroker::handle_full_code_cache(code_blob_type);
       return NULL;
     }
     if (PrintCodeCacheExtension) {
@@ -771,19 +780,6 @@
 }
 
 /**
- * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
- */
-bool CodeCache::is_full(int* code_blob_type) {
-  FOR_ALL_HEAPS(heap) {
-    if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-      *code_blob_type = (*heap)->code_blob_type();
-      return true;
-    }
-  }
-  return false;
-}
-
-/**
  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
  * is free, reverse_free_ratio() returns 4.
  */
@@ -792,9 +788,13 @@
   if (heap == NULL) {
     return 0;
   }
-  double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
+
+  double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0
   double max_capacity = (double)heap->max_capacity();
-  return max_capacity / unallocated_capacity;
+  double result = max_capacity / unallocated_capacity;
+  assert(max_capacity >= unallocated_capacity, "Must be");
+  assert(result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
+  return result;
 }
 
 size_t CodeCache::bytes_allocated_in_freelists() {
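
reverse_free_ratio() maps a free fraction f of a code heap to 1/f, now clamped so it never divides by zero; callers recover the free percentage as 100/ratio. A worked example with hypothetical numbers:

    // max_capacity = 240 MB, unallocated_capacity = 60 MB  (25% of the heap free)
    //   reverse_free_ratio = 240 / 60 = 4
    // The sweeper converts this back into a percentage:
    //   free_percent = 1 / 4 * 100 = 25
    // and compares it against StartAggressiveSweepingAt (default 10).
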
--- a/hotspot/src/share/vm/code/codeCache.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -120,16 +120,16 @@
   static void initialize();
 
   // Allocation/administration
-  static CodeBlob* allocate(int size, int code_blob_type, bool is_critical = false); // allocates a new CodeBlob
-  static void commit(CodeBlob* cb);                     // called when the allocated CodeBlob has been filled
-  static int  alignment_unit();                         // guaranteed alignment of all CodeBlobs
-  static int  alignment_offset();                       // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
-  static void free(CodeBlob* cb);                       // frees a CodeBlob
-  static bool contains(void *p);                        // returns whether p is included
-  static void blobs_do(void f(CodeBlob* cb));           // iterates over all CodeBlobs
-  static void blobs_do(CodeBlobClosure* f);             // iterates over all CodeBlobs
-  static void nmethods_do(void f(nmethod* nm));         // iterates over all nmethods
-  static void alive_nmethods_do(void f(nmethod* nm));   // iterates over all alive nmethods
+  static CodeBlob* allocate(int size, int code_blob_type); // allocates a new CodeBlob
+  static void commit(CodeBlob* cb);                        // called when the allocated CodeBlob has been filled
+  static int  alignment_unit();                            // guaranteed alignment of all CodeBlobs
+  static int  alignment_offset();                          // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
+  static void free(CodeBlob* cb);                          // frees a CodeBlob
+  static bool contains(void *p);                           // returns whether p is included
+  static void blobs_do(void f(CodeBlob* cb));              // iterates over all CodeBlobs
+  static void blobs_do(CodeBlobClosure* f);                // iterates over all CodeBlobs
+  static void nmethods_do(void f(nmethod* nm));            // iterates over all nmethods
+  static void alive_nmethods_do(void f(nmethod* nm));      // iterates over all alive nmethods
 
   // Lookup
   static CodeBlob* find_blob(void* start);              // Returns the CodeBlob containing the given address
@@ -182,7 +182,6 @@
   static size_t unallocated_capacity();
   static size_t max_capacity();
 
-  static bool   is_full(int* code_blob_type);
   static double reverse_free_ratio(int code_blob_type);
 
   static bool needs_cache_clean()                     { return _needs_cache_clean; }
--- a/hotspot/src/share/vm/code/nmethod.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -804,10 +804,7 @@
 #endif // def HAVE_DTRACE_H
 
 void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
-  // With a SegmentedCodeCache, nmethods are allocated on separate heaps and therefore do not share memory
-  // with critical CodeBlobs. We define the allocation as critical to make sure all code heap memory is used.
-  bool is_critical = SegmentedCodeCache;
-  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level), is_critical);
+  return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
 }
 
 nmethod::nmethod(
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -63,7 +63,6 @@
    // If changing the name, update the other file accordingly.
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
       return NULL;
     }
     _chunk = blob->content_begin();
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -156,8 +156,6 @@
 CompileQueue* CompileBroker::_c2_compile_queue   = NULL;
 CompileQueue* CompileBroker::_c1_compile_queue   = NULL;
 
-GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
-
 
 class CompilationLog : public StringEventLog {
  public:
@@ -649,13 +647,10 @@
   lock()->notify_all();
 }
 
-// ------------------------------------------------------------------
-// CompileQueue::get
-//
-// Get the next CompileTask from a CompileQueue
+/**
+ * Get the next CompileTask from a CompileQueue
+ */
 CompileTask* CompileQueue::get() {
-  NMethodSweeper::possibly_sweep();
-
   MutexLocker locker(lock());
   // If _first is NULL we have no more compile jobs. There are two reasons for
   // having no compile jobs: First, we compiled everything we wanted. Second,
@@ -668,35 +663,16 @@
       return NULL;
     }
 
-    if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
-      // Wait a certain amount of time to possibly do another sweep.
-      // We must wait until stack scanning has happened so that we can
-      // transition a method's state from 'not_entrant' to 'zombie'.
-      long wait_time = NmethodSweepCheckInterval * 1000;
-      if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
-        // Only one thread at a time can do sweeping. Scale the
-        // wait time according to the number of compiler threads.
-        // As a result, the next sweep is likely to happen every 100ms
-        // with an arbitrary number of threads that do sweeping.
-        wait_time = 100 * CICompilerCount;
-      }
-      bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
-      if (timeout) {
-        MutexUnlocker ul(lock());
-        NMethodSweeper::possibly_sweep();
-      }
-    } else {
-      // If there are no compilation tasks and we can compile new jobs
-      // (i.e., there is enough free space in the code cache) there is
-      // no need to invoke the sweeper. As a result, the hotness of methods
-      // remains unchanged. This behavior is desired, since we want to keep
-      // the stable state, i.e., we do not want to evict methods from the
-      // code cache if it is unnecessary.
-      // We need a timed wait here, since compiler threads can exit if compilation
-      // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
-      // is not critical and we do not want idle compiler threads to wake up too often.
-      lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
-    }
+    // If there are no compilation tasks and we can compile new jobs
+    // (i.e., there is enough free space in the code cache) there is
+    // no need to invoke the sweeper. As a result, the hotness of methods
+    // remains unchanged. This behavior is desired, since we want to keep
+    // the stable state, i.e., we do not want to evict methods from the
+    // code cache if it is unnecessary.
+    // We need a timed wait here, since compiler threads can exit if compilation
+    // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
+    // is not critical and we do not want idle compiler threads to wake up too often.
+    lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   }
 
   if (CompileBroker::is_compilation_disabled_forever()) {
@@ -886,8 +862,8 @@
   _compilers[1] = new SharkCompiler();
 #endif // SHARK
 
-  // Start the CompilerThreads
-  init_compiler_threads(c1_count, c2_count);
+  // Start the compiler thread(s) and the sweeper thread
+  init_compiler_sweeper_threads(c1_count, c2_count);
   // totalTime performance counter is always created as it is required
   // by the implementation of java.lang.management.CompilationMBean.
   {
@@ -991,13 +967,10 @@
 }
 
 
-CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
-                                                    AbstractCompiler* comp, TRAPS) {
-  CompilerThread* compiler_thread = NULL;
-
-  Klass* k =
-    SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(),
-                                      true, CHECK_0);
+JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
+                                       AbstractCompiler* comp, bool compiler_thread, TRAPS) {
+  JavaThread* thread = NULL;
+  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_0);
   instanceKlassHandle klass (THREAD, k);
   instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
   Handle string = java_lang_String::create_from_str(name, CHECK_0);
@@ -1015,7 +988,11 @@
 
   {
     MutexLocker mu(Threads_lock, THREAD);
-    compiler_thread = new CompilerThread(queue, counters);
+    if (compiler_thread) {
+      thread = new CompilerThread(queue, counters);
+    } else {
+      thread = new CodeCacheSweeperThread();
+    }
     // At this point the new CompilerThread data-races with this startup
    // thread (which I believe is the primordial thread and NOT the VM
     // thread).  This means Java bytecodes being executed at startup can
@@ -1028,12 +1005,12 @@
     // in that case. However, since this must work and we do not allow
     // exceptions anyway, check and abort if this fails.
 
-    if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
+    if (thread == NULL || thread->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                     os::native_thread_creation_failed_msg());
     }
 
-    java_lang_Thread::set_thread(thread_oop(), compiler_thread);
+    java_lang_Thread::set_thread(thread_oop(), thread);
 
     // Note that this only sets the JavaThread _priority field, which by
     // definition is limited to Java priorities and not OS priorities.
@@ -1054,24 +1031,26 @@
         native_prio = os::java_to_os_priority[NearMaxPriority];
       }
     }
-    os::set_native_priority(compiler_thread, native_prio);
+    os::set_native_priority(thread, native_prio);
 
     java_lang_Thread::set_daemon(thread_oop());
 
-    compiler_thread->set_threadObj(thread_oop());
-    compiler_thread->set_compiler(comp);
-    Threads::add(compiler_thread);
-    Thread::start(compiler_thread);
+    thread->set_threadObj(thread_oop());
+    if (compiler_thread) {
+      thread->as_CompilerThread()->set_compiler(comp);
+    }
+    Threads::add(thread);
+    Thread::start(thread);
   }
 
   // Let go of Threads_lock before yielding
   os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
 
-  return compiler_thread;
+  return thread;
 }
 
 
-void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
+void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
 #if !defined(ZERO) && !defined(SHARK)
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
@@ -1088,17 +1067,14 @@
 
   int compiler_count = c1_compiler_count + c2_compiler_count;
 
-  _compiler_threads =
-    new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
-
   char name_buffer[256];
+  const bool compiler_thread = true;
   for (int i = 0; i < c2_compiler_count; i++) {
     // Create a name for our thread.
     sprintf(name_buffer, "C2 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // Shark and C2
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], CHECK);
-    _compiler_threads->append(new_thread);
+    make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
   }
 
   for (int i = c2_compiler_count; i < compiler_count; i++) {
@@ -1106,13 +1082,17 @@
     sprintf(name_buffer, "C1 CompilerThread%d", i);
     CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
     // C1
-    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], CHECK);
-    _compiler_threads->append(new_thread);
+    make_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], compiler_thread, CHECK);
   }
 
   if (UsePerfData) {
     PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
   }
+
+  if (MethodFlushing) {
+    // Initialize the sweeper thread
+    make_thread("Sweeper thread", NULL, NULL, NULL, false, CHECK);
+  }
 }
 
 
@@ -1759,13 +1739,6 @@
     // We need this HandleMark to avoid leaking VM handles.
     HandleMark hm(thread);
 
-    // Check if the CodeCache is full
-    int code_blob_type = 0;
-    if (CodeCache::is_full(&code_blob_type)) {
-      // The CodeHeap for code_blob_type is really full
-      handle_full_code_cache(code_blob_type);
-    }
-
     CompileTask* task = queue->get();
     if (task == NULL) {
       continue;
@@ -1773,8 +1746,9 @@
 
     // Give compiler threads an extra quanta.  They tend to be bursty and
     // this helps the compiler to finish up the job.
-    if( CompilerThreadHintNoPreempt )
+    if (CompilerThreadHintNoPreempt) {
       os::hint_no_preempt();
+    }
 
     // trace per thread time and compile statistics
     CompilerCounters* counters = ((CompilerThread*)thread)->counters();
@@ -2074,8 +2048,10 @@
 }
 
 /**
- * The CodeCache is full.  Print out warning and disable compilation
- * or try code cache cleaning so compilation can continue later.
+ * The CodeCache is full. Print warning and disable compilation.
+ * Schedule code cache cleaning so compilation can continue later.
+ * This function must only be called from CodeCache::allocate(),
+ * since we currently handle a full code cache uniformly.
  */
 void CompileBroker::handle_full_code_cache(int code_blob_type) {
   UseInterpreter = true;
@@ -2107,10 +2083,6 @@
       if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
         NMethodSweeper::log_sweep("disable_compiler");
       }
-      // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
-      // without having to consider the state in which the current thread is.
-      ThreadInVMfromUnknown in_vm;
-      NMethodSweeper::possibly_sweep();
     } else {
       disable_compilation_forever();
     }
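
With the calls removed from ciEnv.cpp, vtableStubs.cpp, compile.cpp, output.cpp, and sharedRuntime.cpp in this changeset, handle_full_code_cache() is reached through a single path; in sketch form:

    // CodeCache::allocate(size, code_blob_type)             [codeCache.cpp]
    //   heap->allocate(size) returns NULL
    //   heap->expand_by(CodeCacheExpansionSize) fails
    //     -> MutexUnlockerEx temporarily releases CodeCache_lock
    //     -> CompileBroker::handle_full_code_cache(code_blob_type)
    //          UseInterpreter = true; disable compilation or schedule sweeping
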
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -290,8 +290,6 @@
   static CompileQueue* _c2_compile_queue;
   static CompileQueue* _c1_compile_queue;
 
-  static GrowableArray<CompilerThread*>* _compiler_threads;
-
   // performance counters
   static PerfCounter* _perf_total_compilation;
   static PerfCounter* _perf_native_compilation;
@@ -339,8 +337,8 @@
 
   static volatile jint _print_compilation_warning;
 
-  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
-  static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
+  static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
+  static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
   static bool is_compile_blocking();
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -1077,7 +1077,6 @@
 address SignatureHandlerLibrary::set_handler_blob() {
   BufferBlob* handler_blob = BufferBlob::create("native signature handlers", blob_size);
   if (handler_blob == NULL) {
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return NULL;
   }
   address handler = handler_blob->code_begin();
--- a/hotspot/src/share/vm/memory/heap.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/memory/heap.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -171,13 +171,13 @@
 }
 
 
-void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
+void* CodeHeap::allocate(size_t instance_size) {
   size_t number_of_segments = size_to_segments(instance_size + header_size());
   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 
   // First check if we can satisfy request from freelist
   NOT_PRODUCT(verify());
-  HeapBlock* block = search_freelist(number_of_segments, is_critical);
+  HeapBlock* block = search_freelist(number_of_segments);
   NOT_PRODUCT(verify());
 
   if (block != NULL) {
@@ -191,15 +191,6 @@
   // Ensure minimum size for allocation to the heap.
   number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
 
-  if (!is_critical) {
-    // Make sure the allocation fits in the unallocated heap without using
-    // the CodeCacheMimimumFreeSpace that is reserved for critical allocations.
-    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
-      // Fail allocation
-      return NULL;
-    }
-  }
-
   if (_next_segment + number_of_segments <= _number_of_committed_segments) {
     mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
     HeapBlock* b =  block_at(_next_segment);
@@ -427,24 +418,17 @@
  * Search freelist for an entry on the list with the best fit.
  * @return NULL, if no one was found
  */
-FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
+FreeBlock* CodeHeap::search_freelist(size_t length) {
   FreeBlock* found_block = NULL;
   FreeBlock* found_prev  = NULL;
   size_t     found_length = 0;
 
   FreeBlock* prev = NULL;
   FreeBlock* cur = _freelist;
-  const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
 
   // Search for first block that fits
   while(cur != NULL) {
     if (cur->length() >= length) {
-      // Non critical allocations are not allowed to use the last part of the code heap.
-      // Make sure the end of the allocation doesn't cross into the last part of the code heap.
-      if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
-        // The freelist is sorted by address - if one fails, all consecutive will also fail.
-        break;
-      }
       // Remember block, its previous element, and its length
       found_block = cur;
       found_prev  = prev;
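
search_freelist() keeps its best-fit scan; only the critical-boundary check is gone. A minimal standalone sketch of a best-fit scan over a singly linked freelist (hypothetical types, not the HotSpot FreeBlock):

    #include <cstddef>

    struct FreeBlock {
      size_t length;
      FreeBlock* next;
    };

    // Return the smallest block with length >= wanted, or NULL if none fits.
    FreeBlock* best_fit(FreeBlock* freelist, size_t wanted) {
      FreeBlock* best = NULL;
      for (FreeBlock* cur = freelist; cur != NULL; cur = cur->next) {
        if (cur->length >= wanted && (best == NULL || cur->length < best->length)) {
          best = cur;
          if (best->length == wanted) break;  // exact fit, cannot improve
        }
      }
      return best;
    }
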
--- a/hotspot/src/share/vm/memory/heap.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/memory/heap.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -120,7 +120,7 @@
 
   // Toplevel freelist management
   void add_to_freelist(HeapBlock* b);
-  FreeBlock* search_freelist(size_t length, bool is_critical);
+  FreeBlock* search_freelist(size_t length);
 
   // Iteration helpers
   void*      next_free(HeapBlock* b) const;
@@ -140,8 +140,8 @@
   bool  expand_by(size_t size);                  // expands committed memory by size
 
   // Memory allocation
-  void* allocate  (size_t size, bool is_critical);  // allocates a block of size or returns NULL
-  void  deallocate(void* p);                        // deallocates a block
+  void* allocate (size_t size); // Allocate 'size' bytes in the code cache or return NULL
+  void  deallocate(void* p);    // Deallocate memory
 
   // Attributes
   char* low_boundary() const                     { return _memory.low_boundary (); }
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -535,7 +535,6 @@
     if (scratch_buffer_blob() == NULL) {
       // Let CompilerBroker disable further compilations.
       record_failure("Not enough space for scratch buffer in CodeCache");
-      CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
       return;
     }
   }
--- a/hotspot/src/share/vm/opto/output.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -1166,7 +1166,6 @@
   // Have we run out of code space?
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return NULL;
   }
   // Configure the code buffer.
@@ -1491,7 +1490,6 @@
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         C->record_failure("CodeCache is full");
-        CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
         return;
       }
 
@@ -1648,7 +1646,6 @@
   // One last check for failed CodeBuffer::expand:
   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     C->record_failure("CodeCache is full");
-    CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
     return;
   }
 
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -36,6 +36,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "utilities/exceptions.hpp"
 
 
 /*
@@ -55,26 +56,30 @@
 bool MethodHandles::_enabled = false; // set true after successful native linkage
 MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
 
-//------------------------------------------------------------------------------
-// MethodHandles::generate_adapters
-//
-void MethodHandles::generate_adapters() {
-  if (SystemDictionary::MethodHandle_klass() == NULL)  return;
+
+/**
+ * Generates method handle adapters. Returns false if memory allocation
+ * failed, true otherwise.
+ */
+bool MethodHandles::generate_adapters() {
+  if (SystemDictionary::MethodHandle_klass() == NULL) {
+    return true;
+  }
 
   assert(_adapter_code == NULL, "generate only once");
 
   ResourceMark rm;
   TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
   _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
-  if (_adapter_code == NULL)
-    vm_exit_out_of_memory(adapter_code_size, OOM_MALLOC_ERROR,
-                          "CodeCache: no room for MethodHandles adapters");
-  {
-    CodeBuffer code(_adapter_code);
-    MethodHandlesAdapterGenerator g(&code);
-    g.generate();
-    code.log_section_sizes("MethodHandlesAdapterBlob");
+  if (_adapter_code == NULL) {
+    return false;
   }
+
+  CodeBuffer code(_adapter_code);
+  MethodHandlesAdapterGenerator g(&code);
+  g.generate();
+  code.log_section_sizes("MethodHandlesAdapterBlob");
+  return true;
 }
 
 //------------------------------------------------------------------------------
@@ -1401,7 +1406,9 @@
   }
 
   if (enable_MH) {
-    MethodHandles::generate_adapters();
+    if (!MethodHandles::generate_adapters()) {
+      THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for method handle adapters");
+    }
     MethodHandles::set_enabled(true);
   }
 }
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -69,7 +69,7 @@
   enum { _suppress_defc = 1, _suppress_name = 2, _suppress_type = 4 };
 
   // Generate MethodHandles adapters.
-                              static void generate_adapters();
+  static bool generate_adapters();
 
   // Called from MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -306,6 +306,9 @@
   { "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "VerifyReflectionBytecodes",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "AutoShutdownNMT",               JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "NmethodSweepFraction",          JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "NmethodSweepCheckInterval",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
+  { "CodeCacheMinimumFreeSpace",     JDK_Version::jdk(9), JDK_Version::jdk(10) },
 #ifndef ZERO
   { "UseFastAccessorMethods",        JDK_Version::jdk(9), JDK_Version::jdk(10) },
   { "UseFastEmptyMethods",           JDK_Version::jdk(9), JDK_Version::jdk(10) },
@@ -2528,7 +2531,7 @@
 
   // Check lower bounds of the code cache
   // Template Interpreter code is approximately 3X larger in debug builds.
-  uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
+  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
   if (InitialCodeCacheSize < (uintx)os::vm_page_size()) {
     jio_fprintf(defaultStream::error_stream(),
                 "Invalid InitialCodeCacheSize=%dK. Must be at least %dK.\n", InitialCodeCacheSize/K,
@@ -2564,10 +2567,11 @@
     status = false;
   }
 
-  status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
   status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
   status &= verify_interval(CodeCacheMinBlockLength, 1, 100, "CodeCacheMinBlockLength");
   status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
+  status &= verify_interval(StartAggressiveSweepingAt, 0, 100, "StartAggressiveSweepingAt");
+
 
   int min_number_of_compiler_threads = get_min_number_of_compiler_threads();
   // The default CICompilerCount's value is CI_COMPILER_COUNT.
@@ -3985,12 +3989,6 @@
 #endif
 #endif
 
-  // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered)
-  if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
-    FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
-  }
-
-
   // Set heap size based on available physical memory
   set_heap_size();
 
@@ -4058,13 +4056,6 @@
   }
 
 #ifndef PRODUCT
-  if (CompileTheWorld) {
-    // Force NmethodSweeper to sweep whole CodeCache each time.
-    if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
-      NmethodSweepFraction = 1;
-    }
-  }
-
   if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
     if (use_vm_log()) {
       LogVMOutput = true;
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -2984,12 +2984,6 @@
   product(intx, SafepointTimeoutDelay, 10000,                               \
           "Delay in milliseconds for option SafepointTimeout")              \
                                                                             \
-  product(intx, NmethodSweepFraction, 16,                                   \
-          "Number of invocations of sweeper to cover all nmethods")         \
-                                                                            \
-  product(intx, NmethodSweepCheckInterval, 5,                               \
-          "Compilers wake up every n seconds to possibly sweep nmethods")   \
-                                                                            \
   product(intx, NmethodSweepActivity, 10,                                   \
           "Removes cold nmethods from code cache if > 0. Higher values "    \
           "result in more aggressive sweeping")                             \
@@ -3378,9 +3372,6 @@
   product_pd(uintx, NonNMethodCodeHeapSize,                                 \
           "Size of code heap with non-nmethods (in bytes)")                 \
                                                                             \
-  product(uintx, CodeCacheMinimumFreeSpace, 500*K,                          \
-          "When less than X space left, we stop compiling")                 \
-                                                                            \
   product_pd(uintx, CodeCacheExpansionSize,                                 \
           "Code cache expansion size (in bytes)")                           \
                                                                             \
@@ -3393,6 +3384,11 @@
   product(bool, UseCodeCacheFlushing, true,                                 \
           "Remove cold/old nmethods from the code cache")                   \
                                                                             \
+  product(uintx, StartAggressiveSweepingAt, 10,                             \
+          "Start aggressive sweeping if X[%] of the code cache is free. "   \
+          "Segmented code cache: X[%] of the non-profiled heap. "           \
+          "Non-segmented code cache: X[%] of the total code cache")         \
+                                                                            \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
           "Minimal number of lookupswitch entries for rewriting to binary " \
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -61,7 +61,7 @@
 Mutex*   StringTable_lock             = NULL;
 Monitor* StringDedupQueue_lock        = NULL;
 Mutex*   StringDedupTable_lock        = NULL;
-Mutex*   CodeCache_lock               = NULL;
+Monitor* CodeCache_lock               = NULL;
 Mutex*   MethodData_lock              = NULL;
 Mutex*   RetData_lock                 = NULL;
 Monitor* VMOperationQueue_lock        = NULL;
@@ -205,7 +205,7 @@
   }
   def(ParGCRareEvent_lock          , Mutex  , leaf     ,   true );
   def(DerivedPointerTableGC_lock   , Mutex,   leaf,        true );
-  def(CodeCache_lock               , Mutex  , special,     true );
+  def(CodeCache_lock               , Monitor, special,     true );
   def(Interrupt_lock               , Monitor, special,     true ); // used for interrupt processing
   def(RawMonitor_lock              , Mutex,   special,     true );
   def(OopMapCacheAlloc_lock        , Mutex,   leaf,        true ); // used for oop_map_cache allocation.
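
CodeCache_lock changes from Mutex to Monitor because the sweeper thread now blocks on it with wait() and the allocation path wakes it with notify(); in HotSpot only a Monitor is meant to be waited on. The protocol, reduced to its core (simplified from sweeper.cpp and codeCache.cpp in this changeset):

    // Sweeper side (NMethodSweeper::sweeper_loop):
    //   MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    //   bool timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
    //   if (!timeout) possibly_sweep();   // woken by a notification
    //
    // Allocation side (NMethodSweeper::notify, called from CodeCache::allocate):
    //   assert_locked_or_safepoint(CodeCache_lock);
    //   CodeCache_lock->notify();
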
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -53,7 +53,7 @@
 extern Mutex*   StringTable_lock;                // a lock on the interned string table
 extern Monitor* StringDedupQueue_lock;           // a lock on the string deduplication queue
 extern Mutex*   StringDedupTable_lock;           // a lock on the string deduplication table
-extern Mutex*   CodeCache_lock;                  // a lock on the CodeCache, rank is special, use MutexLockerEx
+extern Monitor* CodeCache_lock;                  // a lock on the CodeCache, rank is special, use MutexLockerEx
 extern Mutex*   MethodData_lock;                 // a lock on installation of method data
 extern Mutex*   RetData_lock;                    // a lock on installation of RetData inside method data
 extern Mutex*   DerivedPointerTableGC_lock;      // a lock to protect the derived pointer table
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -2421,8 +2421,6 @@
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some non descript Java thread.
-      MutexUnlocker mu(AdapterHandlerLibrary_lock);
-      CompileBroker::handle_full_code_cache(CodeBlobType::NonNMethod);
       return NULL; // Out of CodeCache space
     }
     entry->relocate(new_adapter->content_begin());
@@ -2594,9 +2592,6 @@
       CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
     }
     nm->post_compiled_method_load_event();
-  } else {
-    // CodeCache is full, disable compilation
-    CompileBroker::handle_full_code_cache(CodeBlobType::MethodNonProfiled);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -52,7 +52,6 @@
 class SweeperRecord {
  public:
   int traversal;
-  int invocation;
   int compile_id;
   long traversal_mark;
   int state;
@@ -62,10 +61,9 @@
   int line;
 
   void print() {
-      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
+      tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                     PTR_FORMAT " state = %d traversal_mark %d line = %d",
                     traversal,
-                    invocation,
                     compile_id,
                     kind == NULL ? "" : kind,
                     uep,
@@ -117,7 +115,6 @@
   if (_records != NULL) {
     _records[_sweep_index].traversal = _traversals;
     _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
-    _records[_sweep_index].invocation = _sweep_fractions_left;
     _records[_sweep_index].compile_id = nm->compile_id();
     _records[_sweep_index].kind = nm->compile_kind();
     _records[_sweep_index].state = nm->_state;
@@ -127,6 +124,14 @@
     _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
   }
 }
+
+void NMethodSweeper::init_sweeper_log() {
+  if (LogSweeper && _records == NULL) {
+    // Create the ring buffer for the logging code
+    _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
+    memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
+  }
+}
 #else
 #define SWEEP(nm)
 #endif
@@ -142,8 +147,6 @@
 int      NMethodSweeper::_marked_for_reclamation_count = 0;    // Nof. nmethods marked for reclaim in current sweep
 
 volatile bool NMethodSweeper::_should_sweep            = true; // Indicates if we should invoke the sweeper
-volatile int  NMethodSweeper::_sweep_fractions_left    = 0;    // Nof. invocations left until we are completed with this pass
-volatile int  NMethodSweeper::_sweep_started           = 0;    // Flag to control conc sweeper
 volatile int  NMethodSweeper::_bytes_changed           = 0;    // Counts the total nmethod size if the nmethod changed from:
                                                                //   1) alive       -> not_entrant
                                                                //   2) not_entrant -> zombie
@@ -190,13 +193,15 @@
   }
   return _hotness_counter_reset_val;
 }
-bool NMethodSweeper::sweep_in_progress() {
-  return !_current.end();
+bool NMethodSweeper::wait_for_stack_scanning() {
+  return _current.end();
 }
 
-// Scans the stacks of all Java threads and marks activations of not-entrant methods.
-// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
-// safepoint.
+/**
+ * Scans the stacks of all Java threads and marks activations of not-entrant methods.
+ * No need to synchronize access, since 'mark_active_nmethods' is always executed at a
+ * safepoint.
+ */
 void NMethodSweeper::mark_active_nmethods() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   // If we do not want to reclaim not-entrant or zombie methods there is no need
@@ -210,9 +215,8 @@
 
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid");
-  if (!sweep_in_progress()) {
+  if (wait_for_stack_scanning()) {
     _seen = 0;
-    _sweep_fractions_left = NmethodSweepFraction;
     _current = NMethodIterator();
     // Initialize to first nmethod
     _current.next();
@@ -231,6 +235,64 @@
 
   OrderAccess::storestore();
 }
+
+/**
+ * This function triggers a VM operation that does stack scanning of active
+ * methods. Stack scanning is mandatory for the sweeper to make progress.
+ */
+void NMethodSweeper::do_stack_scanning() {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+  if (wait_for_stack_scanning()) {
+    VM_MarkActiveNMethods op;
+    VMThread::execute(&op);
+    _should_sweep = true;
+  }
+}
+
+void NMethodSweeper::sweeper_loop() {
+  bool timeout;
+  while (true) {
+    {
+      ThreadBlockInVM tbivm(JavaThread::current());
+      MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      const long wait_time = 60 * 60 * 24 * 1000; // Wait at most 24 hours for a notification
+      timeout = CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, wait_time);
+    }
+    if (!timeout) {
+      possibly_sweep();
+    }
+  }
+}
+
+/**
+ * Wakes up the sweeper thread to possibly sweep.
+ */
+void NMethodSweeper::notify(int code_blob_type) {
+  // Makes sure that we do not invoke the sweeper too often during startup.
+  double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
+  double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
+  if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
+    assert_locked_or_safepoint(CodeCache_lock);
+    CodeCache_lock->notify();
+  }
+}
+
+/**
+ * Handle a safepoint request
+ */
+void NMethodSweeper::handle_safepoint_request() {
+  if (SafepointSynchronize::is_synchronizing()) {
+    if (PrintMethodFlushing && Verbose) {
+      tty->print_cr("### Sweep at %d out of %d, yielding to safepoint", _seen, CodeCache::nof_nmethods());
+    }
+    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    JavaThread* thread = JavaThread::current();
+    ThreadBlockInVM tbivm(thread);
+    thread->java_suspend_self();
+  }
+}
+
 /**
  * This function invokes the sweeper if at least one of the three conditions is met:
  *    (1) The code cache is getting full
@@ -239,11 +301,6 @@
  */
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  // Only compiler threads are allowed to sweep
-  if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
-    return;
-  }
-
   // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
   // This is one of the two places where should_sweep can be set to true. The general
   // idea is as follows: If there is enough free space in the code cache, there is no
@@ -280,46 +337,37 @@
     }
   }
 
-  if (_should_sweep && _sweep_fractions_left > 0) {
-    // Only one thread at a time will sweep
-    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
-    if (old != 0) {
-      return;
-    }
-#ifdef ASSERT
-    if (LogSweeper && _records == NULL) {
-      // Create the ring buffer for the logging code
-      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
-      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
-    }
-#endif
+  // Force stack scanning if the amount of free space in the code cache drops below
+  // StartAggressiveSweepingAt percent (10% by default). We force stack scanning only
+  // if the non-profiled code heap gets full, since critical allocations go to the
+  // non-profiled heap and we must make sure that there is enough space.
+  double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
+  if (free_percent <= StartAggressiveSweepingAt) {
+    do_stack_scanning();
+  }
 
-    if (_sweep_fractions_left > 0) {
-      sweep_code_cache();
-      _sweep_fractions_left--;
-    }
+  if (_should_sweep) {
+    init_sweeper_log();
+    sweep_code_cache();
+  }
 
-    // We are done with sweeping the code cache once.
-    if (_sweep_fractions_left == 0) {
-      _total_nof_code_cache_sweeps++;
-      _last_sweep = _time_counter;
-      // Reset flag; temporarily disables sweeper
-      _should_sweep = false;
-      // If there was enough state change, 'possibly_enable_sweeper()'
-      // sets '_should_sweep' to true
-      possibly_enable_sweeper();
-      // Reset _bytes_changed only if there was enough state change. _bytes_changed
-      // can further increase by calls to 'report_state_change'.
-      if (_should_sweep) {
-        _bytes_changed = 0;
-      }
-    }
-    // Release work, because another compiler thread could continue.
-    OrderAccess::release_store((int*)&_sweep_started, 0);
+  // We are done with sweeping the code cache once.
+  _total_nof_code_cache_sweeps++;
+  _last_sweep = _time_counter;
+  // Reset flag; temporarily disables sweeper
+  _should_sweep = false;
+  // If there was enough state change, 'possibly_enable_sweeper()'
+  // sets '_should_sweep' to true
+  possibly_enable_sweeper();
+  // Reset _bytes_changed only if there was enough state change. _bytes_changed
+  // can further increase by calls to 'report_state_change'.
+  if (_should_sweep) {
+    _bytes_changed = 0;
   }
 }
 
 void NMethodSweeper::sweep_code_cache() {
+  ResourceMark rm;
   Ticks sweep_start_counter = Ticks::now();
 
   _flushed_count                = 0;
@@ -327,25 +375,10 @@
   _marked_for_reclamation_count = 0;
 
   if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
-  }
-
-  if (!CompileBroker::should_compile_new_jobs()) {
-    // If we have turned off compilations we might as well do full sweeps
-    // in order to reach the clean state faster. Otherwise the sleeping compiler
-    // threads will slow down sweeping.
-    _sweep_fractions_left = 1;
+    tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
   }
 
-  // We want to visit all nmethods after NmethodSweepFraction
-  // invocations so divide the remaining number of nmethods by the
-  // remaining number of invocations.  This is only an estimate since
-  // the number of nmethods changes during the sweep so the final
-  // stage must iterate until it there are no more nmethods.
-  int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
   int swept_count = 0;
-
-
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
@@ -354,19 +387,9 @@
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
    // Iterate until there are no more nmethods
-    while ((swept_count < todo || _sweep_fractions_left == 1) && !_current.end()) {
+    while (!_current.end()) {
       swept_count++;
-      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
-        if (PrintMethodFlushing && Verbose) {
-          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
-        }
-        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-
-        assert(Thread::current()->is_Java_thread(), "should be java thread");
-        JavaThread* thread = (JavaThread*)Thread::current();
-        ThreadBlockInVM tbivm(thread);
-        thread->java_suspend_self();
-      }
+      handle_safepoint_request();
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
@@ -382,7 +405,7 @@
     }
   }
 
-  assert(_sweep_fractions_left > 1 || _current.end(), "must have scanned the whole cache");
+  assert(_current.end(), "must have scanned the whole cache");
 
   const Ticks sweep_end_counter = Ticks::now();
   const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
@@ -397,7 +420,6 @@
     event.set_starttime(sweep_start_counter);
     event.set_endtime(sweep_end_counter);
     event.set_sweepIndex(_traversals);
-    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
     event.set_sweptCount(swept_count);
     event.set_flushedCount(_flushed_count);
     event.set_markedCount(_marked_for_reclamation_count);
@@ -407,15 +429,12 @@
 
 #ifdef ASSERT
   if(PrintMethodFlushing) {
-    tty->print_cr("### sweeper:      sweep time(%d): "
-      INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
+    tty->print_cr("### sweeper:      sweep time: " INT64_FORMAT, (jlong)sweep_time.value());
   }
 #endif
 
-  if (_sweep_fractions_left == 1) {
-    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
-    log_sweep("finished");
-  }
+  _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
+  log_sweep("finished");
 
   // Sweeper is the only case where memory is released, check here if it
   // is time to restart the compiler. Only checking if there is a certain
@@ -459,10 +478,12 @@
 
 class NMethodMarker: public StackObj {
  private:
-  CompilerThread* _thread;
+  CodeCacheSweeperThread* _thread;
  public:
   NMethodMarker(nmethod* nm) {
-    _thread = CompilerThread::current();
+    JavaThread* current = JavaThread::current();
+    assert(current->is_Code_cache_sweeper_thread(), "Must be");
+    _thread = (CodeCacheSweeperThread*)current;
     if (!nm->is_zombie() && !nm->is_unloaded()) {
       // Only expose live nmethods for scanning
       _thread->set_scanned_nmethod(nm);
@@ -473,7 +494,7 @@
   }
 };
 
-void NMethodSweeper::release_nmethod(nmethod *nm) {
+void NMethodSweeper::release_nmethod(nmethod* nm) {
   // Clean up any CompiledICHolders
   {
     ResourceMark rm;
@@ -490,7 +511,7 @@
   nm->flush();
 }
 
-int NMethodSweeper::process_nmethod(nmethod *nm) {
+int NMethodSweeper::process_nmethod(nmethod* nm) {
   assert(!CodeCache_lock->owned_by_self(), "just checking");
 
   int freed_memory = 0;
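
The wake-up condition in NMethodSweeper::notify() above is easy to misread. With the default -XX:StartAggressiveSweepingAt=10 (the flag can be set on the command line, e.g. java -XX:StartAggressiveSweepingAt=25 MyApp, where MyApp is a placeholder) it works out as follows:

    // start_threshold            = 100.0 / 10      = 10.0
    // aggressive_sweep_threshold = MIN2(10.0, 1.1) = 1.1
    // notify() wakes the sweeper when reverse_free_ratio >= 1.1, i.e. when the
    // free fraction of the heap drops below 1/1.1 (about 91%). So once roughly
    // 9% of the heap is in use, allocations start waking the sweeper.
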
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -49,9 +49,7 @@
//     remove the nmethod, all inline caches (IC) that point to the nmethod must be
 //     cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
 //     state change happens during separate sweeps. It may take at least 3 sweeps before an
-//     nmethod's space is freed. Sweeping is currently done by compiler threads between
-//     compilations or at least each 5 sec (NmethodSweepCheckInterval) when the code cache
-//     is full.
+//     nmethod's space is freed.
 
 class NMethodSweeper : public AllStatic {
   static long      _traversals;                   // Stack scan count, also sweep ID.
@@ -64,7 +62,6 @@
   static int       _zombified_count;              // Nof. nmethods made zombie in current sweep
   static int       _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
 
-  static volatile int  _sweep_fractions_left;     // Nof. invocations left until we are completed with this pass
   static volatile int  _sweep_started;            // Flag to control conc sweeper
   static volatile bool _should_sweep;             // Indicates if we should invoke the sweeper
   static volatile int _bytes_changed;             // Counts the total nmethod size if the nmethod changed from:
@@ -85,8 +82,12 @@
   static int  process_nmethod(nmethod *nm);
   static void release_nmethod(nmethod* nm);
 
-  static bool sweep_in_progress();
+  static void init_sweeper_log() NOT_DEBUG_RETURN;
+  static bool wait_for_stack_scanning();
   static void sweep_code_cache();
+  static void handle_safepoint_request();
+  static void do_stack_scanning();
+  static void possibly_sweep();
 
  public:
   static long traversal_count()              { return _traversals; }
@@ -106,7 +107,8 @@
 #endif
 
   static void mark_active_nmethods();      // Invoked at the end of each safepoint
-  static void possibly_sweep();            // Compiler threads call this to sweep
+  static void sweeper_loop();
+  static void notify(int code_blob_type);  // Possibly start the sweeper thread.
 
   static int hotness_counter_reset_val();
   static void report_state_change(nmethod* nm);
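
The private entry points above replace the NmethodSweepFraction scheme: instead
of compiler threads sweeping a fraction of the cache between compilations, one
thread parks in sweeper_loop() until notify() signals that sweeping is
worthwhile, then sweeps the whole cache in a single pass via possibly_sweep().
A sketch of the loop, assuming CodeCache_lock doubles as the wait monitor (the
timeout value is purely illustrative):

  void NMethodSweeper::sweeper_loop() {
    while (true) {
      {
        ThreadBlockInVM tbivm(JavaThread::current());
        MutexLockerEx waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        // Park until notify() is called or the timeout elapses.
        CodeCache_lock->wait(Mutex::_no_safepoint_check_flag, 1000);
      }
      possibly_sweep();
    }
  }
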
--- a/hotspot/src/share/vm/runtime/thread.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -66,6 +66,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/statSampler.hpp"
 #include "runtime/stubRoutines.hpp"
+#include "runtime/sweeper.hpp"
 #include "runtime/task.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
@@ -1551,6 +1552,7 @@
 
 // Remove this ifdef when C1 is ported to the compiler interface.
 static void compiler_thread_entry(JavaThread* thread, TRAPS);
+static void sweeper_thread_entry(JavaThread* thread, TRAPS);
 
 JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) :
                        Thread()
@@ -3170,6 +3172,10 @@
   CompileBroker::compiler_thread_loop();
 }
 
+static void sweeper_thread_entry(JavaThread* thread, TRAPS) {
+  NMethodSweeper::sweeper_loop();
+}
+
 // Create a CompilerThread
 CompilerThread::CompilerThread(CompileQueue* queue,
                                CompilerCounters* counters)
@@ -3180,7 +3186,6 @@
   _queue = queue;
   _counters = counters;
   _buffer_blob = NULL;
-  _scanned_nmethod = NULL;
   _compiler = NULL;
 
 #ifndef PRODUCT
@@ -3188,7 +3193,12 @@
 #endif
 }
 
-void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
+// Create sweeper thread
+CodeCacheSweeperThread::CodeCacheSweeperThread()
+: JavaThread(&sweeper_thread_entry) {
+  _scanned_nmethod = NULL;
+}
+void CodeCacheSweeperThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   JavaThread::oops_do(f, cld_f, cf);
   if (_scanned_nmethod != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so
--- a/hotspot/src/share/vm/runtime/thread.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -305,6 +305,7 @@
   virtual bool is_VM_thread()       const            { return false; }
   virtual bool is_Java_thread()     const            { return false; }
   virtual bool is_Compiler_thread() const            { return false; }
+  virtual bool is_Code_cache_sweeper_thread() const  { return false; }
   virtual bool is_hidden_from_external_view() const  { return false; }
   virtual bool is_jvmti_agent_thread() const         { return false; }
   // True iff the thread can perform GC operations at a safepoint.
@@ -1746,6 +1747,24 @@
   return (CompilerThread*)this;
 }
 
+// Dedicated thread to sweep the code cache
+class CodeCacheSweeperThread : public JavaThread {
+  nmethod*       _scanned_nmethod; // nmethod being scanned by the sweeper
+ public:
+  CodeCacheSweeperThread();
+  // Track the nmethod currently being scanned by the sweeper
+  void set_scanned_nmethod(nmethod* nm) {
+    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
+    _scanned_nmethod = nm;
+  }
+
+  bool is_Code_cache_sweeper_thread() const { return true; }
+  // GC support
+  // Apply "f->do_oop" to all root oops in "this".
+  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
+};
+
 // A thread used for Compilation.
 class CompilerThread : public JavaThread {
   friend class VMStructs;
@@ -1758,7 +1777,6 @@
   CompileQueue*     _queue;
   BufferBlob*       _buffer_blob;
 
-  nmethod*          _scanned_nmethod;  // nmethod being scanned by the sweeper
   AbstractCompiler* _compiler;
 
  public:
@@ -1792,28 +1810,17 @@
     _log = log;
   }
 
-  // GC support
-  // Apply "f->do_oop" to all root oops in "this".
-  // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
-
 #ifndef PRODUCT
  private:
   IdealGraphPrinter *_ideal_graph_printer;
  public:
-  IdealGraphPrinter *ideal_graph_printer()                       { return _ideal_graph_printer; }
-  void set_ideal_graph_printer(IdealGraphPrinter *n)             { _ideal_graph_printer = n; }
+  IdealGraphPrinter *ideal_graph_printer()           { return _ideal_graph_printer; }
+  void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; }
 #endif
 
   // Get/set the thread's current task
-  CompileTask*  task()                           { return _task; }
-  void          set_task(CompileTask* task)      { _task = task; }
-
-  // Track the nmethod currently being scanned by the sweeper
-  void          set_scanned_nmethod(nmethod* nm) {
-    assert(_scanned_nmethod == NULL || nm == NULL, "should reset to NULL before writing a new value");
-    _scanned_nmethod = nm;
-  }
+  CompileTask* task()                      { return _task; }
+  void         set_task(CompileTask* task) { _task = task; }
 };
 
 inline CompilerThread* CompilerThread::current() {
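
Since _scanned_nmethod moved from CompilerThread to CodeCacheSweeperThread, the
nmethod being swept is published through set_scanned_nmethod() so that oops_do()
keeps its oops alive across safepoints. A hedged usage sketch, modeled on the
NMethodMarker in sweeper.cpp above (nm stands in for the nmethod currently being
processed):

  CodeCacheSweeperThread* sweeper = (CodeCacheSweeperThread*)JavaThread::current();
  sweeper->set_scanned_nmethod(nm);    // expose nm to GC while it is scanned
  // ... clean inline caches, update the nmethod's state, etc. ...
  sweeper->set_scanned_nmethod(NULL);  // reset before moving to the next nmethod
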
--- a/hotspot/src/share/vm/runtime/vm_operations.cpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp	Fri Oct 24 14:25:46 2014 +0200
@@ -111,6 +111,9 @@
   CodeCache::make_marked_nmethods_zombies();
 }
 
+void VM_MarkActiveNMethods::doit() {
+  NMethodSweeper::mark_active_nmethods();
+}
 
 VM_DeoptimizeFrame::VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id) {
   _thread = thread;
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Fri Oct 24 14:25:46 2014 +0200
@@ -100,6 +100,7 @@
   template(RotateGCLog)                           \
   template(WhiteBoxOperation)                     \
   template(ClassLoaderStatsOperation)             \
+  template(MarkActiveNMethods)                    \
   template(PrintCompileQueue)                     \
   template(PrintCodeList)                         \
   template(PrintCodeCache)                        \
@@ -252,6 +253,13 @@
   bool allow_nested_vm_operations() const        { return true; }
 };
 
+class VM_MarkActiveNMethods: public VM_Operation {
+ public:
+  VM_MarkActiveNMethods() {}
+  VMOp_Type type() const                         { return VMOp_MarkActiveNMethods; }
+  void doit();
+  bool allow_nested_vm_operations() const        { return true; }
+};
 
 // Deopt helper that can deoptimize frames in threads other than the
 // current thread.  Only used through Deoptimization::deoptimize_frame.
--- a/hotspot/src/share/vm/trace/trace.xml	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/src/share/vm/trace/trace.xml	Fri Oct 24 14:25:46 2014 +0200
@@ -383,7 +383,6 @@
     <event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
        has_thread="true" is_requestable="false" is_constant="false">
       <value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
-      <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
       <value type="UINT" field="sweptCount" label="Methods Swept"/>
       <value type="UINT" field="flushedCount" label="Methods Flushed"/>
       <value type="UINT" field="markedCount" label="Methods Reclaimed"/>
--- a/hotspot/test/compiler/startup/SmallCodeCacheStartup.java	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/test/compiler/startup/SmallCodeCacheStartup.java	Fri Oct 24 14:25:46 2014 +0200
@@ -27,10 +27,20 @@
  * @summary Test ensures that there is no crash if there is not enough ReservedCodeCacheSize
  *          to initialize all compiler threads. The option -Xcomp gives the VM more time
  *          to trigger the old bug.
- * @run main/othervm -XX:ReservedCodeCacheSize=3m -XX:CICompilerCount=64 -Xcomp SmallCodeCacheStartup
+ * @library /testlibrary
  */
+import com.oracle.java.testlibrary.*;
+
 public class SmallCodeCacheStartup {
   public static void main(String[] args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m",
+                                                              "-XX:CICompilerCount=64",
+                                                              "-Xcomp",
+                                                              "-version");
+    // Wait for the child VM; the test only checks that startup does not crash.
+    // Running "-version" (rather than this class again) avoids forking recursively.
+    pb.start().waitFor();
+
     System.out.println("TEST PASSED");
   }
 }
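
If stricter checking is wanted, the testlibrary's OutputAnalyzer can assert on
the child VM's output instead of merely waiting for it; a hedged variant (the
hs_err banner text below is an assumption):

  OutputAnalyzer out = new OutputAnalyzer(pb.start());
  out.shouldNotContain("A fatal error has been detected");
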
--- a/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java	Fri Oct 24 08:35:29 2014 +0200
+++ b/hotspot/test/gc/g1/TestHumongousCodeCacheRoots.java	Fri Oct 24 14:25:46 2014 +0200
@@ -135,7 +135,6 @@
       "-XX:+UnlockDiagnosticVMOptions",
       "-XX:InitiatingHeapOccupancyPercent=1", // strong code root marking
       "-XX:+G1VerifyHeapRegionCodeRoots", "-XX:+VerifyAfterGC", // make sure that verification is run
-      "-XX:NmethodSweepFraction=1", "-XX:NmethodSweepCheckInterval=1",  // make the code cache sweep more predictable
     };
     runTest("-client", baseArguments);
     runTest("-server", baseArguments);