hotspot/src/share/vm/memory/sharedHeap.cpp
changeset 22550 820966182ab9
parent 21561 c619b1cb4554
child 22882 195c8f70d605
--- hotspot/src/share/vm/memory/sharedHeap.cpp	(22549:d1ef75b0a43a)
+++ hotspot/src/share/vm/memory/sharedHeap.cpp	(22550:820966182ab9)
@@ -135,11 +135,10 @@
 SharedHeap::StrongRootsScope::~StrongRootsScope() {
   // nothing particular
 }
 
 void SharedHeap::process_strong_roots(bool activate_scope,
-                                      bool is_scavenging,
                                       ScanningOption so,
                                       OopClosure* roots,
                                       CodeBlobClosure* code_roots,
                                       KlassClosure* klass_closure) {
   StrongRootsScope srs(this, activate_scope);
@@ -155,13 +154,15 @@
   }
   // Global (strong) JNI handles
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
     JNIHandles::oops_do(roots);
 
+  CLDToOopClosure roots_from_clds(roots);
+  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
+  // CLDs which are strongly reachable from the thread stacks.
+  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
   // All threads execute this; the individual threads are task groups.
-  CLDToOopClosure roots_from_clds(roots);
-  CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
   if (CollectedHeap::use_parallel_gc_threads()) {
     Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
   } else {
     Threads::oops_do(roots, roots_from_clds_p, code_roots);
   }
@@ -185,13 +186,13 @@
     }
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
     if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
     } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
     }
   }
 
   // All threads execute the following. A specific chunk of buckets
   // from the StringTable are the individual tasks.
@@ -202,21 +203,22 @@
       StringTable::oops_do(roots);
     }
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-    if (so & SO_CodeCache) {
+    if (so & SO_ScavengeCodeCache) {
       assert(code_roots != NULL, "must supply closure for code cache");
 
-      if (is_scavenging) {
-        // We only visit parts of the CodeCache when scavenging.
-        CodeCache::scavenge_root_nmethods_do(code_roots);
-      } else {
-        // CMSCollector uses this to do intermediate-strength collections.
-        // We scan the entire code cache, since CodeCache::do_unloading is not called.
-        CodeCache::blobs_do(code_roots);
-      }
+      // We only visit parts of the CodeCache when scavenging.
+      CodeCache::scavenge_root_nmethods_do(code_roots);
+    }
+    if (so & SO_AllCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // CMSCollector uses this to do intermediate-strength collections.
+      // We scan the entire code cache, since CodeCache::do_unloading is not called.
+      CodeCache::blobs_do(code_roots);
+    }
     // Verify that the code cache contents are not subject to
     // movement by a scavenging collection.
     DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
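
Note on the shape of the change: after these hunks, process_strong_roots() no longer takes an is_scavenging flag; callers express the scope of the walk entirely through the ScanningOption bitmask (SO_ScavengeCodeCache vs. SO_AllCodeCache for the code cache, SO_SystemClasses vs. SO_AllClasses for class scanning, with the CLD-from-thread-stacks closure installed only in the SO_SystemClasses case). The standalone sketch below models only that bitmask-dispatch pattern: the enum values mirror the names in the patch, but the closure struct, the two walker functions, and process_code_cache_roots() are simplified stand-ins for illustration, not HotSpot declarations.

// Standalone model of the ScanningOption dispatch in the patch above.
// All types and functions here are simplified stand-ins, not HotSpot code.
#include <cstdio>

enum ScanningOption {              // bit flags a caller ORs together
  SO_None              = 0x0,
  SO_AllClasses        = 0x1,
  SO_SystemClasses     = 0x2,
  SO_ScavengeCodeCache = 0x4,      // visit only scavenge-root nmethods
  SO_AllCodeCache      = 0x8       // visit the entire code cache
};

struct CodeBlobClosure {           // stand-in for the real closure type
  const char* name;
};

// Placeholder walkers standing in for CodeCache::scavenge_root_nmethods_do
// and CodeCache::blobs_do.
static void scavenge_root_nmethods_do(CodeBlobClosure* cl) {
  std::printf("walking scavenge-root nmethods only (%s)\n", cl->name);
}
static void blobs_do(CodeBlobClosure* cl) {
  std::printf("walking the entire code cache (%s)\n", cl->name);
}

// Mirrors the post-patch control flow: the ScanningOption flags, not a
// separate is_scavenging boolean, decide how much of the code cache to walk.
static void process_code_cache_roots(int so, CodeBlobClosure* code_roots) {
  if (so & SO_ScavengeCodeCache) {
    scavenge_root_nmethods_do(code_roots);   // young/scavenging collections
  }
  if (so & SO_AllCodeCache) {
    blobs_do(code_roots);                    // full-strength collections (e.g. CMS)
  }
}

int main() {
  CodeBlobClosure cl = { "demo closure" };
  process_code_cache_roots(SO_ScavengeCodeCache, &cl);             // scavenge request
  process_code_cache_roots(SO_AllCodeCache | SO_AllClasses, &cl);  // full-GC request
  return 0;
}

Unlike the old if/else on is_scavenging, the two code-cache flags are checked independently in the new code, so a caller that set both would get both walks; each collection is expected to request the one it needs.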