src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp
changeset 52925 9c18c9d839d3
child 53015 632c4baddbb8
       
     1 /*
       
     2  * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
       
     3  *
       
     4  * This code is free software; you can redistribute it and/or modify it
       
     5  * under the terms of the GNU General Public License version 2 only, as
       
     6  * published by the Free Software Foundation.
       
     7  *
       
     8  * This code is distributed in the hope that it will be useful, but WITHOUT
       
     9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
       
    11  * version 2 for more details (a copy is included in the LICENSE file that
       
    12  * accompanied this code).
       
    13  *
       
    14  * You should have received a copy of the GNU General Public License version
       
    15  * 2 along with this work; if not, write to the Free Software Foundation,
       
    16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
       
    17  *
       
    18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
       
    19  * or visit www.oracle.com if you need additional information or have any
       
    20  * questions.
       
    21  *
       
    22  */
       
    23 
       
    24 #include "precompiled.hpp"
       
    25 
       
    26 #include "classfile/classLoaderData.hpp"
       
    27 #include "classfile/classLoaderDataGraph.hpp"
       
    28 #include "gc/shared/referenceProcessor.hpp"
       
    29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
       
    30 #include "gc/shared/workgroup.hpp"
       
    31 #include "gc/shared/weakProcessor.hpp"
       
    32 #include "gc/shared/weakProcessor.inline.hpp"
       
    33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
       
    34 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
       
    35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
       
    36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
       
    37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
       
    38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
       
    39 #include "gc/shenandoah/shenandoahHeap.hpp"
       
    40 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
       
    41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
       
    42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
       
    43 #include "gc/shenandoah/shenandoahHeuristics.hpp"
       
    44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
       
    45 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
       
    46 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
       
    47 #include "gc/shenandoah/shenandoahStringDedup.hpp"
       
    48 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
       
    49 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
       
    50 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
       
    51 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
       
    52 #include "gc/shenandoah/shenandoahUtils.hpp"
       
    53 #include "gc/shenandoah/shenandoahVerifier.hpp"
       
    54 
       
    55 #include "memory/iterator.hpp"
       
    56 #include "memory/metaspace.hpp"
       
    57 #include "memory/resourceArea.hpp"
       
    58 
       
    59 /**
       
     60  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp; however, this is not an SATB algorithm.

     61  * We are using the buffer as a generic oop buffer to enqueue new values in concurrent oop stores; in other words,

     62  * the algorithm is incremental-update-based.
       
    63  *
       
    64  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
       
    65  * several reasons:
       
    66  * - We will not reclaim them in this cycle anyway, because they are not in the
       
    67  *   cset
       
     68  * - Traversing them would otherwise make up the bulk of the work during final-pause
       
    69  * - It also shortens the concurrent cycle because we don't need to
       
    70  *   pointlessly traverse through newly allocated objects.
       
    71  * - As a nice side-effect, it solves the I-U termination problem (mutators
       
    72  *   cannot outrun the GC by allocating like crazy)
       
    73  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
       
     74  *   target object of stores if it's new. Treating new objects as live implicitly
       
    75  *   achieves the same, but without extra barriers. I think the effect of
       
    76  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
       
    77  *   particular, we will not see the head of a completely new long linked list
       
    78  *   in final-pause and end up traversing huge chunks of the heap there.
       
    79  * - We don't need to see/update the fields of new objects either, because they
       
    80  *   are either still null, or anything that's been stored into them has been
       
    81  *   evacuated+enqueued before (and will thus be treated later).
       
    82  *
       
    83  * We achieve this by setting TAMS for each region, and everything allocated
       
     84  * beyond TAMS will be 'implicitly marked'.
       
    85  *
       
    86  * Gotchas:
       
     87  * - While we want new objects to be implicitly marked, we don't want to count

     88  *   them as live. Otherwise the next cycle wouldn't pick them up and consider

     89  *   them for cset. This means that we need to protect such regions from

     90  *   getting accidentally trashed at the end of the traversal cycle. This is why I
       
    91  *   keep track of alloc-regions and check is_alloc_region() in the trashing
       
    92  *   code.
       
    93  * - We *need* to traverse through evacuated objects. Those objects are
       
    94  *   pre-existing, and any references in them point to interesting objects that
       
    95  *   we need to see. We also want to count them as live, because we just
       
    96  *   determined that they are alive :-) I achieve this by upping TAMS
       
    97  *   concurrently for every gclab/gc-shared alloc before publishing the
       
    98  *   evacuated object. This way, the GC threads will not consider such objects
       
     99  *   implicitly marked, and will traverse through them as normal.
       
   100  */
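        //
        // Illustrative sketch (editor's addition, not part of this changeset): under the scheme
        // described above, the "implicitly marked" test reduces to an address comparison against
        // the region's TAMS. The helper name below is hypothetical; top_at_mark_start() is the
        // real ShenandoahMarkingContext accessor used by prepare_regions() and
        // final_traversal_collection() further down in this file.
        //
        //   static inline bool is_implicitly_marked_sketch(ShenandoahMarkingContext* ctx,
        //                                                  ShenandoahHeapRegion* r, oop obj) {
        //     // Anything allocated at or above TAMS is treated as live without being traversed.
        //     return cast_from_oop<HeapWord*>(obj) >= ctx->top_at_mark_start(r);
        //   }
        //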
       
   101 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
       
   102 private:
       
   103   ShenandoahObjToScanQueue* _queue;
       
   104   ShenandoahTraversalGC* _traversal_gc;
       
   105   ShenandoahHeap* const _heap;
       
   106 
       
   107 public:
       
   108   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
       
   109     _queue(q),
       
   110     _heap(ShenandoahHeap::heap())
       
   111  { }
       
   112 
       
   113   void do_buffer(void** buffer, size_t size) {
       
   114     for (size_t i = 0; i < size; ++i) {
       
   115       oop* p = (oop*) &buffer[i];
       
   116       oop obj = RawAccess<>::oop_load(p);
       
   117       shenandoah_assert_not_forwarded(p, obj);
       
   118       if (_heap->marking_context()->mark(obj)) {
       
   119         _queue->push(ShenandoahMarkTask(obj));
       
   120       }
       
   121     }
       
   122   }
       
   123 };
       
   124 
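        // Applies the SATB buffer closure above to each Java thread's local SATB queue, and to
        // the shared SATB queue when invoked for the VM thread.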
       
   125 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
       
   126 private:
       
   127   ShenandoahTraversalSATBBufferClosure* _satb_cl;
       
   128 
       
   129 public:
       
   130   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
       
   131     _satb_cl(satb_cl) {}
       
   132 
       
   133   void do_thread(Thread* thread) {
       
   134     if (thread->is_Java_thread()) {
       
   135       JavaThread* jt = (JavaThread*)thread;
       
   136       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
       
   137     } else if (thread->is_VM_thread()) {
       
   138       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
       
   139     }
       
   140   }
       
   141 };
       
   142 
       
   143 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
       
   144 // and remark them later during final-traversal.
       
   145 class ShenandoahMarkCLDClosure : public CLDClosure {
       
   146 private:
       
   147   OopClosure* _cl;
       
   148 public:
       
   149   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
       
   150   void do_cld(ClassLoaderData* cld) {
       
   151     cld->oops_do(_cl, true, true);
       
   152   }
       
   153 };
       
   154 
       
    155 // Like CLDToOopClosure, but only processes modified CLDs
       
   156 class ShenandoahRemarkCLDClosure : public CLDClosure {
       
   157 private:
       
   158   OopClosure* _cl;
       
   159 public:
       
   160   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
       
   161   void do_cld(ClassLoaderData* cld) {
       
   162     if (cld->has_modified_oops()) {
       
   163       cld->oops_do(_cl, true, true);
       
   164     }
       
   165   }
       
   166 };
       
   167 
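        // Worker task for the init-traversal pause: scans GC roots into the traversal queues.
        // With class unloading, only strong roots are scanned and the collection-set code roots
        // are pre-evacuated separately; otherwise all roots, including code roots, are processed.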
       
   168 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
       
   169 private:
       
   170   ShenandoahRootProcessor* _rp;
       
   171   ShenandoahHeap* _heap;
       
   172   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
       
   173 public:
       
   174   ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
       
   175     AbstractGangTask("Shenandoah Init Traversal Collection"),
       
   176     _rp(rp),
       
   177     _heap(ShenandoahHeap::heap()),
       
   178     _cset_coderoots(cset_coderoots) {}
       
   179 
       
   180   void work(uint worker_id) {
       
   181     ShenandoahParallelWorkerSession worker_session(worker_id);
       
   182 
       
   183     ShenandoahEvacOOMScope oom_evac_scope;
       
   184     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
       
   185     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
       
   186 
       
   187     bool process_refs = _heap->process_references();
       
   188     bool unload_classes = _heap->unload_classes();
       
   189     ReferenceProcessor* rp = NULL;
       
   190     if (process_refs) {
       
   191       rp = _heap->ref_processor();
       
   192     }
       
   193 
       
   194     // Step 1: Process ordinary GC roots.
       
   195     {
       
   196       ShenandoahTraversalClosure roots_cl(q, rp);
       
   197       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
       
   198       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
       
   199       if (unload_classes) {
       
   200         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, NULL, worker_id);
       
   201         // Need to pre-evac code roots here. Otherwise we might see from-space constants.
       
   202         ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
       
   203         ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
       
   204         _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
       
   205       } else {
       
   206         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
       
   207       }
       
   208     }
       
   209   }
       
   210 };
       
   211 
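        // Worker task for the concurrent traversal phase: joins the suspendible thread set and
        // drains the traversal queues (and completed SATB buffers) via main_loop().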
       
   212 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
       
   213 private:
       
   214   ShenandoahTaskTerminator* _terminator;
       
   215   ShenandoahHeap* _heap;
       
   216 public:
       
   217   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
       
   218     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
       
   219     _terminator(terminator),
       
   220     _heap(ShenandoahHeap::heap()) {}
       
   221 
       
   222   void work(uint worker_id) {
       
   223     ShenandoahConcurrentWorkerSession worker_session(worker_id);
       
   224     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
       
   225     ShenandoahEvacOOMScope oom_evac_scope;
       
   226     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
       
   227 
       
   228     // Drain all outstanding work in queues.
       
   229     traversal_gc->main_loop(worker_id, _terminator, true);
       
   230   }
       
   231 };
       
   232 
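        // Worker task for the final-traversal pause: drains leftover SATB buffers, rescans the
        // GC roots (using the degenerated closure variants if this is a degenerated cycle), and
        // then finishes all outstanding queued work.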
       
   233 class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
       
   234 private:
       
   235   ShenandoahRootProcessor* _rp;
       
   236   ShenandoahTaskTerminator* _terminator;
       
   237   ShenandoahHeap* _heap;
       
   238 public:
       
   239   ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
       
   240     AbstractGangTask("Shenandoah Final Traversal Collection"),
       
   241     _rp(rp),
       
   242     _terminator(terminator),
       
   243     _heap(ShenandoahHeap::heap()) {}
       
   244 
       
   245   void work(uint worker_id) {
       
   246     ShenandoahParallelWorkerSession worker_session(worker_id);
       
   247 
       
   248     ShenandoahEvacOOMScope oom_evac_scope;
       
   249     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
       
   250 
       
   251     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
       
   252     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
       
   253 
       
   254     bool process_refs = _heap->process_references();
       
   255     bool unload_classes = _heap->unload_classes();
       
   256     ReferenceProcessor* rp = NULL;
       
   257     if (process_refs) {
       
   258       rp = _heap->ref_processor();
       
   259     }
       
   260 
       
   261     // Step 0: Drain outstanding SATB queues.
       
   262     // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
       
   263     ShenandoahTraversalSATBBufferClosure satb_cl(q);
       
   264     {
       
   265       // Process remaining finished SATB buffers.
       
   266       SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
       
   267       while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
       
    268       // Process remaining threads' SATB buffers below.
       
   269     }
       
   270 
       
   271     // Step 1: Process GC roots.
       
    272     // Oops in code roots have already been marked, evacuated, and enqueued for further traversal,

    273     // and the references to them were updated during the init pause. New nmethods are handled

    274     // in a similar way during the nmethod-register process. Therefore, we don't need to rescan code

    275     // roots here.
       
   276     if (!_heap->is_degenerated_gc_in_progress()) {
       
   277       ShenandoahTraversalClosure roots_cl(q, rp);
       
   278       CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
       
   279       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
       
   280       if (unload_classes) {
       
   281         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
       
   282         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
       
   283       } else {
       
   284         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
       
   285       }
       
   286     } else {
       
   287       ShenandoahTraversalDegenClosure roots_cl(q, rp);
       
   288       CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
       
   289       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
       
   290       if (unload_classes) {
       
   291         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
       
   292         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
       
   293       } else {
       
   294         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
       
   295       }
       
   296     }
       
   297 
       
   298     {
       
   299       ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
       
   300       ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);
       
   301 
       
   302       // Step 3: Finally drain all outstanding work in queues.
       
   303       traversal_gc->main_loop(worker_id, _terminator, false);
       
   304     }
       
   305 
       
   306   }
       
   307 };
       
   308 
       
   309 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
       
   310   _heap(heap),
       
   311   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
       
   312   _traversal_set(ShenandoahHeapRegionSet()) {
       
   313 
       
   314   uint num_queues = heap->max_workers();
       
   315   for (uint i = 0; i < num_queues; ++i) {
       
   316     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
       
   317     task_queue->initialize();
       
   318     _task_queues->register_queue(i, task_queue);
       
   319   }
       
   320 }
       
   321 
       
   322 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
       
   323 }
       
   324 
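        // Sets up TAMS for every region: regions in the traversal set have TAMS captured at the
        // current top (so objects allocated later are implicitly marked) and their live data
        // cleared; all other regions get TAMS reset, which makes their contents implicitly live
        // for this cycle.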
       
   325 void ShenandoahTraversalGC::prepare_regions() {
       
   326   size_t num_regions = _heap->num_regions();
       
   327   ShenandoahMarkingContext* const ctx = _heap->marking_context();
       
   328   for (size_t i = 0; i < num_regions; i++) {
       
   329     ShenandoahHeapRegion* region = _heap->get_region(i);
       
   330     if (_heap->is_bitmap_slice_committed(region)) {
       
   331       if (_traversal_set.is_in(i)) {
       
   332         ctx->capture_top_at_mark_start(region);
       
   333         region->clear_live_data();
       
   334         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
       
   335       } else {
       
   336         // Everything outside the traversal set is always considered live.
       
   337         ctx->reset_top_at_mark_start(region);
       
   338       }
       
   339     } else {
       
    340       // FreeSet may contain uncommitted empty regions. Once they are recommitted,

    341       // their TAMS may have old values, so reset them here.
       
   342       ctx->reset_top_at_mark_start(region);
       
   343     }
       
   344   }
       
   345 }
       
   346 
       
   347 void ShenandoahTraversalGC::prepare() {
       
   348   _heap->collection_set()->clear();
       
   349   assert(_heap->collection_set()->count() == 0, "collection set not clear");
       
   350 
       
   351   {
       
   352     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
       
   353     _heap->make_parsable(true);
       
   354   }
       
   355 
       
   356   if (UseTLAB) {
       
   357     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
       
   358     _heap->resize_tlabs();
       
   359   }
       
   360 
       
   361   assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
       
   362   assert(!_heap->marking_context()->is_complete(), "should not be complete");
       
   363 
       
   364   ShenandoahFreeSet* free_set = _heap->free_set();
       
   365   ShenandoahCollectionSet* collection_set = _heap->collection_set();
       
   366 
       
   367   // Find collection set
       
   368   _heap->heuristics()->choose_collection_set(collection_set);
       
   369   prepare_regions();
       
   370 
       
   371   // Rebuild free set
       
   372   free_set->rebuild();
       
   373 
       
   374   log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
       
   375                      collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
       
   376 }
       
   377 
       
   378 void ShenandoahTraversalGC::init_traversal_collection() {
       
   379   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
       
   380 
       
   381   if (ShenandoahVerify) {
       
   382     _heap->verifier()->verify_before_traversal();
       
   383   }
       
   384 
       
   385   if (VerifyBeforeGC) {
       
   386     Universe::verify();
       
   387   }
       
   388 
       
   389   {
       
   390     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
       
   391     ShenandoahHeapLocker lock(_heap->lock());
       
   392     prepare();
       
   393   }
       
   394 
       
   395   _heap->set_concurrent_traversal_in_progress(true);
       
   396 
       
   397   bool process_refs = _heap->process_references();
       
   398   if (process_refs) {
       
   399     ReferenceProcessor* rp = _heap->ref_processor();
       
   400     rp->enable_discovery(true /*verify_no_refs*/);
       
   401     rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
       
   402   }
       
   403 
       
   404   {
       
   405     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
       
   406     assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
       
   407     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
       
   408 
       
   409 #if defined(COMPILER2) || INCLUDE_JVMCI
       
   410     DerivedPointerTable::clear();
       
   411 #endif
       
   412 
       
   413     {
       
   414       uint nworkers = _heap->workers()->active_workers();
       
   415       task_queues()->reserve(nworkers);
       
   416       ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
       
   417 
       
   418       ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();
       
   419 
       
   420       ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
       
   421       _heap->workers()->run_task(&traversal_task);
       
   422     }
       
   423 
       
   424 #if defined(COMPILER2) || INCLUDE_JVMCI
       
   425     DerivedPointerTable::update_pointers();
       
   426 #endif
       
   427   }
       
   428 
       
   429   if (ShenandoahPacing) {
       
   430     _heap->pacer()->setup_for_traversal();
       
   431   }
       
   432 }
       
   433 
       
   434 void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
       
   435   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
       
   436 
       
   437   // Initialize live data.
       
   438   jushort* ld = _heap->get_liveness_cache(w);
       
   439 
       
   440   ReferenceProcessor* rp = NULL;
       
   441   if (_heap->process_references()) {
       
   442     rp = _heap->ref_processor();
       
   443   }
       
   444   {
       
   445     if (!_heap->is_degenerated_gc_in_progress()) {
       
   446       if (_heap->unload_classes()) {
       
   447         if (ShenandoahStringDedup::is_enabled()) {
       
   448           ShenandoahTraversalMetadataDedupClosure cl(q, rp);
       
   449           main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
       
   450         } else {
       
   451           ShenandoahTraversalMetadataClosure cl(q, rp);
       
   452           main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
       
   453         }
       
   454       } else {
       
   455         if (ShenandoahStringDedup::is_enabled()) {
       
   456           ShenandoahTraversalDedupClosure cl(q, rp);
       
   457           main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
       
   458         } else {
       
   459           ShenandoahTraversalClosure cl(q, rp);
       
   460           main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
       
   461         }
       
   462       }
       
   463     } else {
       
   464       if (_heap->unload_classes()) {
       
   465         if (ShenandoahStringDedup::is_enabled()) {
       
   466           ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
       
   467           main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
       
   468         } else {
       
   469           ShenandoahTraversalMetadataDegenClosure cl(q, rp);
       
   470           main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
       
   471         }
       
   472       } else {
       
   473         if (ShenandoahStringDedup::is_enabled()) {
       
   474           ShenandoahTraversalDedupDegenClosure cl(q, rp);
       
   475           main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
       
   476         } else {
       
   477           ShenandoahTraversalDegenClosure cl(q, rp);
       
   478           main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
       
   479         }
       
   480       }
       
   481     }
       
   482   }
       
   483 
       
   484   _heap->flush_liveness_cache(w);
       
   485 }
       
   486 
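        // Per-worker marking loop: first drains any outstanding queues claimed from the queue
        // set, then alternates between draining completed SATB buffers, popping local tasks,
        // stealing from other workers, and offering termination when no work remains. Bails out
        // through the cancelled terminator if the GC has been cancelled.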
       
   487 template <class T>
       
   488 void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
       
   489   ShenandoahObjToScanQueueSet* queues = task_queues();
       
   490   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
       
   491   ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();
       
   492 
       
   493   uintx stride = ShenandoahMarkLoopStride;
       
   494 
       
   495   ShenandoahMarkTask task;
       
   496 
       
   497   // Process outstanding queues, if any.
       
   498   q = queues->claim_next();
       
   499   while (q != NULL) {
       
   500     if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
       
   501       ShenandoahCancelledTerminatorTerminator tt;
       
   502       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
       
   503       ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
       
   504       while (!terminator->offer_termination(&tt));
       
   505       return;
       
   506     }
       
   507 
       
   508     for (uint i = 0; i < stride; i++) {
       
   509       if (q->pop(task)) {
       
   510         conc_mark->do_task<T>(q, cl, live_data, &task);
       
   511       } else {
       
   512         assert(q->is_empty(), "Must be empty");
       
   513         q = queues->claim_next();
       
   514         break;
       
   515       }
       
   516     }
       
   517   }
       
   518 
       
   519   if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;
       
   520 
       
   521   // Normal loop.
       
   522   q = queues->queue(worker_id);
       
   523 
       
   524   ShenandoahTraversalSATBBufferClosure drain_satb(q);
       
   525   SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
       
   526 
       
   527   while (true) {
       
   528     if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;
       
   529 
       
   530     while (satb_mq_set.completed_buffers_num() > 0) {
       
   531       satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
       
   532     }
       
   533 
       
   534     uint work = 0;
       
   535     for (uint i = 0; i < stride; i++) {
       
   536       if (q->pop(task) ||
       
   537           queues->steal(worker_id, task)) {
       
   538         conc_mark->do_task<T>(q, cl, live_data, &task);
       
   539         work++;
       
   540       } else {
       
   541         break;
       
   542       }
       
   543     }
       
   544 
       
   545     if (work == 0) {
       
   546       // No more work, try to terminate
       
   547       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
       
   548       ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
       
   549       ShenandoahTerminationTimingsTracker term_tracker(worker_id);
       
   550       if (terminator->offer_termination()) return;
       
   551     }
       
   552   }
       
   553 }
       
   554 
       
   555 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
       
   556   if (_heap->cancelled_gc()) {
       
   557     ShenandoahCancelledTerminatorTerminator tt;
       
   558     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
       
   559     ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
       
   560     while (! terminator->offer_termination(&tt));
       
   561     return true;
       
   562   }
       
   563   return false;
       
   564 }
       
   565 
       
   566 void ShenandoahTraversalGC::concurrent_traversal_collection() {
       
   567   ClassLoaderDataGraph::clear_claimed_marks();
       
   568 
       
   569   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
       
   570   if (!_heap->cancelled_gc()) {
       
   571     uint nworkers = _heap->workers()->active_workers();
       
   572     task_queues()->reserve(nworkers);
       
   573     ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
       
   574 
       
   575     ShenandoahTaskTerminator terminator(nworkers, task_queues());
       
   576     ShenandoahConcurrentTraversalCollectionTask task(&terminator);
       
   577     _heap->workers()->run_task(&task);
       
   578   }
       
   579 
       
   580   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
       
   581     preclean_weak_refs();
       
   582   }
       
   583 }
       
   584 
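        // Final-traversal pause: finishes traversal work with a root rescan and queue drain,
        // processes weak references, optionally unloads classes and fixes up roots, and finally
        // trashes fully-dead candidate regions, rebuilds the free set, and resets the queues.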
       
   585 void ShenandoahTraversalGC::final_traversal_collection() {
       
   586   _heap->make_parsable(true);
       
   587 
       
   588   if (!_heap->cancelled_gc()) {
       
   589 #if defined(COMPILER2) || INCLUDE_JVMCI
       
   590     DerivedPointerTable::clear();
       
   591 #endif
       
   592     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
       
   593     uint nworkers = _heap->workers()->active_workers();
       
   594     task_queues()->reserve(nworkers);
       
   595 
       
   596     // Finish traversal
       
   597     ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
       
   598     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);
       
   599 
       
   600     ShenandoahTaskTerminator terminator(nworkers, task_queues());
       
   601     ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
       
   602     _heap->workers()->run_task(&task);
       
   603 #if defined(COMPILER2) || INCLUDE_JVMCI
       
   604     DerivedPointerTable::update_pointers();
       
   605 #endif
       
   606   }
       
   607 
       
   608   if (!_heap->cancelled_gc() && _heap->process_references()) {
       
   609     weak_refs_work();
       
   610   }
       
   611 
       
   612   if (!_heap->cancelled_gc() && _heap->unload_classes()) {
       
   613     _heap->unload_classes_and_cleanup_tables(false);
       
   614     fixup_roots();
       
   615   }
       
   616 
       
   617   if (!_heap->cancelled_gc()) {
       
   618     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
       
   619     TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
       
   620     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
       
   621 
       
   622     // No more marking expected
       
   623     _heap->mark_complete_marking_context();
       
   624 
       
   625     // Resize metaspace
       
   626     MetaspaceGC::compute_new_size();
       
   627 
       
    628     // Still good? We can now trash the cset, and do the final verification
       
   629     {
       
   630       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
       
   631       ShenandoahHeapLocker lock(_heap->lock());
       
   632 
       
   633       // Trash everything
       
   634       // Clear immediate garbage regions.
       
   635       size_t num_regions = _heap->num_regions();
       
   636 
       
   637       ShenandoahHeapRegionSet* traversal_regions = traversal_set();
       
   638       ShenandoahFreeSet* free_regions = _heap->free_set();
       
   639       ShenandoahMarkingContext* const ctx = _heap->marking_context();
       
   640       free_regions->clear();
       
   641       for (size_t i = 0; i < num_regions; i++) {
       
   642         ShenandoahHeapRegion* r = _heap->get_region(i);
       
   643         bool not_allocated = ctx->top_at_mark_start(r) == r->top();
       
   644 
       
   645         bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
       
   646         if (r->is_humongous_start() && candidate) {
       
   647           // Trash humongous.
       
   648           HeapWord* humongous_obj = r->bottom() + ShenandoahBrooksPointer::word_size();
       
   649           assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
       
   650           r->make_trash_immediate();
       
   651           while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
       
   652             i++;
       
   653             r = _heap->get_region(i);
       
   654             assert(r->is_humongous_continuation(), "must be humongous continuation");
       
   655             r->make_trash_immediate();
       
   656           }
       
   657         } else if (!r->is_empty() && candidate) {
       
   658           // Trash regular.
       
   659           assert(!r->is_humongous(), "handled above");
       
   660           assert(!r->is_trash(), "must not already be trashed");
       
   661           r->make_trash_immediate();
       
   662         }
       
   663       }
       
   664       _heap->collection_set()->clear();
       
   665       _heap->free_set()->rebuild();
       
   666       reset();
       
   667     }
       
   668 
       
   669     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
       
   670     _heap->set_concurrent_traversal_in_progress(false);
       
   671     assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");
       
   672 
       
   673     if (ShenandoahVerify) {
       
   674       _heap->verifier()->verify_after_traversal();
       
   675     }
       
   676 
       
   677     if (VerifyAfterGC) {
       
   678       Universe::verify();
       
   679     }
       
   680   }
       
   681 }
       
   682 
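        // Updates a root location in place: if the referenced object has been evacuated, the
        // forwarded (to-space) reference is stored back into the root.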
       
   683 class ShenandoahTraversalFixRootsClosure : public OopClosure {
       
   684 private:
       
   685   template <class T>
       
   686   inline void do_oop_work(T* p) {
       
   687     T o = RawAccess<>::oop_load(p);
       
   688     if (!CompressedOops::is_null(o)) {
       
   689       oop obj = CompressedOops::decode_not_null(o);
       
   690       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
       
   691       if (!oopDesc::equals_raw(obj, forw)) {
       
   692         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
       
   693       }
       
   694     }
       
   695   }
       
   696 
       
   697 public:
       
   698   inline void do_oop(oop* p) { do_oop_work(p); }
       
   699   inline void do_oop(narrowOop* p) { do_oop_work(p); }
       
   700 };
       
   701 
       
   702 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
       
   703 private:
       
   704   ShenandoahRootProcessor* _rp;
       
   705 
       
   706 public:
       
   707   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
       
   708     AbstractGangTask("Shenandoah traversal fix roots"),
       
   709     _rp(rp) {}
       
   710 
       
   711   void work(uint worker_id) {
       
   712     ShenandoahParallelWorkerSession worker_session(worker_id);
       
   713     ShenandoahTraversalFixRootsClosure cl;
       
   714     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
       
   715     CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
       
   716     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
       
   717   }
       
   718 };
       
   719 
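        // Re-walks all roots with the fix-roots closure above so that no root is left pointing
        // at a from-space copy, bracketed by DerivedPointerTable handling like the other root
        // scans.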
       
   720 void ShenandoahTraversalGC::fixup_roots() {
       
   721 #if defined(COMPILER2) || INCLUDE_JVMCI
       
   722   DerivedPointerTable::clear();
       
   723 #endif
       
   724   ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
       
   725   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
       
   726   _heap->workers()->run_task(&update_roots_task);
       
   727 #if defined(COMPILER2) || INCLUDE_JVMCI
       
   728   DerivedPointerTable::update_pointers();
       
   729 #endif
       
   730 }
       
   731 
       
   732 void ShenandoahTraversalGC::reset() {
       
   733   _task_queues->clear();
       
   734 }
       
   735 
       
   736 ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
       
   737   return _task_queues;
       
   738 }
       
   739 
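        // Closures used by precleaning: the yield closure lets the ReferenceProcessor bail out
        // once the GC is cancelled, and the complete-GC closure drains the single worker's
        // traversal queue.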
       
   740 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
       
   741 private:
       
   742   ShenandoahHeap* const _heap;
       
   743 public:
       
   744   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
       
   745   virtual bool should_return() { return _heap->cancelled_gc(); }
       
   746 };
       
   747 
       
   748 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
       
   749 public:
       
   750   void do_void() {
       
   751     ShenandoahHeap* sh = ShenandoahHeap::heap();
       
   752     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
       
   753     assert(sh->process_references(), "why else would we be here?");
       
   754     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
       
   755     shenandoah_assert_rp_isalive_installed();
       
   756     traversal_gc->main_loop((uint) 0, &terminator, true);
       
   757   }
       
   758 };
       
   759 
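        // Keep-alive closures for reference processing: each kept-alive referent is funneled
        // through process_oop(), i.e. marked/evacuated/enqueued like any other traversed
        // reference. The *Degen variants take the degenerated path; the SingleThread variants
        // additionally enter an evac-OOM scope around each update.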
       
   760 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
       
   761 private:
       
   762   ShenandoahObjToScanQueue* _queue;
       
   763   Thread* _thread;
       
   764   ShenandoahTraversalGC* _traversal_gc;
       
   765   ShenandoahMarkingContext* const _mark_context;
       
   766 
       
   767   template <class T>
       
   768   inline void do_oop_work(T* p) {
       
   769     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
       
   770   }
       
   771 
       
   772 public:
       
   773   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
       
   774     _queue(q), _thread(Thread::current()),
       
   775     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
       
   776     _mark_context(ShenandoahHeap::heap()->marking_context()) {}
       
   777 
       
   778   void do_oop(narrowOop* p) { do_oop_work(p); }
       
   779   void do_oop(oop* p)       { do_oop_work(p); }
       
   780 };
       
   781 
       
   782 class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
       
   783 private:
       
   784   template <class T>
       
   785   inline void do_oop_work(T* p) {
       
   786     // Cannot call maybe_update_with_forwarded, because on traversal-degen
       
   787     // path the collection set is already dropped. Instead, do the unguarded store.
       
   788     // TODO: This can be fixed after degen-traversal stops dropping cset.
       
   789     T o = RawAccess<>::oop_load(p);
       
   790     if (!CompressedOops::is_null(o)) {
       
   791       oop obj = CompressedOops::decode_not_null(o);
       
   792       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
       
   793       shenandoah_assert_marked(p, obj);
       
   794       RawAccess<IS_NOT_NULL>::oop_store(p, obj);
       
   795     }
       
   796   }
       
   797 
       
   798 public:
       
   799   ShenandoahTraversalWeakUpdateClosure() {}
       
   800 
       
   801   void do_oop(narrowOop* p) { do_oop_work(p); }
       
   802   void do_oop(oop* p)       { do_oop_work(p); }
       
   803 };
       
   804 
       
   805 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
       
   806 private:
       
   807   ShenandoahObjToScanQueue* _queue;
       
   808   Thread* _thread;
       
   809   ShenandoahTraversalGC* _traversal_gc;
       
   810   ShenandoahMarkingContext* const _mark_context;
       
   811 
       
   812   template <class T>
       
   813   inline void do_oop_work(T* p) {
       
   814     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
       
   815   }
       
   816 
       
   817 public:
       
   818   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
       
   819           _queue(q), _thread(Thread::current()),
       
   820           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
       
   821           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
       
   822 
       
   823   void do_oop(narrowOop* p) { do_oop_work(p); }
       
   824   void do_oop(oop* p)       { do_oop_work(p); }
       
   825 };
       
   826 
       
   827 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
       
   828 private:
       
   829   ShenandoahObjToScanQueue* _queue;
       
   830   Thread* _thread;
       
   831   ShenandoahTraversalGC* _traversal_gc;
       
   832   ShenandoahMarkingContext* const _mark_context;
       
   833 
       
   834   template <class T>
       
   835   inline void do_oop_work(T* p) {
       
   836     ShenandoahEvacOOMScope evac_scope;
       
   837     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
       
   838   }
       
   839 
       
   840 public:
       
   841   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
       
   842           _queue(q), _thread(Thread::current()),
       
   843           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
       
   844           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
       
   845 
       
   846   void do_oop(narrowOop* p) { do_oop_work(p); }
       
   847   void do_oop(oop* p)       { do_oop_work(p); }
       
   848 };
       
   849 
       
   850 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
       
   851 private:
       
   852   ShenandoahObjToScanQueue* _queue;
       
   853   Thread* _thread;
       
   854   ShenandoahTraversalGC* _traversal_gc;
       
   855   ShenandoahMarkingContext* const _mark_context;
       
   856 
       
   857   template <class T>
       
   858   inline void do_oop_work(T* p) {
       
   859     ShenandoahEvacOOMScope evac_scope;
       
   860     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
       
   861   }
       
   862 
       
   863 public:
       
   864   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
       
   865           _queue(q), _thread(Thread::current()),
       
   866           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
       
   867           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
       
   868 
       
   869   void do_oop(narrowOop* p) { do_oop_work(p); }
       
   870   void do_oop(oop* p)       { do_oop_work(p); }
       
   871 };
       
   872 
       
   873 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
       
   874 private:
       
   875   ReferenceProcessor* _rp;
       
   876 
       
   877 public:
       
   878   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
       
   879           AbstractGangTask("Precleaning task"),
       
   880           _rp(rp) {}
       
   881 
       
   882   void work(uint worker_id) {
       
   883     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
       
   884     ShenandoahParallelWorkerSession worker_session(worker_id);
       
   885     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
       
   886     ShenandoahEvacOOMScope oom_evac_scope;
       
   887 
       
   888     ShenandoahHeap* sh = ShenandoahHeap::heap();
       
   889 
       
   890     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
       
   891 
       
   892     ShenandoahForwardedIsAliveClosure is_alive;
       
   893     ShenandoahTraversalCancelledGCYieldClosure yield;
       
   894     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
       
   895     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
       
   896     ResourceMark rm;
       
   897     _rp->preclean_discovered_references(&is_alive, &keep_alive,
       
   898                                         &complete_gc, &yield,
       
   899                                         NULL);
       
   900   }
       
   901 };
       
   902 
       
   903 void ShenandoahTraversalGC::preclean_weak_refs() {
       
    904   // Pre-cleaning weak references before diving into STW makes sense at the

    905   // end of concurrent mark. This will filter out the references whose referents

    906   // are alive. Note that ReferenceProcessor already filters these out on reference

    907   // discovery, and the bulk of the work is done there. This phase processes leftovers

    908   // that missed the initial filtering, i.e. when the referent was marked alive after

    909   // the reference was discovered by the RP.
       
   910 
       
   911   assert(_heap->process_references(), "sanity");
       
   912   assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
       
   913 
       
   914   // Shortcut if no references were discovered to avoid winding up threads.
       
   915   ReferenceProcessor* rp = _heap->ref_processor();
       
   916   if (!rp->has_discovered_references()) {
       
   917     return;
       
   918   }
       
   919 
       
   920   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
       
   921 
       
   922   shenandoah_assert_rp_isalive_not_installed();
       
   923   ShenandoahForwardedIsAliveClosure is_alive;
       
   924   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
       
   925 
       
   926   assert(task_queues()->is_empty(), "Should be empty");
       
   927 
       
   928   // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
       
   929   // queues and other goodies. When upstream ReferenceProcessor starts supporting
       
   930   // parallel precleans, we can extend this to more threads.
       
   931   ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);
       
   932 
       
   933   WorkGang* workers = _heap->workers();
       
   934   uint nworkers = workers->active_workers();
       
   935   assert(nworkers == 1, "This code uses only a single worker");
       
   936   task_queues()->reserve(nworkers);
       
   937 
       
   938   ShenandoahTraversalPrecleanTask task(rp);
       
   939   workers->run_task(&task);
       
   940 
       
   941   assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
       
   942 }
       
   943 
       
   944 // Weak Reference Closures
       
   945 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
       
   946   uint _worker_id;
       
   947   ShenandoahTaskTerminator* _terminator;
       
   948   bool _reset_terminator;
       
   949 
       
   950 public:
       
   951   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
       
   952     _worker_id(worker_id),
       
   953     _terminator(t),
       
   954     _reset_terminator(reset_terminator) {
       
   955   }
       
   956 
       
   957   void do_void() {
       
   958     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
       
   959 
       
   960     ShenandoahHeap* sh = ShenandoahHeap::heap();
       
   961     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
       
   962     assert(sh->process_references(), "why else would we be here?");
       
   963     shenandoah_assert_rp_isalive_installed();
       
   964 
       
   965     traversal_gc->main_loop(_worker_id, _terminator, false);
       
   966 
       
   967     if (_reset_terminator) {
       
   968       _terminator->reset_for_reuse();
       
   969     }
       
   970   }
       
   971 };
       
   972 
       
   973 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
       
   974   uint _worker_id;
       
   975   ShenandoahTaskTerminator* _terminator;
       
   976   bool _reset_terminator;
       
   977 
       
   978 public:
       
   979   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
       
   980           _worker_id(worker_id),
       
   981           _terminator(t),
       
   982           _reset_terminator(reset_terminator) {
       
   983   }
       
   984 
       
   985   void do_void() {
       
   986     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
       
   987 
       
   988     ShenandoahHeap* sh = ShenandoahHeap::heap();
       
   989     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
       
   990     assert(sh->process_references(), "why else would we be here?");
       
   991     shenandoah_assert_rp_isalive_installed();
       
   992 
       
   993     ShenandoahEvacOOMScope evac_scope;
       
   994     traversal_gc->main_loop(_worker_id, _terminator, false);
       
   995 
       
   996     if (_reset_terminator) {
       
   997       _terminator->reset_for_reuse();
       
   998     }
       
   999   }
       
  1000 };
       
  1001 
       
  1002 void ShenandoahTraversalGC::weak_refs_work() {
       
  1003   assert(_heap->process_references(), "sanity");
       
  1004 
       
  1005   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
       
  1006 
       
  1007   ShenandoahGCPhase phase(phase_root);
       
  1008 
       
  1009   ReferenceProcessor* rp = _heap->ref_processor();
       
  1010 
       
  1011   // NOTE: We cannot shortcut on has_discovered_references() here, because
       
   1012   // we would then miss marking JNI Weak refs; see the implementation in
       
  1013   // ReferenceProcessor::process_discovered_references.
       
  1014   weak_refs_work_doit();
       
  1015 
       
  1016   rp->verify_no_references_recorded();
       
  1017   assert(!rp->discovery_enabled(), "Post condition");
       
  1018 
       
  1019 }
       
  1020 
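        // Glue between the ReferenceProcessor's parallel processing tasks and Shenandoah's
        // worker gang: the proxy runs a ProcessTask per worker with the appropriate keep-alive
        // closure, and the executor sets up the queues and a terminator before dispatching it.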
       
  1021 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
       
  1022 private:
       
  1023   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
       
  1024   ShenandoahTaskTerminator* _terminator;
       
  1025 
       
  1026 public:
       
  1027   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
       
  1028                                       ShenandoahTaskTerminator* t) :
       
  1029     AbstractGangTask("Process reference objects in parallel"),
       
  1030     _proc_task(proc_task),
       
  1031     _terminator(t) {
       
  1032   }
       
  1033 
       
  1034   void work(uint worker_id) {
       
  1035     ShenandoahEvacOOMScope oom_evac_scope;
       
  1036     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
       
  1037     ShenandoahHeap* heap = ShenandoahHeap::heap();
       
  1038     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
       
  1039 
       
  1040     ShenandoahForwardedIsAliveClosure is_alive;
       
  1041     if (!heap->is_degenerated_gc_in_progress()) {
       
  1042       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
       
  1043       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
       
  1044     } else {
       
  1045       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
       
  1046       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
       
  1047     }
       
  1048   }
       
  1049 };
       
  1050 
       
  1051 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
       
  1052 private:
       
  1053   WorkGang* _workers;
       
  1054 
       
  1055 public:
       
  1056   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
       
  1057 
       
  1058   // Executes a task using worker threads.
       
  1059   void execute(ProcessTask& task, uint ergo_workers) {
       
  1060     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
       
  1061 
       
  1062     ShenandoahHeap* heap = ShenandoahHeap::heap();
       
  1063     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
       
  1064     ShenandoahPushWorkerQueuesScope scope(_workers,
       
  1065                                           traversal_gc->task_queues(),
       
  1066                                           ergo_workers,
       
  1067                                           /* do_check = */ false);
       
  1068     uint nworkers = _workers->active_workers();
       
  1069     traversal_gc->task_queues()->reserve(nworkers);
       
  1070     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
       
  1071     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
       
  1072     _workers->run_task(&proc_task_proxy);
       
  1073   }
       
  1074 };
       
  1075 
       
  1076 void ShenandoahTraversalGC::weak_refs_work_doit() {
       
  1077   ReferenceProcessor* rp = _heap->ref_processor();
       
  1078 
       
  1079   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
       
  1080 
       
  1081   shenandoah_assert_rp_isalive_not_installed();
       
  1082   ShenandoahForwardedIsAliveClosure is_alive;
       
  1083   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
       
  1084 
       
  1085   WorkGang* workers = _heap->workers();
       
  1086   uint nworkers = workers->active_workers();
       
  1087 
       
  1088   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
       
  1089   rp->set_active_mt_degree(nworkers);
       
  1090 
       
  1091   assert(task_queues()->is_empty(), "Should be empty");
       
  1092 
       
   1093   // The complete_gc and keep_alive closures instantiated here are only needed for

   1094   // the single-threaded path in the RP. They share queue 0 for tracking work, which

   1095   // simplifies the implementation. Since the RP may decide to call complete_gc several

   1096   // times, we need to be able to reuse the terminator.
       
  1097   uint serial_worker_id = 0;
       
  1098   ShenandoahTaskTerminator terminator(1, task_queues());
       
  1099   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
       
  1100   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
       
  1101 
       
  1102   ShenandoahTraversalRefProcTaskExecutor executor(workers);
       
  1103 
       
  1104   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
       
  1105   if (!_heap->is_degenerated_gc_in_progress()) {
       
  1106     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
       
  1107     rp->process_discovered_references(&is_alive, &keep_alive,
       
  1108                                       &complete_gc, &executor,
       
  1109                                       &pt);
       
  1110   } else {
       
  1111     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
       
  1112     rp->process_discovered_references(&is_alive, &keep_alive,
       
  1113                                       &complete_gc, &executor,
       
  1114                                       &pt);
       
  1115   }
       
  1116 
       
  1117   {
       
  1118     ShenandoahGCPhase phase(phase_process);
       
  1119     ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);
       
  1120 
       
  1121     // Process leftover weak oops (using parallel version)
       
  1122     ShenandoahTraversalWeakUpdateClosure cl;
       
  1123     WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1);
       
  1124 
       
  1125     pt.print_all_references();
       
  1126 
       
  1127     assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
       
  1128   }
       
  1129 }