src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/plab.hpp"

#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBrooksPointer.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"

#include "memory/metaspace.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
#endif

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap_base;
public:
  ShenandoahPretouchTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch"),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap_base(bitmap_base) {
  }

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);

      r = _regions.next();
    }
  }
};
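
// Worked example of the bitmap offsets above (illustrative; actual values
// depend on the configured region size and bitmap geometry). Assuming
// region_size_bytes() == 32M and MarkBitMap::heap_map_factor() == 64, i.e.
// one bitmap byte covers 64 heap bytes:
//
//   region 0: start = 0 * 32M / 64 = 0,    end = 1 * 32M / 64 = 512K
//   region 1: start = 1 * 32M / 64 = 512K, end = 2 * 32M / 64 = 1M
//
// Each region thus owns a disjoint slice of the bitmap, and a region and its
// slice are always touched by the same worker, which is what makes this
// pre-touch NUMA-friendly.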

jint ShenandoahHeap::initialize() {
  ShenandoahBrooksPointer::initial_checks();

  initialize_heuristics();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = ShenandoahHeapRegion::region_count();

  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");

  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT "%s",
          byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size));
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  // The call below uses the SATB machinery that currently lives in G1, but
  // probably belongs in a shared location.
  ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
                                               SATB_Q_CBL_mon,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               60 /* G1SATBBufferEnqueueingThresholdPercent */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee((_bitmap_bytes_per_slice % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);
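
// Worked example of the slice sizing above (illustrative; assumes one bitmap
// byte covers 64 heap bytes, so bitmap_bytes_per_region = 32M / 64 = 512K):
//
//   4K pages: 4K <= 512K, so one region per slice and a 512K slice;
//   2M pages: 2M >  512K, so 2M / 512K = 4 regions per slice and a 2M slice.
//
// Either way a slice is a whole multiple of the page size, which is exactly
// what the page-granularity guarantee above checks.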

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  os::commit_memory_or_exit((char *) (_bitmap_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");
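
// Worked example (illustrative): with 4 regions per 2M slice and, say, 10
// initially committed regions, align_up(10, 4) = 12 regions, i.e. 3 slices,
// so bitmap_init_commit = 3 * 2M = 6M (capped at _bitmap_size). Committing
// whole slices keeps later bitmap commits and uncommits page-aligned.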

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _num_regions, page_size);
    ShenandoahPretouchTask cl(bitmap0.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  _traversal_gc = heuristics()->can_do_traversal_gc() ?
                new ShenandoahTraversalGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _control_thread = new ShenandoahControlThread();

  ShenandoahCodeRoots::initialize();

  log_info(gc, init)("Safepointing mechanism: %s",
                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));

  _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
  }

  return JNI_OK;
}

void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }

    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
    }
    log_info(gc, init)("Shenandoah heuristics: %s",
                       _heuristics->name());
  } else {
    ShouldNotReachHere();
  }
}
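
// Usage sketch (heuristics names as parsed above; which heuristics are
// diagnostic or experimental may vary between builds):
//
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC \
//        -XX:ShenandoahGCHeuristics=adaptive ...
//
// Diagnostic heuristics additionally require -XX:+UnlockDiagnosticVMOptions,
// as enforced by the checks above.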

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _heuristics(NULL),
  _free_set(NULL),
  _scm(new ShenandoahConcurrentMark()),
  _traversal_gc(NULL),
  _full_gc(new ShenandoahMarkCompact()),
  _pacer(NULL),
  _verifier(NULL),
  _alloc_tracker(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _ref_processor(NULL),
  _marking_context(NULL),
  _collection_set(NULL)
{
  log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
  log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");

  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ShenandoahParallelSafepointThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ShenandoahParallelSafepointThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (has_forwarded_objects())               st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())      st->print("marking, ");
  if (is_evacuation_in_progress())           st->print("evacuating, ");
  if (is_update_refs_in_progress())          st->print("updating refs, ");
  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
  if (is_full_gc_in_progress())              st->print("full gc, ");
  if (is_full_gc_move_in_progress())         st->print("full gc move, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread())) {
      ShenandoahThreadLocalData::initialize_gclab(thread);
    }
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitGCLABClosure init_gclabs;
  Threads::threads_do(&init_gclabs);
  _workers->threads_do(&init_gclabs);
  _safepoint_workers->threads_do(&init_gclabs);

  // The GCLAB cannot be initialized early during VM startup, because it cannot
  // determine its max_size. Instead, we let the WorkGang initialize the GCLAB
  // when a new worker is created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _heuristics->initialize();
}

size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}
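
// Note on the accounting above: wasted words (e.g. retired LAB tails) are
// excluded from used(), but still count toward the allocation rate via
// increase_allocated(), and are force-claimed against the pacer budget so
// that the pacer sees the true allocation pressure.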

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before) {
  assert (ShenandoahUncommit, "should be enabled");

  size_t count = 0;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
    control_thread()->notify_heap_changed();
  }
}
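
// Note: the loop above takes and releases the heap lock once per candidate
// region, with a SpinPause() between iterations, so allocators are not
// starved while we walk a potentially large region set. The second
// is_empty_committed() check re-validates the region under the lock.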

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}
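
// Worked example of the sizing policy above (illustrative): a thread whose
// current gclab_size is 8K words doubles to 16K words, clamped into
// [PLAB::min_size(), PLAB::max_size()]. A request for a 64K-word object would
// exceed the new size, so the method returns NULL and the caller falls back
// to a shared allocation, leaving the still-useful GCLAB unretired.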

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  ShenandoahAllocTrace trace_alloc(req.size(), req.type());

  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed; block until the control thread has reacted, then
    // retry the allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req.size());
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req.size());
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

class ShenandoahMemAllocator : public MemAllocator {
private:
  MemAllocator& _initializer;
public:
  ShenandoahMemAllocator(MemAllocator& initializer, Klass* klass, size_t word_size, Thread* thread) :
    MemAllocator(klass, word_size + ShenandoahBrooksPointer::word_size(), thread),
    _initializer(initializer) {}

protected:
  virtual HeapWord* mem_allocate(Allocation& allocation) const {
    HeapWord* result = MemAllocator::mem_allocate(allocation);
    // Initialize brooks-pointer
    if (result != NULL) {
      result += ShenandoahBrooksPointer::word_size();
      ShenandoahBrooksPointer::initialize(oop(result));
      assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
    }
    return result;
  }

  virtual oop initialize(HeapWord* mem) const {
    return _initializer.initialize(mem);
  }
};
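
// Resulting object layout (sketch; assumes ShenandoahBrooksPointer::word_size()
// is one heap word):
//
//   +-----------------+----------------------------+
//   | forwarding word | object (header, fields...) |
//   +-----------------+----------------------------+
//   ^ allocated base  ^ result returned to caller
//
// The allocation is padded by one word, and the returned oop points past the
// Brooks forwarding word; ShenandoahBrooksPointer::initialize() makes that
// word point at the object itself.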

oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
  ObjAllocator initializer(klass, size, THREAD);
  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
  return allocator.allocate();
}

oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
  ObjArrayAllocator initializer(klass, size, length, do_zero, THREAD);
  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
  return allocator.allocate();
}

oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
  ClassAllocator initializer(klass, size, THREAD);
  ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
  return allocator.allocate();
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform the GC heuristics about the metaspace OOM if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  HeapWord* obj = tlab_post_allocation_setup(start);
  CollectedHeap::fill_with_object(obj, end);
}

size_t ShenandoahHeap::min_dummy_object_size() const {
  return CollectedHeap::min_dummy_object_size() + ShenandoahBrooksPointer::word_size();
}

class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = RawAccess<>::oop_load(p);
    if (! CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->in_collection_set(obj)) {
        shenandoah_assert_marked(p, obj);
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::equals_raw(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
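
// Note on the closure above: resolve_forwarded_not_null() returns the current
// forwardee of an object. If the object has not been copied yet (the resolved
// copy is still the original), evacuate_object() copies it and installs the
// forwarding pointer, handling the race where several threads evacuate the
// same object; the winning copy is then stored back into the root slot.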

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "all-garbage regions are reclaimed early");
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
  st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom() + ShenandoahBrooksPointer::word_size());
  size_t size = humongous_obj->size() + ShenandoahBrooksPointer::word_size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, an assertion fails when printing the region to
    // the trace log, because printing expects every humongous continuation to still be
    // preceded by its humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}
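
// Worked example (illustrative): with 32M regions, a ~40M humongous object
// needs required_regions = ceil(40M / 32M) = 2 regions. The loop above
// trashes index start+1 first and start last, so a humongous continuation
// never outlives its humongous start region.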
       
  1011 
       
  1012 class ShenandoahRetireGCLABClosure : public ThreadClosure {
       
  1013 public:
       
  1014   void do_thread(Thread* thread) {
       
  1015     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
       
  1016     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
       
  1017     gclab->retire();
       
  1018   }
       
  1019 };
       
  1020 
       
  1021 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
       
  1022   if (UseTLAB) {
       
  1023     CollectedHeap::ensure_parsability(retire_tlabs);
       
  1024   }
       
  1025   ShenandoahRetireGCLABClosure cl;
       
  1026   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
       
  1027     cl.do_thread(t);
       
  1028   }
       
  1029   workers()->threads_do(&cl);
       
  1030   _safepoint_workers->threads_do(&cl);
       
  1031 }
       
  1032 
       
  1033 void ShenandoahHeap::resize_tlabs() {
       
  1034   CollectedHeap::resize_all_tlabs();
       
  1035 }
       
  1036 
       
  1037 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
       
  1038 private:
       
  1039   ShenandoahRootEvacuator* _rp;
       
  1040 
       
  1041 public:
       
  1042   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
       
  1043     AbstractGangTask("Shenandoah evacuate and update roots"),
       
  1044     _rp(rp) {}
       
  1045 
       
  1046   void work(uint worker_id) {
       
  1047     ShenandoahParallelWorkerSession worker_session(worker_id);
       
  1048     ShenandoahEvacOOMScope oom_evac_scope;
       
  1049     ShenandoahEvacuateUpdateRootsClosure cl;
       
  1050 
       
  1051     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
       
  1052     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
       
  1053   }
       
  1054 };
       
  1055 
       
  1056 void ShenandoahHeap::evacuate_and_update_roots() {
       
  1057 #if defined(COMPILER2) || INCLUDE_JVMCI
       
  1058   DerivedPointerTable::clear();
       
  1059 #endif
       
  1060   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
       
  1061 
       
  1062   {
       
  1063     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
       
  1064     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
       
  1065     workers()->run_task(&roots_task);
       
  1066   }
       
  1067 
       
  1068 #if defined(COMPILER2) || INCLUDE_JVMCI
       
  1069   DerivedPointerTable::update_pointers();
       
  1070 #endif
       
  1071 }
       
  1072 
       
  1073 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
       
  1074   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
       
  1075 
       
  1076   CodeBlobToOopClosure blobsCl(cl, false);
       
  1077   CLDToOopClosure cldCl(cl, ClassLoaderData::_claim_strong);
       
  1078 
       
  1079   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
       
  1080   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
       
  1081 }
       
  1082 
       
  1083 // Returns size in bytes
       
  1084 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
       
  1085   if (ShenandoahElasticTLAB) {
       
  1086     // With Elastic TLABs, return the max allowed size, and let the allocation path
       
  1087     // figure out the safe size for current allocation.
       
  1088     return ShenandoahHeapRegion::max_tlab_size_bytes();
       
  1089   } else {
       
  1090     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
       
  1091   }
       
  1092 }
       
  1093 
       
  1094 size_t ShenandoahHeap::max_tlab_size() const {
       
  1095   // Returns size in words
       
  1096   return ShenandoahHeapRegion::max_tlab_size_words();
       
  1097 }
       
  1098 
       
  1099 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
       
  1100 public:
       
  1101   void do_thread(Thread* thread) {
       
  1102     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
       
  1103     gclab->retire();
       
  1104     if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
       
  1105       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
       
  1106     }
       
  1107   }
       
  1108 };
       
  1109 
       
  1110 void ShenandoahHeap::retire_and_reset_gclabs() {
       
  1111   ShenandoahRetireAndResetGCLABClosure cl;
       
  1112   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
       
  1113     cl.do_thread(t);
       
  1114   }
       
  1115   workers()->threads_do(&cl);
       
  1116   _safepoint_workers->threads_do(&cl);
       
  1117 }
       
  1118 
       
  1119 void ShenandoahHeap::collect(GCCause::Cause cause) {
       
  1120   control_thread()->request_gc(cause);
       
  1121 }
       
  1122 
       
  1123 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
       
  1124   // No-op. Explicit full collections are driven through collect() and the control thread instead.
       
  1125 }
       
  1126 
       
  1127 CollectorPolicy* ShenandoahHeap::collector_policy() const {
       
  1128   return _shenandoah_policy;
       
  1129 }
       
  1130 
       
  1131 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
       
  1132   Space* sp = heap_region_containing(addr);
       
  1133   if (sp != NULL) {
       
  1134     return sp->block_start(addr);
       
  1135   }
       
  1136   return NULL;
       
  1137 }
       
  1138 
       
  1139 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
       
  1140   Space* sp = heap_region_containing(addr);
       
  1141   assert(sp != NULL, "block_size of address outside of heap");
       
  1142   return sp->block_size(addr);
       
  1143 }
       
  1144 
       
  1145 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
       
  1146   Space* sp = heap_region_containing(addr);
       
  1147   return sp->block_is_obj(addr);
       
  1148 }
       
  1149 
       
  1150 jlong ShenandoahHeap::millis_since_last_gc() {
       
  1151   double v = heuristics()->time_since_last_gc() * 1000;
       
  1152   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
       
  1153   return (jlong)v;
       
  1154 }
       
  1155 
       
  1156 void ShenandoahHeap::prepare_for_verify() {
       
  1157   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
       
  1158     make_parsable(false);
       
  1159   }
       
  1160 }
       
  1161 
       
  1162 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
       
  1163   workers()->print_worker_threads_on(st);
       
  1164   if (ShenandoahStringDedup::is_enabled()) {
       
  1165     ShenandoahStringDedup::print_worker_threads_on(st);
       
  1166   }
       
  1167 }
       
  1168 
       
  1169 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
       
  1170   workers()->threads_do(tcl);
       
  1171   _safepoint_workers->threads_do(tcl);
       
  1172   if (ShenandoahStringDedup::is_enabled()) {
       
  1173     ShenandoahStringDedup::threads_do(tcl);
       
  1174   }
       
  1175 }
       
  1176 
       
  1177 void ShenandoahHeap::print_tracing_info() const {
       
  1178   LogTarget(Info, gc, stats) lt;
       
  1179   if (lt.is_enabled()) {
       
  1180     ResourceMark rm;
       
  1181     LogStream ls(lt);
       
  1182 
       
  1183     phase_timings()->print_on(&ls);
       
  1184 
       
  1185     ls.cr();
       
  1186     ls.cr();
       
  1187 
       
  1188     shenandoah_policy()->print_gc_stats(&ls);
       
  1189 
       
  1190     ls.cr();
       
  1191     ls.cr();
       
  1192 
       
  1193     if (ShenandoahPacing) {
       
  1194       pacer()->print_on(&ls);
       
  1195     }
       
  1196 
       
  1197     ls.cr();
       
  1198     ls.cr();
       
  1199 
       
  1200     if (ShenandoahAllocationTrace) {
       
  1201       assert(alloc_tracker() != NULL, "Must be");
       
  1202       alloc_tracker()->print_on(&ls);
       
  1203     } else {
       
  1204       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
       
  1205     }
       
  1206   }
       
  1207 }
       
  1208 
       
  1209 void ShenandoahHeap::verify(VerifyOption vo) {
       
  1210   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
       
  1211     if (ShenandoahVerify) {
       
  1212       verifier()->verify_generic(vo);
       
  1213     } else {
       
  1214       // TODO: Consider allocating verification bitmaps on demand,
       
  1215       // and turn this on unconditionally.
       
  1216     }
       
  1217   }
       
  1218 }
       
  1219 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
       
  1220   return _free_set->capacity();
       
  1221 }
       
  1222 
       
  1223 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
       
  1224 private:
       
  1225   MarkBitMap* _bitmap;
       
  1226   Stack<oop,mtGC>* _oop_stack;
       
  1227 
       
  1228   template <class T>
       
  1229   void do_oop_work(T* p) {
       
  1230     T o = RawAccess<>::oop_load(p);
       
  1231     if (!CompressedOops::is_null(o)) {
       
  1232       oop obj = CompressedOops::decode_not_null(o);
       
  1233       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
       
  1234       assert(oopDesc::is_oop(obj), "must be a valid oop");
       
  1235       if (!_bitmap->is_marked((HeapWord*) obj)) {
       
  1236         _bitmap->mark((HeapWord*) obj);
       
  1237         _oop_stack->push(obj);
       
  1238       }
       
  1239     }
       
  1240   }
       
  1241 public:
       
  1242   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
       
  1243     _bitmap(bitmap), _oop_stack(oop_stack) {}
       
  1244   void do_oop(oop* p)       { do_oop_work(p); }
       
  1245   void do_oop(narrowOop* p) { do_oop_work(p); }
       
  1246 };
       
  1247 
       
  1248 /*
       
  1249  * This is public API, used in preparation for object_iterate().
       
  1250  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
       
  1251  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
       
  1252  * control, we call ShenandoahHeap::make_parsable().
       
  1253  */
       
  1254 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
       
  1255   // No-op.
       
  1256 }
       
  1257 
       
  1258 /*
       
  1259  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
       
  1260  *
       
  1261  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
       
  1262  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
       
  1263  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
       
  1264  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
       
  1265  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
       
  1266  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
       
  1267  * wiped the bitmap in preparation for next marking).
       
  1268  *
       
  1269  * For all those reasons, we implement object iteration as a single marking traversal, reporting
       
  1270  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
       
  1271  * is allowed to report dead objects, but is not required to do so.
       
  1272  */
       
  1273 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
       
  1274   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
       
  1275   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
       
  1276     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
       
  1277     return;
       
  1278   }
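         // The auxiliary bitmap backing memory is committed only for the duration of
         // the iteration and uncommitted again at the end, so the extra footprint is
         // paid only while a heap walk is running.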
       
  1279 
       
  1280   // Reset bitmap
       
  1281   _aux_bit_map.clear();
       
  1282 
       
  1283   Stack<oop,mtGC> oop_stack;
       
  1284 
       
  1285   // First, we process all GC roots. This populates the work stack with initial objects.
       
  1286   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
       
  1287   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
       
  1288   CLDToOopClosure clds(&oops, ClassLoaderData::_claim_none);
       
  1289   CodeBlobToOopClosure blobs(&oops, false);
       
  1290   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
       
  1291 
       
  1292   // Work through the oop stack to traverse heap.
       
  1293   while (! oop_stack.is_empty()) {
       
  1294     oop obj = oop_stack.pop();
       
  1295     assert(oopDesc::is_oop(obj), "must be a valid oop");
       
  1296     cl->do_object(obj);
       
  1297     obj->oop_iterate(&oops);
       
  1298   }
       
  1299 
       
  1300   assert(oop_stack.is_empty(), "should be empty");
       
  1301 
       
  1302   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
       
  1303     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
       
  1304   }
       
  1305 }
       
  1306 
       
  1307 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
       
  1308   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
       
  1309   object_iterate(cl);
       
  1310 }
       
  1311 
       
  1312 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
       
  1313   for (size_t i = 0; i < num_regions(); i++) {
       
  1314     ShenandoahHeapRegion* current = get_region(i);
       
  1315     blk->heap_region_do(current);
       
  1316   }
       
  1317 }
       
  1318 
       
  1319 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
       
  1320 private:
       
  1321   ShenandoahHeap* const _heap;
       
  1322   ShenandoahHeapRegionClosure* const _blk;
       
  1323 
       
  1324   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
       
  1325   volatile size_t _index;
       
  1326   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
       
  1327 
       
  1328 public:
       
  1329   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
       
  1330           AbstractGangTask("Parallel Region Task"),
       
  1331           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
       
  1332 
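         // Workers claim regions in strides of ShenandoahParallelRegionStride by
         // atomically bumping the shared _index; the padding above keeps this hot
         // counter on its own cache line to avoid false sharing.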
       
  1333   void work(uint worker_id) {
       
  1334     size_t stride = ShenandoahParallelRegionStride;
       
  1335 
       
  1336     size_t max = _heap->num_regions();
       
  1337     while (_index < max) {
       
  1338       size_t cur = Atomic::add(stride, &_index) - stride;
       
  1339       size_t start = cur;
       
  1340       size_t end = MIN2(cur + stride, max);
       
  1341       if (start >= max) break;
       
  1342 
       
  1343       for (size_t i = cur; i < end; i++) {
       
  1344         ShenandoahHeapRegion* current = _heap->get_region(i);
       
  1345         _blk->heap_region_do(current);
       
  1346       }
       
  1347     }
       
  1348   }
       
  1349 };
       
  1350 
       
  1351 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
       
  1352   assert(blk->is_thread_safe(), "Only thread-safe closures here");
       
  1353   if (num_regions() > ShenandoahParallelRegionStride) {
       
  1354     ShenandoahParallelHeapRegionTask task(blk);
       
  1355     workers()->run_task(&task);
       
  1356   } else {
       
  1357     heap_region_iterate(blk);
       
  1358   }
       
  1359 }
       
  1360 
       
  1361 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
       
  1362 private:
       
  1363   ShenandoahMarkingContext* const _ctx;
       
  1364 public:
       
  1365   ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
       
  1366 
       
  1367   void heap_region_do(ShenandoahHeapRegion* r) {
       
  1368     if (r->is_active()) {
       
  1369       r->clear_live_data();
       
  1370       _ctx->capture_top_at_mark_start(r);
       
  1371     } else {
       
  1372       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
       
  1373       assert(_ctx->top_at_mark_start(r) == r->top(),
       
  1374              "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number());
       
  1375     }
       
  1376   }
       
  1377 
       
  1378   bool is_thread_safe() { return true; }
       
  1379 };
       
  1380 
       
  1381 void ShenandoahHeap::op_init_mark() {
       
  1382   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
       
  1383   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
       
  1384 
       
  1385   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
       
  1386   assert(!marking_context()->is_complete(), "should not be complete");
       
  1387 
       
  1388   if (ShenandoahVerify) {
       
  1389     verifier()->verify_before_concmark();
       
  1390   }
       
  1391 
       
  1392   if (VerifyBeforeGC) {
       
  1393     Universe::verify();
       
  1394   }
       
  1395 
       
  1396   set_concurrent_mark_in_progress(true);
       
  1397   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
       
  1398   {
       
  1399     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
       
  1400     make_parsable(true);
       
  1401   }
       
  1402 
       
  1403   {
       
  1404     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
       
  1405     ShenandoahClearLivenessClosure clc;
       
  1406     parallel_heap_region_iterate(&clc);
       
  1407   }
       
  1408 
       
  1409   // Make above changes visible to worker threads
       
  1410   OrderAccess::fence();
       
  1411 
       
  1412   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
       
  1413 
       
  1414   if (UseTLAB) {
       
  1415     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
       
  1416     resize_tlabs();
       
  1417   }
       
  1418 
       
  1419   if (ShenandoahPacing) {
       
  1420     pacer()->setup_for_mark();
       
  1421   }
       
  1422 }
       
  1423 
       
  1424 void ShenandoahHeap::op_mark() {
       
  1425   concurrent_mark()->mark_from_roots();
       
  1426 }
       
  1427 
       
  1428 class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure {
       
  1429 private:
       
  1430   ShenandoahMarkingContext* const _ctx;
       
  1431 public:
       
  1432   ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
       
  1433 
       
  1434   void heap_region_do(ShenandoahHeapRegion* r) {
       
  1435     if (r->is_active()) {
       
  1436       HeapWord *tams = _ctx->top_at_mark_start(r);
       
  1437       HeapWord *top = r->top();
       
  1438       if (top > tams) {
       
  1439         r->increase_live_data_alloc_words(pointer_delta(top, tams));
       
  1440       }
       
  1441     } else {
       
  1442       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
       
  1443       assert(_ctx->top_at_mark_start(r) == r->top(),
       
  1444              "Region " SIZE_FORMAT " should have correct TAMS", r->region_number());
       
  1445     }
       
  1446   }
       
  1447 
       
  1448   bool is_thread_safe() { return true; }
       
  1449 };
       
  1450 
       
  1451 void ShenandoahHeap::op_final_mark() {
       
  1452   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
       
  1453 
       
  1454   // It is critical that we evacuate roots right after

  1455   // finishing marking, so that we don't get unmarked

  1456   // objects in the roots.
       
  1457 
       
  1458   if (!cancelled_gc()) {
       
  1459     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
       
  1460 
       
  1461     if (has_forwarded_objects()) {
       
  1462       concurrent_mark()->update_roots(ShenandoahPhaseTimings::update_roots);
       
  1463     }
       
  1464 
       
  1465     stop_concurrent_marking();
       
  1466 
       
  1467     {
       
  1468       ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
       
  1469 
       
  1470       // All allocations past TAMS are implicitly live, adjust the region data.
       
  1471       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
       
  1472       ShenandoahCompleteLivenessClosure cl;
       
  1473       parallel_heap_region_iterate(&cl);
       
  1474     }
       
  1475 
       
  1476     {
       
  1477       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
       
  1478 
       
  1479       make_parsable(true);
       
  1480 
       
  1481       trash_cset_regions();
       
  1482 
       
  1483       {
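               // Rebuild the collection set and the free set under the heap lock, so
               // allocating threads never observe a partially rebuilt free set.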
       
  1484         ShenandoahHeapLocker locker(lock());
       
  1485         _collection_set->clear();
       
  1486         _free_set->clear();
       
  1487 
       
  1488         heuristics()->choose_collection_set(_collection_set);
       
  1489 
       
  1490         _free_set->rebuild();
       
  1491       }
       
  1492     }
       
  1493 
       
  1494     // If collection set has candidates, start evacuation.
       
  1495     // Otherwise, bypass the rest of the cycle.
       
  1496     if (!collection_set()->is_empty()) {
       
  1497       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
       
  1498 
       
  1499       if (ShenandoahVerify) {
       
  1500         verifier()->verify_before_evacuation();
       
  1501       }
       
  1502 
       
  1503       set_evacuation_in_progress(true);
       
  1504       // From here on, we need to update references.
       
  1505       set_has_forwarded_objects(true);
       
  1506 
       
  1507       evacuate_and_update_roots();
       
  1508 
       
  1509       if (ShenandoahPacing) {
       
  1510         pacer()->setup_for_evac();
       
  1511       }
       
  1512     } else {
       
  1513       if (ShenandoahVerify) {
       
  1514         verifier()->verify_after_concmark();
       
  1515       }
       
  1516 
       
  1517       if (VerifyAfterGC) {
       
  1518         Universe::verify();
       
  1519       }
       
  1520     }
       
  1521 
       
  1522   } else {
       
  1523     concurrent_mark()->cancel();
       
  1524     stop_concurrent_marking();
       
  1525 
       
  1526     if (process_references()) {
       
  1527       // Abandon reference processing right away: pre-cleaning must have failed.
       
  1528       ReferenceProcessor *rp = ref_processor();
       
  1529       rp->disable_discovery();
       
  1530       rp->abandon_partial_discovery();
       
  1531       rp->verify_no_references_recorded();
       
  1532     }
       
  1533   }
       
  1534 }
       
  1535 
       
  1536 void ShenandoahHeap::op_final_evac() {
       
  1537   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
       
  1538 
       
  1539   set_evacuation_in_progress(false);
       
  1540 
       
  1541   retire_and_reset_gclabs();
       
  1542 
       
  1543   if (ShenandoahVerify) {
       
  1544     verifier()->verify_after_evacuation();
       
  1545   }
       
  1546 
       
  1547   if (VerifyAfterGC) {
       
  1548     Universe::verify();
       
  1549   }
       
  1550 }
       
  1551 
       
  1552 void ShenandoahHeap::op_conc_evac() {
       
  1553   ShenandoahEvacuationTask task(this, _collection_set, true);
       
  1554   workers()->run_task(&task);
       
  1555 }
       
  1556 
       
  1557 void ShenandoahHeap::op_stw_evac() {
       
  1558   ShenandoahEvacuationTask task(this, _collection_set, false);
       
  1559   workers()->run_task(&task);
       
  1560 }
       
  1561 
       
  1562 void ShenandoahHeap::op_updaterefs() {
       
  1563   update_heap_references(true);
       
  1564 }
       
  1565 
       
  1566 void ShenandoahHeap::op_cleanup() {
       
  1567   free_set()->recycle_trash();
       
  1568 }
       
  1569 
       
  1570 void ShenandoahHeap::op_reset() {
       
  1571   reset_mark_bitmap();
       
  1572 }
       
  1573 
       
  1574 void ShenandoahHeap::op_preclean() {
       
  1575   concurrent_mark()->preclean_weak_refs();
       
  1576 }
       
  1577 
       
  1578 void ShenandoahHeap::op_init_traversal() {
       
  1579   traversal_gc()->init_traversal_collection();
       
  1580 }
       
  1581 
       
  1582 void ShenandoahHeap::op_traversal() {
       
  1583   traversal_gc()->concurrent_traversal_collection();
       
  1584 }
       
  1585 
       
  1586 void ShenandoahHeap::op_final_traversal() {
       
  1587   traversal_gc()->final_traversal_collection();
       
  1588 }
       
  1589 
       
  1590 void ShenandoahHeap::op_full(GCCause::Cause cause) {
       
  1591   ShenandoahMetricsSnapshot metrics;
       
  1592   metrics.snap_before();
       
  1593 
       
  1594   full_gc()->do_it(cause);
       
  1595   if (UseTLAB) {
       
  1596     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
       
  1597     resize_all_tlabs();
       
  1598   }
       
  1599 
       
  1600   metrics.snap_after();
       
  1601   metrics.print();
       
  1602 
       
  1603   if (metrics.is_good_progress("Full GC")) {
       
  1604     _progress_last_gc.set();
       
  1605   } else {
       
  1606     // Full GC did not make good progress. Tell the allocation path that we

  1607     // have failed to make progress, and it can finally fail.
       
  1608     _progress_last_gc.unset();
       
  1609   }
       
  1610 }
       
  1611 
       
  1612 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
       
  1613   // Degenerated GC is STW, but it can also fail. The current mechanics

  1614   // communicate GC failure via the cancelled_gc() flag. So, if we detect the

  1615   // failure after some phase, we have to upgrade the Degenerated GC to a Full GC.
       
  1616 
       
  1617   clear_cancelled_gc();
       
  1618 
       
  1619   ShenandoahMetricsSnapshot metrics;
       
  1620   metrics.snap_before();
       
  1621 
       
  1622   switch (point) {
       
  1623     case _degenerated_traversal:
       
  1624       {
       
  1625         // Drop the collection set. Note: this leaves some already forwarded objects
       
  1626         // behind, which may be problematic, see comments for ShenandoahEvacAssist
       
  1627         // workarounds in ShenandoahTraversalHeuristics.
       
  1628 
       
  1629         ShenandoahHeapLocker locker(lock());
       
  1630         collection_set()->clear_current_index();
       
  1631         for (size_t i = 0; i < collection_set()->count(); i++) {
       
  1632           ShenandoahHeapRegion* r = collection_set()->next();
       
  1633           r->make_regular_bypass();
       
  1634         }
       
  1635         collection_set()->clear();
       
  1636       }
       
  1637       op_final_traversal();
       
  1638       op_cleanup();
       
  1639       return;
       
  1640 
       
  1641     // The cases below form a Duff's-device-like construct: they describe the

  1642     // actual GC cycle, but enter it at different points, depending on which

  1643     // concurrent phase degenerated.
       
  1644 
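           // Note the deliberate absence of break statements in the cases below:
           // each entry point falls through into the remainder of the cycle.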
       
  1645     case _degenerated_outside_cycle:
       
  1646       // We have degenerated from outside the cycle, which means something is bad with
       
  1647       // the heap, most probably heavy humongous fragmentation, or we are very low on free
       
  1648       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
       
  1649       // we can do the most aggressive degen cycle, which includes processing references and
       
  1650       // class unloading, unless those features are explicitly disabled.
       
  1651       //
       
  1652       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
       
  1653       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
       
  1654       set_process_references(heuristics()->can_process_references());
       
  1655       set_unload_classes(heuristics()->can_unload_classes());
       
  1656 
       
  1657       if (heuristics()->can_do_traversal_gc()) {
       
  1658         // Not possible to degenerate from here, upgrade to Full GC right away.
       
  1659         cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
       
  1660         op_degenerated_fail();
       
  1661         return;
       
  1662       }
       
  1663 
       
  1664       op_reset();
       
  1665 
       
  1666       op_init_mark();
       
  1667       if (cancelled_gc()) {
       
  1668         op_degenerated_fail();
       
  1669         return;
       
  1670       }
       
  1671 
       
  1672     case _degenerated_mark:
       
  1673       op_final_mark();
       
  1674       if (cancelled_gc()) {
       
  1675         op_degenerated_fail();
       
  1676         return;
       
  1677       }
       
  1678 
       
  1679       op_cleanup();
       
  1680 
       
  1681     case _degenerated_evac:
       
  1682       // If the heuristics decided we should do the cycle, this flag would have

  1683       // been set, and we can do evacuation. Otherwise, this is the shortcut cycle.
       
  1684       if (is_evacuation_in_progress()) {
       
  1685 
       
  1686         // Degeneration under oom-evac protocol might have left some objects in
       
  1687         // collection set un-evacuated. Restart evacuation from the beginning to
       
  1688         // capture all objects. For all the objects that are already evacuated,
       
  1689         // it would be a simple check, which is supposed to be fast. This is also
       
  1690         // safe to do even without degeneration, as CSet iterator is at beginning
       
  1691         // in preparation for evacuation anyway.
       
  1692         collection_set()->clear_current_index();
       
  1693 
       
  1694         op_stw_evac();
       
  1695         if (cancelled_gc()) {
       
  1696           op_degenerated_fail();
       
  1697           return;
       
  1698         }
       
  1699       }
       
  1700 
       
  1701       // If the heuristics decided we should do the cycle, this flag would have

  1702       // been set, and we need to do update-refs. Otherwise, this is the shortcut cycle.
       
  1703       if (has_forwarded_objects()) {
       
  1704         op_init_updaterefs();
       
  1705         if (cancelled_gc()) {
       
  1706           op_degenerated_fail();
       
  1707           return;
       
  1708         }
       
  1709       }
       
  1710 
       
  1711     case _degenerated_updaterefs:
       
  1712       if (has_forwarded_objects()) {
       
  1713         op_final_updaterefs();
       
  1714         if (cancelled_gc()) {
       
  1715           op_degenerated_fail();
       
  1716           return;
       
  1717         }
       
  1718       }
       
  1719 
       
  1720       op_cleanup();
       
  1721       break;
       
  1722 
       
  1723     default:
       
  1724       ShouldNotReachHere();
       
  1725   }
       
  1726 
       
  1727   if (ShenandoahVerify) {
       
  1728     verifier()->verify_after_degenerated();
       
  1729   }
       
  1730 
       
  1731   if (VerifyAfterGC) {
       
  1732     Universe::verify();
       
  1733   }
       
  1734 
       
  1735   metrics.snap_after();
       
  1736   metrics.print();
       
  1737 
       
  1738   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
       
  1739   // because that probably means the heap is overloaded and/or fragmented.
       
  1740   if (!metrics.is_good_progress("Degenerated GC")) {
       
  1741     _progress_last_gc.unset();
       
  1742     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
       
  1743     op_degenerated_futile();
       
  1744   } else {
       
  1745     _progress_last_gc.set();
       
  1746   }
       
  1747 }
       
  1748 
       
  1749 void ShenandoahHeap::op_degenerated_fail() {
       
  1750   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
       
  1751   shenandoah_policy()->record_degenerated_upgrade_to_full();
       
  1752   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
       
  1753 }
       
  1754 
       
  1755 void ShenandoahHeap::op_degenerated_futile() {
       
  1756   shenandoah_policy()->record_degenerated_upgrade_to_full();
       
  1757   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
       
  1758 }
       
  1759 
       
  1760 void ShenandoahHeap::stop_concurrent_marking() {
       
  1761   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
       
  1762   if (!cancelled_gc()) {
       
  1763     // Marking finished without cancellation: any needed reference updates are

  1764     // done, so drop the forwarded-objects flag and mark the context complete.
       
  1765     set_has_forwarded_objects(false);
       
  1766     mark_complete_marking_context();
       
  1767   }
       
  1768   set_concurrent_mark_in_progress(false);
       
  1769 }
       
  1770 
       
  1771 void ShenandoahHeap::force_satb_flush_all_threads() {
       
  1772   if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
       
  1773     // No need to flush SATBs
       
  1774     return;
       
  1775   }
       
  1776 
       
  1777   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
       
  1778     ShenandoahThreadLocalData::set_force_satb_flush(t, true);
       
  1779   }
       
  1780   // The threads are not "acquiring" their thread-local data, but it does not
       
  1781   // hurt to "release" the updates here anyway.
       
  1782   OrderAccess::fence();
       
  1783 }
       
  1784 
       
  1785 void ShenandoahHeap::set_gc_state_all_threads(char state) {
       
  1786   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
       
  1787     ShenandoahThreadLocalData::set_gc_state(t, state);
       
  1788   }
       
  1789 }
       
  1790 
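       // The canonical gc state lives in _gc_state; every change is mirrored into
       // per-thread copies (above), so barrier code can read it from thread-local
       // data instead of loading a shared global.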
       
  1791 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
       
  1792   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
       
  1793   _gc_state.set_cond(mask, value);
       
  1794   set_gc_state_all_threads(_gc_state.raw_value());
       
  1795 }
       
  1796 
       
  1797 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
       
  1798   set_gc_state_mask(MARKING, in_progress);
       
  1799   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
       
  1800 }
       
  1801 
       
  1802 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
       
  1803   set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);

  1804   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
       
  1805 }
       
  1806 
       
  1807 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
       
  1808   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
       
  1809   set_gc_state_mask(EVACUATION, in_progress);
       
  1810 }
       
  1811 
       
  1812 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
       
  1813   // Initialize Brooks pointer for the next object
       
  1814   HeapWord* result = obj + ShenandoahBrooksPointer::word_size();
       
  1815   ShenandoahBrooksPointer::initialize(oop(result));
       
  1816   return result;
       
  1817 }
       
  1818 
       
  1819 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
       
  1820   _mark_context(ShenandoahHeap::heap()->marking_context()) {
       
  1821 }
       
  1822 
       
  1823 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
       
  1824   _mark_context(ShenandoahHeap::heap()->marking_context()) {
       
  1825 }
       
  1826 
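       // The "forwarded" variant resolves the object through its forwarding pointer
       // before the mark check, for phases where forwarded objects may still exist;
       // the plain variant below asserts that no forwarding is pending.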
       
  1827 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
       
  1828   if (CompressedOops::is_null(obj)) {
       
  1829     return false;
       
  1830   }
       
  1831   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
       
  1832   shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
       
  1833   return _mark_context->is_marked(obj);
       
  1834 }
       
  1835 
       
  1836 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
       
  1837   if (CompressedOops::is_null(obj)) {
       
  1838     return false;
       
  1839   }
       
  1840   shenandoah_assert_not_forwarded(NULL, obj);
       
  1841   return _mark_context->is_marked(obj);
       
  1842 }
       
  1843 
       
  1844 void ShenandoahHeap::ref_processing_init() {
       
  1845   assert(_max_workers > 0, "Sanity");
       
  1846 
       
  1847   _ref_processor =
       
  1848     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
       
  1849                            ParallelRefProcEnabled,  // MT processing
       
  1850                            _max_workers,            // Degree of MT processing
       
  1851                            true,                    // MT discovery
       
  1852                            _max_workers,            // Degree of MT discovery
       
  1853                            false,                   // Reference discovery is not atomic
       
  1854                            NULL,                    // No closure, should be installed before use
       
  1855                            true);                   // Scale worker threads
       
  1856 
       
  1857   shenandoah_assert_rp_isalive_not_installed();
       
  1858 }
       
  1859 
       
  1860 GCTracer* ShenandoahHeap::tracer() {
       
  1861   return shenandoah_policy()->tracer();
       
  1862 }
       
  1863 
       
  1864 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
       
  1865   return _free_set->used();
       
  1866 }
       
  1867 
       
  1868 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
       
  1869   if (try_cancel_gc()) {
       
  1870     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
       
  1871     log_info(gc)("%s", msg.buffer());
       
  1872     Events::log(Thread::current(), "%s", msg.buffer());
       
  1873   }
       
  1874 }
       
  1875 
       
  1876 uint ShenandoahHeap::max_workers() {
       
  1877   return _max_workers;
       
  1878 }
       
  1879 
       
  1880 void ShenandoahHeap::stop() {
       
  1881   // The shutdown sequence should be able to terminate when GC is running.
       
  1882 
       
  1883   // Step 0. Notify policy to disable event recording.
       
  1884   _shenandoah_policy->record_shutdown();
       
  1885 
       
  1886   // Step 1. Notify control thread that we are in shutdown.
       
  1887   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
       
  1888   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
       
  1889   control_thread()->prepare_for_graceful_shutdown();
       
  1890 
       
  1891   // Step 2. Notify GC workers that we are cancelling GC.
       
  1892   cancel_gc(GCCause::_shenandoah_stop_vm);
       
  1893 
       
  1894   // Step 3. Wait until GC worker exits normally.
       
  1895   control_thread()->stop();
       
  1896 
       
  1897   // Step 4. Stop String Dedup thread if it is active
       
  1898   if (ShenandoahStringDedup::is_enabled()) {
       
  1899     ShenandoahStringDedup::stop();
       
  1900   }
       
  1901 }
       
  1902 
       
  1903 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
       
  1904   assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
       
  1905 
       
  1906   ShenandoahGCPhase root_phase(full_gc ?
       
  1907                                ShenandoahPhaseTimings::full_gc_purge :
       
  1908                                ShenandoahPhaseTimings::purge);
       
  1909 
       
  1910   ShenandoahIsAliveSelector alive;
       
  1911   BoolObjectClosure* is_alive = alive.is_alive_closure();
       
  1912 
       
  1913   bool purged_class;
       
  1914 
       
  1915   // Unload classes and purge SystemDictionary.
       
  1916   {
       
  1917     ShenandoahGCPhase phase(full_gc ?
       
  1918                             ShenandoahPhaseTimings::full_gc_purge_class_unload :
       
  1919                             ShenandoahPhaseTimings::purge_class_unload);
       
  1920     purged_class = SystemDictionary::do_unloading(gc_timer());
       
  1921   }
       
  1922 
       
  1923   {
       
  1924     ShenandoahGCPhase phase(full_gc ?
       
  1925                             ShenandoahPhaseTimings::full_gc_purge_par :
       
  1926                             ShenandoahPhaseTimings::purge_par);
       
  1927     uint active = _workers->active_workers();
       
  1928     StringDedupUnlinkOrOopsDoClosure dedup_cl(is_alive, NULL);
       
  1929     ParallelCleaningTask unlink_task(is_alive, &dedup_cl, active, purged_class);
       
  1930     _workers->run_task(&unlink_task);
       
  1931   }
       
  1932 
       
  1933   if (ShenandoahStringDedup::is_enabled()) {
       
  1934     ShenandoahGCPhase phase(full_gc ?
       
  1935                             ShenandoahPhaseTimings::full_gc_purge_string_dedup :
       
  1936                             ShenandoahPhaseTimings::purge_string_dedup);
       
  1937     ShenandoahStringDedup::parallel_cleanup();
       
  1938   }
       
  1939 
       
  1940   {
       
  1941     ShenandoahGCPhase phase(full_gc ?
       
  1942                       ShenandoahPhaseTimings::full_gc_purge_cldg :
       
  1943                       ShenandoahPhaseTimings::purge_cldg);
       
  1944     ClassLoaderDataGraph::purge();
       
  1945   }
       
  1946 }
       
  1947 
       
  1948 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
       
  1949   set_gc_state_mask(HAS_FORWARDED, cond);
       
  1950 }
       
  1951 
       
  1952 void ShenandoahHeap::set_process_references(bool pr) {
       
  1953   _process_references.set_cond(pr);
       
  1954 }
       
  1955 
       
  1956 void ShenandoahHeap::set_unload_classes(bool uc) {
       
  1957   _unload_classes.set_cond(uc);
       
  1958 }
       
  1959 
       
  1960 bool ShenandoahHeap::process_references() const {
       
  1961   return _process_references.is_set();
       
  1962 }
       
  1963 
       
  1964 bool ShenandoahHeap::unload_classes() const {
       
  1965   return _unload_classes.is_set();
       
  1966 }
       
  1967 
       
  1968 address ShenandoahHeap::in_cset_fast_test_addr() {
       
  1969   ShenandoahHeap* heap = ShenandoahHeap::heap();
       
  1970   assert(heap->collection_set() != NULL, "Sanity");
       
  1971   return (address) heap->collection_set()->biased_map_address();
       
  1972 }
       
  1973 
       
  1974 address ShenandoahHeap::cancelled_gc_addr() {
       
  1975   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
       
  1976 }
       
  1977 
       
  1978 address ShenandoahHeap::gc_state_addr() {
       
  1979   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
       
  1980 }
       
  1981 
       
  1982 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
       
  1983   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
       
  1984 }
       
  1985 
       
  1986 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
       
  1987   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
       
  1988 }
       
  1989 
       
  1990 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
       
  1991   _degenerated_gc_in_progress.set_cond(in_progress);
       
  1992 }
       
  1993 
       
  1994 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
       
  1995   _full_gc_in_progress.set_cond(in_progress);
       
  1996 }
       
  1997 
       
  1998 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
       
  1999   assert (is_full_gc_in_progress(), "should be");
       
  2000   _full_gc_move_in_progress.set_cond(in_progress);
       
  2001 }
       
  2002 
       
  2003 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
       
  2004   set_gc_state_mask(UPDATEREFS, in_progress);
       
  2005 }
       
  2006 
       
  2007 void ShenandoahHeap::register_nmethod(nmethod* nm) {
       
  2008   ShenandoahCodeRoots::add_nmethod(nm);
       
  2009 }
       
  2010 
       
  2011 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
       
  2012   ShenandoahCodeRoots::remove_nmethod(nm);
       
  2013 }
       
  2014 
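       // Pinning is implemented at region granularity: a pinned region is excluded
       // from the collection set, so objects in it cannot move while pinned.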
       
  2015 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
       
  2016   o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
       
  2017   ShenandoahHeapLocker locker(lock());
       
  2018   heap_region_containing(o)->make_pinned();
       
  2019   return o;
       
  2020 }
       
  2021 
       
  2022 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
       
  2023   o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
       
  2024   ShenandoahHeapLocker locker(lock());
       
  2025   heap_region_containing(o)->make_unpinned();
       
  2026 }
       
  2027 
       
  2028 GCTimer* ShenandoahHeap::gc_timer() const {
       
  2029   return _gc_timer;
       
  2030 }
       
  2031 
       
  2032 #ifdef ASSERT
       
  2033 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
       
  2034   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
       
  2035 
       
  2036   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
       
  2037     if (UseDynamicNumberOfGCThreads ||
       
  2038         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
       
  2039       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
       
  2040     } else {
       
  2041       // Use ParallelGCThreads inside safepoints
       
  2042       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
       
  2043     }
       
  2044   } else {
       
  2045     if (UseDynamicNumberOfGCThreads ||
       
  2046         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
       
  2047       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
       
  2048     } else {
       
  2049       // Use ConcGCThreads outside safepoints
       
  2050       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
       
  2051     }
       
  2052   }
       
  2053 }
       
  2054 #endif
       
  2055 
       
  2056 ShenandoahVerifier* ShenandoahHeap::verifier() {
       
  2057   guarantee(ShenandoahVerify, "Should be enabled");
       
  2058   assert (_verifier != NULL, "sanity");
       
  2059   return _verifier;
       
  2060 }
       
  2061 
       
  2062 template<class T>
       
  2063 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
       
  2064 private:
       
  2065   T cl;
       
  2066   ShenandoahHeap* _heap;
       
  2067   ShenandoahRegionIterator* _regions;
       
  2068   bool _concurrent;
       
  2069 public:
       
  2070   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
       
  2071     AbstractGangTask("Concurrent Update References Task"),
       
  2072     cl(T()),
       
  2073     _heap(ShenandoahHeap::heap()),
       
  2074     _regions(regions),
       
  2075     _concurrent(concurrent) {
       
  2076   }
       
  2077 
       
  2078   void work(uint worker_id) {
       
  2079     if (_concurrent) {
       
  2080       ShenandoahConcurrentWorkerSession worker_session(worker_id);
       
  2081       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
       
  2082       do_work();
       
  2083     } else {
       
  2084       ShenandoahParallelWorkerSession worker_session(worker_id);
       
  2085       do_work();
       
  2086     }
       
  2087   }
       
  2088 
       
  2089 private:
       
  2090   void do_work() {
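           // Claim regions from the shared iterator; each region is scanned up to
           // the update watermark (concurrent_iteration_safe_limit) captured at
           // init-update-refs, so objects allocated after that point are skipped --
           // they were allocated after evacuation and need no updates.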
       
  2091     ShenandoahHeapRegion* r = _regions->next();
       
  2092     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
       
  2093     while (r != NULL) {
       
  2094       HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
       
  2095       assert (top_at_start_ur >= r->bottom(), "sanity");
       
  2096       if (r->is_active() && !r->is_cset()) {
       
  2097         _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
       
  2098       }
       
  2099       if (ShenandoahPacing) {
       
  2100         _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
       
  2101       }
       
  2102       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
       
  2103         return;
       
  2104       }
       
  2105       r = _regions->next();
       
  2106     }
       
  2107   }
       
  2108 };
       
  2109 
       
  2110 void ShenandoahHeap::update_heap_references(bool concurrent) {
       
  2111   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
       
  2112   workers()->run_task(&task);
       
  2113 }
       
  2114 
       
  2115 void ShenandoahHeap::op_init_updaterefs() {
       
  2116   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
       
  2117 
       
  2118   set_evacuation_in_progress(false);
       
  2119 
       
  2120   retire_and_reset_gclabs();
       
  2121 
       
  2122   if (ShenandoahVerify) {
       
  2123     verifier()->verify_before_updaterefs();
       
  2124   }
       
  2125 
       
  2126   set_update_refs_in_progress(true);
       
  2127   make_parsable(true);
       
  2128   for (uint i = 0; i < num_regions(); i++) {
       
  2129     ShenandoahHeapRegion* r = get_region(i);
       
  2130     r->set_concurrent_iteration_safe_limit(r->top());
       
  2131   }
       
  2132 
       
  2133   // Reset iterator.
       
  2134   _update_refs_iterator.reset();
       
  2135 
       
  2136   if (ShenandoahPacing) {
       
  2137     pacer()->setup_for_updaterefs();
       
  2138   }
       
  2139 }
       
  2140 
       
  2141 void ShenandoahHeap::op_final_updaterefs() {
       
  2142   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
       
  2143 
       
  2144   // Check if there is left-over work, and finish it
       
  2145   if (_update_refs_iterator.has_next()) {
       
  2146     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
       
  2147 
       
  2148     // Finish updating references where we left off.
       
  2149     clear_cancelled_gc();
       
  2150     update_heap_references(false);
       
  2151   }
       
  2152 
       
  2153   // Clear cancelled GC, if set. On the cancellation path, the block above has

  2154   // already handled everything. On degenerated paths, cancelled GC would not be set anyway.
       
  2155   if (cancelled_gc()) {
       
  2156     clear_cancelled_gc();
       
  2157   }
       
  2158   assert(!cancelled_gc(), "Should have been done right before");
       
  2159 
       
  2160   concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ?
       
  2161                                  ShenandoahPhaseTimings::degen_gc_update_roots :
       
  2162                                  ShenandoahPhaseTimings::final_update_refs_roots);
       
  2163 
       
  2164   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
       
  2165 
       
  2166   trash_cset_regions();
       
  2167   set_has_forwarded_objects(false);
       
  2168   set_update_refs_in_progress(false);
       
  2169 
       
  2170   if (ShenandoahVerify) {
       
  2171     verifier()->verify_after_updaterefs();
       
  2172   }
       
  2173 
       
  2174   if (VerifyAfterGC) {
       
  2175     Universe::verify();
       
  2176   }
       
  2177 
       
  2178   {
       
  2179     ShenandoahHeapLocker locker(lock());
       
  2180     _free_set->rebuild();
       
  2181   }
       
  2182 }
       
  2183 
       
  2184 #ifdef ASSERT
       
  2185 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
       
  2186   _lock.assert_owned_by_current_thread();
       
  2187 }
       
  2188 
       
  2189 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
       
  2190   _lock.assert_not_owned_by_current_thread();
       
  2191 }
       
  2192 
       
  2193 void ShenandoahHeap::assert_heaplock_or_safepoint() {
       
  2194   _lock.assert_owned_by_current_thread_or_safepoint();
       
  2195 }
       
  2196 #endif
       
  2197 
       
  2198 void ShenandoahHeap::print_extended_on(outputStream *st) const {
       
  2199   print_on(st);
       
  2200   print_heap_regions_on(st);
       
  2201 }
       
  2202 
       
  2203 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
       
  2204   size_t slice = r->region_number() / _bitmap_regions_per_slice;
       
  2205 
       
  2206   size_t regions_from = _bitmap_regions_per_slice * slice;
       
  2207   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
       
  2208   for (size_t g = regions_from; g < regions_to; g++) {
       
  2209     assert (g / _bitmap_regions_per_slice == slice, "same slice");
       
  2210     if (skip_self && g == r->region_number()) continue;
       
  2211     if (get_region(g)->is_committed()) {
       
  2212       return true;
       
  2213     }
       
  2214   }
       
  2215   return false;
       
  2216 }
       
  2217 
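       // Several heap regions share one bitmap slice (_bitmap_regions_per_slice):
       // a slice is committed when the first region of its group needs it, and
       // uncommitted only once no region of the group needs it anymore.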
       
  2218 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
       
  2219   assert_heaplock_owned_by_current_thread();
       
  2220 
       
  2221   if (is_bitmap_slice_committed(r, true)) {
       
  2222     // Some other region from the group is already committed, meaning the bitmap

  2223     // slice is already committed; exit right away.
       
  2224     return true;
       
  2225   }
       
  2226 
       
  2227   // Commit the bitmap slice:
       
  2228   size_t slice = r->region_number() / _bitmap_regions_per_slice;
       
  2229   size_t off = _bitmap_bytes_per_slice * slice;
       
  2230   size_t len = _bitmap_bytes_per_slice;
       
  2231   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
       
  2232     return false;
       
  2233   }
       
  2234   return true;
       
  2235 }
       
  2236 
       
  2237 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
       
  2238   assert_heaplock_owned_by_current_thread();
       
  2239 
       
  2240   if (is_bitmap_slice_committed(r, true)) {
       
  2241     // Some other region from the group is still committed, meaning the bitmap

  2242     // slice should stay committed; exit right away.
       
  2243     return true;
       
  2244   }
       
  2245 
       
  2246   // Uncommit the bitmap slice:
       
  2247   size_t slice = r->region_number() / _bitmap_regions_per_slice;
       
  2248   size_t off = _bitmap_bytes_per_slice * slice;
       
  2249   size_t len = _bitmap_bytes_per_slice;
       
  2250   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
       
  2251     return false;
       
  2252   }
       
  2253   return true;
       
  2254 }
       
  2255 
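       // Suspendible workers (enabled via ShenandoahSuspendibleWorkers, and used by
       // string deduplication) must be parked before the safepoint is reached and
       // resumed once it is over.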
       
  2256 void ShenandoahHeap::safepoint_synchronize_begin() {
       
  2257   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
       
  2258     SuspendibleThreadSet::synchronize();
       
  2259   }
       
  2260 }
       
  2261 
       
  2262 void ShenandoahHeap::safepoint_synchronize_end() {
       
  2263   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
       
  2264     SuspendibleThreadSet::desynchronize();
       
  2265   }
       
  2266 }
       
  2267 
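       // The vmop_entry_* methods below run on a requesting thread and submit a VM
       // operation; the VM thread then executes the matching entry_* method at a
       // safepoint. The *_gross timers therefore include time-to-safepoint.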
       
void ShenandoahHeap::vmop_entry_init_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitMark op;
  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
}

void ShenandoahHeap::vmop_entry_final_mark() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalMarkStartEvac op;
  VMThread::execute(&op); // jump to entry_final_mark under safepoint
}

void ShenandoahHeap::vmop_entry_final_evac() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);

  VM_ShenandoahFinalEvac op;
  VMThread::execute(&op); // jump to entry_final_evac under safepoint
}

void ShenandoahHeap::vmop_entry_init_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitUpdateRefs op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_final_updaterefs() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalUpdateRefs op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_init_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahInitTraversalGC op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_final_traversal() {
  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFinalTraversalGC op;
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);

  try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause);
  VMThread::execute(&op);
}

void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
  ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);

  VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
  VMThread::execute(&degenerated_gc);
}

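// Pause entry points, safepoint side. Each entry_* below executes on the
// VMThread inside the safepoint: it accounts the net pause time, sets up the
// worker threads for the phase, and delegates the actual work to the matching
// op_* method.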
void ShenandoahHeap::entry_init_mark() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
  const char* msg = init_mark_event_message();
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
                              "init marking");

  op_init_mark();
}

void ShenandoahHeap::entry_final_mark() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
  const char* msg = final_mark_event_message();
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
                              "final marking");

  op_final_mark();
}

void ShenandoahHeap::entry_final_evac() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
  static const char* msg = "Pause Final Evac";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  op_final_evac();
}

void ShenandoahHeap::entry_init_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);

  static const char* msg = "Pause Init Update Refs";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  // No workers used in this phase, no setup required

  op_init_updaterefs();
}

void ShenandoahHeap::entry_final_updaterefs() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);

  static const char* msg = "Pause Final Update Refs";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
                              "final reference update");

  op_final_updaterefs();
}

void ShenandoahHeap::entry_init_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);

  static const char* msg = "Pause Init Traversal";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
                              "init traversal");

  op_init_traversal();
}

void ShenandoahHeap::entry_final_traversal() {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);

  static const char* msg = "Pause Final Traversal";
  GCTraceTime(Info, gc) time(msg, gc_timer());
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
                              "final traversal");

  op_final_traversal();
}

void ShenandoahHeap::entry_full(GCCause::Cause cause) {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);

  static const char* msg = "Pause Full";
  GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahHeap::entry_degenerated(int point) {
  ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);

  ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
  const char* msg = degen_event_message(dpoint);
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
                              "stw degenerated gc");

  set_degenerated_gc_in_progress(true);
  op_degenerated(dpoint);
  set_degenerated_gc_in_progress(false);
}

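// Concurrent phase entries. These run outside safepoints, concurrently with
// the application threads; each sets up phase timing and workers, then
// delegates to its op_* counterpart.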
void ShenandoahHeap::entry_mark() {
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  const char* msg = conc_mark_event_message();
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent marking");

  try_inject_alloc_failure();
  op_mark();
}

void ShenandoahHeap::entry_evac() {
  ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  static const char* msg = "Concurrent evacuation";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
                              "concurrent evacuation");

  try_inject_alloc_failure();
  op_conc_evac();
}

void ShenandoahHeap::entry_updaterefs() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);

  static const char* msg = "Concurrent update references";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
                              "concurrent reference update");

  try_inject_alloc_failure();
  op_updaterefs();
}

void ShenandoahHeap::entry_cleanup() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);

  static const char* msg = "Concurrent cleanup";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  // This phase does not use workers, no need for setup

  try_inject_alloc_failure();
  op_cleanup();
}

void ShenandoahHeap::entry_reset() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);

  static const char* msg = "Concurrent reset";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                              "concurrent reset");

  try_inject_alloc_failure();
  op_reset();
}

void ShenandoahHeap::entry_preclean() {
  if (ShenandoahPreclean && process_references()) {
    static const char* msg = "Concurrent precleaning";
    GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
    EventMark em("%s", msg);

    ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);

    ShenandoahWorkerScope scope(workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
                                "concurrent preclean",
                                /* check_workers = */ false);

    try_inject_alloc_failure();
    op_preclean();
  }
}

void ShenandoahHeap::entry_traversal() {
  static const char* msg = "Concurrent traversal";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());

  ShenandoahWorkerScope scope(workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
                              "concurrent traversal");

  try_inject_alloc_failure();
  op_traversal();
}

void ShenandoahHeap::entry_uncommit(double shrink_before) {
  static const char *msg = "Concurrent uncommit";
  GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
  EventMark em("%s", msg);

  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);

  op_uncommit(shrink_before);
}

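// Stress-testing aid: with ShenandoahAllocFailureALot enabled, roughly 5% of
// the calls below (os::random() % 1000 > 950) arm a flag that makes an
// upcoming allocation report failure, exercising the degenerated and full GC
// fallback paths.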
void ShenandoahHeap::try_inject_alloc_failure() {
  if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
    _inject_alloc_failure.set();
    os::naked_short_sleep(1);
    if (cancelled_gc()) {
      log_info(gc)("Allocation failure was successfully injected");
    }
  }
}

bool ShenandoahHeap::should_inject_alloc_failure() {
  return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
}

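// Serviceability support: a single Shenandoah memory pool is exposed through
// two memory managers, one accounting concurrent cycles and one accounting
// stop-the-world collections.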
void ShenandoahHeap::initialize_serviceability() {
  _memory_pool = new ShenandoahMemoryPool(this);
  _cycle_memory_manager.add_pool(_memory_pool);
  _stw_memory_manager.add_pool(_memory_pool);
}

GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(&_cycle_memory_manager);
  memory_managers.append(&_stw_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_memory_pool);
  return memory_pools;
}

void ShenandoahHeap::enter_evacuation() {
  _oom_evac_handler.enter_evacuation();
}

void ShenandoahHeap::leave_evacuation() {
  _oom_evac_handler.leave_evacuation();
}

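// ShenandoahRegionIterator is a simple cursor over the heap region array:
// reset() rewinds it, has_next() bounds-checks it against num_regions().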
ShenandoahRegionIterator::ShenandoahRegionIterator() :
  _heap(ShenandoahHeap::heap()),
  _index(0) {}

ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
  _heap(heap),
  _index(0) {}

void ShenandoahRegionIterator::reset() {
  _index = 0;
}

bool ShenandoahRegionIterator::has_next() const {
  return _index < _heap->num_regions();
}

char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

void ShenandoahHeap::deduplicate_string(oop str) {
  assert(java_lang_String::is_instance(str), "invariant");

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::deduplicate(str);
  }
}

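// Human-readable messages for GC logging. Each message spells out which
// optional sub-tasks (updating forwarded references, processing weak
// references, unloading classes) are active in the cycle, so log lines are
// self-describing.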
const char* ShenandoahHeap::init_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Pause Init Mark (update refs) (process weakrefs)";
  } else if (update_refs && unload_cls) {
    return "Pause Init Mark (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Pause Init Mark (process weakrefs) (unload classes)";
  } else if (update_refs) {
    return "Pause Init Mark (update refs)";
  } else if (proc_refs) {
    return "Pause Init Mark (process weakrefs)";
  } else if (unload_cls) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahHeap::final_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Pause Final Mark (update refs) (process weakrefs)";
  } else if (update_refs && unload_cls) {
    return "Pause Final Mark (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Pause Final Mark (process weakrefs) (unload classes)";
  } else if (update_refs) {
    return "Pause Final Mark (update refs)";
  } else if (proc_refs) {
    return "Pause Final Mark (process weakrefs)";
  } else if (unload_cls) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahHeap::conc_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Concurrent marking (update refs) (process weakrefs)";
  } else if (update_refs && unload_cls) {
    return "Concurrent marking (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Concurrent marking (process weakrefs) (unload classes)";
  } else if (update_refs) {
    return "Concurrent marking (update refs)";
  } else if (proc_refs) {
    return "Concurrent marking (process weakrefs)";
  } else if (unload_cls) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}

const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_traversal:
      return "Pause Degenerated GC (Traversal)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

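// Per-worker liveness caches: marking workers accumulate per-region live data
// locally and flush it into the regions afterwards, instead of contending on
// the shared region counters.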
jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
  assert(worker_id < _max_workers, "sanity");
  for (uint i = 0; i < num_regions(); i++) {
    assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
  }
#endif
  return _liveness_cache[worker_id];
}

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
  assert(worker_id < _max_workers, "sanity");
  jushort* ld = _liveness_cache[worker_id];
  for (uint i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    jushort live = ld[i];
    if (live > 0) {
      r->increase_live_data_gc_words(live);
      ld[i] = 0;
    }
  }
}

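// Every object is preceded by a Brooks forwarding pointer, so the effective
// object size and cell header size exceed the generic CollectedHeap
// accounting by the size of that indirection.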
size_t ShenandoahHeap::obj_size(oop obj) const {
  return CollectedHeap::obj_size(obj) + ShenandoahBrooksPointer::word_size();
}

ptrdiff_t ShenandoahHeap::cell_header_size() const {
  return ShenandoahBrooksPointer::byte_size();
}

BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
  return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
                                                         : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
}