src/hotspot/share/gc/shared/collectedHeap.cpp
changeset 48961 120b61d50f85
parent 48168 cb5d2d4453d0
child 49045 9b556b613a07
48960:040293c73621 48961:120b61d50f85
     1 /*
     1 /*
     2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
     2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     7  * published by the Free Software Foundation.
   175   _barrier_set(NULL),
   175   _barrier_set(NULL),
   176   _is_gc_active(false),
   176   _is_gc_active(false),
   177   _total_collections(0),
   177   _total_collections(0),
   178   _total_full_collections(0),
   178   _total_full_collections(0),
   179   _gc_cause(GCCause::_no_gc),
   179   _gc_cause(GCCause::_no_gc),
   180   _gc_lastcause(GCCause::_no_gc),
   180   _gc_lastcause(GCCause::_no_gc)
   181   _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
       
   182 {
   181 {
   183   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   182   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   184   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   183   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   185   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
   184   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
   186                                              max_len / elements_per_word);
   185                                              max_len / elements_per_word);
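
The computation above caps the largest filler array at the biggest int[] payload expressed in heap words, plus the aligned array header. A minimal standalone sketch of that arithmetic, assuming a 64-bit VM where HeapWordSize is 8 and max_array_length(T_INT) is close to max_jint (both values are assumptions here, not taken from the changeset):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t heap_word_size    = 8;            // assumed 64-bit heap word
  const uint64_t max_len           = 0x7FFFFFFFu;  // assumed ~maximum int[] length
  const uint64_t elements_per_word = heap_word_size / sizeof(int32_t); // 2 ints per word
  const uint64_t payload_words     = max_len / elements_per_word;      // ~2^30 words
  std::printf("filler payload: %llu words (~%.1f GiB)\n",
              (unsigned long long) payload_words,
              payload_words * 8.0 / (1 << 30));
  return 0;
}

The real _filler_array_max_size adds filler_array_hdr_size() and rounds up with align_object_size(), so it lands a few words above the payload figure printed here.
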
   237 void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
   236 void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
   238   _barrier_set = barrier_set;
   237   _barrier_set = barrier_set;
   239   BarrierSet::set_bs(barrier_set);
   238   BarrierSet::set_bs(barrier_set);
   240 }
   239 }
   241 
   240 
   242 void CollectedHeap::pre_initialize() {
       
   243   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
       
   244   // otherwise remains unused.
       
   245 #if COMPILER2_OR_JVMCI
       
   246   _defer_initial_card_mark = is_server_compilation_mode_vm() &&  ReduceInitialCardMarks && can_elide_tlab_store_barriers()
       
   247                              && (DeferInitialCardMark || card_mark_must_follow_store());
       
   248 #else
       
   249   assert(_defer_initial_card_mark == false, "Who would set it?");
       
   250 #endif
       
   251 }
       
   252 
       
   253 #ifndef PRODUCT
   241 #ifndef PRODUCT
   254 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
   242 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
   255   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
   243   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
   256     for (size_t slot = 0; slot < size; slot += 1) {
   244     for (size_t slot = 0; slot < size; slot += 1) {
   257       assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
   245       assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
   331   }
   319   }
   332   thread->tlab().fill(obj, obj + size, new_tlab_size);
   320   thread->tlab().fill(obj, obj + size, new_tlab_size);
   333   return obj;
   321   return obj;
   334 }
   322 }
   335 
   323 
   336 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
       
   337   MemRegion deferred = thread->deferred_card_mark();
       
   338   if (!deferred.is_empty()) {
       
   339     assert(_defer_initial_card_mark, "Otherwise should be empty");
       
   340     {
       
   341       // Verify that the storage points to a parsable object in heap
       
   342       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
       
   343       assert(is_in(old_obj), "Not in allocated heap");
       
   344       assert(!can_elide_initializing_store_barrier(old_obj),
       
   345              "Else should have been filtered in new_store_pre_barrier()");
       
   346       assert(oopDesc::is_oop(old_obj, true), "Not an oop");
       
   347       assert(deferred.word_size() == (size_t)(old_obj->size()),
       
   348              "Mismatch: multiple objects?");
       
   349     }
       
   350     BarrierSet* bs = barrier_set();
       
   351     bs->write_region(deferred);
       
   352     // "Clear" the deferred_card_mark field
       
   353     thread->set_deferred_card_mark(MemRegion());
       
   354   }
       
   355   assert(thread->deferred_card_mark().is_empty(), "invariant");
       
   356 }
       
   357 
       
   358 size_t CollectedHeap::max_tlab_size() const {
   324 size_t CollectedHeap::max_tlab_size() const {
   359   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
   325   // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
   360   // This restriction could be removed by enabling filling with multiple arrays.
   326   // This restriction could be removed by enabling filling with multiple arrays.
   361   // If we compute that the reasonable way as
   327   // If we compute that the reasonable way as
   362   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
   328   //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
   366   // which is fine, since we'll be able to fill that.
   332   // which is fine, since we'll be able to fill that.
   367   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
   333   size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
   368               sizeof(jint) *
   334               sizeof(jint) *
   369               ((juint) max_jint / (size_t) HeapWordSize);
   335               ((juint) max_jint / (size_t) HeapWordSize);
   370   return align_down(max_int_size, MinObjAlignment);
   336   return align_down(max_int_size, MinObjAlignment);
   371 }
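
The expression above divides max_jint by HeapWordSize before multiplying by sizeof(jint); done the other way round, 4 * max_jint needs 33 bits and wraps in 32-bit arithmetic. A minimal standalone sketch (not JDK code) that makes the wrap visible with explicit 32-bit types, assuming HeapWordSize is 8 as on a 64-bit VM:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint       = 0x7FFFFFFFu;
  const uint32_t heap_word_size = 8;               // assumed 64-bit heap word

  // Multiply first: 4 * max_jint needs 33 bits, so the 32-bit product wraps.
  uint32_t multiply_first = uint32_t(sizeof(int32_t)) * max_jint / heap_word_size;
  // Divide first, as max_tlab_size() does: the intermediate stays in range.
  uint64_t divide_first   = uint64_t(sizeof(int32_t)) * (max_jint / heap_word_size);

  std::printf("multiply first (wrapped): %u words\n", multiply_first);
  std::printf("divide first:             %llu words (~8 GiB of TLAB)\n",
              (unsigned long long) divide_first);
  return 0;
}

Dividing first gives a slightly smaller bound, which, as the comment above notes, is fine since the heap can still fill that with a single array.
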
       
   372 
       
   373 // Helper for ReduceInitialCardMarks. For performance,
       
   374 // compiled code may elide card-marks for initializing stores
       
   375 // to a newly allocated object along the fast-path. We
       
   376 // compensate for such elided card-marks as follows:
       
   377 // (a) Generational, non-concurrent collectors, such as
       
   378 //     GenCollectedHeap(ParNew,DefNew,Tenured) and
       
   379 //     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
       
   380 //     need the card-mark if and only if the region is
       
   381 //     in the old gen, and do not care if the card-mark
       
   382 //     succeeds or precedes the initializing stores themselves,
       
   383 //     so long as the card-mark is completed before the next
       
   384 //     scavenge. For all these cases, we can do a card mark
       
   385 //     at the point at which we do a slow path allocation
       
   386 //     in the old gen, i.e. in this call.
       
   387 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
       
   388 //     in addition that the card-mark for an old gen allocated
       
   389 //     object strictly follow any associated initializing stores.
       
   390 //     In these cases, the memRegion remembered below is
       
   391 //     used to card-mark the entire region either just before the next
       
   392 //     slow-path allocation by this thread or just before the next scavenge or
       
   393 //     CMS-associated safepoint, whichever of these events happens first.
       
   394 //     (The implicit assumption is that the object has been fully
       
   395 //     initialized by this point, a fact that we assert when doing the
       
   396 //     card-mark.)
       
   397 // (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
       
   398 //     G1 concurrent marking is in progress an SATB (pre-write-)barrier
       
   399 //     is used to remember the pre-value of any store. Initializing
       
   400 //     stores will not need this barrier, so we need not worry about
       
   401 //     compensating for the missing pre-barrier here. Turning now
       
   402 //     to the post-barrier, we note that G1 needs a RS update barrier
       
   403 //     which simply enqueues a (sequence of) dirty cards which may
       
   404 //     optionally be refined by the concurrent update threads. Note
       
   405 //     that this barrier need only be applied to a non-young write,
       
   406 //     but, like in CMS, because of the presence of concurrent refinement
       
   407 //     (much like CMS' precleaning), must strictly follow the oop-store.
       
   408 //     Thus, using the same protocol for maintaining the intended
       
   409 //     invariants turns out, serendipitously, to be the same for both
       
   410 //     G1 and CMS.
       
   411 //
       
   412 // For any future collector, this code should be reexamined with
       
   413 // that specific collector in mind, and the documentation above suitably
       
   414 // extended and updated.
       
   415 oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
       
   416   // If a previous card-mark was deferred, flush it now.
       
   417   flush_deferred_store_barrier(thread);
       
   418   if (can_elide_initializing_store_barrier(new_obj) ||
       
   419       new_obj->is_typeArray()) {
       
   420     // Arrays of non-references don't need a pre-barrier.
       
   421     // The deferred_card_mark region should be empty
       
   422     // following the flush above.
       
   423     assert(thread->deferred_card_mark().is_empty(), "Error");
       
   424   } else {
       
   425     MemRegion mr((HeapWord*)new_obj, new_obj->size());
       
   426     assert(!mr.is_empty(), "Error");
       
   427     if (_defer_initial_card_mark) {
       
   428       // Defer the card mark
       
   429       thread->set_deferred_card_mark(mr);
       
   430     } else {
       
   431       // Do the card mark
       
   432       BarrierSet* bs = barrier_set();
       
   433       bs->write_region(mr);
       
   434     }
       
   435   }
       
   436   return new_obj;
       
   437 }
   337 }
   438 
   338 
   439 size_t CollectedHeap::filler_array_hdr_size() {
   339 size_t CollectedHeap::filler_array_hdr_size() {
   440   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
   340   return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
   441 }
   341 }
   536          !is_init_completed(),
   436          !is_init_completed(),
   537          "Should only be called at a safepoint or at start-up"
   437          "Should only be called at a safepoint or at start-up"
   538          " otherwise concurrent mutator activity may make heap "
   438          " otherwise concurrent mutator activity may make heap "
   539          " unparsable again");
   439          " unparsable again");
   540   const bool use_tlab = UseTLAB;
   440   const bool use_tlab = UseTLAB;
   541   const bool deferred = _defer_initial_card_mark;
       
   542   // The main thread starts allocating via a TLAB even before it
   441   // The main thread starts allocating via a TLAB even before it
   543   // has added itself to the threads list at vm boot-up.
   442   // has added itself to the threads list at vm boot-up.
   544   JavaThreadIteratorWithHandle jtiwh;
   443   JavaThreadIteratorWithHandle jtiwh;
   545   assert(!use_tlab || jtiwh.length() > 0,
   444   assert(!use_tlab || jtiwh.length() > 0,
   546          "Attempt to fill tlabs before main thread has been added"
   445          "Attempt to fill tlabs before main thread has been added"
   547          " to threads list is doomed to failure!");
   446          " to threads list is doomed to failure!");
       
   447   BarrierSet *bs = barrier_set();
   548   for (; JavaThread *thread = jtiwh.next(); ) {
   448   for (; JavaThread *thread = jtiwh.next(); ) {
   549      if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
   449      if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
   550 #if COMPILER2_OR_JVMCI
   450      bs->make_parsable(thread);
   551      // The deferred store barriers must all have been flushed to the
       
   552      // card-table (or other remembered set structure) before GC starts
       
   553      // processing the card-table (or other remembered set).
       
   554      if (deferred) flush_deferred_store_barrier(thread);
       
   555 #else
       
   556      assert(!deferred, "Should be false");
       
   557      assert(thread->deferred_card_mark().is_empty(), "Should be empty");
       
   558 #endif
       
   559   }
   451   }
   560 }
   452 }
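
In the new version of the loop above, the COMPILER2_OR_JVMCI block that flushed deferred card marks is gone and each thread is handed to the barrier set instead (bs->make_parsable(thread)), so a barrier set that still defers initial card marks can perform the flush itself. A minimal standalone toy of that idea, with simplified stand-ins for MemRegion, JavaThread and the barrier set; none of this is the actual JDK code:

#include <cstddef>
#include <cstdio>

struct MemRegion {                                 // stand-in for HotSpot's MemRegion
  std::size_t word_size = 0;
  bool is_empty() const { return word_size == 0; }
};

struct JavaThread {                                // stand-in: owns the deferred card mark
  MemRegion _deferred_card_mark;
  MemRegion deferred_card_mark() const { return _deferred_card_mark; }
  void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; }
};

struct CardTableBarrierSet {                       // hypothetical barrier set
  void write_region(MemRegion mr) {                // dirty the cards covering mr
    std::printf("card-marking %zu words\n", mr.word_size);
  }
  // Per-thread work that CollectedHeap::flush_deferred_store_barrier() used to do:
  void make_parsable(JavaThread* thread) {
    MemRegion deferred = thread->deferred_card_mark();
    if (!deferred.is_empty()) {
      write_region(deferred);                      // complete the card mark now
      thread->set_deferred_card_mark(MemRegion()); // clear the per-thread record
    }
  }
};

int main() {
  JavaThread t;
  MemRegion deferred;
  deferred.word_size = 16;                         // pretend a card mark was deferred
  t.set_deferred_card_mark(deferred);

  CardTableBarrierSet bs;
  bs.make_parsable(&t);                            // prints: card-marking 16 words
  return 0;
}

A barrier set that never defers card marks simply finds the deferred region empty here and does nothing.
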
   561 
   453 
   562 void CollectedHeap::accumulate_statistics_all_tlabs() {
   454 void CollectedHeap::accumulate_statistics_all_tlabs() {
   563   if (UseTLAB) {
   455   if (UseTLAB) {