hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp
author tonyp
Tue, 24 Aug 2010 17:24:33 -0400
changeset 7398 e4aa6d9bda09
parent 7397 5b173b4ca846
permissions -rw-r--r--
6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor number +1.
g) Lots of code tidy-up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentZFThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/copy.hpp"

// ======= Concurrent Zero-Fill Thread ========

// The concurrent zero-fill thread is created when the G1 garbage collector is used.

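// Statistics reported by print_summary_info(): the number of region
// allocations, how many of those required a synchronous zero-fill or a
// wait for an in-progress zero-fill, and how many regions have been
// zero-filled.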
int ConcurrentZFThread::_region_allocs = 0;
int ConcurrentZFThread::_sync_zfs = 0;
int ConcurrentZFThread::_zf_waits = 0;
int ConcurrentZFThread::_regions_filled = 0;

ConcurrentZFThread::ConcurrentZFThread() :
  ConcurrentGCThread()
{
  create_and_start();
}

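// Wait (on ZF_mon, which the caller must already hold) until the given
// region is no longer in the ZeroFilling state.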
void ConcurrentZFThread::wait_for_ZF_completed(HeapRegion* hr) {
  assert(ZF_mon->owned_by_self(), "Precondition.");
  note_zf_wait();
  while (hr->zero_fill_state() == HeapRegion::ZeroFilling) {
    ZF_mon->wait(Mutex::_no_safepoint_check_flag);
  }
}

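// Zero-fill the given region, but only if this thread is still registered
// as its filler; a GC may have claimed (Allocated) the region in the
// meantime, in which case we do nothing.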
void ConcurrentZFThread::processHeapRegion(HeapRegion* hr) {
  assert(!Universe::heap()->is_gc_active(),
         "This should not happen during GC.");
  assert(hr != NULL, "Precondition");
  // These are unlocked reads, but if this test is successful, then no
  // other thread will attempt this zero filling.  Only a GC thread can
  // modify the ZF state of a region whose state is zero-filling, and this
  // should only happen while the ZF thread is locking out GC.
  if (hr->zero_fill_state() == HeapRegion::ZeroFilling
      && hr->zero_filler() == Thread::current()) {
    assert(hr->top() == hr->bottom(), "better be empty!");
    assert(!hr->isHumongous(), "Only free regions on unclean list.");
    Copy::fill_to_words(hr->bottom(), hr->capacity()/HeapWordSize);
    note_region_filled();
  }
}

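// Main loop: wait on ZF_mon until the heap wants zero-filling and an
// unclean region is available, zero-fill that region outside the lock,
// and then put it on the free-region list.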
void ConcurrentZFThread::run() {
  initialize_in_thread();
  Thread* thr_self = Thread::current();
  _vtime_start = os::elapsedVTime();
  wait_for_universe_init();

  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  _sts.join();
  while (!_should_terminate) {
    _sts.leave();

    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);

      // This local variable will hold a region being zero-filled.  This
      // region will be on neither the unclean nor the zero-filled lists,
      // and will not be available for allocation; thus, an allocation
      // might fail and cause a full GC because of this, but that is a
      // price we are willing to pay.  (In future, we might want to make
      // the fact that there's a region being zero-filled apparent to the
      // G1 heap, which could then wait for it in this extreme case...)
      HeapRegion* to_fill;

      while (!g1->should_zf()
             || (to_fill = g1->pop_unclean_region_list_locked()) == NULL)
        ZF_mon->wait(Mutex::_no_safepoint_check_flag);
      while (to_fill->zero_fill_state() == HeapRegion::ZeroFilling)
        ZF_mon->wait(Mutex::_no_safepoint_check_flag);

      // So now to_fill is non-NULL and is not ZeroFilling.  It might be
      // Allocated or ZeroFilled.  (The latter could happen if this thread
      // starts the zero-filling of a region, but a GC intervenes and
      // pushes new regions needing zero-filling onto the front of the
      // list.)

      switch (to_fill->zero_fill_state()) {
      case HeapRegion::Allocated:
        to_fill = NULL;
        break;

      case HeapRegion::NotZeroFilled:
        to_fill->set_zero_fill_in_progress(thr_self);

        ZF_mon->unlock();
        _sts.join();
        processHeapRegion(to_fill);
        _sts.leave();
        ZF_mon->lock_without_safepoint_check();

        if (to_fill->zero_fill_state() == HeapRegion::ZeroFilling
            && to_fill->zero_filler() == thr_self) {
          to_fill->set_zero_fill_complete();
          (void)g1->put_free_region_on_list_locked(to_fill);
        }
        break;

      case HeapRegion::ZeroFilled:
        (void)g1->put_free_region_on_list_locked(to_fill);
        break;

      case HeapRegion::ZeroFilling:
        ShouldNotReachHere();
        break;
      }
    }
    _vtime_accum = (os::elapsedVTime() - _vtime_start);
    _sts.join();
  }
  _sts.leave();

  assert(_should_terminate, "just checking");
  terminate();
}

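// Yield to a pending safepoint via the suspendible thread set, if one has
// been requested; return true if we actually yielded.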
bool ConcurrentZFThread::offer_yield() {
  if (_sts.should_yield()) {
    _sts.yield("Concurrent ZF");
    return true;
  } else {
    return false;
  }
}

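// Ask the thread to terminate and wait (on Terminator_lock) until it has.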
void ConcurrentZFThread::stop() {
  // it is ok to take late safepoints here, if needed
  MutexLockerEx mu(Terminator_lock);
  _should_terminate = true;
  while (!_has_terminated) {
    Terminator_lock->wait();
  }
}

void ConcurrentZFThread::print() const {
  print_on(tty);
}

void ConcurrentZFThread::print_on(outputStream* st) const {
  st->print("\"G1 Concurrent Zero-Fill Thread\" ");
  Thread::print_on(st);
  st->cr();
}


double ConcurrentZFThread::_vtime_accum;

void ConcurrentZFThread::print_summary_info() {
  gclog_or_tty->print("\nConcurrent Zero-Filling:\n");
  gclog_or_tty->print("  Filled %d regions, used %5.2fs.\n",
                      _regions_filled,
                      vtime_accum());
  gclog_or_tty->print("  Of %d region allocs, %d (%5.2f%%) required sync ZF,\n",
                      _region_allocs, _sync_zfs,
                      (_region_allocs > 0 ?
                       (float)_sync_zfs/(float)_region_allocs*100.0 :
                       0.0));
  gclog_or_tty->print("     and %d (%5.2f%%) required a ZF wait.\n",
                      _zf_waits,
                      (_region_allocs > 0 ?
                       (float)_zf_waits/(float)_region_allocs*100.0 :
                       0.0));

}