8232365: Implementation for JEP 363: Remove the Concurrent Mark Sweep (CMS) Garbage Collector
Reviewed-by: kbarrett, tschatzl, erikj, coleenp, dholmes
--- a/make/autoconf/hotspot.m4 Wed Nov 13 11:21:15 2019 +0100
+++ b/make/autoconf/hotspot.m4 Wed Nov 13 11:37:29 2019 +0100
@@ -25,11 +25,11 @@
# All valid JVM features, regardless of platform
VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
- graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
+ graal vm-structs jni-check services management epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
static-build link-time-opt aot jfr"
# Deprecated JVM features (these are ignored, but with a warning)
-DEPRECATED_JVM_FEATURES="trace"
+DEPRECATED_JVM_FEATURES="trace cmsgc"
# All valid JVM variants
VALID_JVM_VARIANTS="server client minimal core zero custom"
@@ -326,10 +326,6 @@
AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
fi
- if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
- AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
- fi
-
# Enable JFR by default, except for Zero, linux-sparcv9 and on minimal.
if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
if test "x$OPENJDK_TARGET_OS" != xaix; then
@@ -491,7 +487,7 @@
fi
# All variants but minimal (and custom) get these features
- NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
+ NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
# Disable CDS on AIX.
if test "x$OPENJDK_TARGET_OS" = "xaix"; then
--- a/make/hotspot/lib/JvmDtraceObjects.gmk Wed Nov 13 11:21:15 2019 +0100
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk Wed Nov 13 11:37:29 2019 +0100
@@ -79,12 +79,6 @@
vmThread.o \
)
- ifeq ($(call check-jvm-feature, cmsgc), true)
- DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
- cmsVMOperations.o \
- )
- endif
-
ifeq ($(call check-jvm-feature, parallelgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
psVMOperations.o \
--- a/make/hotspot/lib/JvmFeatures.gmk Wed Nov 13 11:21:15 2019 +0100
+++ b/make/hotspot/lib/JvmFeatures.gmk Wed Nov 13 11:37:29 2019 +0100
@@ -138,11 +138,6 @@
aotLoader.cpp compiledIC_aot.cpp
endif
-ifneq ($(call check-jvm-feature, cmsgc), true)
- JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
- JVM_EXCLUDE_PATTERNS += gc/cms
-endif
-
ifneq ($(call check-jvm-feature, g1gc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
JVM_EXCLUDE_PATTERNS += gc/g1
--- a/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,6 @@
*/
#pragma weak tty
-#pragma weak CMSExpAvgFactor
#if defined(i386) || defined(__i386) || defined(__amd64)
#pragma weak noreg
--- a/src/hotspot/cpu/aarch64/aarch64.ad Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Wed Nov 13 11:37:29 2019 +0100
@@ -1192,9 +1192,6 @@
// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_exclusive(const Node *load);
- // predicate controlling translation of StoreCM
- bool unnecessary_storestore(const Node *storecm);
-
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
%}
@@ -1583,29 +1580,6 @@
return true;
}
-// predicate controlling translation of StoreCM
-//
-// returns true if a StoreStore must precede the card write otherwise
-// false
-
-bool unnecessary_storestore(const Node *storecm)
-{
- assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM");
-
- // we need to generate a dmb ishst between an object put and the
- // associated card mark when we are using CMS without conditional
- // card marking
-
- if (UseConcMarkSweepGC && !UseCondCardMark) {
- return false;
- }
-
- // a storestore is unnecessary in all other cases
-
- return true;
-}
-
-
#define __ _masm.
// advance declarations for helper functions to convert register
@@ -7220,7 +7194,6 @@
instruct storeimmCM0(immI0 zero, memory mem)
%{
match(Set mem (StoreCM mem zero));
- predicate(unnecessary_storestore(n));
ins_cost(INSN_COST);
format %{ "storestore (elided)\n\t"
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -64,9 +64,6 @@
define_pd_global(bool, PreserveFramePointer, false);
-// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
-
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/arm/globals_arm.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/arm/globals_arm.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -63,9 +63,6 @@
define_pd_global(bool, PreserveFramePointer, false);
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
-
define_pd_global(uintx, TypeProfileLevel, 0);
// No performance work done here yet.
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -67,9 +67,6 @@
define_pd_global(bool, PreserveFramePointer, false);
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
-
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/ppc/ppc.ad Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/ppc/ppc.ad Wed Nov 13 11:37:29 2019 +0100
@@ -6928,25 +6928,6 @@
ins_pipe(pipe_class_memory);
%}
-// Card-mark for CMS garbage collection.
-// This cardmark does an optimization so that it must not always
-// do a releasing store. For this, it needs the constant address of
-// CMSCollectorCardTableBarrierSetBSExt::_requires_release.
-// This constant address is split off here by expand so we can use
-// adlc / matcher functionality to load it from the constant section.
-instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
- match(Set mem (StoreCM mem zero));
- predicate(UseConcMarkSweepGC);
-
- expand %{
- immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %}
- iRegLdst releaseFieldAddress;
- flagsReg crx;
- loadConL_Ex(releaseFieldAddress, baseImm);
- storeCM_CMS(mem, releaseFieldAddress, crx);
- %}
-%}
-
instruct storeCM_G1(memory mem, immI_0 zero) %{
match(Set mem (StoreCM mem zero));
predicate(UseG1GC);
--- a/src/hotspot/cpu/s390/globals_s390.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/s390/globals_s390.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -69,9 +69,6 @@
define_pd_global(bool, PreserveFramePointer, false);
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // Default max size of CMS young gen, per GC worker thread.
-
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -74,9 +74,6 @@
define_pd_global(bool, PreserveFramePointer, false);
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
-
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/x86/globals_x86.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/x86/globals_x86.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -81,9 +81,6 @@
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 64*M); // default max size of CMS young gen, per GC worker thread
-
define_pd_global(uintx, TypeProfileLevel, 111);
define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/zero/globals_zero.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/zero/globals_zero.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -66,9 +66,6 @@
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M); // default max size of CMS young gen, per GC worker thread
-
define_pd_global(uintx, TypeProfileLevel, 0);
define_pd_global(bool, PreserveFramePointer, false);
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/adaptiveFreeList.hpp"
-#include "gc/cms/freeChunk.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "memory/freeList.inline.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-
-template <>
-void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
- if (c != NULL) {
- st->print("%16s", c);
- } else {
- st->print(SIZE_FORMAT_W(16), size());
- }
- st->print("\t"
- SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
- SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
- bfr_surp(), surplus(), desired(), prev_sweep(), before_sweep(),
- count(), coal_births(), coal_deaths(), split_births(), split_deaths());
-}
-
-template <class Chunk>
-AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
- init_statistics();
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::initialize() {
- FreeList<Chunk>::initialize();
- set_hint(0);
- init_statistics(true /* split_birth */);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::reset(size_t hint) {
- FreeList<Chunk>::reset();
- set_hint(hint);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
- _allocation_stats.initialize(split_birth);
-}
-
-template <class Chunk>
-size_t AdaptiveFreeList<Chunk>::get_better_size() {
-
- // A candidate chunk has been found. If it is already
- // underpopulated and there is a hint, return the hint(). Else
- // return the size of this chunk.
- if (surplus() <= 0) {
- if (hint() != 0) {
- return hint();
- } else {
- return size();
- }
- } else {
- // This list has a surplus so use it.
- return size();
- }
-}
-
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
- assert_proper_lock_protection();
- return_chunk_at_head(chunk, true);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
- FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
-#ifdef ASSERT
- if (record_return) {
- increment_returned_bytes_by(size()*HeapWordSize);
- }
-#endif
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
- AdaptiveFreeList<Chunk>::return_chunk_at_tail(chunk, true);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
- FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
-#ifdef ASSERT
- if (record_return) {
- increment_returned_bytes_by(size()*HeapWordSize);
- }
-#endif
-}
-
-#ifndef PRODUCT
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::verify_stats() const {
- // The +1 of the LH comparand is to allow some "looseness" in
- // checking: we usually call this interface when adding a block
- // and we'll subsequently update the stats; we cannot update the
- // stats beforehand because in the case of the large-block BT
- // dictionary for example, this might be the first block and
- // in that case there would be no place that we could record
- // the stats (which are kept in the block itself).
- assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
- + _allocation_stats.coal_births() + 1) // Total Production Stock + 1
- >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
- + (ssize_t)count()), // Total Current Stock + depletion
- "FreeList " PTR_FORMAT " of size " SIZE_FORMAT
- " violates Conservation Principle: "
- "prev_sweep(" SIZE_FORMAT ")"
- " + split_births(" SIZE_FORMAT ")"
- " + coal_births(" SIZE_FORMAT ") + 1 >= "
- " split_deaths(" SIZE_FORMAT ")"
- " coal_deaths(" SIZE_FORMAT ")"
- " + count(" SSIZE_FORMAT ")",
- p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
- _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
- _allocation_stats.coal_deaths(), count());
-}
-#endif
-
-// Needs to be after the definitions have been seen.
-template class AdaptiveFreeList<FreeChunk>;
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
-#define SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
-
-#include "gc/cms/allocationStats.hpp"
-#include "memory/freeList.hpp"
-
-class CompactibleFreeListSpace;
-
-// A class for maintaining a free list of Chunk's. The FreeList
- // maintains the structure of the list (head, tail, etc.) plus
-// statistics for allocations from the list. The links between items
-// are not part of FreeList. The statistics are
-// used to make decisions about coalescing Chunk's when they
-// are swept during collection.
-//
-// See the corresponding .cpp file for a description of the specifics
-// for that implementation.
-
-class Mutex;
-
-template <class Chunk>
-class AdaptiveFreeList : public FreeList<Chunk> {
- friend class CompactibleFreeListSpace;
- friend class VMStructs;
- // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
-
- size_t _hint; // next larger size list with a positive surplus
-
- AllocationStats _allocation_stats; // allocation-related statistics
-
- public:
-
- AdaptiveFreeList();
-
- using FreeList<Chunk>::assert_proper_lock_protection;
-#ifdef ASSERT
- using FreeList<Chunk>::protecting_lock;
-#endif
- using FreeList<Chunk>::count;
- using FreeList<Chunk>::size;
- using FreeList<Chunk>::verify_chunk_in_free_list;
- using FreeList<Chunk>::getFirstNChunksFromList;
- using FreeList<Chunk>::print_on;
- void return_chunk_at_head(Chunk* fc, bool record_return);
- void return_chunk_at_head(Chunk* fc);
- void return_chunk_at_tail(Chunk* fc, bool record_return);
- void return_chunk_at_tail(Chunk* fc);
- using FreeList<Chunk>::return_chunk_at_tail;
- using FreeList<Chunk>::remove_chunk;
- using FreeList<Chunk>::prepend;
- using FreeList<Chunk>::print_labels_on;
- using FreeList<Chunk>::get_chunk_at_head;
-
- // Initialize.
- void initialize();
-
- // Reset the head, tail, hint, and count of a free list.
- void reset(size_t hint);
-
- void print_on(outputStream* st, const char* c = NULL) const;
-
- size_t hint() const {
- return _hint;
- }
- void set_hint(size_t v) {
- assert_proper_lock_protection();
- assert(v == 0 || size() < v, "Bad hint");
- _hint = v;
- }
-
- size_t get_better_size();
-
- // Accessors for statistics
- void init_statistics(bool split_birth = false);
-
- AllocationStats* allocation_stats() {
- assert_proper_lock_protection();
- return &_allocation_stats;
- }
-
- ssize_t desired() const {
- return _allocation_stats.desired();
- }
- void set_desired(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_desired(v);
- }
- void compute_desired(float inter_sweep_current,
- float inter_sweep_estimate,
- float intra_sweep_estimate) {
- assert_proper_lock_protection();
- _allocation_stats.compute_desired(count(),
- inter_sweep_current,
- inter_sweep_estimate,
- intra_sweep_estimate);
- }
- ssize_t coal_desired() const {
- return _allocation_stats.coal_desired();
- }
- void set_coal_desired(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_coal_desired(v);
- }
-
- ssize_t surplus() const {
- return _allocation_stats.surplus();
- }
- void set_surplus(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_surplus(v);
- }
- void increment_surplus() {
- assert_proper_lock_protection();
- _allocation_stats.increment_surplus();
- }
- void decrement_surplus() {
- assert_proper_lock_protection();
- _allocation_stats.decrement_surplus();
- }
-
- ssize_t bfr_surp() const {
- return _allocation_stats.bfr_surp();
- }
- void set_bfr_surp(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_bfr_surp(v);
- }
- ssize_t prev_sweep() const {
- return _allocation_stats.prev_sweep();
- }
- void set_prev_sweep(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_prev_sweep(v);
- }
- ssize_t before_sweep() const {
- return _allocation_stats.before_sweep();
- }
- void set_before_sweep(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_before_sweep(v);
- }
-
- ssize_t coal_births() const {
- return _allocation_stats.coal_births();
- }
- void set_coal_births(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_coal_births(v);
- }
- void increment_coal_births() {
- assert_proper_lock_protection();
- _allocation_stats.increment_coal_births();
- }
-
- ssize_t coal_deaths() const {
- return _allocation_stats.coal_deaths();
- }
- void set_coal_deaths(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_coal_deaths(v);
- }
- void increment_coal_deaths() {
- assert_proper_lock_protection();
- _allocation_stats.increment_coal_deaths();
- }
-
- ssize_t split_births() const {
- return _allocation_stats.split_births();
- }
- void set_split_births(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_split_births(v);
- }
- void increment_split_births() {
- assert_proper_lock_protection();
- _allocation_stats.increment_split_births();
- }
-
- ssize_t split_deaths() const {
- return _allocation_stats.split_deaths();
- }
- void set_split_deaths(ssize_t v) {
- assert_proper_lock_protection();
- _allocation_stats.set_split_deaths(v);
- }
- void increment_split_deaths() {
- assert_proper_lock_protection();
- _allocation_stats.increment_split_deaths();
- }
-
-#ifndef PRODUCT
- // For debugging. The "_returned_bytes" in all the lists are summed
- // and compared with the total number of bytes swept during a
- // collection.
- size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
- void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
- void increment_returned_bytes_by(size_t v) {
- _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
- }
- // Stats verification
- void verify_stats() const;
-#endif // NOT PRODUCT
-};
-
-#endif // SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
--- a/src/hotspot/share/gc/cms/allocationStats.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/allocationStats.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/ostream.hpp"
-
-// Technically this should be derived from machine speed, and
-// ideally it would be dynamically adjusted.
-float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
-
-void AllocationStats::initialize(bool split_birth) {
- AdaptivePaddedAverage* dummy =
- new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
- CMS_FLSPadding);
- _desired = 0;
- _coal_desired = 0;
- _surplus = 0;
- _bfr_surp = 0;
- _prev_sweep = 0;
- _before_sweep = 0;
- _coal_births = 0;
- _coal_deaths = 0;
- _split_births = (split_birth ? 1 : 0);
- _split_deaths = 0;
- _returned_bytes = 0;
-}
--- a/src/hotspot/share/gc/cms/allocationStats.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_ALLOCATIONSTATS_HPP
-#define SHARE_GC_CMS_ALLOCATIONSTATS_HPP
-
-#include "gc/shared/gcUtil.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-
-class AllocationStats {
- // A duration threshold (in ms) used to filter
- // possibly unreliable samples.
- static float _threshold;
-
- // We measure the demand between the end of the previous sweep and
- // beginning of this sweep:
- // Count(end_last_sweep) - Count(start_this_sweep)
- // + split_births(between) - split_deaths(between)
- // The above number divided by the time since the end of the
- // previous sweep gives us a time rate of demand for blocks
- // of this size. We compute a padded average of this rate as
- // our current estimate for the time rate of demand for blocks
- // of this size. Similarly, we keep a padded average for the time
- // between sweeps. Our current estimate for demand for blocks of
- // this size is then simply computed as the product of these two
- // estimates.
- AdaptivePaddedAverage _demand_rate_estimate;
-
- ssize_t _desired; // Demand estimate computed as described above
- ssize_t _coal_desired; // desired +/- small-percent for tuning coalescing
-
- ssize_t _surplus; // count - (desired +/- small-percent),
- // used to tune splitting in best fit
- ssize_t _bfr_surp; // surplus at start of current sweep
- ssize_t _prev_sweep; // count from end of previous sweep
- ssize_t _before_sweep; // count from before current sweep
- ssize_t _coal_births; // additional chunks from coalescing
- ssize_t _coal_deaths; // loss from coalescing
- ssize_t _split_births; // additional chunks from splitting
- ssize_t _split_deaths; // loss from splitting
- size_t _returned_bytes; // number of bytes returned to list.
- public:
- void initialize(bool split_birth = false);
-
- AllocationStats() {
- initialize();
- }
-
- // The rate estimate is in blocks per second.
- void compute_desired(size_t count,
- float inter_sweep_current,
- float inter_sweep_estimate,
- float intra_sweep_estimate) {
- // If the latest inter-sweep time is below our granularity
- // of measurement, we may call in here with
- // inter_sweep_current == 0. However, even for suitably small
- // but non-zero inter-sweep durations, we may not trust the accuracy
- // of accumulated data, since it has not been "integrated"
- // (read "low-pass-filtered") long enough, and would be
- // vulnerable to noisy glitches. In such cases, we
- // ignore the current sample and use currently available
- // historical estimates.
- assert(prev_sweep() + split_births() + coal_births() // "Total Production Stock"
- >= split_deaths() + coal_deaths() + (ssize_t)count, // "Current stock + depletion"
- "Conservation Principle");
- if (inter_sweep_current > _threshold) {
- ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
- - split_deaths() - coal_deaths();
- assert(demand >= 0,
- "Demand (" SSIZE_FORMAT ") should be non-negative for "
- PTR_FORMAT " (size=" SIZE_FORMAT ")",
- demand, p2i(this), count);
- // Defensive: adjust for imprecision in event counting
- if (demand < 0) {
- demand = 0;
- }
- float old_rate = _demand_rate_estimate.padded_average();
- float rate = ((float)demand)/inter_sweep_current;
- _demand_rate_estimate.sample(rate);
- float new_rate = _demand_rate_estimate.padded_average();
- ssize_t old_desired = _desired;
- float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
- _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
- log_trace(gc, freelist)("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, "
- "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
- demand, old_rate, rate, new_rate, old_desired, _desired);
- }
- }
-
- ssize_t desired() const { return _desired; }
- void set_desired(ssize_t v) { _desired = v; }
-
- ssize_t coal_desired() const { return _coal_desired; }
- void set_coal_desired(ssize_t v) { _coal_desired = v; }
-
- ssize_t surplus() const { return _surplus; }
- void set_surplus(ssize_t v) { _surplus = v; }
- void increment_surplus() { _surplus++; }
- void decrement_surplus() { _surplus--; }
-
- ssize_t bfr_surp() const { return _bfr_surp; }
- void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
- ssize_t prev_sweep() const { return _prev_sweep; }
- void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
- ssize_t before_sweep() const { return _before_sweep; }
- void set_before_sweep(ssize_t v) { _before_sweep = v; }
-
- ssize_t coal_births() const { return _coal_births; }
- void set_coal_births(ssize_t v) { _coal_births = v; }
- void increment_coal_births() { _coal_births++; }
-
- ssize_t coal_deaths() const { return _coal_deaths; }
- void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
- void increment_coal_deaths() { _coal_deaths++; }
-
- ssize_t split_births() const { return _split_births; }
- void set_split_births(ssize_t v) { _split_births = v; }
- void increment_split_births() { _split_births++; }
-
- ssize_t split_deaths() const { return _split_deaths; }
- void set_split_deaths(ssize_t v) { _split_deaths = v; }
- void increment_split_deaths() { _split_deaths++; }
-
- NOT_PRODUCT(
- size_t returned_bytes() const { return _returned_bytes; }
- void set_returned_bytes(size_t v) { _returned_bytes = v; }
- )
-};
-
-#endif // SHARE_GC_CMS_ALLOCATIONSTATS_HPP
--- a/src/hotspot/share/gc/cms/cmsArguments.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsArguments.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/gcArguments.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/defaultStream.hpp"
-
-void CMSArguments::set_parnew_gc_flags() {
- assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
- "control point invariant");
- assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
- if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
- FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
- assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
- } else if (ParallelGCThreads == 0) {
- jio_fprintf(defaultStream::error_stream(),
- "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
- vm_exit(1);
- }
-
- // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
- // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
- // we set them to 1024 and 1024.
- // See CR 6362902.
- if (FLAG_IS_DEFAULT(YoungPLABSize)) {
- FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
- }
- if (FLAG_IS_DEFAULT(OldPLABSize)) {
- FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
- }
-
- // When using compressed oops, we use local overflow stacks,
- // rather than using a global overflow list chained through
- // the klass word of the object's pre-image.
- if (UseCompressedOops && !ParGCUseLocalOverflow) {
- if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
- warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
- }
- FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
- }
- assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
-}
-
-// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
-// sparc/solaris for certain applications, but would gain from
-// further optimization and tuning efforts, and would almost
-// certainly gain from analysis of platform and environment.
-void CMSArguments::initialize() {
- GCArguments::initialize();
-
- assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
- assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
- // CMS space iteration, which FLSVerifyAllHeapreferences entails,
- // insists that we hold the requisite locks so that the iteration is
- // MT-safe. For the verification at start-up and shut-down, we don't
- // yet have a good way of acquiring and releasing these locks,
- // which are not visible at the CollectedHeap level. We want to
- // be able to acquire these locks and then do the iteration rather
- // than just disable the lock verification. This will be fixed under
- // bug 4788986.
- if (UseConcMarkSweepGC && FLSVerifyAllHeapReferences) {
- if (VerifyDuringStartup) {
- warning("Heap verification at start-up disabled "
- "(due to current incompatibility with FLSVerifyAllHeapReferences)");
- VerifyDuringStartup = false; // Disable verification at start-up
- }
-
- if (VerifyBeforeExit) {
- warning("Heap verification at shutdown disabled "
- "(due to current incompatibility with FLSVerifyAllHeapReferences)");
- VerifyBeforeExit = false; // Disable verification at shutdown
- }
- }
-
- if (!ClassUnloading) {
- FLAG_SET_CMDLINE(CMSClassUnloadingEnabled, false);
- }
-
- // Set CMS global values
- CompactibleFreeListSpace::set_cms_values();
-
- // Turn off AdaptiveSizePolicy by default for cms until it is complete.
- disable_adaptive_size_policy("UseConcMarkSweepGC");
-
- set_parnew_gc_flags();
-
- size_t max_heap = align_down(MaxHeapSize,
- CardTableRS::ct_max_alignment_constraint());
-
- // Now make adjustments for CMS
- intx tenuring_default = (intx)6;
- size_t young_gen_per_worker = CMSYoungGenPerWorker;
-
- // Preferred young gen size for "short" pauses:
- // upper bound depends on # of threads and NewRatio.
- const size_t preferred_max_new_size_unaligned =
- MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
- size_t preferred_max_new_size =
- align_up(preferred_max_new_size_unaligned, os::vm_page_size());
-
- // Unless explicitly requested otherwise, size young gen
- // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
-
- // If either MaxNewSize or NewRatio is set on the command line,
- // assume the user is trying to set the size of the young gen.
- if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
-
- // Set MaxNewSize to our calculated preferred_max_new_size unless
- // NewSize was set on the command line and it is larger than
- // preferred_max_new_size.
- if (!FLAG_IS_DEFAULT(NewSize)) { // NewSize explicitly set at command-line
- FLAG_SET_ERGO(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
- } else {
- FLAG_SET_ERGO(MaxNewSize, preferred_max_new_size);
- }
- log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
-
- // Code along this path potentially sets NewSize and OldSize
- log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size: " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
- MinHeapSize, InitialHeapSize, max_heap);
- size_t min_new = preferred_max_new_size;
- if (FLAG_IS_CMDLINE(NewSize)) {
- min_new = NewSize;
- }
- if (max_heap > min_new && MinHeapSize > min_new) {
- // Unless explicitly requested otherwise, make young gen
- // at least min_new, and at most preferred_max_new_size.
- if (FLAG_IS_DEFAULT(NewSize)) {
- FLAG_SET_ERGO(NewSize, MAX2(NewSize, min_new));
- FLAG_SET_ERGO(NewSize, MIN2(preferred_max_new_size, NewSize));
- log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
- }
- // Unless explicitly requested otherwise, size old gen
- // so it's NewRatio x of NewSize.
- if (FLAG_IS_DEFAULT(OldSize)) {
- if (max_heap > NewSize) {
- FLAG_SET_ERGO(OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
- log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
- }
- }
- }
- }
- // Unless explicitly requested otherwise, definitely
- // promote all objects surviving "tenuring_default" scavenges.
- if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
- FLAG_IS_DEFAULT(SurvivorRatio)) {
- FLAG_SET_ERGO(MaxTenuringThreshold, tenuring_default);
- }
- // If we decided above (or user explicitly requested)
- // `promote all' (via MaxTenuringThreshold := 0),
- // prefer minuscule survivor spaces so as not to waste
- // space for (non-existent) survivors
- if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
- FLAG_SET_ERGO(SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
- }
-
- // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
- // but rather the number of free blocks of a given size that are used when
- // replenishing the local per-worker free list caches.
- if (FLAG_IS_DEFAULT(OldPLABSize)) {
- if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
- // OldPLAB sizing manually turned off: Use a larger default setting,
- // unless it was manually specified. This is because a too-low value
- // will slow down scavenges.
- FLAG_SET_ERGO(OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
- } else {
- FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
- }
- }
-
- // If either of the static initialization defaults have changed, note this
- // modification.
- if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
- CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
- }
-
- log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
-}
-
-void CMSArguments::disable_adaptive_size_policy(const char* collector_name) {
- if (UseAdaptiveSizePolicy) {
- if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
- warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
- collector_name);
- }
- FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
- }
-}
-
-CollectedHeap* CMSArguments::create_heap() {
- return new CMSHeap();
-}
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSARGUMENTS_HPP
-#define SHARE_GC_CMS_CMSARGUMENTS_HPP
-
-#include "gc/shared/gcArguments.hpp"
-#include "gc/shared/genArguments.hpp"
-
-class CollectedHeap;
-
-class CMSArguments : public GenArguments {
-private:
- void disable_adaptive_size_policy(const char* collector_name);
- void set_parnew_gc_flags();
-
- virtual void initialize();
- virtual CollectedHeap* create_heap();
-};
-
-#endif // SHARE_GC_CMS_CMSARGUMENTS_HPP
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,470 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCardTable.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-
-CMSCardTable::CMSCardTable(MemRegion whole_heap) :
- CardTableRS(whole_heap, CMSPrecleaningEnabled /* scanned_concurrently */) {
-}
-
-// Returns the number of chunks necessary to cover "mr".
-size_t CMSCardTable::chunks_to_cover(MemRegion mr) {
- return (size_t)(addr_to_chunk_index(mr.last()) -
- addr_to_chunk_index(mr.start()) + 1);
-}
-
-// Returns the index of the chunk in a stride which
-// covers the given address.
-uintptr_t CMSCardTable::addr_to_chunk_index(const void* addr) {
- uintptr_t card = (uintptr_t) byte_for(addr);
- return card / ParGCCardsPerStrideChunk;
-}
-
-void CMSCardTable::
-non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- uint n_threads) {
- assert(n_threads > 0, "expected n_threads > 0");
- assert(n_threads <= ParallelGCThreads,
- "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
-
- // Make sure the LNC array is valid for the space.
- CardValue** lowest_non_clean;
- uintptr_t lowest_non_clean_base_chunk_index;
- size_t lowest_non_clean_chunk_size;
- get_LNC_array_for_space(sp, lowest_non_clean,
- lowest_non_clean_base_chunk_index,
- lowest_non_clean_chunk_size);
-
- uint n_strides = n_threads * ParGCStridesPerThread;
- SequentialSubTasksDone* pst = sp->par_seq_tasks();
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks(n_strides);
-
- uint stride = 0;
- while (pst->try_claim_task(/* reference */ stride)) {
- process_stride(sp, mr, stride, n_strides,
- cl, ct,
- lowest_non_clean,
- lowest_non_clean_base_chunk_index,
- lowest_non_clean_chunk_size);
- }
- if (pst->all_tasks_completed()) {
- // Clear lowest_non_clean array for next time.
- intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
- uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
- for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
- intptr_t ind = ch - lowest_non_clean_base_chunk_index;
- assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
- "Bounds error");
- lowest_non_clean[ind] = NULL;
- }
- }
-}
-
-void
-CMSCardTable::
-process_stride(Space* sp,
- MemRegion used,
- jint stride, int n_strides,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- CardValue** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size) {
- // We go from higher to lower addresses here; it wouldn't help that much
- // because of the strided parallelism pattern used here.
-
- // Find the first card address of the first chunk in the stride that is
- // at least "bottom" of the used region.
- CardValue* start_card = byte_for(used.start());
- CardValue* end_card = byte_after(used.last());
- uintptr_t start_chunk = addr_to_chunk_index(used.start());
- uintptr_t start_chunk_stride_num = start_chunk % n_strides;
- CardValue* chunk_card_start;
-
- if ((uintptr_t)stride >= start_chunk_stride_num) {
- chunk_card_start = (start_card +
- (stride - start_chunk_stride_num) * ParGCCardsPerStrideChunk);
- } else {
- // Go ahead to the next chunk group boundary, then to the requested stride.
- chunk_card_start = (start_card +
- (n_strides - start_chunk_stride_num + stride) * ParGCCardsPerStrideChunk);
- }
-
- while (chunk_card_start < end_card) {
- // Even though we go from lower to higher addresses below, the
- // strided parallelism can interleave the actual processing of the
- // dirty pages in various ways. For a specific chunk within this
- // stride, we take care to avoid double scanning or missing a card
- // by suitably initializing the "min_done" field in process_chunk_boundaries()
- // below, together with the dirty region extension accomplished in
- // DirtyCardToOopClosure::do_MemRegion().
- CardValue* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
- // Invariant: chunk_mr should be fully contained within the "used" region.
- MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
- chunk_card_end >= end_card ?
- used.end() : addr_for(chunk_card_end));
- assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
- assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
-
- // This function is used by the parallel card table iteration.
- const bool parallel = true;
-
- DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
- cl->gen_boundary(),
- parallel);
- ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
-
- // Process the chunk.
- process_chunk_boundaries(sp,
- dcto_cl,
- chunk_mr,
- used,
- lowest_non_clean,
- lowest_non_clean_base_chunk_index,
- lowest_non_clean_chunk_size);
-
- // We want the LNC array updates above in process_chunk_boundaries
- // to be visible before any of the card table value changes as a
- // result of the dirty card iteration below.
- OrderAccess::storestore();
-
- // We want to clear the cards: clear_cl here does the work of finding
- // contiguous dirty ranges of cards to process and clear.
- clear_cl.do_MemRegion(chunk_mr);
-
- // Find the next chunk of the stride.
- chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
- }
-}
-
-void
-CMSCardTable::
-process_chunk_boundaries(Space* sp,
- DirtyCardToOopClosure* dcto_cl,
- MemRegion chunk_mr,
- MemRegion used,
- CardValue** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size)
-{
- // We must worry about non-array objects that cross chunk boundaries,
- // because such objects are both precisely and imprecisely marked:
- // .. if the head of such an object is dirty, the entire object
- // needs to be scanned, under the interpretation that this
- // was an imprecise mark
- // .. if the head of such an object is not dirty, we can assume
- // precise marking and it's efficient to scan just the dirty
- // cards.
- // In either case, each scanned reference must be scanned precisely
- // once so as to avoid cloning of a young referent. For efficiency,
- // our closures depend on this property and do not protect against
- // double scans.
-
- uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
- assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
- uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;
-
- // First, set "our" lowest_non_clean entry, which would be
- // used by the thread scanning an adjoining left chunk with
- // a non-array object straddling the mutual boundary.
- // Find the object that spans our boundary, if one exists.
- // first_block is the block possibly straddling our left boundary.
- HeapWord* first_block = sp->block_start(chunk_mr.start());
- assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
- "First chunk should always have a co-initial block");
- // Does the block straddle the chunk's left boundary, and is it
- // a non-array object?
- if (first_block < chunk_mr.start() // first block straddles left bdry
- && sp->block_is_obj(first_block) // first block is an object
- && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied)
- || oop(first_block)->is_typeArray())) {
- // Find our least non-clean card, so that a left neighbor
- // does not scan an object straddling the mutual boundary
- // too far to the right, and attempt to scan a portion of
- // that object twice.
- CardValue* first_dirty_card = NULL;
- CardValue* last_card_of_first_obj =
- byte_for(first_block + sp->block_size(first_block) - 1);
- CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
- CardValue* last_card_of_cur_chunk = byte_for(chunk_mr.last());
- CardValue* last_card_to_check = MIN2(last_card_of_cur_chunk, last_card_of_first_obj);
- // Note that this does not need to go beyond our last card
- // if our first object completely straddles this chunk.
- for (CardValue* cur = first_card_of_cur_chunk;
- cur <= last_card_to_check; cur++) {
- CardValue val = *cur;
- if (card_will_be_scanned(val)) {
- first_dirty_card = cur;
- break;
- } else {
- assert(!card_may_have_been_dirty(val), "Error");
- }
- }
- if (first_dirty_card != NULL) {
- assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
- assert(lowest_non_clean[cur_chunk_index] == NULL,
- "Write exactly once : value should be stable hereafter for this round");
- lowest_non_clean[cur_chunk_index] = first_dirty_card;
- }
- } else {
- // In this case we can help our neighbor by just asking them
- // to stop at our first card (even though it may not be dirty).
- assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
- CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
- lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
- }
-
- // Next, set our own max_to_do, which will strictly/exclusively bound
- // the highest address that we will scan past the right end of our chunk.
- HeapWord* max_to_do = NULL;
- if (chunk_mr.end() < used.end()) {
- // This is not the last chunk in the used region.
- // What is our last block? We check the first block of
- // the next (right) chunk rather than strictly check our last block
- // because it's potentially more efficient to do so.
- HeapWord* const last_block = sp->block_start(chunk_mr.end());
- assert(last_block <= chunk_mr.end(), "In case this property changes.");
- if ((last_block == chunk_mr.end()) // our last block does not straddle boundary
- || !sp->block_is_obj(last_block) // last_block isn't an object
- || oop(last_block)->is_objArray() // last_block is an array (precisely marked)
- || oop(last_block)->is_typeArray()) {
- max_to_do = chunk_mr.end();
- } else {
- assert(last_block < chunk_mr.end(), "Tautology");
- // It is a non-array object that straddles the right boundary of this chunk.
- // last_obj_card is the card corresponding to the start of the last object
- // in the chunk. Note that the last object may not start in
- // the chunk.
- CardValue* const last_obj_card = byte_for(last_block);
- const CardValue val = *last_obj_card;
- if (!card_will_be_scanned(val)) {
- assert(!card_may_have_been_dirty(val), "Error");
- // The card containing the head is not dirty. Any marks on
- // subsequent cards still in this chunk must have been made
- // precisely; we can cap processing at the end of our chunk.
- max_to_do = chunk_mr.end();
- } else {
- // The last object must be considered dirty, and extends onto the
- // following chunk. Look for a dirty card in that chunk that will
- // bound our processing.
- CardValue* limit_card = NULL;
- const size_t last_block_size = sp->block_size(last_block);
- CardValue* const last_card_of_last_obj =
- byte_for(last_block + last_block_size - 1);
- CardValue* const first_card_of_next_chunk = byte_for(chunk_mr.end());
- // This search potentially goes a long distance looking
- // for the next card that will be scanned, terminating
- // at the end of the last_block, if no earlier dirty card
- // is found.
- assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
- "last card of next chunk may be wrong");
- for (CardValue* cur = first_card_of_next_chunk;
- cur <= last_card_of_last_obj; cur++) {
- const CardValue val = *cur;
- if (card_will_be_scanned(val)) {
- limit_card = cur; break;
- } else {
- assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
- }
- }
- if (limit_card != NULL) {
- max_to_do = addr_for(limit_card);
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- } else {
- // The following is a pessimistic value, because it's possible
- // that a dirty card on a subsequent chunk has been cleared by
- // the time we get to look at it; we'll correct for that further below,
- // using the LNC array which records the least non-clean card
- // before cards were cleared in a particular chunk.
- limit_card = last_card_of_last_obj;
- max_to_do = last_block + last_block_size;
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- }
- assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
- "Bounds error.");
- // It is possible that a dirty card for the last object may have been
- // cleared before we had a chance to examine it. In that case, the value
- // will have been logged in the LNC for that chunk.
- // We need to examine as many chunks to the right as this object
- // covers. However, we need to bound this checking to the largest
- // entry in the LNC array: this is because the heap may expand
- // after the LNC array has been created but before we reach this point,
- // and the last block in our chunk may have been expanded to include
- // the expansion delta (and possibly subsequently allocated from, so
- // it wouldn't be sufficient to check whether that last block was
- // or was not an object at this point).
- uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
- - lowest_non_clean_base_chunk_index;
- const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
- - lowest_non_clean_base_chunk_index;
- if (last_chunk_index_to_check > last_chunk_index) {
- assert(last_block + last_block_size > used.end(),
- "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
- " does not exceed used.end() = " PTR_FORMAT ","
- " yet last_chunk_index_to_check " INTPTR_FORMAT
- " exceeds last_chunk_index " INTPTR_FORMAT,
- p2i(last_block), p2i(last_block + last_block_size),
- p2i(used.end()),
- last_chunk_index_to_check, last_chunk_index);
- assert(sp->used_region().end() > used.end(),
- "Expansion did not happen: "
- "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(sp->used_region().start()), p2i(sp->used_region().end()),
- p2i(used.start()), p2i(used.end()));
- last_chunk_index_to_check = last_chunk_index;
- }
- for (uintptr_t lnc_index = cur_chunk_index + 1;
- lnc_index <= last_chunk_index_to_check;
- lnc_index++) {
- CardValue* lnc_card = lowest_non_clean[lnc_index];
- if (lnc_card != NULL) {
- // we can stop at the first non-NULL entry we find
- if (lnc_card <= limit_card) {
- limit_card = lnc_card;
- max_to_do = addr_for(limit_card);
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- }
- // In any case, we break now
- break;
- } // else continue to look for a non-NULL entry if any
- }
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- }
- assert(max_to_do != NULL, "OOPS 1 !");
- }
- assert(max_to_do != NULL, "OOPS 2!");
- } else {
- max_to_do = used.end();
- }
- assert(max_to_do != NULL, "OOPS 3!");
- // Now we can set the closure we're using so it doesn't go beyond
- // max_to_do.
- dcto_cl->set_min_done(max_to_do);
-#ifndef PRODUCT
- dcto_cl->set_last_bottom(max_to_do);
-#endif
-}
-
-void
-CMSCardTable::
-get_LNC_array_for_space(Space* sp,
- CardValue**& lowest_non_clean,
- uintptr_t& lowest_non_clean_base_chunk_index,
- size_t& lowest_non_clean_chunk_size) {
-
- int i = find_covering_region_containing(sp->bottom());
- MemRegion covered = _covered[i];
- size_t n_chunks = chunks_to_cover(covered);
-
- // Only the first thread to obtain the lock will resize the
- // LNC array for the covered region. Any later expansion can't affect
- // the used_at_save_marks region.
- // (I observed a bug in which the first thread to execute this would
- // resize, and then it would cause "expand_and_allocate" that would
- // increase the number of chunks in the covered region. Then a second
- // thread would come and execute this, see that the size didn't match,
- // and free and allocate again. So the first thread would be using a
- // freed "_lowest_non_clean" array.)
-
- // Do a dirty read here. If we pass the conditional then take the rare
- // event lock and do the read again in case some other thread had already
- // succeeded and done the resize.
- int cur_collection = CMSHeap::heap()->total_collections();
- // Updated _last_LNC_resizing_collection[i] must not be visible before
- // _lowest_non_clean and friends are visible. Therefore use acquire/release
- // to guarantee this on non-TSO architectures.
- if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
- MutexLocker x(ParGCRareEvent_lock);
- // This load_acquire is here for clarity only. The MutexLocker already fences.
- if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
- if (_lowest_non_clean[i] == NULL ||
- n_chunks != _lowest_non_clean_chunk_size[i]) {
-
- // Should we delete the old?
- if (_lowest_non_clean[i] != NULL) {
- assert(n_chunks != _lowest_non_clean_chunk_size[i],
- "logical consequence");
- FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
- _lowest_non_clean[i] = NULL;
- }
- // Now allocate a new one if necessary.
- if (_lowest_non_clean[i] == NULL) {
- _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
- _lowest_non_clean_chunk_size[i] = n_chunks;
- _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
- for (int j = 0; j < (int)n_chunks; j++)
- _lowest_non_clean[i][j] = NULL;
- }
- }
- // Make sure this gets visible only after _lowest_non_clean* was initialized
- OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
- }
- }
- // In any case, now do the initialization.
- lowest_non_clean = _lowest_non_clean[i];
- lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
- lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
-}
-
-#ifdef ASSERT
-void CMSCardTable::verify_used_region_at_save_marks(Space* sp) const {
- MemRegion ur = sp->used_region();
- MemRegion urasm = sp->used_region_at_save_marks();
-
- if (!ur.contains(urasm)) {
- log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
- "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
- "[" PTR_FORMAT ", " PTR_FORMAT ")",
- p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
- MemRegion ur2 = sp->used_region();
- MemRegion urasm2 = sp->used_region_at_save_marks();
- if (!ur.equals(ur2)) {
- log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
- }
- if (!urasm.equals(urasm2)) {
- log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
- }
- ShouldNotReachHere();
- }
-}
-#endif // ASSERT
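
For reference, a minimal standalone sketch of the double-checked, acquire/release-protected resize protocol that get_LNC_array_for_space() above relied on. It uses plain C++11 atomics and hypothetical names instead of HotSpot's OrderAccess and ParGCRareEvent_lock; it illustrates the idea, not the removed implementation.

#include <atomic>
#include <cstddef>
#include <mutex>
#include <vector>

class ChunkTable {
  std::mutex _resize_lock;                        // stands in for ParGCRareEvent_lock
  std::atomic<int> _last_resizing_collection{-1}; // collection count at last resize
  std::vector<void*> _lowest_non_clean;           // stands in for the per-chunk card array

public:
  // Returns the per-chunk array, resizing it at most once per collection.
  std::vector<void*>& get_array(int cur_collection, std::size_t n_chunks) {
    // Dirty read first; the acquire pairs with the release store below, so a
    // thread that observes the updated collection count also observes the
    // fully initialized array.
    if (_last_resizing_collection.load(std::memory_order_acquire) != cur_collection) {
      std::lock_guard<std::mutex> x(_resize_lock);
      // Re-check under the lock: another thread may already have resized.
      if (_last_resizing_collection.load(std::memory_order_acquire) != cur_collection) {
        if (_lowest_non_clean.size() != n_chunks) {
          _lowest_non_clean.assign(n_chunks, nullptr); // drop the old array, allocate and zero the new one
        }
        // Publish only after the array is fully initialized.
        _last_resizing_collection.store(cur_collection, std::memory_order_release);
      }
    }
    return _lowest_non_clean;
  }
};

int main() {
  ChunkTable table;
  table.get_array(/* cur_collection */ 1, /* n_chunks */ 64);
  return 0;
}
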
--- a/src/hotspot/share/gc/cms/cmsCardTable.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSCARDTABLE_HPP
-#define SHARE_GC_CMS_CMSCARDTABLE_HPP
-
-#include "gc/shared/cardTableRS.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class DirtyCardToOopClosure;
-class MemRegion;
-class OopsInGenClosure;
-class Space;
-
-class CMSCardTable : public CardTableRS {
-private:
- // Returns the number of chunks necessary to cover "mr".
- size_t chunks_to_cover(MemRegion mr);
-
- // Returns the index of the chunk in a stride which
- // covers the given address.
- uintptr_t addr_to_chunk_index(const void* addr);
-
- // Initializes "lowest_non_clean" to point to the array for the region
- // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
- // index corresponding to the first element of that array.
- // Ensures that these arrays are of sufficient size, allocating if necessary.
- // May be called by several threads concurrently.
- void get_LNC_array_for_space(Space* sp,
- CardValue**& lowest_non_clean,
- uintptr_t& lowest_non_clean_base_chunk_index,
- size_t& lowest_non_clean_chunk_size);
-
- // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
- // to the cards in the stride (of n_strides) within the given space.
- void process_stride(Space* sp,
- MemRegion used,
- jint stride, int n_strides,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- CardValue** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size);
-
- // Makes sure that chunk boundaries are handled appropriately, by
- // adjusting the min_done of dcto_cl, and by using a special card-table
- // value to indicate how min_done should be set.
- void process_chunk_boundaries(Space* sp,
- DirtyCardToOopClosure* dcto_cl,
- MemRegion chunk_mr,
- MemRegion used,
- CardValue** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size);
-
- virtual void verify_used_region_at_save_marks(Space* sp) const NOT_DEBUG_RETURN;
-
-protected:
- // Work method used to implement non_clean_card_iterate_possibly_parallel()
- // above in the parallel case.
- virtual void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
- OopsInGenClosure* cl, CardTableRS* ct,
- uint n_threads);
-
-public:
- CMSCardTable(MemRegion whole_heap);
-};
-
-#endif // SHARE_GC_CMS_CMSCARDTABLE_HPP
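
The chunk arithmetic declared above (chunks_to_cover(), addr_to_chunk_index()) reduces to a division by the number of bytes one chunk of cards covers. The sketch below is a self-contained illustration with made-up constants; HotSpot derives the real values from the card size and ParGCCardsPerStrideChunk.

#include <cstddef>
#include <cstdint>
#include <cstdio>

static const std::size_t card_size_bytes  = 512;  // bytes covered by one card (assumed)
static const std::size_t cards_per_chunk  = 256;  // stands in for ParGCCardsPerStrideChunk
static const std::size_t chunk_size_bytes = card_size_bytes * cards_per_chunk;

static std::uintptr_t addr_to_chunk_index(const void* addr) {
  return reinterpret_cast<std::uintptr_t>(addr) / chunk_size_bytes;
}

static std::size_t chunks_to_cover(const void* start, const void* end) {
  // Count every chunk touched by [start, end), including a partial last chunk.
  return addr_to_chunk_index(static_cast<const char*>(end) - 1)
       - addr_to_chunk_index(start) + 1;
}

int main() {
  static char region[1 << 20];  // 1 MiB stand-in for a covered MemRegion
  std::printf("chunks needed: %zu\n", chunks_to_cover(region, region + sizeof(region)));
  return 0;
}
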
--- a/src/hotspot/share/gc/cms/cmsGCStats.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsGCStats.hpp"
-#include "gc/shared/gcUtil.inline.hpp"
-#include "runtime/globals.hpp"
-
-CMSGCStats::CMSGCStats() {
- _avg_promoted = new AdaptivePaddedNoZeroDevAverage(
- CMSExpAvgFactor,
- PromotedPadding);
-}
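
The AdaptivePaddedNoZeroDevAverage constructed above keeps, roughly, an exponentially decaying average of promoted bytes padded by a multiple of the (also decaying) deviation, so the estimate errs on the high side. A hedged, self-contained sketch of that idea, with illustrative weights rather than CMSExpAvgFactor and PromotedPadding:

#include <cmath>
#include <cstdio>

class PaddedAverage {
  double _weight;   // fraction of each new sample folded into the average
  double _padding;  // how many deviations to add on top of the average
  double _avg = 0.0;
  double _dev = 0.0;
public:
  PaddedAverage(double weight, double padding) : _weight(weight), _padding(padding) {}

  void sample(double v) {
    _dev = (1.0 - _weight) * _dev + _weight * std::fabs(v - _avg);
    _avg = (1.0 - _weight) * _avg + _weight * v;
  }

  double padded_average() const { return _avg + _padding * _dev; }
};

int main() {
  PaddedAverage promoted(0.5, 3.0);
  const double samples[] = { 1.0e6, 1.2e6, 0.9e6, 1.5e6 };
  for (double bytes : samples) {
    promoted.sample(bytes);
  }
  std::printf("padded promotion estimate: %.0f bytes\n", promoted.padded_average());
  return 0;
}
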
--- a/src/hotspot/share/gc/cms/cmsGCStats.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSGCSTATS_HPP
-#define SHARE_GC_CMS_CMSGCSTATS_HPP
-
-#include "gc/shared/gcStats.hpp"
-
-class CMSGCStats : public GCStats {
- public:
- CMSGCStats();
-
- virtual Name kind() {
- return CMSGCStatsKind;
- }
-};
-
-#endif // SHARE_GC_CMS_CMSGCSTATS_HPP
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCardTable.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genMemoryPools.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/memoryManager.hpp"
-#include "utilities/stack.inline.hpp"
-
-class CompactibleFreeListSpacePool : public CollectedMemoryPool {
-private:
- CompactibleFreeListSpace* _space;
-public:
- CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
- const char* name,
- size_t max_size,
- bool support_usage_threshold) :
- CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
- _space(space) {
- }
-
- MemoryUsage get_memory_usage() {
- size_t max_heap_size = (available_for_allocation() ? max_size() : 0);
- size_t used = used_in_bytes();
- size_t committed = _space->capacity();
-
- return MemoryUsage(initial_size(), used, committed, max_heap_size);
- }
-
- size_t used_in_bytes() {
- return _space->used_stable();
- }
-};
-
-CMSHeap::CMSHeap() :
- GenCollectedHeap(Generation::ParNew,
- Generation::ConcurrentMarkSweep,
- "ParNew:CMS"),
- _workers(NULL),
- _eden_pool(NULL),
- _survivor_pool(NULL),
- _old_pool(NULL) {
-}
-
-jint CMSHeap::initialize() {
- jint status = GenCollectedHeap::initialize();
- if (status != JNI_OK) return status;
-
- _workers = new WorkGang("GC Thread", ParallelGCThreads,
- /* are_GC_task_threads */true,
- /* are_ConcurrentGC_threads */false);
- if (_workers == NULL) {
- return JNI_ENOMEM;
- }
- _workers->initialize_workers();
-
- // If we are running CMS, create the collector responsible
- // for collecting the CMS generations.
- if (!create_cms_collector()) {
- return JNI_ENOMEM;
- }
-
- return JNI_OK;
-}
-
-CardTableRS* CMSHeap::create_rem_set(const MemRegion& reserved_region) {
- return new CMSCardTable(reserved_region);
-}
-
-void CMSHeap::initialize_serviceability() {
- _young_manager = new GCMemoryManager("ParNew", "end of minor GC");
- _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
-
- ParNewGeneration* young = young_gen();
- _eden_pool = new ContiguousSpacePool(young->eden(),
- "Par Eden Space",
- young->max_eden_size(),
- false);
-
- _survivor_pool = new SurvivorContiguousSpacePool(young,
- "Par Survivor Space",
- young->max_survivor_size(),
- false);
-
- ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
- _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
- "CMS Old Gen",
- old->reserved().byte_size(),
- true);
-
- _young_manager->add_pool(_eden_pool);
- _young_manager->add_pool(_survivor_pool);
- young->set_gc_manager(_young_manager);
-
- _old_manager->add_pool(_eden_pool);
- _old_manager->add_pool(_survivor_pool);
- _old_manager->add_pool(_old_pool);
- old ->set_gc_manager(_old_manager);
-
-}
-
-CMSHeap* CMSHeap::heap() {
- CollectedHeap* heap = Universe::heap();
- assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
- assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
- return static_cast<CMSHeap*>(heap);
-}
-
-void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
- assert(workers() != NULL, "should have workers here");
- workers()->threads_do(tc);
- ConcurrentMarkSweepThread::threads_do(tc);
-}
-
-void CMSHeap::print_gc_threads_on(outputStream* st) const {
- assert(workers() != NULL, "should have workers here");
- workers()->print_worker_threads_on(st);
- ConcurrentMarkSweepThread::print_all_on(st);
-}
-
-void CMSHeap::print_on_error(outputStream* st) const {
- GenCollectedHeap::print_on_error(st);
- st->cr();
- CMSCollector::print_on_error(st);
-}
-
-bool CMSHeap::create_cms_collector() {
- assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
- "Unexpected generation kinds");
- CMSCollector* collector =
- new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set());
-
- if (collector == NULL || !collector->completed_initialization()) {
- if (collector) {
- delete collector; // Be nice in embedded situation
- }
- vm_shutdown_during_initialization("Could not create CMS collector");
- return false;
- }
- return true; // success
-}
-
-void CMSHeap::collect(GCCause::Cause cause) {
- if (should_do_concurrent_full_gc(cause)) {
- // Mostly concurrent full collection.
- collect_mostly_concurrent(cause);
- } else {
- GenCollectedHeap::collect(cause);
- }
-}
-
-bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
- switch (cause) {
- case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
- case GCCause::_java_lang_system_gc:
- case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
- default: return false;
- }
-}
-
-void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
- assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
-
- MutexLocker ml(Heap_lock);
- // Read the GC counts while holding the Heap_lock
- unsigned int full_gc_count_before = total_full_collections();
- unsigned int gc_count_before = total_collections();
- {
- MutexUnlocker mu(Heap_lock);
- VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
- VMThread::execute(&op);
- }
-}
-
-void CMSHeap::stop() {
- ConcurrentMarkSweepThread::cmst()->stop();
-}
-
-void CMSHeap::safepoint_synchronize_begin() {
- ConcurrentMarkSweepThread::synchronize(false);
-}
-
-void CMSHeap::safepoint_synchronize_end() {
- ConcurrentMarkSweepThread::desynchronize(false);
-}
-
-void CMSHeap::cms_process_roots(StrongRootsScope* scope,
- bool young_gen_as_roots,
- ScanningOption so,
- bool only_strong_roots,
- OopsInGenClosure* root_closure,
- CLDClosure* cld_closure) {
- MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
- CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
-
- process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
-
- if (young_gen_as_roots &&
- _process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
- root_closure->set_generation(young_gen());
- young_gen()->oop_iterate(root_closure);
- root_closure->reset_generation();
- }
-
- _process_strong_tasks->all_tasks_completed(scope->n_threads());
-}
-
-void CMSHeap::gc_prologue(bool full) {
- GenCollectedHeap::gc_prologue(full);
-};
-
-void CMSHeap::gc_epilogue(bool full) {
- GenCollectedHeap::gc_epilogue(full);
-};
-
-GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
- GrowableArray<GCMemoryManager*> memory_managers(2);
- memory_managers.append(_young_manager);
- memory_managers.append(_old_manager);
- return memory_managers;
-}
-
-GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
- GrowableArray<MemoryPool*> memory_pools(3);
- memory_pools.append(_eden_pool);
- memory_pools.append(_survivor_pool);
- memory_pools.append(_old_pool);
- return memory_pools;
-}
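
collect_mostly_concurrent() above follows a common pattern: sample the collection counters while holding Heap_lock, release the lock, and hand the sampled counts to a VM operation so it can tell whether another collection has already satisfied the request. A simplified, self-contained sketch of that pattern with hypothetical names (std::mutex instead of HotSpot's Heap_lock and VM-operation machinery):

#include <cstdio>
#include <mutex>

struct Heap {
  std::mutex heap_lock;
  unsigned total_collections = 0;
  unsigned total_full_collections = 0;
};

// Stand-in for VM_GenCollectFullConcurrent::doit(): only acts if no full
// collection has completed since the counts were sampled.
static void run_concurrent_full_gc(Heap& h, unsigned gc_count_before, unsigned full_count_before) {
  if (h.total_full_collections == full_count_before) {
    std::printf("requesting concurrent cycle (sampled counts %u/%u)\n",
                gc_count_before, full_count_before);
    // ... nudge the background collector thread here ...
  } else {
    std::printf("another full collection already satisfied the request\n");
  }
}

static void collect_mostly_concurrent(Heap& h) {
  unsigned gc_count_before, full_count_before;
  {
    std::lock_guard<std::mutex> ml(h.heap_lock);  // sample both counters consistently
    gc_count_before   = h.total_collections;
    full_count_before = h.total_full_collections;
  }
  // The lock is released before executing the (potentially blocking) operation.
  run_concurrent_full_gc(h, gc_count_before, full_count_before);
}

int main() {
  Heap h;
  collect_mostly_concurrent(h);
  return 0;
}
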
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSHEAP_HPP
-#define SHARE_GC_CMS_CMSHEAP_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/oopStorageParState.hpp"
-#include "utilities/growableArray.hpp"
-
-class CLDClosure;
-class GCMemoryManager;
-class MemoryPool;
-class OopsInGenClosure;
-class outputStream;
-class StrongRootsScope;
-class ThreadClosure;
-class WorkGang;
-
-class CMSHeap : public GenCollectedHeap {
-public:
- CMSHeap();
-
- // Returns JNI_OK on success
- virtual jint initialize();
- virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
-
- // Convenience function to be used in situations where the heap type can be
- // asserted to be this type.
- static CMSHeap* heap();
-
- virtual Name kind() const {
- return CollectedHeap::CMS;
- }
-
- virtual const char* name() const {
- return "Concurrent Mark Sweep";
- }
-
- WorkGang* workers() const { return _workers; }
-
- virtual void print_gc_threads_on(outputStream* st) const;
- virtual void gc_threads_do(ThreadClosure* tc) const;
- virtual void print_on_error(outputStream* st) const;
-
- // Perform a full collection of the heap; intended for use in implementing
- // "System.gc". This implies as full a collection as the CollectedHeap
- // supports. Caller does not hold the Heap_lock on entry.
- void collect(GCCause::Cause cause);
-
- void stop();
- void safepoint_synchronize_begin();
- void safepoint_synchronize_end();
-
- virtual GrowableArray<GCMemoryManager*> memory_managers();
- virtual GrowableArray<MemoryPool*> memory_pools();
-
- // If "young_gen_as_roots" is false, younger generations are
- // not scanned as roots; in this case, the caller must be arranging to
- // scan the younger generations itself. (For example, a generation might
- // explicitly mark reachable objects in younger generations, to avoid
- // excess storage retention.)
- void cms_process_roots(StrongRootsScope* scope,
- bool young_gen_as_roots,
- ScanningOption so,
- bool only_strong_roots,
- OopsInGenClosure* root_closure,
- CLDClosure* cld_closure);
-
- GCMemoryManager* old_manager() const { return _old_manager; }
-
- ParNewGeneration* young_gen() const {
- assert(_young_gen->kind() == Generation::ParNew, "Wrong generation type");
- return static_cast<ParNewGeneration*>(_young_gen);
- }
-
- ConcurrentMarkSweepGeneration* old_gen() const {
- assert(_old_gen->kind() == Generation::ConcurrentMarkSweep, "Wrong generation kind");
- return static_cast<ConcurrentMarkSweepGeneration*>(_old_gen);
- }
-
- // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
- // allocated since the last call to save_marks in the young generation.
- // The "cur" closure is applied to references in the younger generation
- // at "level", and the "older" closure to older generations.
- template <typename OopClosureType1, typename OopClosureType2>
- void oop_since_save_marks_iterate(OopClosureType1* cur,
- OopClosureType2* older);
-
-private:
- WorkGang* _workers;
- MemoryPool* _eden_pool;
- MemoryPool* _survivor_pool;
- MemoryPool* _old_pool;
-
- virtual void gc_prologue(bool full);
- virtual void gc_epilogue(bool full);
-
- virtual void initialize_serviceability();
-
- // Accessor for memory state verification support
- NOT_PRODUCT(
- virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
- )
-
- // Returns success or failure.
- bool create_cms_collector();
-
- // In support of ExplicitGCInvokesConcurrent functionality
- bool should_do_concurrent_full_gc(GCCause::Cause cause);
-
- void collect_mostly_concurrent(GCCause::Cause cause);
-};
-
-#endif // SHARE_GC_CMS_CMSHEAP_HPP
--- a/src/hotspot/share/gc/cms/cmsHeap.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSHEAP_INLINE_HPP
-#define SHARE_GC_CMS_CMSHEAP_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/serial/defNewGeneration.inline.hpp"
-
-template <typename OopClosureType1, typename OopClosureType2>
-void CMSHeap::oop_since_save_marks_iterate(OopClosureType1* cur,
- OopClosureType2* older) {
- young_gen()->oop_since_save_marks_iterate(cur);
- old_gen()->oop_since_save_marks_iterate(older);
-}
-
-#endif // SHARE_GC_CMS_CMSHEAP_INLINE_HPP
--- a/src/hotspot/share/gc/cms/cmsLockVerifier.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "memory/universe.hpp"
-#include "runtime/vmThread.hpp"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except that it relaxes the
-// assertion somewhat for the parallel GC case, where VM thread
-// or the CMS thread might hold the lock on behalf of the parallel
-// threads. The second argument is in support of an extra locking
-// check for CFL spaces' free list locks.
-#ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock,
- const Mutex* p_lock1,
- const Mutex* p_lock2) {
- if (!Universe::is_fully_initialized()) {
- return;
- }
-
- Thread* myThread = Thread::current();
-
- if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
- assert(p_lock1 == NULL && p_lock2 == NULL, "Unexpected caller error");
- if (myThread->is_ConcurrentGC_thread()) {
- // This test might have to change in the future, if there can be
- // multiple peer CMS threads. But for now, if we're testing the CMS
- assert(myThread == ConcurrentMarkSweepThread::cmst(),
- "In CMS, CMS thread is the only Conc GC thread.");
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should have CMS token");
- } else if (myThread->is_VM_thread()) {
- assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
- "VM thread should have CMS token");
- } else {
- // Token should be held on our behalf by one of the other
- // of CMS or VM thread; not enough easily testable
- // state info to test which here.
- assert(myThread->is_GC_task_thread(), "Unexpected thread type");
- }
- return;
- }
-
- if (myThread->is_VM_thread()
- || myThread->is_ConcurrentGC_thread()
- || myThread->is_Java_thread()) {
- // Make sure that we are holding the associated lock.
- assert_lock_strong(lock);
- // The checking of p_lock is a spl case for CFLS' free list
- // locks: we make sure that none of the parallel GC work gang
- // threads are holding "sub-locks" of freeListLock(). We check only
- // the parDictionaryAllocLock because the others are too numerous.
- // This spl case code is somewhat ugly and any improvements
- // are welcome.
- assert(p_lock1 == NULL || !p_lock1->is_locked() || p_lock1->owned_by_self(),
- "Possible race between this and parallel GC threads");
- assert(p_lock2 == NULL || !p_lock2->is_locked() || p_lock2->owned_by_self(),
- "Possible race between this and parallel GC threads");
- } else if (myThread->is_GC_task_thread()) {
- // Make sure that the VM or CMS thread holds lock on our behalf
- // XXX If there were a concept of a gang_master for a (set of)
- // gang_workers, we could have used the identity of that thread
- // for checking ownership here; for now we just disjunct.
- assert(lock->owner() == VMThread::vm_thread() ||
- lock->owner() == ConcurrentMarkSweepThread::cmst(),
- "Should be locked by VM thread or CMS thread on my behalf");
- if (p_lock1 != NULL) {
- assert_lock_strong(p_lock1);
- }
- if (p_lock2 != NULL) {
- assert_lock_strong(p_lock2);
- }
- } else {
- // Make sure we didn't miss some other thread type calling into here;
- // perhaps as a result of future VM evolution.
- ShouldNotReachHere();
- }
-}
-#endif
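
The relaxed ownership rule encoded by CMSLockVerifier::assert_locked() above (a parallel GC worker may rely on the VM thread or the CMS thread holding a lock on its behalf) can be expressed in a few lines. The sketch below is a deliberately simplified, hypothetical model using std::thread ids instead of HotSpot's Thread hierarchy, not the removed verifier itself:

#include <cassert>
#include <thread>

struct SimpleLock {
  std::thread::id owner{};  // default-constructed id means "not held"
  bool owned_by(std::thread::id t) const { return owner == t; }
};

static void assert_locked_for_worker(const SimpleLock& lock,
                                     std::thread::id self,
                                     std::thread::id vm_thread,
                                     std::thread::id cms_thread) {
  // The lock must be held, either by the worker itself or by one of the two
  // threads that may legitimately hold it on the worker's behalf.
  assert(lock.owner != std::thread::id() &&
         (lock.owned_by(self) || lock.owned_by(vm_thread) || lock.owned_by(cms_thread)));
}

int main() {
  SimpleLock heap_lock;
  std::thread::id vm = std::this_thread::get_id();  // pretend the current thread is the VM thread
  heap_lock.owner = vm;
  assert_locked_for_worker(heap_lock, std::this_thread::get_id(), vm, std::thread::id{});
  return 0;
}
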
--- a/src/hotspot/share/gc/cms/cmsLockVerifier.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
-#define SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
-
-#include "runtime/mutex.hpp"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except
-// that it relaxes the assertion somewhat for the parallel GC case, where
-// main GC thread or the CMS thread might hold the lock on behalf of
-// the parallel threads.
-class CMSLockVerifier: AllStatic {
- public:
- static void assert_locked(const Mutex* lock, const Mutex* p_lock1, const Mutex* p_lock2)
- PRODUCT_RETURN;
- static void assert_locked(const Mutex* lock, const Mutex* p_lock) {
- assert_locked(lock, p_lock, NULL);
- }
- static void assert_locked(const Mutex* lock) {
- assert_locked(lock, NULL);
- }
-};
-
-#endif // SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
--- a/src/hotspot/share/gc/cms/cmsOopClosures.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,333 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSOOPCLOSURES_HPP
-#define SHARE_GC_CMS_CMSOOPCLOSURES_HPP
-
-#include "gc/shared/genOopClosures.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/iterator.hpp"
-
-/////////////////////////////////////////////////////////////////
-// Closures used by ConcurrentMarkSweepGeneration's collector
-/////////////////////////////////////////////////////////////////
-class ConcurrentMarkSweepGeneration;
-class CMSBitMap;
-class CMSMarkStack;
-class CMSCollector;
-class MarkFromRootsClosure;
-class ParMarkFromRootsClosure;
-
-class Mutex;
-
-// Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_DEFN \
- void do_oop(oop obj); \
- template <class T> inline void do_oop_work(T* p);
-
-// TODO: This duplication of the MetadataVisitingOopIterateClosure class is only needed
-// because some CMS OopClosures derive from OopsInGenClosure. It would be
-// good to get rid of them completely.
-class MetadataVisitingOopsInGenClosure: public OopsInGenClosure {
- public:
- virtual bool do_metadata() { return true; }
- virtual void do_klass(Klass* k);
- virtual void do_cld(ClassLoaderData* cld);
-};
-
-class MarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
- private:
- const MemRegion _span;
- CMSBitMap* _bitMap;
- protected:
- DO_OOP_WORK_DEFN
- public:
- MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class ParMarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
- private:
- const MemRegion _span;
- CMSBitMap* _bitMap;
- protected:
- DO_OOP_WORK_DEFN
- public:
- ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// A variant of the above used in certain kinds of CMS
-// marking verification.
-class MarkRefsIntoVerifyClosure: public MetadataVisitingOopsInGenClosure {
- private:
- const MemRegion _span;
- CMSBitMap* _verification_bm;
- CMSBitMap* _cms_bm;
- protected:
- DO_OOP_WORK_DEFN
- public:
- MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
- CMSBitMap* cms_bm);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSBitMap* _mod_union_table;
- CMSMarkStack* _mark_stack;
- bool _concurrent_precleaning;
- protected:
- DO_OOP_WORK_DEFN
- public:
- PushAndMarkClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- bool concurrent_precleaning);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// In the parallel case, the bit map and the
-// reference processor are currently all shared. Access to
-// these shared mutable structures must use appropriate
-// synchronization (for instance, via CAS). The marking stack
-// used in the non-parallel case above is here replaced with
-// an OopTaskQueue structure to allow efficient work stealing.
-class ParPushAndMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- protected:
- DO_OOP_WORK_DEFN
- public:
- ParPushAndMarkClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// The non-parallel version (the parallel version appears further below).
-class MarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
- private:
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSMarkStack* _mark_stack;
- PushAndMarkClosure _pushAndMarkClosure;
- CMSCollector* _collector;
- Mutex* _freelistLock;
- bool _yield;
- // Whether closure is being used for concurrent precleaning
- bool _concurrent_precleaning;
- protected:
- DO_OOP_WORK_DEFN
- public:
- MarkRefsIntoAndScanClosure(MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- CMSCollector* collector,
- bool should_yield,
- bool concurrent_precleaning);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-
- void set_freelistLock(Mutex* m) {
- _freelistLock = m;
- }
-
- private:
- inline void do_yield_check();
- void do_yield_work();
- bool take_from_overflow_list();
-};
-
-// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
-// stack and the bitMap are shared, so access needs to be suitably
-// synchronized. An OopTaskQueue structure, supporting efficient
-// work stealing, replaces a CMSMarkStack for storing grey objects.
-class ParMarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
- private:
- MemRegion _span;
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- const uint _low_water_mark;
- ParPushAndMarkClosure _parPushAndMarkClosure;
- protected:
- DO_OOP_WORK_DEFN
- public:
- ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-
- void trim_queue(uint size);
-};
-
-// This closure is used during the concurrent marking phase
-// following the first checkpoint. Its use is buried in
-// the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bitMap;
- CMSMarkStack* _markStack;
- HeapWord* const _finger;
- MarkFromRootsClosure* const
- _parent;
- protected:
- DO_OOP_WORK_DEFN
- public:
- PushOrMarkClosure(CMSCollector* cms_collector,
- MemRegion span,
- CMSBitMap* bitMap,
- CMSMarkStack* markStack,
- HeapWord* finger,
- MarkFromRootsClosure* parent);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-
- // Deal with a stack overflow condition
- void handle_stack_overflow(HeapWord* lost);
- private:
- inline void do_yield_check();
-};
-
-// A parallel (MT) version of the above.
-// This closure is used during the concurrent marking phase
-// following the first checkpoint. Its use is buried in
-// the closure ParMarkFromRootsClosure.
-class ParPushOrMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- MemRegion _whole_span;
- MemRegion _span; // local chunk
- CMSBitMap* _bit_map;
- OopTaskQueue* _work_queue;
- CMSMarkStack* _overflow_stack;
- HeapWord* const _finger;
- HeapWord* volatile* const _global_finger_addr;
- ParMarkFromRootsClosure* const _parent;
- protected:
- DO_OOP_WORK_DEFN
- public:
- ParPushOrMarkClosure(CMSCollector* cms_collector,
- MemRegion span,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- CMSMarkStack* mark_stack,
- HeapWord* finger,
- HeapWord* volatile* global_finger_addr,
- ParMarkFromRootsClosure* parent);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-
- // Deal with a stack overflow condition
- void handle_stack_overflow(HeapWord* lost);
- private:
- inline void do_yield_check();
-};
-
-// For objects in CMS generation, this closure marks
-// given objects (transitively) as being reachable/live.
-// This is currently used during the (weak) reference object
-// processing phase of the CMS final checkpoint step, as
-// well as during the concurrent precleaning of the discovered
-// reference lists.
-class CMSKeepAliveClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- const MemRegion _span;
- CMSMarkStack* _mark_stack;
- CMSBitMap* _bit_map;
- bool _concurrent_precleaning;
- protected:
- DO_OOP_WORK_DEFN
- public:
- CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
- CMSBitMap* bit_map, CMSMarkStack* mark_stack,
- bool cpc);
- bool concurrent_precleaning() const { return _concurrent_precleaning; }
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class CMSInnerParMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- MemRegion _span;
- OopTaskQueue* _work_queue;
- CMSBitMap* _bit_map;
- protected:
- DO_OOP_WORK_DEFN
- public:
- CMSInnerParMarkAndPushClosure(CMSCollector* collector,
- MemRegion span, CMSBitMap* bit_map,
- OopTaskQueue* work_queue);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// A parallel (MT) version of the above, used when
-// reference processing is parallel; the only difference
-// is in the do_oop method.
-class CMSParKeepAliveClosure: public MetadataVisitingOopIterateClosure {
- private:
- MemRegion _span;
- OopTaskQueue* _work_queue;
- CMSBitMap* _bit_map;
- CMSInnerParMarkAndPushClosure
- _mark_and_push;
- const uint _low_water_mark;
- void trim_queue(uint max);
- protected:
- DO_OOP_WORK_DEFN
- public:
- CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
- CMSBitMap* bit_map, OopTaskQueue* work_queue);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-#endif // SHARE_GC_CMS_CMSOOPCLOSURES_HPP
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
-#define SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
-
-#include "gc/cms/cmsOopClosures.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-// MetadataVisitingOopIterateClosure and MetadataVisitingOopsInGenClosure are duplicated,
-// until we get rid of OopsInGenClosure.
-
-inline void MetadataVisitingOopsInGenClosure::do_klass(Klass* k) {
- ClassLoaderData* cld = k->class_loader_data();
- MetadataVisitingOopsInGenClosure::do_cld(cld);
-}
-
-inline void MetadataVisitingOopsInGenClosure::do_cld(ClassLoaderData* cld) {
- cld->oops_do(this, ClassLoaderData::_claim_strong);
-}
-
-// Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls) \
- template <class T> void cls::do_oop_work(T* p) { \
- T heap_oop = RawAccess<>::oop_load(p); \
- if (!CompressedOops::is_null(heap_oop)) { \
- oop obj = CompressedOops::decode_not_null(heap_oop); \
- do_oop(obj); \
- } \
- } \
- inline void cls::do_oop(oop* p) { do_oop_work(p); } \
- inline void cls::do_oop(narrowOop* p) { do_oop_work(p); }
-
-DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
-DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
-DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
-DO_OOP_WORK_IMPL(PushAndMarkClosure)
-DO_OOP_WORK_IMPL(ParPushAndMarkClosure)
-DO_OOP_WORK_IMPL(MarkRefsIntoAndScanClosure)
-DO_OOP_WORK_IMPL(ParMarkRefsIntoAndScanClosure)
-
-// Trim our work_queue so its length is below max at return
-inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
- while (_work_queue->size() > max) {
- oop newOop;
- if (_work_queue->pop_local(newOop)) {
- assert(oopDesc::is_oop(newOop), "Expected an oop");
- assert(_bit_map->isMarked((HeapWord*)newOop),
- "only grey objects on this stack");
- // iterate over the oops in this oop, marking and pushing
- // the ones in CMS heap (i.e. in _span).
- newOop->oop_iterate(&_parPushAndMarkClosure);
- }
- }
-}
-
-DO_OOP_WORK_IMPL(PushOrMarkClosure)
-DO_OOP_WORK_IMPL(ParPushOrMarkClosure)
-DO_OOP_WORK_IMPL(CMSKeepAliveClosure)
-DO_OOP_WORK_IMPL(CMSInnerParMarkAndPushClosure)
-DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)
-
-#endif // SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
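
The DO_OOP_WORK_IMPL macro above instantiates the same closure body for both full-width and compressed reference slots: load the slot, skip nulls, decode, then dispatch to do_oop. The following is a hedged, self-contained sketch of that decode-and-dispatch shape with toy types; the real code uses RawAccess and CompressedOops, not the hypothetical decode() below.

#include <cstdint>
#include <cstdio>

struct Object { int payload; };
using oop = Object*;
using narrowOop = std::uint32_t;

static Object g_heap[16];  // pretend heap used as the base for "compressed" references

static oop decode(oop p)       { return p; }                              // already a full pointer
static oop decode(narrowOop n) { return n == 0 ? nullptr : &g_heap[n]; }  // base + index

struct MarkClosure {
  template <class T>
  void do_oop_work(T* p) {
    oop obj = decode(*p);  // load the slot and decode it
    if (obj != nullptr) {
      std::printf("visiting object with payload %d\n", obj->payload);
    }
  }
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

int main() {
  g_heap[3].payload = 42;
  Object on_stack{ 7 };
  oop wide = &on_stack;
  narrowOop narrow = 3;
  MarkClosure cl;
  cl.do_oop(&wide);    // full-width slot
  cl.do_oop(&narrow);  // compressed slot
  return 0;
}
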
--- a/src/hotspot/share/gc/cms/cmsVMOperations.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "memory/universe.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/os.hpp"
-#include "utilities/dtrace.hpp"
-
-//////////////////////////////////////////////////////////
-// Methods in abstract class VM_CMS_Operation
-//////////////////////////////////////////////////////////
-void VM_CMS_Operation::verify_before_gc() {
- if (VerifyBeforeGC &&
- CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
- GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
- HandleMark hm;
- FreelistLocker x(_collector);
- MutexLocker y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
- CMSHeap::heap()->prepare_for_verify();
- Universe::verify();
- }
-}
-
-void VM_CMS_Operation::verify_after_gc() {
- if (VerifyAfterGC &&
- CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
- GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
- HandleMark hm;
- FreelistLocker x(_collector);
- MutexLocker y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
- Universe::verify();
- }
-}
-
-bool VM_CMS_Operation::lost_race() const {
- if (CMSCollector::abstract_state() == CMSCollector::Idling) {
- // We lost a race to a foreground collection
- // -- there's nothing to do
- return true;
- }
- assert(CMSCollector::abstract_state() == legal_state(),
- "Inconsistent collector state?");
- return false;
-}
-
-bool VM_CMS_Operation::doit_prologue() {
- assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
- assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
- assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Possible deadlock");
-
- Heap_lock->lock();
- if (lost_race()) {
- assert(_prologue_succeeded == false, "Initialized in c'tor");
- Heap_lock->unlock();
- } else {
- _prologue_succeeded = true;
- }
- return _prologue_succeeded;
-}
-
-void VM_CMS_Operation::doit_epilogue() {
- assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
- assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
- assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Possible deadlock");
-
- if (Universe::has_reference_pending_list()) {
- Heap_lock->notify_all();
- }
- Heap_lock->unlock();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Initial_Mark
-//////////////////////////////////////////////////////////
-void VM_CMS_Initial_Mark::doit() {
- if (lost_race()) {
- // Nothing to do.
- return;
- }
- HS_PRIVATE_CMS_INITMARK_BEGIN();
- GCIdMark gc_id_mark(_gc_id);
-
- _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
-
- CMSHeap* heap = CMSHeap::heap();
- GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
-
- VM_CMS_Operation::verify_before_gc();
-
- IsGCActiveMark x; // stop-world GC active
- _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
-
- VM_CMS_Operation::verify_after_gc();
-
- _collector->_gc_timer_cm->register_gc_pause_end();
-
- HS_PRIVATE_CMS_INITMARK_END();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Final_Remark_Operation
-//////////////////////////////////////////////////////////
-void VM_CMS_Final_Remark::doit() {
- if (lost_race()) {
- // Nothing to do.
- return;
- }
- HS_PRIVATE_CMS_REMARK_BEGIN();
- GCIdMark gc_id_mark(_gc_id);
-
- _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
-
- CMSHeap* heap = CMSHeap::heap();
- GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
-
- VM_CMS_Operation::verify_before_gc();
-
- IsGCActiveMark x; // stop-world GC active
- _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
-
- VM_CMS_Operation::verify_after_gc();
-
- _collector->save_heap_summary();
- _collector->_gc_timer_cm->register_gc_pause_end();
-
- HS_PRIVATE_CMS_REMARK_END();
-}
-
-// VM operation to invoke a concurrent collection of a
-// GenCollectedHeap heap.
-void VM_GenCollectFullConcurrent::doit() {
- assert(Thread::current()->is_VM_thread(), "Should be VM thread");
- assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
-
- CMSHeap* heap = CMSHeap::heap();
- if (_gc_count_before == heap->total_collections()) {
- // The "full" of do_full_collection call below "forces"
- // a collection; the second arg, 0, below ensures that
- // only the young gen is collected. XXX In the future,
- // we'll probably need to have something in this interface
- // to say do this only if we are sure we will not bail
- // out to a full collection in this attempt, but that's
- // for the future.
- assert(SafepointSynchronize::is_at_safepoint(),
- "We can only be executing this arm of if at a safepoint");
- GCCauseSetter gccs(heap, _gc_cause);
- heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
- } // Else no need for a foreground young gc
- assert((_gc_count_before < heap->total_collections()) ||
- (GCLocker::is_active() /* gc may have been skipped */
- && (_gc_count_before == heap->total_collections())),
- "total_collections() should be monotonically increasing");
-
- MutexLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
- assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
- if (heap->total_full_collections() == _full_gc_count_before) {
- // Nudge the CMS thread to start a concurrent collection.
- CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
- } else {
- assert(_full_gc_count_before < heap->total_full_collections(), "Error");
- FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
- }
-}
-
-bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
- Thread* thr = Thread::current();
- assert(thr != NULL, "Unexpected tid");
- if (!thr->is_Java_thread()) {
- assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
- CMSHeap* heap = CMSHeap::heap();
- if (_gc_count_before != heap->total_collections()) {
- // No need to do a young gc, we'll just nudge the CMS thread
- // in the doit() method above, to be executed soon.
- assert(_gc_count_before < heap->total_collections(),
- "total_collections() should be monotonically increasing");
- return false; // no need for foreground young gc
- }
- }
- return true; // may still need foreground young gc
-}
-
-
-void VM_GenCollectFullConcurrent::doit_epilogue() {
- Thread* thr = Thread::current();
- assert(thr->is_Java_thread(), "just checking");
- JavaThread* jt = (JavaThread*)thr;
-
- if (Universe::has_reference_pending_list()) {
- Heap_lock->notify_all();
- }
- Heap_lock->unlock();
-
- // It is fine to test whether completed collections has
- // exceeded our request count without locking because
- // the completion count is monotonically increasing;
- // this will break for very long-running apps when the
- // count overflows and wraps around. XXX fix me !!!
- // e.g. at the rate of 1 full gc per ms, this could
- // overflow in about 1000 years.
- CMSHeap* heap = CMSHeap::heap();
- if (_gc_cause != GCCause::_gc_locker &&
- heap->total_full_collections_completed() <= _full_gc_count_before) {
- // maybe we should change the condition to test _gc_cause ==
- // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
- // instead of _gc_cause != GCCause::_gc_locker
- assert(GCCause::is_user_requested_gc(_gc_cause),
- "the only way to get here if this was a System.gc()-induced GC");
- assert(ExplicitGCInvokesConcurrent, "Error");
- // Now, wait for witnessing concurrent gc cycle to complete,
- // but do so in native mode, because we want to lock the
- // FullGCEvent_lock, which may be needed by the VM thread
- // or by the CMS thread, so we do not want to be suspended
- // while holding that lock.
- ThreadToNativeFromVM native(jt);
- MutexLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
- // Either a concurrent or a stop-world full gc is sufficient
- // witness to our request.
- while (heap->total_full_collections_completed() <= _full_gc_count_before) {
- FullGCCount_lock->wait_without_safepoint_check();
- }
- }
-}
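
VM_GenCollectFullConcurrent::doit_epilogue() above waits, under FullGCCount_lock, until the completed-full-collection counter exceeds the value sampled when the request was made. The same wait-for-a-monotonic-counter protocol is sketched below with standard C++ primitives and hypothetical names; it illustrates the idea only.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct GcState {
  std::mutex full_gc_count_lock;
  std::condition_variable full_gc_count_cv;
  unsigned full_collections_completed = 0;
};

static void wait_for_full_gc(GcState& gc, unsigned full_gc_count_before) {
  std::unique_lock<std::mutex> ml(gc.full_gc_count_lock);
  // Either a concurrent or a stop-the-world full gc bumps the counter, so
  // any increase past the sampled value is sufficient witness to the request.
  gc.full_gc_count_cv.wait(ml, [&] {
    return gc.full_collections_completed > full_gc_count_before;
  });
  std::printf("witnessed a completed full collection\n");
}

int main() {
  GcState gc;
  unsigned before = gc.full_collections_completed;
  std::thread collector([&] {
    std::lock_guard<std::mutex> ml(gc.full_gc_count_lock);
    gc.full_collections_completed++;  // a full collection completed
    gc.full_gc_count_cv.notify_all();
  });
  wait_for_full_gc(gc, before);
  collector.join();
  return 0;
}
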
--- a/src/hotspot/share/gc/cms/cmsVMOperations.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSVMOPERATIONS_HPP
-#define SHARE_GC_CMS_CMSVMOPERATIONS_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcVMOperations.hpp"
-#include "runtime/vmOperations.hpp"
-
-// The VM_CMS_Operation is slightly different from
-// a VM_GC_Operation -- and would not have subclassed easily
-// to VM_GC_Operation without several changes to VM_GC_Operation.
-// To minimize the changes, we have replicated some of the VM_GC_Operation
-// functionality here. We will consolidate that back by doing subclassing
-// as appropriate in Dolphin.
-//
-// VM_Operation
-// VM_CMS_Operation
-// - implements the common portion of work done in support
-// of CMS' stop-world phases (initial mark and remark).
-//
-// VM_CMS_Initial_Mark
-// VM_CMS_Final_Mark
-//
-
-// Forward decl.
-class CMSCollector;
-
-class VM_CMS_Operation: public VM_Operation {
- protected:
- CMSCollector* _collector; // associated collector
- bool _prologue_succeeded; // whether doit_prologue succeeded
- uint _gc_id;
-
- bool lost_race() const;
-
- public:
- VM_CMS_Operation(CMSCollector* collector):
- _collector(collector),
- _prologue_succeeded(false),
- _gc_id(GCId::current()) {}
- ~VM_CMS_Operation() {}
-
- // The legal collector state for executing this CMS op.
- virtual const CMSCollector::CollectorState legal_state() const = 0;
-
- // Whether the pending list lock needs to be held
- virtual const bool needs_pending_list_lock() const = 0;
-
- // Execute operations in the context of the caller,
- // prior to execution of the vm operation itself.
- virtual bool doit_prologue();
- // Execute operations in the context of the caller,
- // following completion of the vm operation.
- virtual void doit_epilogue();
-
- virtual bool evaluate_at_safepoint() const { return true; }
- virtual bool is_cheap_allocated() const { return false; }
- virtual bool allow_nested_vm_operations() const { return false; }
- bool prologue_succeeded() const { return _prologue_succeeded; }
-
- void verify_before_gc();
- void verify_after_gc();
-};
-
-
-// VM_CMS_Operation for the initial marking phase of CMS.
-class VM_CMS_Initial_Mark: public VM_CMS_Operation {
- public:
- VM_CMS_Initial_Mark(CMSCollector* _collector) :
- VM_CMS_Operation(_collector) {}
-
- virtual VMOp_Type type() const { return VMOp_CMS_Initial_Mark; }
- virtual void doit();
-
- virtual const CMSCollector::CollectorState legal_state() const {
- return CMSCollector::InitialMarking;
- }
-
- virtual const bool needs_pending_list_lock() const {
- return false;
- }
-};
-
-// VM_CMS_Operation for the final remark phase of CMS.
-class VM_CMS_Final_Remark: public VM_CMS_Operation {
- public:
- VM_CMS_Final_Remark(CMSCollector* _collector) :
- VM_CMS_Operation(_collector) {}
- virtual VMOp_Type type() const { return VMOp_CMS_Final_Remark; }
- virtual void doit();
-
- virtual const CMSCollector::CollectorState legal_state() const {
- return CMSCollector::FinalMarking;
- }
-
- virtual const bool needs_pending_list_lock() const {
- return true;
- }
-};
-
-
-// VM operation to invoke a concurrent collection of the heap as a
-// GenCollectedHeap heap.
-class VM_GenCollectFullConcurrent: public VM_GC_Operation {
- public:
- VM_GenCollectFullConcurrent(uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
- {
- assert(FullGCCount_lock != NULL, "Error");
- }
- ~VM_GenCollectFullConcurrent() {}
- virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }
- virtual void doit();
- virtual void doit_epilogue();
- virtual bool is_cheap_allocated() const { return false; }
- virtual bool evaluate_at_safepoint() const;
-};
-
-#endif // SHARE_GC_CMS_CMSVMOPERATIONS_HPP
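
The header removed above follows HotSpot's VM operation lifecycle: doit_prologue() runs in the requesting Java thread, doit() is evaluated by the VM thread (at a safepoint when evaluate_at_safepoint() returns true), and doit_epilogue() runs back in the requesting thread. A minimal, self-contained sketch of that three-phase shape is shown below; the class names are made up for illustration and this is not the real VM_Operation interface:

    #include <iostream>

    // Simplified stand-in for the VM_Operation lifecycle.
    class Operation {
     public:
      virtual ~Operation() = default;
      // Runs in the requesting thread; returning false aborts the operation,
      // much like VM_CMS_Operation::doit_prologue() when it loses the race.
      virtual bool doit_prologue() { return true; }
      // The stop-the-world work, normally evaluated by the VM thread.
      virtual void doit() = 0;
      // Cleanup back in the requesting thread.
      virtual void doit_epilogue() {}
    };

    // Driver corresponding (very loosely) to handing the operation
    // to the VM thread for execution.
    void execute(Operation* op) {
      if (!op->doit_prologue()) {
        return;              // prologue failed, skip the operation
      }
      op->doit();            // in HotSpot this happens at a safepoint
      op->doit_epilogue();
    }

    // Example operation analogous in shape to VM_CMS_Initial_Mark.
    class InitialMarkOp : public Operation {
     public:
      void doit() override { std::cout << "initial mark pause\n"; }
    };

    int main() {
      InitialMarkOp op;
      execute(&op);
      return 0;
    }
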
--- a/src/hotspot/share/gc/cms/cms_globals.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,429 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMS_GLOBALS_HPP
-#define SHARE_GC_CMS_CMS_GLOBALS_HPP
-
-#define GC_CMS_FLAGS(develop, \
- develop_pd, \
- product, \
- product_pd, \
- diagnostic, \
- diagnostic_pd, \
- experimental, \
- notproduct, \
- manageable, \
- product_rw, \
- lp64_product, \
- range, \
- constraint, \
- writeable) \
- product(bool, UseCMSBestFit, true, \
- "Use CMS best fit allocation strategy") \
- \
- product(size_t, CMSOldPLABMax, 1024, \
- "Maximum size of CMS gen promotion LAB caches per worker " \
- "per block size") \
- range(1, max_uintx) \
- constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit) \
- \
- product(size_t, CMSOldPLABMin, 16, \
- "Minimum size of CMS gen promotion LAB caches per worker " \
- "per block size") \
- range(1, max_uintx) \
- constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit) \
- \
- product(uintx, CMSOldPLABNumRefills, 4, \
- "Nominal number of refills of CMS gen promotion LAB cache " \
- "per worker per block size") \
- range(1, max_uintx) \
- \
- product(bool, CMSOldPLABResizeQuicker, false, \
- "React on-the-fly during a scavenge to a sudden " \
- "change in block demand rate") \
- \
- product(uintx, CMSOldPLABToleranceFactor, 4, \
- "The tolerance of the phase-change detector for on-the-fly " \
- "PLAB resizing during a scavenge") \
- range(1, max_uintx) \
- \
- product(uintx, CMSOldPLABReactivityFactor, 2, \
- "The gain in the feedback loop for on-the-fly PLAB resizing " \
- "during a scavenge") \
- range(1, max_uintx) \
- \
- product_pd(size_t, CMSYoungGenPerWorker, \
- "The maximum size of young gen chosen by default per GC worker " \
- "thread available") \
- range(1, max_uintx) \
- \
- product(uintx, CMSIncrementalSafetyFactor, 10, \
- "Percentage (0-100) used to add conservatism when computing the " \
- "duty cycle") \
- range(0, 100) \
- \
- product(uintx, CMSExpAvgFactor, 50, \
- "Percentage (0-100) used to weight the current sample when " \
- "computing exponential averages for CMS statistics") \
- range(0, 100) \
- \
- product(uintx, CMS_FLSWeight, 75, \
- "Percentage (0-100) used to weight the current sample when " \
- "computing exponentially decaying averages for CMS FLS " \
- "statistics") \
- range(0, 100) \
- \
- product(uintx, CMS_FLSPadding, 1, \
- "The multiple of deviation from mean to use for buffering " \
- "against volatility in free list demand") \
- range(0, max_juint) \
- \
- product(uintx, FLSCoalescePolicy, 2, \
- "CMS: aggressiveness level for coalescing, increasing " \
- "from 0 to 4") \
- range(0, 4) \
- \
- product(bool, FLSAlwaysCoalesceLarge, false, \
- "CMS: larger free blocks are always available for coalescing") \
- \
- product(double, FLSLargestBlockCoalesceProximity, 0.99, \
- "CMS: the smaller the percentage the greater the coalescing " \
- "force") \
- range(0.0, 1.0) \
- \
- product(double, CMSSmallCoalSurplusPercent, 1.05, \
- "CMS: the factor by which to inflate estimated demand of small " \
- "block sizes to prevent coalescing with an adjoining block") \
- range(0.0, DBL_MAX) \
- \
- product(double, CMSLargeCoalSurplusPercent, 0.95, \
- "CMS: the factor by which to inflate estimated demand of large " \
- "block sizes to prevent coalescing with an adjoining block") \
- range(0.0, DBL_MAX) \
- \
- product(double, CMSSmallSplitSurplusPercent, 1.10, \
- "CMS: the factor by which to inflate estimated demand of small " \
- "block sizes to prevent splitting to supply demand for smaller " \
- "blocks") \
- range(0.0, DBL_MAX) \
- \
- product(double, CMSLargeSplitSurplusPercent, 1.00, \
- "CMS: the factor by which to inflate estimated demand of large " \
- "block sizes to prevent splitting to supply demand for smaller " \
- "blocks") \
- range(0.0, DBL_MAX) \
- \
- product(bool, CMSExtrapolateSweep, false, \
- "CMS: cushion for block demand during sweep") \
- \
- product(uintx, CMS_SweepWeight, 75, \
- "Percentage (0-100) used to weight the current sample when " \
- "computing exponentially decaying average for inter-sweep " \
- "duration") \
- range(0, 100) \
- \
- product(uintx, CMS_SweepPadding, 1, \
- "The multiple of deviation from mean to use for buffering " \
- "against volatility in inter-sweep duration") \
- range(0, max_juint) \
- \
- product(uintx, CMS_SweepTimerThresholdMillis, 10, \
- "Skip block flux-rate sampling for an epoch unless inter-sweep " \
- "duration exceeds this threshold in milliseconds") \
- range(0, max_uintx) \
- \
- product(bool, CMSClassUnloadingEnabled, true, \
- "Whether class unloading enabled when using CMS GC") \
- \
- product(uintx, CMSClassUnloadingMaxInterval, 0, \
- "When CMS class unloading is enabled, the maximum CMS cycle " \
- "count for which classes may not be unloaded") \
- range(0, max_uintx) \
- \
- product(uintx, CMSIndexedFreeListReplenish, 4, \
- "Replenish an indexed free list with this number of chunks") \
- range(1, max_uintx) \
- \
- product(bool, CMSReplenishIntermediate, true, \
- "Replenish all intermediate free-list caches") \
- \
- product(bool, CMSSplitIndexedFreeListBlocks, true, \
- "When satisfying batched demand, split blocks from the " \
- "IndexedFreeList whose size is a multiple of requested size") \
- \
- product(bool, CMSLoopWarn, false, \
- "Warn in case of excessive CMS looping") \
- \
- notproduct(bool, CMSMarkStackOverflowALot, false, \
- "Simulate frequent marking stack / work queue overflow") \
- \
- notproduct(uintx, CMSMarkStackOverflowInterval, 1000, \
- "An \"interval\" counter that determines how frequently " \
- "to simulate overflow; a smaller number increases frequency") \
- \
- product(uintx, CMSMaxAbortablePrecleanLoops, 0, \
- "Maximum number of abortable preclean iterations, if > 0") \
- range(0, max_uintx) \
- \
- product(intx, CMSMaxAbortablePrecleanTime, 5000, \
- "Maximum time in abortable preclean (in milliseconds)") \
- range(0, max_intx) \
- \
- product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100, \
- "Nominal minimum work per abortable preclean iteration") \
- range(0, max_uintx) \
- \
- manageable(intx, CMSAbortablePrecleanWaitMillis, 100, \
- "Time that we sleep between iterations when not given " \
- "enough work per iteration") \
- range(0, max_intx) \
- \
- /* 4096 = CardTable::card_size_in_words * BitsPerWord */ \
- product(size_t, CMSRescanMultiple, 32, \
- "Size (in cards) of CMS parallel rescan task") \
- range(1, SIZE_MAX / 4096) \
- constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit) \
- \
- /* 4096 = CardTable::card_size_in_words * BitsPerWord */ \
- product(size_t, CMSConcMarkMultiple, 32, \
- "Size (in cards) of CMS concurrent MT marking task") \
- range(1, SIZE_MAX / 4096) \
- constraint(CMSConcMarkMultipleConstraintFunc,AfterMemoryInit) \
- \
- product(bool, CMSAbortSemantics, false, \
- "Whether abort-on-overflow semantics is implemented") \
- \
- product(bool, CMSParallelInitialMarkEnabled, true, \
- "Use the parallel initial mark.") \
- \
- product(bool, CMSParallelRemarkEnabled, true, \
- "Whether parallel remark enabled (only if ParNewGC)") \
- \
- product(bool, CMSParallelSurvivorRemarkEnabled, true, \
- "Whether parallel remark of survivor space " \
- "enabled (effective only if CMSParallelRemarkEnabled)") \
- \
- product(bool, CMSPLABRecordAlways, true, \
- "Always record survivor space PLAB boundaries (effective only " \
- "if CMSParallelSurvivorRemarkEnabled)") \
- \
- product(bool, CMSEdenChunksRecordAlways, true, \
- "Always record eden chunks used for the parallel initial mark " \
- "or remark of eden") \
- \
- product(bool, CMSConcurrentMTEnabled, true, \
- "Whether multi-threaded concurrent work enabled " \
- "(effective only if ParNewGC)") \
- \
- product(bool, CMSPrecleaningEnabled, true, \
- "Whether concurrent precleaning enabled") \
- \
- product(uintx, CMSPrecleanIter, 3, \
- "Maximum number of precleaning iteration passes") \
- range(0, 9) \
- \
- product(uintx, CMSPrecleanDenominator, 3, \
- "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
- "ratio") \
- range(1, max_uintx) \
- constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo) \
- \
- product(uintx, CMSPrecleanNumerator, 2, \
- "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
- "ratio") \
- range(0, max_uintx-1) \
- constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo) \
- \
- product(bool, CMSPrecleanRefLists1, true, \
- "Preclean ref lists during (initial) preclean phase") \
- \
- product(bool, CMSPrecleanRefLists2, false, \
- "Preclean ref lists during abortable preclean phase") \
- \
- product(bool, CMSPrecleanSurvivors1, false, \
- "Preclean survivors during (initial) preclean phase") \
- \
- product(bool, CMSPrecleanSurvivors2, true, \
- "Preclean survivors during abortable preclean phase") \
- \
- product(uintx, CMSPrecleanThreshold, 1000, \
- "Do not iterate again if number of dirty cards is less than this")\
- range(100, max_uintx) \
- \
- product(bool, CMSCleanOnEnter, true, \
- "Clean-on-enter optimization for reducing number of dirty cards") \
- \
- product(uintx, CMSRemarkVerifyVariant, 1, \
- "Choose variant (1,2) of verification following remark") \
- range(1, 2) \
- \
- product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M, \
- "If Eden size is below this, do not try to schedule remark") \
- range(0, max_uintx) \
- \
- product(uintx, CMSScheduleRemarkEdenPenetration, 50, \
- "The Eden occupancy percentage (0-100) at which " \
- "to try and schedule remark pause") \
- range(0, 100) \
- \
- product(uintx, CMSScheduleRemarkSamplingRatio, 5, \
- "Start sampling eden top at least before young gen " \
- "occupancy reaches 1/<ratio> of the size at which " \
- "we plan to schedule remark") \
- range(1, max_uintx) \
- \
- product(uintx, CMSSamplingGrain, 16*K, \
- "The minimum distance between eden samples for CMS (see above)") \
- range(ObjectAlignmentInBytes, max_uintx) \
- constraint(CMSSamplingGrainConstraintFunc,AfterMemoryInit) \
- \
- product(bool, CMSScavengeBeforeRemark, false, \
- "Attempt scavenge before the CMS remark step") \
- \
- product(uintx, CMSWorkQueueDrainThreshold, 10, \
- "Don't drain below this size per parallel worker/thief") \
- range(1, max_juint) \
- constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo) \
- \
- manageable(intx, CMSWaitDuration, 2000, \
- "Time in milliseconds that CMS thread waits for young GC") \
- range(min_jint, max_jint) \
- \
- develop(uintx, CMSCheckInterval, 1000, \
- "Interval in milliseconds that CMS thread checks if it " \
- "should start a collection cycle") \
- \
- product(bool, CMSYield, true, \
- "Yield between steps of CMS") \
- \
- product(size_t, CMSBitMapYieldQuantum, 10*M, \
- "Bitmap operations should process at most this many bits " \
- "between yields") \
- range(1, max_uintx) \
- constraint(CMSBitMapYieldQuantumConstraintFunc,AfterMemoryInit) \
- \
- product(bool, CMSPrintChunksInDump, false, \
- "If logging for the \"gc\" and \"promotion\" tags is enabled on" \
- "trace level include more detailed information about the" \
- "free chunks") \
- \
- product(bool, CMSPrintObjectsInDump, false, \
- "If logging for the \"gc\" and \"promotion\" tags is enabled on" \
- "trace level include more detailed information about the" \
- "allocated objects") \
- \
- diagnostic(bool, FLSVerifyAllHeapReferences, false, \
- "Verify that all references across the FLS boundary " \
- "are to valid objects") \
- \
- diagnostic(bool, FLSVerifyLists, false, \
- "Do lots of (expensive) FreeListSpace verification") \
- \
- diagnostic(bool, FLSVerifyIndexTable, false, \
- "Do lots of (expensive) FLS index table verification") \
- \
- product(uintx, CMSTriggerRatio, 80, \
- "Percentage of MinHeapFreeRatio in CMS generation that is " \
- "allocated before a CMS collection cycle commences") \
- range(0, 100) \
- \
- product(uintx, CMSBootstrapOccupancy, 50, \
- "Percentage CMS generation occupancy at which to " \
- "initiate CMS collection for bootstrapping collection stats") \
- range(0, 100) \
- \
- product(intx, CMSInitiatingOccupancyFraction, -1, \
- "Percentage CMS generation occupancy to start a CMS collection " \
- "cycle. A negative value means that CMSTriggerRatio is used") \
- range(min_intx, 100) \
- \
- manageable(intx, CMSTriggerInterval, -1, \
- "Commence a CMS collection cycle (at least) every so many " \
- "milliseconds (0 permanently, -1 disabled)") \
- range(-1, max_intx) \
- \
- product(bool, UseCMSInitiatingOccupancyOnly, false, \
- "Only use occupancy as a criterion for starting a CMS collection")\
- \
- product(uintx, CMSIsTooFullPercentage, 98, \
- "An absolute ceiling above which CMS will always consider the " \
- "unloading of classes when class unloading is enabled") \
- range(0, 100) \
- \
- develop(bool, CMSTestInFreeList, false, \
- "Check if the coalesced range is already in the " \
- "free lists as claimed") \
- \
- notproduct(bool, CMSVerifyReturnedBytes, false, \
- "Check that all the garbage collected was returned to the " \
- "free lists") \
- \
- diagnostic(bool, BindCMSThreadToCPU, false, \
- "Bind CMS Thread to CPU if possible") \
- \
- diagnostic(uintx, CPUForCMSThread, 0, \
- "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \
- range(0, max_juint) \
- \
- product(uintx, CMSCoordinatorYieldSleepCount, 10, \
- "Number of times the coordinator GC thread will sleep while " \
- "yielding before giving up and resuming GC") \
- range(0, max_juint) \
- \
- product(uintx, CMSYieldSleepCount, 0, \
- "Number of times a GC thread (minus the coordinator) " \
- "will sleep while yielding before giving up and resuming GC") \
- range(0, max_juint) \
- \
- product(bool, ParGCUseLocalOverflow, false, \
- "Instead of a global overflow list, use local overflow stacks") \
- \
- product(bool, ParGCTrimOverflow, true, \
- "Eagerly trim the local overflow lists " \
- "(when ParGCUseLocalOverflow)") \
- \
- notproduct(bool, ParGCWorkQueueOverflowALot, false, \
- "Simulate work queue overflow in ParNew") \
- \
- notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000, \
- "An `interval' counter that determines how frequently " \
- "we simulate overflow; a smaller number increases frequency") \
- \
- product(uintx, ParGCDesiredObjsFromOverflowList, 20, \
- "The desired number of objects to claim from the overflow list") \
- range(0, max_uintx) \
- \
- diagnostic(uintx, ParGCStridesPerThread, 2, \
- "The number of strides per worker thread that we divide up the " \
- "card table scanning work into") \
- range(1, max_uintx) \
- constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo) \
- \
- diagnostic(intx, ParGCCardsPerStrideChunk, 256, \
- "The number of cards in each chunk of the parallel chunks used " \
- "during card table scanning") \
- range(1, max_intx) \
- constraint(ParGCCardsPerStrideChunkConstraintFunc,AfterMemoryInit)
-
-#endif // SHARE_GC_CMS_CMS_GLOBALS_HPP
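
cms_globals.hpp, removed above, is one instance of HotSpot's flag-table idiom: a single GC_CMS_FLAGS macro enumerates every flag, and callers supply the product/develop/range/constraint macros needed to expand that list into declarations, definitions, or registration code. The tiny X-macro example below illustrates the technique in isolation; the flag names and macros are invented for illustration and are not HotSpot's:

    #include <cstdio>

    // One table describing all flags: type, name, default value, description.
    #define MY_GC_FLAGS(product)                                          \
      product(bool,     UseBestFit,   true, "Use best-fit allocation")    \
      product(unsigned, ExpAvgFactor, 50,   "Exponential average weight")

    // Expansion 1: define a global variable per flag.
    #define DEFINE_FLAG(type, name, value, doc) type name = value;
    MY_GC_FLAGS(DEFINE_FLAG)
    #undef DEFINE_FLAG

    // Expansion 2: print every flag with its default and description.
    #define PRINT_FLAG(type, name, value, doc) \
      std::printf("%-14s default=%d  %s\n", #name, (int)(value), doc);

    int main() {
      MY_GC_FLAGS(PRINT_FLAG)
      return 0;
    }

Defining the table once and expanding it several ways is what lets a flag file like this be deleted wholesale: removing the table removes the declarations, the defaults, and the range/constraint registrations in one place.
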
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,3141 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/blockOffsetTable.inline.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/init.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/align.hpp"
-#include "utilities/copy.hpp"
-
-// Specialize for AdaptiveFreeList which tries to avoid
-// splitting a chunk of a size that is under populated in favor of
-// an over populated size. The general get_better_list() just returns
-// the current list.
-template <>
-TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
-TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
- BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
- // A candidate chunk has been found. If it is already under
- // populated, get a chunk associated with the hint for this
- // chunk.
-
- TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
- if (curTL->surplus() <= 0) {
- /* Use the hint to find a size with a surplus, and reset the hint. */
- TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
- while (hintTL->hint() != 0) {
- assert(hintTL->hint() > hintTL->size(),
- "hint points in the wrong direction");
- hintTL = dictionary->find_list(hintTL->hint());
- assert(curTL != hintTL, "Infinite loop");
- if (hintTL == NULL ||
- hintTL == curTL /* Should not happen but protect against it */ ) {
- // No useful hint. Set the hint to NULL and go on.
- curTL->set_hint(0);
- break;
- }
- assert(hintTL->size() > curTL->size(), "hint is inconsistent");
- if (hintTL->surplus() > 0) {
- // The hint led to a list that has a surplus. Use it.
- // Set the hint for the candidate to an overpopulated
- // size.
- curTL->set_hint(hintTL->size());
- // Change the candidate.
- curTL = hintTL;
- break;
- }
- }
- }
- return curTL;
-}
-
-void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
- TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
- if (nd) {
- if (split) {
- if (birth) {
- nd->increment_split_births();
- nd->increment_surplus();
- } else {
- nd->increment_split_deaths();
- nd->decrement_surplus();
- }
- } else {
- if (birth) {
- nd->increment_coal_births();
- nd->increment_surplus();
- } else {
- nd->increment_coal_deaths();
- nd->decrement_surplus();
- }
- }
- }
- // A list for this size may not be found (nd == 0) if
- // This is a death where the appropriate list is now
- // empty and has been removed from the list.
- // This is a birth associated with a LinAB. The chunk
- // for the LinAB is not in the dictionary.
-}
-
-bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
- if (FLSAlwaysCoalesceLarge) return true;
-
- TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
- // None of requested size implies overpopulated.
- return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
- list_of_size->count() > list_of_size->coal_desired();
-}
-
-// For each list in the tree, calculate the desired, desired
-// coalesce, count before sweep, and surplus before sweep.
-class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- double _percentage;
- float _inter_sweep_current;
- float _inter_sweep_estimate;
- float _intra_sweep_estimate;
-
- public:
- BeginSweepClosure(double p, float inter_sweep_current,
- float inter_sweep_estimate,
- float intra_sweep_estimate) :
- _percentage(p),
- _inter_sweep_current(inter_sweep_current),
- _inter_sweep_estimate(inter_sweep_estimate),
- _intra_sweep_estimate(intra_sweep_estimate) { }
-
- void do_list(AdaptiveFreeList<FreeChunk>* fl) {
- double coalSurplusPercent = _percentage;
- fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
- fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
- fl->set_before_sweep(fl->count());
- fl->set_bfr_surp(fl->surplus());
- }
-};
-
-void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
- float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
- BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
- inter_sweep_estimate,
- intra_sweep_estimate);
- bsc.do_tree(root());
-}
-
-// Calculate surpluses for the lists in the tree.
-class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- double percentage;
- public:
- setTreeSurplusClosure(double v) { percentage = v; }
-
- void do_list(AdaptiveFreeList<FreeChunk>* fl) {
- double splitSurplusPercent = percentage;
- fl->set_surplus(fl->count() -
- (ssize_t)((double)fl->desired() * splitSurplusPercent));
- }
-};
-
-void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
- setTreeSurplusClosure sts(splitSurplusPercent);
- sts.do_tree(root());
-}
-
-// Set hints for the lists in the tree.
-class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- size_t hint;
- public:
- setTreeHintsClosure(size_t v) { hint = v; }
-
- void do_list(AdaptiveFreeList<FreeChunk>* fl) {
- fl->set_hint(hint);
- assert(fl->hint() == 0 || fl->hint() > fl->size(),
- "Current hint is inconsistent");
- if (fl->surplus() > 0) {
- hint = fl->size();
- }
- }
-};
-
-void AFLBinaryTreeDictionary::set_tree_hints(void) {
- setTreeHintsClosure sth(0);
- sth.do_tree(root());
-}
-
-// Save count before previous sweep and splits and coalesces.
-class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- void do_list(AdaptiveFreeList<FreeChunk>* fl) {
- fl->set_prev_sweep(fl->count());
- fl->set_coal_births(0);
- fl->set_coal_deaths(0);
- fl->set_split_births(0);
- fl->set_split_deaths(0);
- }
-};
-
-void AFLBinaryTreeDictionary::clear_tree_census(void) {
- clearTreeCensusClosure ctc;
- ctc.do_tree(root());
-}
-
-// Do reporting and post sweep clean up.
-void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
- // Does walking the tree 3 times hurt?
- set_tree_surplus(splitSurplusPercent);
- set_tree_hints();
- LogTarget(Trace, gc, freelist, stats) log;
- if (log.is_enabled()) {
- LogStream out(log);
- report_statistics(&out);
- }
- clear_tree_census();
-}
-
-// Print census information - counts, births, deaths, etc.
-// for each list in the tree. Also print some summary
-// information.
-class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- int _print_line;
- size_t _total_free;
- AdaptiveFreeList<FreeChunk> _total;
-
- public:
- PrintTreeCensusClosure() {
- _print_line = 0;
- _total_free = 0;
- }
- AdaptiveFreeList<FreeChunk>* total() { return &_total; }
- size_t total_free() { return _total_free; }
-
- void do_list(AdaptiveFreeList<FreeChunk>* fl) {
- LogStreamHandle(Debug, gc, freelist, census) out;
-
- if (++_print_line >= 40) {
- AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
- _print_line = 0;
- }
- fl->print_on(&out);
- _total_free += fl->count() * fl->size() ;
- total()->set_count( total()->count() + fl->count() );
- total()->set_bfr_surp( total()->bfr_surp() + fl->bfr_surp() );
- total()->set_surplus( total()->split_deaths() + fl->surplus() );
- total()->set_desired( total()->desired() + fl->desired() );
- total()->set_prev_sweep( total()->prev_sweep() + fl->prev_sweep() );
- total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
- total()->set_coal_births( total()->coal_births() + fl->coal_births() );
- total()->set_coal_deaths( total()->coal_deaths() + fl->coal_deaths() );
- total()->set_split_births(total()->split_births() + fl->split_births());
- total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
- }
-};
-
-void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {
-
- st->print_cr("BinaryTree");
- AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
- PrintTreeCensusClosure ptc;
- ptc.do_tree(root());
-
- AdaptiveFreeList<FreeChunk>* total = ptc.total();
- AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
- total->print_on(st, "TOTAL\t");
- st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f deficit: %8.5f",
- ptc.total_free(),
- (double)(total->split_births() + total->coal_births()
- - total->split_deaths() - total->coal_deaths())
- /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
- (double)(total->desired() - total->count())
- /(total->desired() != 0 ? (double)total->desired() : 1.0));
-}
-
-/////////////////////////////////////////////////////////////////////////
-//// CompactibleFreeListSpace
-/////////////////////////////////////////////////////////////////////////
-
-// highest ranked free list lock rank
-int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
-
-// Defaults are 0 so things will break badly if incorrectly initialized.
-size_t CompactibleFreeListSpace::IndexSetStart = 0;
-size_t CompactibleFreeListSpace::IndexSetStride = 0;
-size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
-
-size_t MinChunkSize = 0;
-
-void CompactibleFreeListSpace::set_cms_values() {
- // Set CMS global values
- assert(MinChunkSize == 0, "already set");
-
- // MinChunkSize should be a multiple of MinObjAlignment and be large enough
- // for chunks to contain a FreeChunk.
- _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
- MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
-
- assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
- IndexSetStart = MinChunkSize;
- IndexSetStride = MinObjAlignment;
-}
-
-// Constructor
-CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
- _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
- CMSRescanMultiple),
- _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
- CMSConcMarkMultiple),
- _bt(bs, mr),
- _collector(NULL),
- // free list locks are in the range of values taken by _lockRank
- // This range currently is [_leaf+2, _leaf+3]
- // Note: this requires that CFLspace c'tors
- // are called serially in the order in which the locks are
- // are acquired in the program text. This is true today.
- _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
- Monitor::_safepoint_check_never),
- _preconsumptionDirtyCardClosure(NULL),
- _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
- "CompactibleFreeListSpace_dict_par_lock", true,
- Monitor::_safepoint_check_never)
-{
- assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
- "FreeChunk is larger than expected");
- _bt.set_space(this);
- initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
-
- _dictionary = new AFLBinaryTreeDictionary(mr);
-
- assert(_dictionary != NULL, "CMS dictionary initialization");
- // The indexed free lists are initially all empty and are lazily
- // filled in on demand. Initialize the array elements to NULL.
- initializeIndexedFreeListArray();
-
- _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
- SmallForLinearAlloc);
-
- // CMSIndexedFreeListReplenish should be at least 1
- CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
- _promoInfo.setSpace(this);
- if (UseCMSBestFit) {
- _fitStrategy = FreeBlockBestFitFirst;
- } else {
- _fitStrategy = FreeBlockStrategyNone;
- }
- check_free_list_consistency();
-
- // Initialize locks for parallel case.
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
- "a freelist par lock", true, Mutex::_safepoint_check_never);
- DEBUG_ONLY(
- _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
- )
- }
- _dictionary->set_par_lock(&_parDictionaryAllocLock);
-
- _used_stable = 0;
-}
-
-// Like CompactibleSpace forward() but always calls cross_threshold() to
-// update the block offset table. Removed initialize_threshold call because
-// CFLS does not use a block offset array for contiguous spaces.
-HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
- CompactPoint* cp, HeapWord* compact_top) {
- // q is alive
- // First check if we should switch compaction space
- assert(this == cp->space, "'this' should be current compaction space.");
- size_t compaction_max_size = pointer_delta(end(), compact_top);
- assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
- "virtual adjustObjectSize_v() method is not correct");
- size_t adjusted_size = adjustObjectSize(size);
- assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
- "no small fragments allowed");
- assert(minimum_free_block_size() == MinChunkSize,
- "for de-virtualized reference below");
- // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
- if (adjusted_size + MinChunkSize > compaction_max_size &&
- adjusted_size != compaction_max_size) {
- do {
- // switch to next compaction space
- cp->space->set_compaction_top(compact_top);
- cp->space = cp->space->next_compaction_space();
- if (cp->space == NULL) {
- cp->gen = CMSHeap::heap()->young_gen();
- assert(cp->gen != NULL, "compaction must succeed");
- cp->space = cp->gen->first_compaction_space();
- assert(cp->space != NULL, "generation must have a first compaction space");
- }
- compact_top = cp->space->bottom();
- cp->space->set_compaction_top(compact_top);
- // The correct adjusted_size may not be the same as that for this method
- // (i.e., cp->space may no longer be "this" so adjust the size again.
- // Use the virtual method which is not used above to save the virtual
- // dispatch.
- adjusted_size = cp->space->adjust_object_size_v(size);
- compaction_max_size = pointer_delta(cp->space->end(), compact_top);
- assert(cp->space->minimum_free_block_size() == 0, "just checking");
- } while (adjusted_size > compaction_max_size);
- }
-
- // store the forwarding pointer into the mark word
- if ((HeapWord*)q != compact_top) {
- q->forward_to(oop(compact_top));
- assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
- } else {
- // if the object isn't moving we can just set the mark to the default
- // mark and handle it specially later on.
- q->init_mark_raw();
- assert(q->forwardee() == NULL, "should be forwarded to NULL");
- }
-
- compact_top += adjusted_size;
-
- // we need to update the offset table so that the beginnings of objects can be
- // found during scavenge. Note that we are updating the offset table based on
- // where the object will be once the compaction phase finishes.
-
- // Always call cross_threshold(). A contiguous space can only call it when
- // the compaction_top exceeds the current threshold but not for an
- // non-contiguous space.
- cp->threshold =
- cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
- return compact_top;
-}
-
-// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
-// and use of single_block instead of alloc_block. The name here is not really
-// appropriate - maybe a more general name could be invented for both the
-// contiguous and noncontiguous spaces.
-
-HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
- _bt.single_block(start, the_end);
- return end();
-}
-
-// Initialize them to NULL.
-void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
- for (size_t i = 0; i < IndexSetSize; i++) {
- // Note that on platforms where objects are double word aligned,
- // the odd array elements are not used. It is convenient, however,
- // to map directly from the object size to the array element.
- _indexedFreeList[i].reset(IndexSetSize);
- _indexedFreeList[i].set_size(i);
- assert(_indexedFreeList[i].count() == 0, "reset check failed");
- assert(_indexedFreeList[i].head() == NULL, "reset check failed");
- assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
- assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
- }
-}
-
-size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
- return adjustObjectSize(oop(addr)->size());
-}
-
-void CompactibleFreeListSpace::resetIndexedFreeListArray() {
- for (size_t i = 1; i < IndexSetSize; i++) {
- assert(_indexedFreeList[i].size() == (size_t) i,
- "Indexed free list sizes are incorrect");
- _indexedFreeList[i].reset(IndexSetSize);
- assert(_indexedFreeList[i].count() == 0, "reset check failed");
- assert(_indexedFreeList[i].head() == NULL, "reset check failed");
- assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
- assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
- }
-}
-
-void CompactibleFreeListSpace::reset(MemRegion mr) {
- resetIndexedFreeListArray();
- dictionary()->reset();
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
- // Everything's allocated until proven otherwise.
- _bt.set_unallocated_block(end());
- }
- if (!mr.is_empty()) {
- assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
- _bt.single_block(mr.start(), mr.word_size());
- FreeChunk* fc = (FreeChunk*) mr.start();
- fc->set_size(mr.word_size());
- if (mr.word_size() >= IndexSetSize ) {
- returnChunkToDictionary(fc);
- } else {
- _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
- _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
- }
- coalBirth(mr.word_size());
- }
- _promoInfo.reset();
- _smallLinearAllocBlock._ptr = NULL;
- _smallLinearAllocBlock._word_size = 0;
-}
-
-void CompactibleFreeListSpace::reset_after_compaction() {
- // Reset the space to the new reality - one free chunk.
- MemRegion mr(compaction_top(), end());
- reset(mr);
- // Now refill the linear allocation block(s) if possible.
- refillLinearAllocBlocksIfNeeded();
-}
-
-// Walks the entire dictionary, returning a coterminal
-// chunk, if it exists. Use with caution since it involves
-// a potentially complete walk of a potentially large tree.
-FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
-
- assert_lock_strong(&_freelistLock);
-
- return dictionary()->find_chunk_ends_at(end());
-}
-
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
- }
-}
-
-size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
- size_t sum = 0;
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
- }
- return sum;
-}
-
-size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
- size_t count = 0;
- for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
- debug_only(
- ssize_t total_list_count = 0;
- for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
- fc = fc->next()) {
- total_list_count++;
- }
- assert(total_list_count == _indexedFreeList[i].count(),
- "Count in list is incorrect");
- )
- count += _indexedFreeList[i].count();
- }
- return count;
-}
-
-size_t CompactibleFreeListSpace::totalCount() {
- size_t num = totalCountInIndexedFreeLists();
- num += dictionary()->total_count();
- if (_smallLinearAllocBlock._word_size != 0) {
- num++;
- }
- return num;
-}
-#endif
-
-bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
- FreeChunk* fc = (FreeChunk*) p;
- return fc->is_free();
-}
-
-size_t CompactibleFreeListSpace::used() const {
- return capacity() - free();
-}
-
-size_t CompactibleFreeListSpace::used_stable() const {
- return _used_stable;
-}
-
-void CompactibleFreeListSpace::recalculate_used_stable() {
- _used_stable = used();
-}
-
-size_t CompactibleFreeListSpace::free() const {
- // "MT-safe, but not MT-precise"(TM), if you will: i.e.
- // if you do this while the structures are in flux you
- // may get an approximate answer only; for instance
- // because there is concurrent allocation either
- // directly by mutators or for promotion during a GC.
- // It's "MT-safe", however, in the sense that you are guaranteed
- // not to crash and burn, for instance, because of walking
- // pointers that could disappear as you were walking them.
- // The approximation is because the various components
- // that are read below are not read atomically (and
- // further the computation of totalSizeInIndexedFreeLists()
- // is itself a non-atomic computation. The normal use of
- // this is during a resize operation at the end of GC
- // and at that time you are guaranteed to get the
- // correct actual value. However, for instance, this is
- // also read completely asynchronously by the "perf-sampler"
- // that supports jvmstat, and you are apt to see the values
- // flicker in such cases.
- assert(_dictionary != NULL, "No _dictionary?");
- return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
- totalSizeInIndexedFreeLists() +
- _smallLinearAllocBlock._word_size) * HeapWordSize;
-}
-
-size_t CompactibleFreeListSpace::max_alloc_in_words() const {
- assert(_dictionary != NULL, "No _dictionary?");
- assert_locked();
- size_t res = _dictionary->max_chunk_size();
- res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
- (size_t) SmallForLinearAlloc - 1));
- // XXX the following could potentially be pretty slow;
- // should one, pessimistically for the rare cases when res
- // calculated above is less than IndexSetSize,
- // just return res calculated above? My reasoning was that
- // those cases will be so rare that the extra time spent doesn't
- // really matter....
- // Note: do not change the loop test i >= res + IndexSetStride
- // to i > res below, because i is unsigned and res may be zero.
- for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
- i -= IndexSetStride) {
- if (_indexedFreeList[i].head() != NULL) {
- assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
- return i;
- }
- }
- return res;
-}
-
-void LinearAllocBlock::print_on(outputStream* st) const {
- st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
- ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
- p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
-}
-
-void CompactibleFreeListSpace::print_on(outputStream* st) const {
- st->print_cr("COMPACTIBLE FREELIST SPACE");
- st->print_cr(" Space:");
- Space::print_on(st);
-
- st->print_cr("promoInfo:");
- _promoInfo.print_on(st);
-
- st->print_cr("_smallLinearAllocBlock");
- _smallLinearAllocBlock.print_on(st);
-
- // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
-
- st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
-}
-
-void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
-const {
- reportIndexedFreeListStatistics(st);
- st->print_cr("Layout of Indexed Freelists");
- st->print_cr("---------------------------");
- AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- _indexedFreeList[i].print_on(st);
- for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
- st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
- p2i(fc), p2i((HeapWord*)fc + i),
- fc->cantCoalesce() ? "\t CC" : "");
- }
- }
-}
-
-void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
-const {
- _promoInfo.print_on(st);
-}
-
-void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
-const {
- _dictionary->report_statistics(st);
- st->print_cr("Layout of Freelists in Tree");
- st->print_cr("---------------------------");
- _dictionary->print_free_lists(st);
-}
-
-class BlkPrintingClosure: public BlkClosure {
- const CMSCollector* _collector;
- const CompactibleFreeListSpace* _sp;
- const CMSBitMap* _live_bit_map;
- const bool _post_remark;
- outputStream* _st;
-public:
- BlkPrintingClosure(const CMSCollector* collector,
- const CompactibleFreeListSpace* sp,
- const CMSBitMap* live_bit_map,
- outputStream* st):
- _collector(collector),
- _sp(sp),
- _live_bit_map(live_bit_map),
- _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
- _st(st) { }
- size_t do_blk(HeapWord* addr);
-};
-
-size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
- size_t sz = _sp->block_size_no_stall(addr, _collector);
- assert(sz != 0, "Should always be able to compute a size");
- if (_sp->block_is_obj(addr)) {
- const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
- _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
- p2i(addr),
- dead ? "dead" : "live",
- sz,
- (!dead && CMSPrintObjectsInDump) ? ":" : ".");
- if (CMSPrintObjectsInDump && !dead) {
- oop(addr)->print_on(_st);
- _st->print_cr("--------------------------------------");
- }
- } else { // free block
- _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
- p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
- if (CMSPrintChunksInDump) {
- ((FreeChunk*)addr)->print_on(_st);
- _st->print_cr("--------------------------------------");
- }
- }
- return sz;
-}
-
-void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
- st->print_cr("=========================");
- st->print_cr("Block layout in CMS Heap:");
- st->print_cr("=========================");
- BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
- blk_iterate(&bpcl);
-
- st->print_cr("=======================================");
- st->print_cr("Order & Layout of Promotion Info Blocks");
- st->print_cr("=======================================");
- print_promo_info_blocks(st);
-
- st->print_cr("===========================");
- st->print_cr("Order of Indexed Free Lists");
- st->print_cr("=========================");
- print_indexed_free_lists(st);
-
- st->print_cr("=================================");
- st->print_cr("Order of Free Lists in Dictionary");
- st->print_cr("=================================");
- print_dictionary_free_lists(st);
-}
-
-
-void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
- assert_lock_strong(&_freelistLock);
- Log(gc, freelist, stats) log;
- if (!log.is_debug()) {
- return;
- }
- log.debug("%s", title);
-
- LogStream out(log.debug());
- _dictionary->report_statistics(&out);
-
- if (log.is_trace()) {
- LogStream trace_out(log.trace());
- reportIndexedFreeListStatistics(&trace_out);
- size_t total_size = totalSizeInIndexedFreeLists() +
- _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
- log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
- }
-}
-
-void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
- assert_lock_strong(&_freelistLock);
- st->print_cr("Statistics for IndexedFreeLists:");
- st->print_cr("--------------------------------");
- size_t total_size = totalSizeInIndexedFreeLists();
- size_t free_blocks = numFreeBlocksInIndexedFreeLists();
- st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
- st->print_cr("Max Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
- st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
- if (free_blocks != 0) {
- st->print_cr("Av. Block Size: " SIZE_FORMAT, total_size/free_blocks);
- }
-}
-
-size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
- size_t res = 0;
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- debug_only(
- ssize_t recount = 0;
- for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
- fc = fc->next()) {
- recount += 1;
- }
- assert(recount == _indexedFreeList[i].count(),
- "Incorrect count in list");
- )
- res += _indexedFreeList[i].count();
- }
- return res;
-}
-
-size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
- for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
- if (_indexedFreeList[i].head() != NULL) {
- assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
- return (size_t)i;
- }
- }
- return 0;
-}
-
-void CompactibleFreeListSpace::set_end(HeapWord* value) {
- HeapWord* prevEnd = end();
- assert(prevEnd != value, "unnecessary set_end call");
- assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
- "New end is below unallocated block");
- _end = value;
- if (prevEnd != NULL) {
- // Resize the underlying block offset table.
- _bt.resize(pointer_delta(value, bottom()));
- if (value <= prevEnd) {
- assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
- "New end is below unallocated block");
- } else {
- // Now, take this new chunk and add it to the free blocks.
- // Note that the BOT has not yet been updated for this block.
- size_t newFcSize = pointer_delta(value, prevEnd);
- // Add the block to the free lists, if possible coalescing it
- // with the last free block, and update the BOT and census data.
- addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
- }
- }
-}
-
-class FreeListSpaceDCTOC : public FilteringDCTOC {
- CompactibleFreeListSpace* _cfls;
- CMSCollector* _collector;
- bool _parallel;
-protected:
- // Override.
-#define walk_mem_region_with_cl_DECL(ClosureType) \
- virtual void walk_mem_region_with_cl(MemRegion mr, \
- HeapWord* bottom, HeapWord* top, \
- ClosureType* cl); \
- void walk_mem_region_with_cl_par(MemRegion mr, \
- HeapWord* bottom, HeapWord* top, \
- ClosureType* cl); \
- void walk_mem_region_with_cl_nopar(MemRegion mr, \
- HeapWord* bottom, HeapWord* top, \
- ClosureType* cl)
- walk_mem_region_with_cl_DECL(OopIterateClosure);
- walk_mem_region_with_cl_DECL(FilteringClosure);
-
-public:
- FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
- CMSCollector* collector,
- OopIterateClosure* cl,
- CardTable::PrecisionStyle precision,
- HeapWord* boundary,
- bool parallel) :
- FilteringDCTOC(sp, cl, precision, boundary),
- _cfls(sp), _collector(collector), _parallel(parallel) {}
-};
-
-// We de-virtualize the block-related calls below, since we know that our
-// space is a CompactibleFreeListSpace.
-
-#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \
- HeapWord* bottom, \
- HeapWord* top, \
- ClosureType* cl) { \
- if (_parallel) { \
- walk_mem_region_with_cl_par(mr, bottom, top, cl); \
- } else { \
- walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \
- } \
-} \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr, \
- HeapWord* bottom, \
- HeapWord* top, \
- ClosureType* cl) { \
- /* Skip parts that are before "mr", in case "block_start" sent us \
- back too far. */ \
- HeapWord* mr_start = mr.start(); \
- size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
- HeapWord* next = bottom + bot_size; \
- while (next < mr_start) { \
- bottom = next; \
- bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \
- next = bottom + bot_size; \
- } \
- \
- while (bottom < top) { \
- if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \
- !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
- oop(bottom)) && \
- !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
- size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \
- bottom += _cfls->adjustObjectSize(word_sz); \
- } else { \
- bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \
- } \
- } \
-} \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \
- HeapWord* bottom, \
- HeapWord* top, \
- ClosureType* cl) { \
- /* Skip parts that are before "mr", in case "block_start" sent us \
- back too far. */ \
- HeapWord* mr_start = mr.start(); \
- size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
- HeapWord* next = bottom + bot_size; \
- while (next < mr_start) { \
- bottom = next; \
- bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
- next = bottom + bot_size; \
- } \
- \
- while (bottom < top) { \
- if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \
- !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \
- oop(bottom)) && \
- !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \
- size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \
- bottom += _cfls->adjustObjectSize(word_sz); \
- } else { \
- bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \
- } \
- } \
-}
-
-// (There are only two of these, rather than N, because the split is due
-// only to the introduction of the FilteringClosure, a local part of the
-// impl of this abstraction.)
-FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
-FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
-
-DirtyCardToOopClosure*
-CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
- CardTable::PrecisionStyle precision,
- HeapWord* boundary,
- bool parallel) {
- return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
-}
-
-
-// Note on locking for the space iteration functions:
-// since the collector's iteration activities are concurrent with
-// allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instance a block being iterated
-// may suddenly be allocated or divided up and part of it allocated and
-// so on.
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
- assert_lock_strong(freelistLock());
- HeapWord *cur, *limit;
- for (cur = bottom(), limit = end(); cur < limit;
- cur += cl->do_blk_careful(cur));
-}
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
- assert_lock_strong(freelistLock());
- HeapWord *cur, *limit;
- for (cur = bottom(), limit = end(); cur < limit;
- cur += cl->do_blk(cur));
-}
-
-// Apply the given closure to each oop in the space.
-void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
- assert_lock_strong(freelistLock());
- HeapWord *cur, *limit;
- size_t curSize;
- for (cur = bottom(), limit = end(); cur < limit;
- cur += curSize) {
- curSize = block_size(cur);
- if (block_is_obj(cur)) {
- oop(cur)->oop_iterate(cl);
- }
- }
-}
-
-// NOTE: In the following methods, in order to safely be able to
-// apply the closure to an object, we need to be sure that the
-// object has been initialized. We are guaranteed that an object
-// is initialized if we are holding the Heap_lock with the
-// world stopped.
-void CompactibleFreeListSpace::verify_objects_initialized() const {
- if (is_init_completed()) {
- assert_locked_or_safepoint(Heap_lock);
- if (Universe::is_fully_initialized()) {
- guarantee(SafepointSynchronize::is_at_safepoint(),
- "Required for objects to be initialized");
- }
- } // else make a concession at vm start-up
-}
-
-// Apply the given closure to each object in the space
-void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
- assert_lock_strong(freelistLock());
- NOT_PRODUCT(verify_objects_initialized());
- HeapWord *cur, *limit;
- size_t curSize;
- for (cur = bottom(), limit = end(); cur < limit;
- cur += curSize) {
- curSize = block_size(cur);
- if (block_is_obj(cur)) {
- blk->do_object(oop(cur));
- }
- }
-}
-
-// Apply the given closure to each live object in the space
-// The use of CompactibleFreeListSpace by the ConcurrentMarkSweepGeneration
-// for concurrent GCs allows objects in the space to hold references to
-// objects that are no longer valid. For example, an object may reference
-// another object that has already been swept up (collected). This method uses
-// obj_is_alive() to determine whether it is safe to apply the closure to
-// an object. See obj_is_alive() for details on how liveness of an
-// object is decided.
-
-void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
- assert_lock_strong(freelistLock());
- NOT_PRODUCT(verify_objects_initialized());
- HeapWord *cur, *limit;
- size_t curSize;
- for (cur = bottom(), limit = end(); cur < limit;
- cur += curSize) {
- curSize = block_size(cur);
- if (block_is_obj(cur) && obj_is_alive(cur)) {
- blk->do_object(oop(cur));
- }
- }
-}
-
-void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
- UpwardsObjectClosure* cl) {
- assert_locked(freelistLock());
- NOT_PRODUCT(verify_objects_initialized());
- assert(!mr.is_empty(), "Should be non-empty");
- // We use MemRegion(bottom(), end()) rather than used_region() below
- // because the two are not necessarily equal for some kinds of
- // spaces, in particular, certain kinds of free list spaces.
- // We could use the more complicated but more precise:
- // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
- // but the slight imprecision seems acceptable in the assertion check.
- assert(MemRegion(bottom(), end()).contains(mr),
- "Should be within used space");
- HeapWord* prev = cl->previous(); // max address from last time
- if (prev >= mr.end()) { // nothing to do
- return;
- }
- // This assert will not work when we go from cms space to perm
-  // space, and use the same closure. Easy fix deferred for later. XXX YSR
- // assert(prev == NULL || contains(prev), "Should be within space");
-
- bool last_was_obj_array = false;
- HeapWord *blk_start_addr, *region_start_addr;
- if (prev > mr.start()) {
- region_start_addr = prev;
- blk_start_addr = prev;
- // The previous invocation may have pushed "prev" beyond the
-    // last allocated block yet there may still be blocks
- // in this region due to a particular coalescing policy.
- // Relax the assertion so that the case where the unallocated
- // block is maintained and "prev" is beyond the unallocated
- // block does not cause the assertion to fire.
- assert((BlockOffsetArrayUseUnallocatedBlock &&
- (!is_in(prev))) ||
- (blk_start_addr == block_start(region_start_addr)), "invariant");
- } else {
- region_start_addr = mr.start();
- blk_start_addr = block_start(region_start_addr);
- }
- HeapWord* region_end_addr = mr.end();
- MemRegion derived_mr(region_start_addr, region_end_addr);
- while (blk_start_addr < region_end_addr) {
- const size_t size = block_size(blk_start_addr);
- if (block_is_obj(blk_start_addr)) {
- last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
- } else {
- last_was_obj_array = false;
- }
- blk_start_addr += size;
- }
- if (!last_was_obj_array) {
- assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
- "Should be within (closed) used space");
- assert(blk_start_addr > prev, "Invariant");
- cl->set_previous(blk_start_addr); // min address for next time
- }
-}
-
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
- ObjectClosureCareful* cl) {
- assert_lock_strong(freelistLock());
- // Can't use used_region() below because it may not necessarily
- // be the same as [bottom(),end()); although we could
- // use [used_region().start(),align_up(used_region().end(),CardSize)),
- // that appears too cumbersome, so we just do the simpler check
- // in the assertion below.
- assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
- "mr should be non-empty and within used space");
- HeapWord *addr, *end;
- size_t size;
- for (addr = block_start_careful(mr.start()), end = mr.end();
- addr < end; addr += size) {
- FreeChunk* fc = (FreeChunk*)addr;
- if (fc->is_free()) {
- // Since we hold the free list lock, which protects direct
- // allocation in this generation by mutators, a free object
- // will remain free throughout this iteration code.
- size = fc->size();
- } else {
- // Note that the object need not necessarily be initialized,
- // because (for instance) the free list lock does NOT protect
- // object initialization. The closure application below must
- // therefore be correct in the face of uninitialized objects.
- size = cl->do_object_careful_m(oop(addr), mr);
- if (size == 0) {
- // An unparsable object found. Signal early termination.
- return addr;
- }
- }
- }
- return NULL;
-}
-
-
-HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
- NOT_PRODUCT(verify_objects_initialized());
- return _bt.block_start(p);
-}
-
-HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
- return _bt.block_start_careful(p);
-}
-
-size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
- NOT_PRODUCT(verify_objects_initialized());
- // This must be volatile, or else there is a danger that the compiler
- // will compile the code below into a sometimes-infinite loop, by keeping
- // the value read the first time in a register.
- while (true) {
- // We must do this until we get a consistent view of the object.
- if (FreeChunk::indicatesFreeChunk(p)) {
- volatile FreeChunk* fc = (volatile FreeChunk*)p;
- size_t res = fc->size();
-
- // Bugfix for systems with weak memory model (PPC64/IA64). The
- // block's free bit was set and we have read the size of the
- // block. Acquire and check the free bit again. If the block is
- // still free, the read size is correct.
- OrderAccess::acquire();
-
- // If the object is still a free chunk, return the size, else it
- // has been allocated so try again.
- if (FreeChunk::indicatesFreeChunk(p)) {
- assert(res != 0, "Block size should not be 0");
- return res;
- }
- } else {
- // Ensure klass read before size.
- Klass* k = oop(p)->klass_or_null_acquire();
- if (k != NULL) {
- assert(k->is_klass(), "Should really be klass oop.");
- oop o = (oop)p;
- assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
-
- size_t res = o->size_given_klass(k);
- res = adjustObjectSize(res);
- assert(res != 0, "Block size should not be 0");
- return res;
- }
- }
- }
-}
-
-// TODO: Now that is_parsable is gone, we should combine these two functions.
-// A variant of the above that uses the Printezis bits for
-// unparsable but allocated objects. This avoids any possible
-// stalls waiting for mutators to initialize objects, and is
-// thus potentially faster than the variant above. However,
-// this variant may return a zero size for a block that is
-// under mutation and for which a consistent size cannot be
-// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
-size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
- const CMSCollector* c)
-const {
- assert(MemRegion(bottom(), end()).contains(p), "p not in space");
- // This must be volatile, or else there is a danger that the compiler
- // will compile the code below into a sometimes-infinite loop, by keeping
- // the value read the first time in a register.
- DEBUG_ONLY(uint loops = 0;)
- while (true) {
- // We must do this until we get a consistent view of the object.
- if (FreeChunk::indicatesFreeChunk(p)) {
- volatile FreeChunk* fc = (volatile FreeChunk*)p;
- size_t res = fc->size();
-
- // Bugfix for systems with weak memory model (PPC64/IA64). The
- // free bit of the block was set and we have read the size of
- // the block. Acquire and check the free bit again. If the
- // block is still free, the read size is correct.
- OrderAccess::acquire();
-
- if (FreeChunk::indicatesFreeChunk(p)) {
- assert(res != 0, "Block size should not be 0");
- assert(loops == 0, "Should be 0");
- return res;
- }
- } else {
- // Ensure klass read before size.
- Klass* k = oop(p)->klass_or_null_acquire();
- if (k != NULL) {
- assert(k->is_klass(), "Should really be klass oop.");
- oop o = (oop)p;
- assert(oopDesc::is_oop(o), "Should be an oop");
-
- size_t res = o->size_given_klass(k);
- res = adjustObjectSize(res);
- assert(res != 0, "Block size should not be 0");
- return res;
- } else {
- // May return 0 if P-bits not present.
- return c->block_size_if_printezis_bits(p);
- }
- }
- assert(loops == 0, "Can loop at most once");
- DEBUG_ONLY(loops++;)
- }
-}
-
-size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
- NOT_PRODUCT(verify_objects_initialized());
- assert(MemRegion(bottom(), end()).contains(p), "p not in space");
- FreeChunk* fc = (FreeChunk*)p;
- if (fc->is_free()) {
- return fc->size();
- } else {
- // Ignore mark word because this may be a recently promoted
- // object whose mark word is used to chain together grey
- // objects (the last one would have a null value).
- assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
- return adjustObjectSize(oop(p)->size());
- }
-}
-
-// This implementation assumes that the property of "being an object" is
-// stable. But being a free chunk may not be (because of parallel
-// promotion).
-bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
- FreeChunk* fc = (FreeChunk*)p;
- assert(is_in_reserved(p), "Should be in space");
- if (FreeChunk::indicatesFreeChunk(p)) return false;
- Klass* k = oop(p)->klass_or_null_acquire();
- if (k != NULL) {
- // Ignore mark word because it may have been used to
- // chain together promoted objects (the last one
- // would have a null value).
- assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
- return true;
- } else {
- return false; // Was not an object at the start of collection.
- }
-}
-
-// Check if the object is alive. This fact is checked either by consulting
-// the main marking bitmap in the sweeping phase or, if it's a permanent
-// generation and we're not in the sweeping phase, by checking the
-// perm_gen_verify_bit_map where we store the "deadness" information if
-// we did not sweep the perm gen in the most recent previous GC cycle.
-bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
- assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
- "Else races are possible");
- assert(block_is_obj(p), "The address should point to an object");
-
- // If we're sweeping, we use object liveness information from the main bit map
- // for both perm gen and old gen.
- // We don't need to lock the bitmap (live_map or dead_map below), because
- // EITHER we are in the middle of the sweeping phase, and the
- // main marking bit map (live_map below) is locked,
- // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
- // is stable, because it's mutated only in the sweeping phase.
- // NOTE: This method is also used by jmap where, if class unloading is
-  // off, this method can return "false" for legitimate perm objects,
- // when we are not in the midst of a sweeping phase, which can result
- // in jmap not reporting certain perm gen objects. This will be moot
- // if/when the perm gen goes away in the future.
- if (_collector->abstract_state() == CMSCollector::Sweeping) {
- CMSBitMap* live_map = _collector->markBitMap();
- return live_map->par_isMarked((HeapWord*) p);
- }
- return true;
-}
-
-bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
- FreeChunk* fc = (FreeChunk*)p;
- assert(is_in_reserved(p), "Should be in space");
- assert(_bt.block_start(p) == p, "Should be a block boundary");
- if (!fc->is_free()) {
- // Ignore mark word because it may have been used to
- // chain together promoted objects (the last one
- // would have a null value).
- assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
- return true;
- }
- return false;
-}
-
-// "MT-safe but not guaranteed MT-precise" (TM); you may get an
-// approximate answer if you don't hold the freelistlock when you call this.
-size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
- size_t size = 0;
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- debug_only(
- // We may be calling here without the lock in which case we
- // won't do this modest sanity check.
- if (freelistLock()->owned_by_self()) {
- size_t total_list_size = 0;
- for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
- fc = fc->next()) {
- total_list_size += i;
- }
- assert(total_list_size == i * _indexedFreeList[i].count(),
- "Count in list is incorrect");
- }
- )
- size += i * _indexedFreeList[i].count();
- }
- return size;
-}
-
-HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
- MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
- return allocate(size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
- return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
-}
-
-HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
- assert_lock_strong(freelistLock());
- HeapWord* res = NULL;
- assert(size == adjustObjectSize(size),
- "use adjustObjectSize() before calling into allocate()");
-
- res = allocate_adaptive_freelists(size);
-
- if (res != NULL) {
- // check that res does lie in this space!
- assert(is_in_reserved(res), "Not in this space!");
- assert(is_aligned((void*)res), "alignment check");
-
- FreeChunk* fc = (FreeChunk*)res;
- fc->markNotFree();
- assert(!fc->is_free(), "shouldn't be marked free");
- assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
- // Verify that the block offset table shows this to
- // be a single block, but not one which is unallocated.
- _bt.verify_single_block(res, size);
- _bt.verify_not_unallocated(res, size);
- // mangle a just allocated object with a distinct pattern.
- debug_only(fc->mangleAllocated(size));
- }
-
- // During GC we do not need to recalculate the stable used value for
- // every allocation in old gen. It is done once at the end of GC instead
- // for performance reasons.
- if (!CMSHeap::heap()->is_gc_active()) {
- recalculate_used_stable();
- }
-
- return res;
-}
-
-HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
- assert_lock_strong(freelistLock());
- HeapWord* res = NULL;
- assert(size == adjustObjectSize(size),
- "use adjustObjectSize() before calling into allocate()");
-
- // Strategy
- // if small
- // exact size from small object indexed list if small
- // small or large linear allocation block (linAB) as appropriate
- // take from lists of greater sized chunks
- // else
- // dictionary
- // small or large linear allocation block if it has the space
- // Try allocating exact size from indexTable first
- if (size < IndexSetSize) {
- res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    if (res != NULL) {
- assert(res != (HeapWord*)_indexedFreeList[size].head(),
- "Not removed from free list");
- // no block offset table adjustment is necessary on blocks in
- // the indexed lists.
-
- // Try allocating from the small LinAB
- } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
- (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
- // if successful, the above also adjusts block offset table
- // Note that this call will refill the LinAB to
-      // satisfy the request.  This is different from
- // evm.
- // Don't record chunk off a LinAB? smallSplitBirth(size);
- } else {
- // Raid the exact free lists larger than size, even if they are not
- // overpopulated.
- res = (HeapWord*) getChunkFromGreater(size);
- }
- } else {
- // Big objects get allocated directly from the dictionary.
- res = (HeapWord*) getChunkFromDictionaryExact(size);
- if (res == NULL) {
- // Try hard not to fail since an allocation failure will likely
- // trigger a synchronous GC. Try to get the space from the
- // allocation blocks.
- res = getChunkFromSmallLinearAllocBlockRemainder(size);
- }
- }
-
- return res;
-}
-
-// A worst-case estimate of the space required (in HeapWords) to expand the heap
-// when promoting obj.
-size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
- // Depending on the object size, expansion may require refilling either a
- // bigLAB or a smallLAB plus refilling a PromotionInfo object. MinChunkSize
- // is added because the dictionary may over-allocate to avoid fragmentation.
- size_t space = obj_size;
- space += _promoInfo.refillSize() + 2 * MinChunkSize;
- return space;
-}
-
-FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
- FreeChunk* ret;
-
- assert(numWords >= MinChunkSize, "Size is less than minimum");
- assert(linearAllocationWouldFail() || bestFitFirst(),
- "Should not be here");
-
- size_t i;
- size_t currSize = numWords + MinChunkSize;
- assert(is_object_aligned(currSize), "currSize should be aligned");
- for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
- AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
- if (fl->head()) {
- ret = getFromListGreater(fl, numWords);
- assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
- return ret;
- }
- }
-
- currSize = MAX2((size_t)SmallForDictionary,
- (size_t)(numWords + MinChunkSize));
-
- /* Try to get a chunk that satisfies request, while avoiding
- fragmentation that can't be handled. */
- {
- ret = dictionary()->get_chunk(currSize);
- if (ret != NULL) {
- assert(ret->size() - numWords >= MinChunkSize,
- "Chunk is too small");
- _bt.allocated((HeapWord*)ret, ret->size());
- /* Carve returned chunk. */
- (void) splitChunkAndReturnRemainder(ret, numWords);
- /* Label this as no longer a free chunk. */
- assert(ret->is_free(), "This chunk should be free");
- ret->link_prev(NULL);
- }
- assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
- return ret;
- }
- ShouldNotReachHere();
-}
-
-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
- assert(fc->size() < IndexSetSize, "Size of chunk is too large");
- return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
-}
-
-bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
- assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
- (_smallLinearAllocBlock._word_size == fc->size()),
- "Linear allocation block shows incorrect size");
- return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
- (_smallLinearAllocBlock._word_size == fc->size()));
-}
-
-// Check if the purported free chunk is present either as a linear
-// allocation block, the size-indexed table of (smaller) free blocks,
-// or the larger free blocks kept in the binary tree dictionary.
-bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
- if (verify_chunk_is_linear_alloc_block(fc)) {
- return true;
- } else if (fc->size() < IndexSetSize) {
- return verifyChunkInIndexedFreeLists(fc);
- } else {
- return dictionary()->verify_chunk_in_free_list(fc);
- }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::assert_locked() const {
- CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
-}
-
-void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
- CMSLockVerifier::assert_locked(lock);
-}
-#endif
-
-FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
- // In the parallel case, the main thread holds the free list lock
-  // on behalf of the parallel threads.
- FreeChunk* fc;
- {
- // If GC is parallel, this might be called by several threads.
- // This should be rare enough that the locking overhead won't affect
- // the sequential code.
- MutexLocker x(parDictionaryAllocLock(),
- Mutex::_no_safepoint_check_flag);
- fc = getChunkFromDictionary(size);
- }
- if (fc != NULL) {
- fc->dontCoalesce();
- assert(fc->is_free(), "Should be free, but not coalescable");
- // Verify that the block offset table shows this to
- // be a single block, but not one which is unallocated.
- _bt.verify_single_block((HeapWord*)fc, fc->size());
- _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
- }
- return fc;
-}
-
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
- assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
- assert_locked();
-
- // if we are tracking promotions, then first ensure space for
- // promotion (including spooling space for saving header if necessary).
- // then allocate and copy, then track promoted info if needed.
- // When tracking (see PromotionInfo::track()), the mark word may
- // be displaced and in this case restoration of the mark word
- // occurs in the (oop_since_save_marks_)iterate phase.
- if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
- return NULL;
- }
- // Call the allocate(size_t, bool) form directly to avoid the
- // additional call through the allocate(size_t) form. Having
-  // the compiler inline the call is problematic because allocate(size_t)
- // is a virtual method.
- HeapWord* res = allocate(adjustObjectSize(obj_size));
- if (res != NULL) {
- Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
- // if we should be tracking promotions, do so.
- if (_promoInfo.tracking()) {
- _promoInfo.track((PromotedObject*)res);
- }
- }
- return oop(res);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
- assert_locked();
- assert(size >= MinChunkSize, "minimum chunk size");
- assert(size < _smallLinearAllocBlock._allocation_size_limit,
- "maximum from smallLinearAllocBlock");
- return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
- size_t size) {
- assert_locked();
- assert(size >= MinChunkSize, "too small");
- HeapWord* res = NULL;
- // Try to do linear allocation from blk, making sure that
- if (blk->_word_size == 0) {
- // We have probably been unable to fill this either in the prologue or
- // when it was exhausted at the last linear allocation. Bail out until
- // next time.
- assert(blk->_ptr == NULL, "consistency check");
- return NULL;
- }
- assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
- res = getChunkFromLinearAllocBlockRemainder(blk, size);
- if (res != NULL) return res;
-
- // about to exhaust this linear allocation block
- if (blk->_word_size == size) { // exactly satisfied
- res = blk->_ptr;
- _bt.allocated(res, blk->_word_size);
- } else if (size + MinChunkSize <= blk->_refillSize) {
- size_t sz = blk->_word_size;
- // Update _unallocated_block if the size is such that chunk would be
- // returned to the indexed free list. All other chunks in the indexed
- // free lists are allocated from the dictionary so that _unallocated_block
-    // has already been adjusted for them.  Do it here so that the cost
-    // is incurred for all chunks added back to the indexed free lists.
- if (sz < SmallForDictionary) {
- _bt.allocated(blk->_ptr, sz);
- }
- // Return the chunk that isn't big enough, and then refill below.
- addChunkToFreeLists(blk->_ptr, sz);
- split_birth(sz);
- // Don't keep statistics on adding back chunk from a LinAB.
- } else {
- // A refilled block would not satisfy the request.
- return NULL;
- }
-
- blk->_ptr = NULL; blk->_word_size = 0;
- refillLinearAllocBlock(blk);
- assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
- "block was replenished");
- if (res != NULL) {
- split_birth(size);
- repairLinearAllocBlock(blk);
- } else if (blk->_ptr != NULL) {
- res = blk->_ptr;
- size_t blk_size = blk->_word_size;
- blk->_word_size -= size;
- blk->_ptr += size;
- split_birth(size);
- repairLinearAllocBlock(blk);
- // Update BOT last so that other (parallel) GC threads see a consistent
- // view of the BOT and free blocks.
- // Above must occur before BOT is updated below.
- OrderAccess::storestore();
- _bt.split_block(res, blk_size, size); // adjust block offset table
- }
- return res;
-}
-
-HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
- LinearAllocBlock* blk,
- size_t size) {
- assert_locked();
- assert(size >= MinChunkSize, "too small");
-
- HeapWord* res = NULL;
- // This is the common case. Keep it simple.
- if (blk->_word_size >= size + MinChunkSize) {
- assert(blk->_ptr != NULL, "consistency check");
- res = blk->_ptr;
- // Note that the BOT is up-to-date for the linAB before allocation. It
- // indicates the start of the linAB. The split_block() updates the
- // BOT for the linAB after the allocation (indicates the start of the
- // next chunk to be allocated).
- size_t blk_size = blk->_word_size;
- blk->_word_size -= size;
- blk->_ptr += size;
- split_birth(size);
- repairLinearAllocBlock(blk);
- // Update BOT last so that other (parallel) GC threads see a consistent
- // view of the BOT and free blocks.
- // Above must occur before BOT is updated below.
- OrderAccess::storestore();
- _bt.split_block(res, blk_size, size); // adjust block offset table
- _bt.allocated(res, size);
- }
- return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
- assert_locked();
- assert(size < SmallForDictionary, "just checking");
- FreeChunk* res;
- res = _indexedFreeList[size].get_chunk_at_head();
- if (res == NULL) {
- res = getChunkFromIndexedFreeListHelper(size);
- }
- _bt.verify_not_unallocated((HeapWord*) res, size);
- assert(res == NULL || res->size() == size, "Incorrect block size");
- return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
- bool replenish) {
- assert_locked();
- FreeChunk* fc = NULL;
- if (size < SmallForDictionary) {
- assert(_indexedFreeList[size].head() == NULL ||
- _indexedFreeList[size].surplus() <= 0,
- "List for this size should be empty or under populated");
- // Try best fit in exact lists before replenishing the list
- if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
- // Replenish list.
- //
- // Things tried that failed.
- // Tried allocating out of the two LinAB's first before
- // replenishing lists.
- // Tried small linAB of size 256 (size in indexed list)
- // and replenishing indexed lists from the small linAB.
- //
- FreeChunk* newFc = NULL;
- const size_t replenish_size = CMSIndexedFreeListReplenish * size;
- if (replenish_size < SmallForDictionary) {
- // Do not replenish from an underpopulated size.
- if (_indexedFreeList[replenish_size].surplus() > 0 &&
- _indexedFreeList[replenish_size].head() != NULL) {
- newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
- } else if (bestFitFirst()) {
- newFc = bestFitSmall(replenish_size);
- }
- }
- if (newFc == NULL && replenish_size > size) {
- assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
- newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
- }
- // Note: The stats update re split-death of block obtained above
- // will be recorded below precisely when we know we are going to
-      // be actually splitting it into more than one piece below.
- if (newFc != NULL) {
- if (replenish || CMSReplenishIntermediate) {
- // Replenish this list and return one block to caller.
- size_t i;
- FreeChunk *curFc, *nextFc;
- size_t num_blk = newFc->size() / size;
- assert(num_blk >= 1, "Smaller than requested?");
- assert(newFc->size() % size == 0, "Should be integral multiple of request");
- if (num_blk > 1) {
- // we are sure we will be splitting the block just obtained
- // into multiple pieces; record the split-death of the original
- splitDeath(replenish_size);
- }
- // carve up and link blocks 0, ..., num_blk - 2
- // The last chunk is not added to the lists but is returned as the
- // free chunk.
- for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
- i = 0;
- i < (num_blk - 1);
- curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
- i++) {
- curFc->set_size(size);
- // Don't record this as a return in order to try and
- // determine the "returns" from a GC.
- _bt.verify_not_unallocated((HeapWord*) fc, size);
- _indexedFreeList[size].return_chunk_at_tail(curFc, false);
- _bt.mark_block((HeapWord*)curFc, size);
- split_birth(size);
- // Don't record the initial population of the indexed list
- // as a split birth.
- }
-
- // check that the arithmetic was OK above
- assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
- "inconsistency in carving newFc");
- curFc->set_size(size);
- _bt.mark_block((HeapWord*)curFc, size);
- split_birth(size);
- fc = curFc;
- } else {
- // Return entire block to caller
- fc = newFc;
- }
- }
- }
- } else {
- // Get a free chunk from the free chunk dictionary to be returned to
- // replenish the indexed free list.
- fc = getChunkFromDictionaryExact(size);
- }
- // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
- return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
- assert_locked();
- FreeChunk* fc = _dictionary->get_chunk(size);
- if (fc == NULL) {
- return NULL;
- }
- _bt.allocated((HeapWord*)fc, fc->size());
- if (fc->size() >= size + MinChunkSize) {
- fc = splitChunkAndReturnRemainder(fc, size);
- }
- assert(fc->size() >= size, "chunk too small");
- assert(fc->size() < size + MinChunkSize, "chunk too big");
- _bt.verify_single_block((HeapWord*)fc, fc->size());
- return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
- assert_locked();
- FreeChunk* fc = _dictionary->get_chunk(size);
- if (fc == NULL) {
- return fc;
- }
- _bt.allocated((HeapWord*)fc, fc->size());
- if (fc->size() == size) {
- _bt.verify_single_block((HeapWord*)fc, size);
- return fc;
- }
- assert(fc->size() > size, "get_chunk() guarantee");
- if (fc->size() < size + MinChunkSize) {
- // Return the chunk to the dictionary and go get a bigger one.
- returnChunkToDictionary(fc);
- fc = _dictionary->get_chunk(size + MinChunkSize);
- if (fc == NULL) {
- return NULL;
- }
- _bt.allocated((HeapWord*)fc, fc->size());
- }
- assert(fc->size() >= size + MinChunkSize, "tautology");
- fc = splitChunkAndReturnRemainder(fc, size);
- assert(fc->size() == size, "chunk is wrong size");
- _bt.verify_single_block((HeapWord*)fc, size);
- return fc;
-}
-
-void
-CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
- assert_locked();
-
- size_t size = chunk->size();
- _bt.verify_single_block((HeapWord*)chunk, size);
- // adjust _unallocated_block downward, as necessary
- _bt.freed((HeapWord*)chunk, size);
- _dictionary->return_chunk(chunk);
-#ifndef PRODUCT
- if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
- TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
- TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
- tl->verify_stats();
- }
-#endif // PRODUCT
-}
-
-void
-CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
- assert_locked();
- size_t size = fc->size();
- _bt.verify_single_block((HeapWord*) fc, size);
- _bt.verify_not_unallocated((HeapWord*) fc, size);
- _indexedFreeList[size].return_chunk_at_tail(fc);
-#ifndef PRODUCT
- if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
- _indexedFreeList[size].verify_stats();
- }
-#endif // PRODUCT
-}
-
-// Add chunk to end of last block -- if it's the largest
-// block -- and update BOT and census data. We would
-// of course have preferred to coalesce it with the
-// last block, but it's currently less expensive to find the
-// largest block than it is to find the last.
-void
-CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
- HeapWord* chunk, size_t size) {
- // check that the chunk does lie in this space!
- assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
- // One of the parallel gc task threads may be here
- // whilst others are allocating.
- Mutex* lock = &_parDictionaryAllocLock;
- FreeChunk* ec;
- {
- MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
- ec = dictionary()->find_largest_dict(); // get largest block
- if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
- // It's a coterminal block - we can coalesce.
- size_t old_size = ec->size();
- coalDeath(old_size);
- removeChunkFromDictionary(ec);
- size += old_size;
- } else {
- ec = (FreeChunk*)chunk;
- }
- }
- ec->set_size(size);
- debug_only(ec->mangleFreed(size));
- if (size < SmallForDictionary) {
- lock = _indexedFreeListParLocks[size];
- }
- MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
- addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
- // record the birth under the lock since the recording involves
- // manipulation of the list on which the chunk lives and
- // if the chunk is allocated and is the last on the list,
- // the list can go away.
- coalBirth(size);
-}
-
-void
-CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
- size_t size) {
- // check that the chunk does lie in this space!
- assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
- assert_locked();
- _bt.verify_single_block(chunk, size);
-
- FreeChunk* fc = (FreeChunk*) chunk;
- fc->set_size(size);
- debug_only(fc->mangleFreed(size));
- if (size < SmallForDictionary) {
- returnChunkToFreeList(fc);
- } else {
- returnChunkToDictionary(fc);
- }
-}
-
-void
-CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
- size_t size, bool coalesced) {
- assert_locked();
- assert(chunk != NULL, "null chunk");
- if (coalesced) {
- // repair BOT
- _bt.single_block(chunk, size);
- }
- addChunkToFreeLists(chunk, size);
-}
-
-// We _must_ find the purported chunk on our free lists;
-// we assert if we don't.
-void
-CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
- size_t size = fc->size();
- assert_locked();
- debug_only(verifyFreeLists());
- if (size < SmallForDictionary) {
- removeChunkFromIndexedFreeList(fc);
- } else {
- removeChunkFromDictionary(fc);
- }
- _bt.verify_single_block((HeapWord*)fc, size);
- debug_only(verifyFreeLists());
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
- size_t size = fc->size();
- assert_locked();
- assert(fc != NULL, "null chunk");
- _bt.verify_single_block((HeapWord*)fc, size);
- _dictionary->remove_chunk(fc);
- // adjust _unallocated_block upward, as necessary
- _bt.allocated((HeapWord*)fc, size);
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
- assert_locked();
- size_t size = fc->size();
- _bt.verify_single_block((HeapWord*)fc, size);
- NOT_PRODUCT(
- if (FLSVerifyIndexTable) {
- verifyIndexedFreeList(size);
- }
- )
- _indexedFreeList[size].remove_chunk(fc);
- NOT_PRODUCT(
- if (FLSVerifyIndexTable) {
- verifyIndexedFreeList(size);
- }
- )
-}
-
-FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
- /* A hint is the next larger size that has a surplus.
- Start search at a size large enough to guarantee that
- the excess is >= MIN_CHUNK. */
- size_t start = align_object_size(numWords + MinChunkSize);
- if (start < IndexSetSize) {
- AdaptiveFreeList<FreeChunk>* it = _indexedFreeList;
- size_t hint = _indexedFreeList[start].hint();
- while (hint < IndexSetSize) {
- assert(is_object_aligned(hint), "hint should be aligned");
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
- if (fl->surplus() > 0 && fl->head() != NULL) {
- // Found a list with surplus, reset original hint
- // and split out a free chunk which is returned.
- _indexedFreeList[start].set_hint(hint);
- FreeChunk* res = getFromListGreater(fl, numWords);
- assert(res == NULL || res->is_free(),
- "Should be returning a free chunk");
- return res;
- }
- hint = fl->hint(); /* keep looking */
- }
- /* None found. */
- it[start].set_hint(IndexSetSize);
- }
- return NULL;
-}
-
-/* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
- size_t numWords) {
- FreeChunk *curr = fl->head();
- size_t oldNumWords = curr->size();
- assert(numWords >= MinChunkSize, "Word size is too small");
- assert(curr != NULL, "List is empty");
- assert(oldNumWords >= numWords + MinChunkSize,
- "Size of chunks in the list is too small");
-
- fl->remove_chunk(curr);
- // recorded indirectly by splitChunkAndReturnRemainder -
- // smallSplit(oldNumWords, numWords);
- FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
- // Does anything have to be done for the remainder in terms of
- // fixing the card table?
- assert(new_chunk == NULL || new_chunk->is_free(),
- "Should be returning a free chunk");
- return new_chunk;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
- size_t new_size) {
- assert_locked();
- size_t size = chunk->size();
- assert(size > new_size, "Split from a smaller block?");
- assert(is_aligned(chunk), "alignment problem");
- assert(size == adjustObjectSize(size), "alignment problem");
- size_t rem_sz = size - new_size;
- assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
- assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
- FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
- assert(is_aligned(ffc), "alignment problem");
- ffc->set_size(rem_sz);
- ffc->link_next(NULL);
- ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
- // Above must occur before BOT is updated below.
- // adjust block offset table
- OrderAccess::storestore();
- assert(chunk->is_free() && ffc->is_free(), "Error");
- _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
- if (rem_sz < SmallForDictionary) {
- // The freeList lock is held, but multiple GC task threads might be executing in parallel.
- bool is_par = Thread::current()->is_GC_task_thread();
- if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
- returnChunkToFreeList(ffc);
- split(size, rem_sz);
- if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
- } else {
- returnChunkToDictionary(ffc);
- split(size, rem_sz);
- }
- chunk->set_size(new_size);
- return chunk;
-}
-
-void
-CompactibleFreeListSpace::sweep_completed() {
- // Now that space is probably plentiful, refill linear
- // allocation blocks as needed.
- refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_prologue() {
- assert_locked();
- reportFreeListStatistics("Before GC:");
- refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_epilogue() {
- assert_locked();
- assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
- _promoInfo.stopTrackingPromotions();
- repairLinearAllocationBlocks();
- reportFreeListStatistics("After GC:");
-}
-
-// Iteration support, mostly delegated from a CMS generation
-
-void CompactibleFreeListSpace::save_marks() {
- assert(Thread::current()->is_VM_thread(),
- "Global variable should only be set when single-threaded");
- // Mark the "end" of the used space at the time of this call;
- // note, however, that promoted objects from this point
- // on are tracked in the _promoInfo below.
- set_saved_mark_word(unallocated_block());
-#ifdef ASSERT
- // Check the sanity of save_marks() etc.
- MemRegion ur = used_region();
- MemRegion urasm = used_region_at_save_marks();
- assert(ur.contains(urasm),
- " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
- " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
-#endif
- // inform allocator that promotions should be tracked.
- assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
- _promoInfo.startTrackingPromotions();
-}
-
-bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
- assert(_promoInfo.tracking(), "No preceding save_marks?");
- return _promoInfo.noPromotions();
-}
-
-bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
- return _smallLinearAllocBlock._word_size == 0;
-}
-
-void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
- // Fix up linear allocation blocks to look like free blocks
- repairLinearAllocBlock(&_smallLinearAllocBlock);
-}
-
-void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
- assert_locked();
- if (blk->_ptr != NULL) {
- assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
- "Minimum block size requirement");
- FreeChunk* fc = (FreeChunk*)(blk->_ptr);
- fc->set_size(blk->_word_size);
- fc->link_prev(NULL); // mark as free
- fc->dontCoalesce();
- assert(fc->is_free(), "just marked it free");
- assert(fc->cantCoalesce(), "just marked it uncoalescable");
- }
-}
-
-void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
- assert_locked();
- if (_smallLinearAllocBlock._ptr == NULL) {
- assert(_smallLinearAllocBlock._word_size == 0,
- "Size of linAB should be zero if the ptr is NULL");
- // Reset the linAB refill and allocation size limit.
- _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
- }
- refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
- assert_locked();
- assert((blk->_ptr == NULL && blk->_word_size == 0) ||
- (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
- "blk invariant");
- if (blk->_ptr == NULL) {
- refillLinearAllocBlock(blk);
- }
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
- assert_locked();
- assert(blk->_word_size == 0 && blk->_ptr == NULL,
- "linear allocation block should be empty");
- FreeChunk* fc;
- if (blk->_refillSize < SmallForDictionary &&
- (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
- // A linAB's strategy might be to use small sizes to reduce
- // fragmentation but still get the benefits of allocation from a
- // linAB.
- } else {
- fc = getChunkFromDictionary(blk->_refillSize);
- }
- if (fc != NULL) {
- blk->_ptr = (HeapWord*)fc;
- blk->_word_size = fc->size();
- fc->dontCoalesce(); // to prevent sweeper from sweeping us up
- }
-}
-
-// Support for compaction
-void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
- scan_and_forward(this, cp);
- // Prepare_for_compaction() uses the space between live objects
-  // so that a later phase can skip dead space quickly.  So verification
-  // of the free lists doesn't work afterwards.
-}
-
-void CompactibleFreeListSpace::adjust_pointers() {
- // In other versions of adjust_pointers(), a bail out
- // based on the amount of live data in the generation
- // (i.e., if 0, bail out) may be used.
- // Cannot test used() == 0 here because the free lists have already
- // been mangled by the compaction.
-
- scan_and_adjust_pointers(this);
- // See note about verification in prepare_for_compaction().
-}
-
-void CompactibleFreeListSpace::compact() {
- scan_and_compact(this);
-}
-
-// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
-// where fbs is free block sizes
-double CompactibleFreeListSpace::flsFrag() const {
- size_t itabFree = totalSizeInIndexedFreeLists();
- double frag = 0.0;
- size_t i;
-
- for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- double sz = i;
- frag += _indexedFreeList[i].count() * (sz * sz);
- }
-
- double totFree = itabFree +
- _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
- if (totFree > 0) {
- frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
- (totFree * totFree));
- frag = (double)1.0 - frag;
- } else {
- assert(frag == 0.0, "Follows from totFree == 0");
- }
- return frag;
-}
-
-void CompactibleFreeListSpace::beginSweepFLCensus(
- float inter_sweep_current,
- float inter_sweep_estimate,
- float intra_sweep_estimate) {
- assert_locked();
- size_t i;
- for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
- log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
- fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
- fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
- fl->set_before_sweep(fl->count());
- fl->set_bfr_surp(fl->surplus());
- }
- _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
- inter_sweep_current,
- inter_sweep_estimate,
- intra_sweep_estimate);
-}
-
-void CompactibleFreeListSpace::setFLSurplus() {
- assert_locked();
- size_t i;
- for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
- fl->set_surplus(fl->count() -
- (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
- }
-}
-
-void CompactibleFreeListSpace::setFLHints() {
- assert_locked();
- size_t i;
- size_t h = IndexSetSize;
- for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
- fl->set_hint(h);
- if (fl->surplus() > 0) {
- h = i;
- }
- }
-}
-
-void CompactibleFreeListSpace::clearFLCensus() {
- assert_locked();
- size_t i;
- for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
- fl->set_prev_sweep(fl->count());
- fl->set_coal_births(0);
- fl->set_coal_deaths(0);
- fl->set_split_births(0);
- fl->set_split_deaths(0);
- }
-}
-
-void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
- log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
- setFLSurplus();
- setFLHints();
- printFLCensus(sweep_count);
- clearFLCensus();
- assert_locked();
- _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
-}
-
-bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
- if (size < SmallForDictionary) {
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
- return (fl->coal_desired() < 0) ||
- ((int)fl->count() > fl->coal_desired());
- } else {
- return dictionary()->coal_dict_over_populated(size);
- }
-}
-
-void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
- assert(size < SmallForDictionary, "Size too large for indexed list");
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
- fl->increment_coal_births();
- fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
- assert(size < SmallForDictionary, "Size too large for indexed list");
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
- fl->increment_coal_deaths();
- fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::coalBirth(size_t size) {
- if (size < SmallForDictionary) {
- smallCoalBirth(size);
- } else {
- dictionary()->dict_census_update(size,
- false /* split */,
- true /* birth */);
- }
-}
-
-void CompactibleFreeListSpace::coalDeath(size_t size) {
- if(size < SmallForDictionary) {
- smallCoalDeath(size);
- } else {
- dictionary()->dict_census_update(size,
- false /* split */,
- false /* birth */);
- }
-}
-
-void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
- assert(size < SmallForDictionary, "Size too large for indexed list");
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
- fl->increment_split_births();
- fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
- assert(size < SmallForDictionary, "Size too large for indexed list");
- AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
- fl->increment_split_deaths();
- fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::split_birth(size_t size) {
- if (size < SmallForDictionary) {
- smallSplitBirth(size);
- } else {
- dictionary()->dict_census_update(size,
- true /* split */,
- true /* birth */);
- }
-}
-
-void CompactibleFreeListSpace::splitDeath(size_t size) {
- if (size < SmallForDictionary) {
- smallSplitDeath(size);
- } else {
- dictionary()->dict_census_update(size,
- true /* split */,
- false /* birth */);
- }
-}
-
-void CompactibleFreeListSpace::split(size_t from, size_t to1) {
- size_t to2 = from - to1;
- splitDeath(from);
- split_birth(to1);
- split_birth(to2);
-}
-
-void CompactibleFreeListSpace::print() const {
- print_on(tty);
-}
-
-void CompactibleFreeListSpace::prepare_for_verify() {
- assert_locked();
- repairLinearAllocationBlocks();
- // Verify that the SpoolBlocks look like free blocks of
- // appropriate sizes... To be done ...
-}
-
-class VerifyAllBlksClosure: public BlkClosure {
- private:
- const CompactibleFreeListSpace* _sp;
- const MemRegion _span;
- HeapWord* _last_addr;
- size_t _last_size;
- bool _last_was_obj;
- bool _last_was_live;
-
- public:
- VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
- MemRegion span) : _sp(sp), _span(span),
- _last_addr(NULL), _last_size(0),
- _last_was_obj(false), _last_was_live(false) { }
-
- virtual size_t do_blk(HeapWord* addr) {
- size_t res;
- bool was_obj = false;
- bool was_live = false;
- if (_sp->block_is_obj(addr)) {
- was_obj = true;
- oop p = oop(addr);
- guarantee(oopDesc::is_oop(p), "Should be an oop");
- res = _sp->adjustObjectSize(p->size());
- if (_sp->obj_is_alive(addr)) {
- was_live = true;
- oopDesc::verify(p);
- }
- } else {
- FreeChunk* fc = (FreeChunk*)addr;
- res = fc->size();
- if (FLSVerifyLists && !fc->cantCoalesce()) {
- guarantee(_sp->verify_chunk_in_free_list(fc),
- "Chunk should be on a free list");
- }
- }
- if (res == 0) {
- Log(gc, verify) log;
- log.error("Livelock: no rank reduction!");
- log.error(" Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
- " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
- p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false",
- p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
- LogStream ls(log.error());
- _sp->print_on(&ls);
- guarantee(false, "Verification failed.");
- }
- _last_addr = addr;
- _last_size = res;
- _last_was_obj = was_obj;
- _last_was_live = was_live;
- return res;
- }
-};
-
-class VerifyAllOopsClosure: public BasicOopIterateClosure {
- private:
- const CMSCollector* _collector;
- const CompactibleFreeListSpace* _sp;
- const MemRegion _span;
- const bool _past_remark;
- const CMSBitMap* _bit_map;
-
- protected:
- void do_oop(void* p, oop obj) {
- if (_span.contains(obj)) { // the interior oop points into CMS heap
- if (!_span.contains(p)) { // reference from outside CMS heap
- // Should be a valid object; the first disjunct below allows
- // us to sidestep an assertion in block_is_obj() that insists
- // that p be in _sp. Note that several generations (and spaces)
- // are spanned by _span (CMS heap) above.
- guarantee(!_sp->is_in_reserved(obj) ||
- _sp->block_is_obj((HeapWord*)obj),
- "Should be an object");
- guarantee(oopDesc::is_oop(obj), "Should be an oop");
- oopDesc::verify(obj);
- if (_past_remark) {
- // Remark has been completed, the object should be marked
- _bit_map->isMarked((HeapWord*)obj);
- }
- } else { // reference within CMS heap
- if (_past_remark) {
- // Remark has been completed -- so the referent should have
- // been marked, if referring object is.
- if (_bit_map->isMarked(_collector->block_start(p))) {
- guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
- }
- }
- }
- } else if (_sp->is_in_reserved(p)) {
- // the reference is from FLS, and points out of FLS
- guarantee(oopDesc::is_oop(obj), "Should be an oop");
- oopDesc::verify(obj);
- }
- }
-
- template <class T> void do_oop_work(T* p) {
- T heap_oop = RawAccess<>::oop_load(p);
- if (!CompressedOops::is_null(heap_oop)) {
- oop obj = CompressedOops::decode_not_null(heap_oop);
- do_oop(p, obj);
- }
- }
-
- public:
- VerifyAllOopsClosure(const CMSCollector* collector,
- const CompactibleFreeListSpace* sp, MemRegion span,
- bool past_remark, CMSBitMap* bit_map) :
- _collector(collector), _sp(sp), _span(span),
- _past_remark(past_remark), _bit_map(bit_map) { }
-
- virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); }
- virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
-};
-
-void CompactibleFreeListSpace::verify() const {
- assert_lock_strong(&_freelistLock);
- verify_objects_initialized();
- MemRegion span = _collector->_span;
- bool past_remark = (_collector->abstract_state() ==
- CMSCollector::Sweeping);
-
- ResourceMark rm;
- HandleMark hm;
-
- // Check integrity of CFL data structures
- _promoInfo.verify();
- _dictionary->verify();
- if (FLSVerifyIndexTable) {
- verifyIndexedFreeLists();
- }
- // Check integrity of all objects and free blocks in space
- {
- VerifyAllBlksClosure cl(this, span);
- ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const
- }
- // Check that all references in the heap to FLS
- // are to valid objects in FLS or that references in
- // FLS are to valid objects elsewhere in the heap
- if (FLSVerifyAllHeapReferences)
- {
- VerifyAllOopsClosure cl(_collector, this, span, past_remark,
- _collector->markBitMap());
-
- // Iterate over all oops in the heap.
- CMSHeap::heap()->oop_iterate(&cl);
- }
-
- if (VerifyObjectStartArray) {
- // Verify the block offset table
- _bt.verify();
- }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::verifyFreeLists() const {
- if (FLSVerifyLists) {
- _dictionary->verify();
- verifyIndexedFreeLists();
- } else {
- if (FLSVerifyDictionary) {
- _dictionary->verify();
- }
- if (FLSVerifyIndexTable) {
- verifyIndexedFreeLists();
- }
- }
-}
-#endif
-
-void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
- size_t i = 0;
- for (; i < IndexSetStart; i++) {
- guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
- }
- for (; i < IndexSetSize; i++) {
- verifyIndexedFreeList(i);
- }
-}
-
-void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
- FreeChunk* fc = _indexedFreeList[size].head();
- FreeChunk* tail = _indexedFreeList[size].tail();
- size_t num = _indexedFreeList[size].count();
- size_t n = 0;
- guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
- "Slot should have been empty");
- for (; fc != NULL; fc = fc->next(), n++) {
- guarantee(fc->size() == size, "Size inconsistency");
- guarantee(fc->is_free(), "!free?");
- guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
- guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
- }
- guarantee(n == num, "Incorrect count");
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::check_free_list_consistency() const {
- assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
- "Some sizes can't be allocated without recourse to"
- " linear allocation buffers");
- assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
- "else MIN_TREE_CHUNK_SIZE is wrong");
- assert(IndexSetStart != 0, "IndexSetStart not initialized");
- assert(IndexSetStride != 0, "IndexSetStride not initialized");
-}
-#endif
-
-void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
- assert_lock_strong(&_freelistLock);
- LogTarget(Debug, gc, freelist, census) log;
- if (!log.is_enabled()) {
- return;
- }
- AdaptiveFreeList<FreeChunk> total;
- log.print("end sweep# " SIZE_FORMAT, sweep_count);
- ResourceMark rm;
- LogStream ls(log);
- outputStream* out = &ls;
- AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
- size_t total_free = 0;
- for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
- const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
- total_free += fl->count() * fl->size();
- if (i % (40*IndexSetStride) == 0) {
- AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
- }
- fl->print_on(out);
- total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() );
- total.set_surplus( total.surplus() + fl->surplus() );
- total.set_desired( total.desired() + fl->desired() );
- total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() );
- total.set_before_sweep(total.before_sweep() + fl->before_sweep());
- total.set_count( total.count() + fl->count() );
- total.set_coal_births( total.coal_births() + fl->coal_births() );
- total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() );
- total.set_split_births(total.split_births() + fl->split_births());
- total.set_split_deaths(total.split_deaths() + fl->split_deaths());
- }
- total.print_on(out, "TOTAL");
- log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
- log.print("growth: %8.5f deficit: %8.5f",
- (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
- (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
- (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
- _dictionary->print_dict_census(out);
-}
-
-///////////////////////////////////////////////////////////////////////////
-// CompactibleFreeListSpaceLAB
-///////////////////////////////////////////////////////////////////////////
-
-#define VECTOR_257(x) \
- /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
- { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
- x }
-
-// Initialize with default setting for CMS, _not_
-// generic OldPLABSize, whose static default is different; if overridden at the
-// command-line, this will get reinitialized via a call to
-// modify_initialization() below.
-AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[] =
- VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
-size_t CompactibleFreeListSpaceLAB::_global_num_blocks[] = VECTOR_257(0);
-uint CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
-
-CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
- _cfls(cfls)
-{
- assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
- for (size_t i = CompactibleFreeListSpace::IndexSetStart;
- i < CompactibleFreeListSpace::IndexSetSize;
- i += CompactibleFreeListSpace::IndexSetStride) {
- _indexedFreeList[i].set_size(i);
- _num_blocks[i] = 0;
- }
-}
-
-static bool _CFLS_LAB_modified = false;
-
-void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
- assert(!_CFLS_LAB_modified, "Call only once");
- _CFLS_LAB_modified = true;
- for (size_t i = CompactibleFreeListSpace::IndexSetStart;
- i < CompactibleFreeListSpace::IndexSetSize;
- i += CompactibleFreeListSpace::IndexSetStride) {
- _blocks_to_claim[i].modify(n, wt, true /* force */);
- }
-}
-
-HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
- FreeChunk* res;
- assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
- if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
- // This locking manages sync with other large object allocations.
- MutexLocker x(_cfls->parDictionaryAllocLock(),
- Mutex::_no_safepoint_check_flag);
- res = _cfls->getChunkFromDictionaryExact(word_sz);
- if (res == NULL) return NULL;
- } else {
- AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
- if (fl->count() == 0) {
- // Attempt to refill this local free list.
- get_from_global_pool(word_sz, fl);
- // If it didn't work, give up.
- if (fl->count() == 0) return NULL;
- }
- res = fl->get_chunk_at_head();
- assert(res != NULL, "Why was count non-zero?");
- }
- res->markNotFree();
- assert(!res->is_free(), "shouldn't be marked free");
- assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
- // mangle a just allocated object with a distinct pattern.
- debug_only(res->mangleAllocated(word_sz));
- return (HeapWord*)res;
-}
-
-// Get a chunk of blocks of the right size and update related
-// book-keeping stats
-void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
- // Get the #blocks we want to claim
- size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
- assert(n_blks > 0, "Error");
- assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
- // In some cases, when the application has a phase change,
- // there may be a sudden and sharp shift in the object survival
- // profile, and updating the counts at the end of a scavenge
- // may not be quick enough, giving rise to large scavenge pauses
- // during these phase changes. It is beneficial to detect such
- // changes on-the-fly during a scavenge and avoid such a phase-change
- // pothole. The following code is a heuristic attempt to do that.
- // It is protected by a product flag until we have gained
- // enough experience with this heuristic and fine-tuned its behavior.
- // WARNING: This might increase fragmentation if we overreact to
- // small spikes, so some kind of historical smoothing based on
- // previous experience with the greater reactivity might be useful.
- // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
- // default.
- if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
- //
- // On a 32-bit VM, the denominator can become zero because of integer overflow,
- // which is why there is a cast to double.
- //
- size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
- n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
- n_blks = MIN2(n_blks, CMSOldPLABMax);
- }
- assert(n_blks > 0, "Error");
- _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
- // Update stats table entry for this block size
- _num_blocks[word_sz] += fl->count();
-}
-
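The removed get_from_global_pool() above sizes each refill from a smoothed per-size average and, when ResizeOldPLAB and CMSOldPLABResizeQuicker are set, reacts to an in-scavenge demand spike before the end-of-scavenge averaging catches up (the "phase-change pothole" described in the comment). A minimal standalone sketch of that heuristic follows; the function name and the flag values are hypothetical stand-ins, not HotSpot code.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Decide how many blocks of one size to claim from the global pool.
// All parameter defaults are illustrative stand-ins for the CMSOldPLAB* flags.
size_t blocks_to_claim(size_t historical_avg,      // smoothed demand for this size
                       size_t blocks_used_so_far,  // demand seen in this scavenge
                       double tolerance   = 4.0,   // stand-in for CMSOldPLABToleranceFactor
                       size_t num_refills = 4,     // stand-in for CMSOldPLABNumRefills
                       size_t reactivity  = 4,     // stand-in for CMSOldPLABReactivityFactor
                       size_t plab_max    = 1024)  // stand-in for CMSOldPLABMax
{
  size_t n_blks = historical_avg;
  assert(n_blks > 0);
  // If usage already exceeds the tolerated multiple of the historical estimate,
  // react now instead of waiting for the end-of-scavenge averaging.
  size_t multiple = (size_t)(blocks_used_so_far / (tolerance * num_refills * n_blks));
  n_blks += reactivity * multiple * n_blks;
  return std::min(n_blks, plab_max);
}

int main() {
  printf("%zu\n", blocks_to_claim(8, 100));   // within tolerance: claim the average (8)
  printf("%zu\n", blocks_to_claim(8, 1000));  // demand spike: 8 + 4*7*8 = 232
  return 0;
}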
-void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
- for (size_t i = CompactibleFreeListSpace::IndexSetStart;
- i < CompactibleFreeListSpace::IndexSetSize;
- i += CompactibleFreeListSpace::IndexSetStride) {
- assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
- "Counter inconsistency");
- if (_global_num_workers[i] > 0) {
- // Need to smooth wrt historical average
- if (ResizeOldPLAB) {
- _blocks_to_claim[i].sample(
- MAX2(CMSOldPLABMin,
- MIN2(CMSOldPLABMax,
- _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
- }
- // Reset counters for next round
- _global_num_workers[i] = 0;
- _global_num_blocks[i] = 0;
- log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
- }
- }
-}
-
-// If this is changed in the future to allow parallel
-// access, one would need to take the FL locks and,
-// depending on how it is used, stagger access from
-// parallel threads to reduce contention.
-void CompactibleFreeListSpaceLAB::retire(int tid) {
- // We run this single threaded with the world stopped;
- // so no need for locks and such.
- NOT_PRODUCT(Thread* t = Thread::current();)
- assert(Thread::current()->is_VM_thread(), "Error");
- for (size_t i = CompactibleFreeListSpace::IndexSetStart;
- i < CompactibleFreeListSpace::IndexSetSize;
- i += CompactibleFreeListSpace::IndexSetStride) {
- assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
- "Can't retire more than what we obtained");
- if (_num_blocks[i] > 0) {
- size_t num_retire = _indexedFreeList[i].count();
- assert(_num_blocks[i] > num_retire, "Should have used at least one");
- {
- // MutexLocker x(_cfls->_indexedFreeListParLocks[i],
- // Mutex::_no_safepoint_check_flag);
-
- // Update globals stats for num_blocks used
- _global_num_blocks[i] += (_num_blocks[i] - num_retire);
- _global_num_workers[i]++;
- assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
- if (num_retire > 0) {
- _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
- // Reset this list.
- _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
- _indexedFreeList[i].set_size(i);
- }
- }
- log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
- tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
- // Reset stats for next round
- _num_blocks[i] = 0;
- }
- }
-}
-
-// Used by par_get_chunk_of_blocks() for the chunks from the
-// indexed_free_lists. Looks for a chunk with size that is a multiple
-// of "word_sz" and, if found, splits it into "word_sz" chunks and adds
-// them to the free list "fl". "n" is the maximum number of chunks to
-// be added to "fl".
-bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-
- // We'll try all multiples of word_sz in the indexed set, starting with
- // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
- // then try getting a big chunk and splitting it.
- {
- bool found;
- int k;
- size_t cur_sz;
- for (k = 1, cur_sz = k * word_sz, found = false;
- (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
- (CMSSplitIndexedFreeListBlocks || k <= 1);
- k++, cur_sz = k * word_sz) {
- AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty.
- fl_for_cur_sz.set_size(cur_sz);
- {
- MutexLocker x(_indexedFreeListParLocks[cur_sz],
- Mutex::_no_safepoint_check_flag);
- AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
- if (gfl->count() != 0) {
- // nn is the number of chunks of size cur_sz that
- // we'd need to split k-ways each, in order to create
- // "n" chunks of size word_sz each.
- const size_t nn = MAX2(n/k, (size_t)1);
- gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
- found = true;
- if (k > 1) {
- // Update split death stats for the cur_sz-size blocks list:
- // we increment the split death count by the number of blocks
- // we just took from the cur_sz-size blocks list and which
- // we will be splitting below.
- ssize_t deaths = gfl->split_deaths() +
- fl_for_cur_sz.count();
- gfl->set_split_deaths(deaths);
- }
- }
- }
- // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
- if (found) {
- if (k == 1) {
- fl->prepend(&fl_for_cur_sz);
- } else {
- // Divide each block on fl_for_cur_sz up k ways.
- FreeChunk* fc;
- while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
- // Must do this in reverse order, so that anybody attempting to
- // access the main chunk sees it as a single free block until we
- // change it.
- size_t fc_size = fc->size();
- assert(fc->is_free(), "Error");
- for (int i = k-1; i >= 0; i--) {
- FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
- assert((i != 0) ||
- ((fc == ffc) && ffc->is_free() &&
- (ffc->size() == k*word_sz) && (fc_size == word_sz)),
- "Counting error");
- ffc->set_size(word_sz);
- ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
- ffc->link_next(NULL);
- // Above must occur before BOT is updated below.
- OrderAccess::storestore();
- // splitting from the right, fc_size == i * word_sz
- _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
- fc_size -= word_sz;
- assert(fc_size == i*word_sz, "Error");
- _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
- _bt.verify_single_block((HeapWord*)fc, fc_size);
- _bt.verify_single_block((HeapWord*)ffc, word_sz);
- // Push this on "fl".
- fl->return_chunk_at_head(ffc);
- }
- // TRAP
- assert(fl->tail()->next() == NULL, "List invariant.");
- }
- }
- // Update birth stats for this block size.
- size_t num = fl->count();
- MutexLocker x(_indexedFreeListParLocks[word_sz],
- Mutex::_no_safepoint_check_flag);
- ssize_t births = _indexedFreeList[word_sz].split_births() + num;
- _indexedFreeList[word_sz].set_split_births(births);
- return true;
- }
- }
- return found;
- }
-}
-
-FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
-
- FreeChunk* fc = NULL;
- FreeChunk* rem_fc = NULL;
- size_t rem;
- {
- MutexLocker x(parDictionaryAllocLock(),
- Mutex::_no_safepoint_check_flag);
- while (n > 0) {
- fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
- if (fc != NULL) {
- break;
- } else {
- n--;
- }
- }
- if (fc == NULL) return NULL;
- // Otherwise, split up that block.
- assert((ssize_t)n >= 1, "Control point invariant");
- assert(fc->is_free(), "Error: should be a free block");
- _bt.verify_single_block((HeapWord*)fc, fc->size());
- const size_t nn = fc->size() / word_sz;
- n = MIN2(nn, n);
- assert((ssize_t)n >= 1, "Control point invariant");
- rem = fc->size() - n * word_sz;
- // If there is a remainder, and it's too small, allocate one fewer.
- if (rem > 0 && rem < MinChunkSize) {
- n--; rem += word_sz;
- }
- // Note that at this point we may have n == 0.
- assert((ssize_t)n >= 0, "Control point invariant");
-
- // If n is 0, the chunk fc that was found is not large
- // enough to leave a viable remainder. We are unable to
- // allocate even one block. Return fc to the
- // dictionary and return, leaving "fl" empty.
- if (n == 0) {
- returnChunkToDictionary(fc);
- return NULL;
- }
-
- _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk
- dictionary()->dict_census_update(fc->size(),
- true /*split*/,
- false /*birth*/);
-
- // First return the remainder, if any.
- // Note that we hold the lock until we decide if we're going to give
- // back the remainder to the dictionary, since a concurrent allocation
- // may otherwise see the heap as empty. (We're willing to take that
- // hit if the block is a small block.)
- if (rem > 0) {
- size_t prefix_size = n * word_sz;
- rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
- rem_fc->set_size(rem);
- rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
- rem_fc->link_next(NULL);
- // Above must occur before BOT is updated below.
- assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
- OrderAccess::storestore();
- _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
- assert(fc->is_free(), "Error");
- fc->set_size(prefix_size);
- if (rem >= IndexSetSize) {
- returnChunkToDictionary(rem_fc);
- dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
- rem_fc = NULL;
- }
- // Otherwise, return it to the small list below.
- }
- }
- if (rem_fc != NULL) {
- MutexLocker x(_indexedFreeListParLocks[rem],
- Mutex::_no_safepoint_check_flag);
- _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
- _indexedFreeList[rem].return_chunk_at_head(rem_fc);
- smallSplitBirth(rem);
- }
- assert(n * word_sz == fc->size(),
- "Chunk size " SIZE_FORMAT " is not exactly splittable by "
- SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
- fc->size(), n, word_sz);
- return fc;
-}
-
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
-
- FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
-
- if (fc == NULL) {
- return;
- }
-
- size_t n = fc->size() / word_sz;
-
- assert((ssize_t)n > 0, "Consistency");
- // Now do the splitting up.
- // Must do this in reverse order, so that anybody attempting to
- // access the main chunk sees it as a single free block until we
- // change it.
- size_t fc_size = n * word_sz;
- // All but first chunk in this loop
- for (ssize_t i = n-1; i > 0; i--) {
- FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
- ffc->set_size(word_sz);
- ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
- ffc->link_next(NULL);
- // Above must occur before BOT is updated below.
- OrderAccess::storestore();
- // splitting from the right, fc_size == (n - i + 1) * wordsize
- _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
- fc_size -= word_sz;
- _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
- _bt.verify_single_block((HeapWord*)ffc, ffc->size());
- _bt.verify_single_block((HeapWord*)fc, fc_size);
- // Push this on "fl".
- fl->return_chunk_at_head(ffc);
- }
- // First chunk
- assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
- // The blocks above should show their new sizes before the first block below
- fc->set_size(word_sz);
- fc->link_prev(NULL); // idempotent wrt free-ness, see assert above
- fc->link_next(NULL);
- _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
- _bt.verify_single_block((HeapWord*)fc, fc->size());
- fl->return_chunk_at_head(fc);
-
- assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
- {
- // Update the stats for this block size.
- MutexLocker x(_indexedFreeListParLocks[word_sz],
- Mutex::_no_safepoint_check_flag);
- const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
- _indexedFreeList[word_sz].set_split_births(births);
- // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
- // _indexedFreeList[word_sz].set_surplus(new_surplus);
- }
-
- // TRAP
- assert(fl->tail()->next() == NULL, "List invariant.");
-}
-
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
- assert(fl->count() == 0, "Precondition.");
- assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
- "Precondition");
-
- if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
- // Got it
- return;
- }
-
- // Otherwise, we'll split a block from the dictionary.
- par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
-}
-
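Both splitting paths above (the indexed-free-list path and the dictionary path) carve the donor chunk into word_sz-sized pieces from the highest address downward, so a concurrent reader keeps seeing the untouched prefix as a single free block until the leftmost piece is published last. A simplified standalone sketch of that ordering; the Chunk struct and function name are illustrative stand-ins, not the removed FreeChunk/BOT machinery.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Chunk { size_t offset_words; size_t size_words; };  // stand-in for FreeChunk

// Carve a region of n * word_sz words (starting at start_offset) into n equal
// chunks, publishing them right to left; the leftmost chunk is re-sized last.
std::vector<Chunk> split_from_the_right(size_t start_offset, size_t n, size_t word_sz) {
  std::vector<Chunk> published;
  for (size_t i = n; i-- > 1; ) {                  // all but the first chunk
    published.push_back({start_offset + i * word_sz, word_sz});
  }
  published.push_back({start_offset, word_sz});    // finally the first chunk
  return published;
}

int main() {
  for (const Chunk& c : split_from_the_right(0, 4, 16)) {
    printf("published chunk at word %zu (%zu words)\n", c.offset_words, c.size_words);
  }
  // Order: 48, 32, 16, 0 -- a reader scanning from offset 0 still sees one
  // large block until the final step.
  return 0;
}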
-const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
- const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
- return ergo_max;
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel rescan. See CMSParRemarkTask where this is currently used.
-// XXX Need to suitably abstract and generalize this and the next
-// method into one.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_rescan(int n_threads) {
- // The "size" of each task is fixed according to rescan_task_size.
- assert(n_threads > 0, "Unexpected n_threads argument");
- const size_t task_size = rescan_task_size();
- size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
- assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
- assert(n_tasks == 0 ||
- ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
- (used_region().start() + n_tasks*task_size >= used_region().end())),
- "n_tasks calculation incorrect");
- SequentialSubTasksDone* pst = conc_par_seq_tasks();
- assert(!pst->valid(), "Clobbering existing data?");
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks((int)n_tasks);
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_marking(int n_threads,
- HeapWord* low) {
- // The "size" of each task is fixed according to rescan_task_size.
- assert(n_threads > 0, "Unexpected n_threads argument");
- const size_t task_size = marking_task_size();
- assert(task_size > CardTable::card_size_in_words &&
- (task_size % CardTable::card_size_in_words == 0),
- "Otherwise arithmetic below would be incorrect");
- MemRegion span = _old_gen->reserved();
- if (low != NULL) {
- if (span.contains(low)) {
- // Align low down to a card boundary so that
- // we can use block_offset_careful() on span boundaries.
- HeapWord* aligned_low = align_down(low, CardTable::card_size);
- // Clip span prefix at aligned_low
- span = span.intersection(MemRegion(aligned_low, span.end()));
- } else if (low > span.end()) {
- span = MemRegion(low, low); // Null region
- } // else use entire span
- }
- assert(span.is_empty() ||
- ((uintptr_t)span.start() % CardTable::card_size == 0),
- "span should start at a card boundary");
- size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
- assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
- assert(n_tasks == 0 ||
- ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
- (span.start() + n_tasks*task_size >= span.end())),
- "n_tasks calculation incorrect");
- SequentialSubTasksDone* pst = conc_par_seq_tasks();
- assert(!pst->valid(), "Clobbering existing data?");
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks((int)n_tasks);
-}
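Both initialize_sequential_subtasks_* methods above derive the number of claimable sub-tasks by ceiling division of the (card-aligned) region size by a fixed task size. A tiny self-contained sketch of that arithmetic; the function name is illustrative, not the removed SequentialSubTasksDone API.

#include <cassert>
#include <cstddef>
#include <cstdio>

// ceil(span_words / task_size): the last task may cover fewer than task_size words.
size_t n_subtasks(size_t span_words, size_t task_size) {
  assert(task_size > 0);
  return (span_words + task_size - 1) / task_size;
}

int main() {
  printf("%zu\n", n_subtasks(0, 4096));      // empty region   -> 0 tasks
  printf("%zu\n", n_subtasks(8192, 4096));   // exact multiple -> 2 tasks
  printf("%zu\n", n_subtasks(10000, 4096));  // remainder      -> 3 tasks
  return 0;
}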
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,758 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
-#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
-
-#include "gc/cms/adaptiveFreeList.hpp"
-#include "gc/cms/promotionInfo.hpp"
-#include "gc/shared/blockOffsetTable.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/space.hpp"
-#include "logging/log.hpp"
-#include "memory/binaryTreeDictionary.hpp"
-#include "memory/freeList.hpp"
-
-// Classes in support of keeping track of promotions into a non-Contiguous
-// space, in this case a CompactibleFreeListSpace.
-
-// Forward declarations
-class CMSCollector;
-class CompactibleFreeListSpace;
-class ConcurrentMarkSweepGeneration;
-class BlkClosure;
-class BlkClosureCareful;
-class FreeChunk;
-class UpwardsObjectClosure;
-class ObjectClosureCareful;
-class Klass;
-
-class AFLBinaryTreeDictionary : public BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- public:
- AFLBinaryTreeDictionary(MemRegion mr)
- : BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >(mr) {}
-
- // Find the list with size "size" in the binary tree and update
- // the statistics in the list according to "split" (chunk was
- // split or coalesce) and "birth" (chunk was added or removed).
- void dict_census_update(size_t size, bool split, bool birth);
- // Return true if the dictionary is overpopulated (more chunks of
- // this size than desired) for size "size".
- bool coal_dict_over_populated(size_t size);
- // Methods called at the beginning of a sweep to prepare the
- // statistics for the sweep.
- void begin_sweep_dict_census(double coalSurplusPercent,
- float inter_sweep_current,
- float inter_sweep_estimate,
- float intra_sweep_estimate);
- // Methods called after the end of a sweep to modify the
- // statistics for the sweep.
- void end_sweep_dict_census(double splitSurplusPercent);
- // Accessors for statistics
- void set_tree_surplus(double splitSurplusPercent);
- void set_tree_hints(void);
- // Reset statistics for all the lists in the tree.
- void clear_tree_census(void);
- // Print the statistics for all the lists in the tree. Also may
- // print out summaries.
- void print_dict_census(outputStream* st) const;
-};
-
-class LinearAllocBlock {
- public:
- LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
- _allocation_size_limit(0) {}
- void set(HeapWord* ptr, size_t word_size, size_t refill_size,
- size_t allocation_size_limit) {
- _ptr = ptr;
- _word_size = word_size;
- _refillSize = refill_size;
- _allocation_size_limit = allocation_size_limit;
- }
- HeapWord* _ptr;
- size_t _word_size;
- size_t _refillSize;
- size_t _allocation_size_limit; // Largest size that will be allocated
-
- void print_on(outputStream* st) const;
-};
-
-// Concrete subclass of CompactibleSpace that implements
-// a free list space, such as used in the concurrent mark sweep
-// generation.
-
-class CompactibleFreeListSpace: public CompactibleSpace {
- friend class VMStructs;
- friend class ConcurrentMarkSweepGeneration;
- friend class CMSCollector;
- // Local alloc buffer for promotion into this space.
- friend class CompactibleFreeListSpaceLAB;
- // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
- template <typename SpaceType>
- friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
- template <typename SpaceType>
- friend void CompactibleSpace::scan_and_compact(SpaceType* space);
- template <typename SpaceType>
- friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
- template <typename SpaceType>
- friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
-
- // "Size" of chunks of work (executed during parallel remark phases
- // of CMS collection); this probably belongs in CMSCollector, although
- // it's cached here because it's used in
- // initialize_sequential_subtasks_for_rescan() which modifies
- // par_seq_tasks which also lives in Space. XXX
- const size_t _rescan_task_size;
- const size_t _marking_task_size;
-
- // Yet another sequential tasks done structure. This supports
- // CMS GC, where we have threads dynamically
- // claiming sub-tasks from a larger parallel task.
- SequentialSubTasksDone _conc_par_seq_tasks;
-
- BlockOffsetArrayNonContigSpace _bt;
-
- CMSCollector* _collector;
- ConcurrentMarkSweepGeneration* _old_gen;
-
- // Data structures for free blocks (used during allocation/sweeping)
-
- // Allocation is done linearly from two different blocks depending on
- // whether the request is small or large, in an effort to reduce
- // fragmentation. We assume that any locking for allocation is done
- // by the containing generation. Thus, none of the methods in this
- // space are re-entrant.
- enum SomeConstants {
- SmallForLinearAlloc = 16, // size < this then use _sLAB
- SmallForDictionary = 257, // size < this then use _indexedFreeList
- IndexSetSize = SmallForDictionary // keep this odd-sized
- };
- static size_t IndexSetStart;
- static size_t IndexSetStride;
- static size_t _min_chunk_size_in_bytes;
-
- private:
- enum FitStrategyOptions {
- FreeBlockStrategyNone = 0,
- FreeBlockBestFitFirst
- };
-
- PromotionInfo _promoInfo;
-
- // Helps to impose a global total order on freelistLock ranks;
- // assumes that CFLSpace's are allocated in global total order
- static int _lockRank;
-
- // A lock protecting the free lists and free blocks;
- // mutable because of ubiquity of locking even for otherwise const methods
- mutable Mutex _freelistLock;
-
- // Locking verifier convenience function
- void assert_locked() const PRODUCT_RETURN;
- void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
-
- // Linear allocation blocks
- LinearAllocBlock _smallLinearAllocBlock;
-
- AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks
-
- // Indexed array for small size blocks
- AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-
- // Allocation strategy
- bool _fitStrategy; // Use best fit strategy
-
- // This is an address close to the largest free chunk in the heap.
- // It is currently assumed to be at the end of the heap. Free
- // chunks with addresses greater than nearLargestChunk are coalesced
- // in an effort to maintain a large chunk at the end of the heap.
- HeapWord* _nearLargestChunk;
-
- // Used to keep track of limit of sweep for the space
- HeapWord* _sweep_limit;
-
- // Stable value of used().
- size_t _used_stable;
-
- // Used to make the young collector update the mod union table
- MemRegionClosure* _preconsumptionDirtyCardClosure;
-
- // Support for compacting cms
- HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
- HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
-
- // Initialization helpers.
- void initializeIndexedFreeListArray();
-
- // Extra stuff to manage promotion parallelism.
-
- // A lock protecting the dictionary during par promotion allocation.
- mutable Mutex _parDictionaryAllocLock;
- Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
-
- // Locks protecting the exact lists during par promotion allocation.
- Mutex* _indexedFreeListParLocks[IndexSetSize];
-
- // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
- // required to be smaller than "IndexSetSize".) If successful,
- // adds them to "fl", which is required to be an empty free list.
-  // If the count of "fl" is negative, its absolute value indicates a
-  // number of free chunks that had been previously "borrowed" from the
-  // global list of size "word_sz", and must now be decremented.
- void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
- // Used by par_get_chunk_of_blocks() for the chunks from the
- // indexed_free_lists.
- bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
- // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
- // evenly splittable into "n" "word_sz" chunks. Returns that
- // evenly splittable chunk. May split a larger chunk to get the
- // evenly splittable chunk.
- FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
-
- // Used by par_get_chunk_of_blocks() for the chunks from the
- // dictionary.
- void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
- // Allocation helper functions
- // Allocate using a strategy that takes from the indexed free lists
- // first. This allocation strategy assumes a companion sweeping
- // strategy that attempts to keep the needed number of chunks in each
-  // indexed free list.
- HeapWord* allocate_adaptive_freelists(size_t size);
-
- // Gets a chunk from the linear allocation block (LinAB). If there
- // is not enough space in the LinAB, refills it.
- HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
- HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
- // Get a chunk from the space remaining in the linear allocation block. Do
- // not attempt to refill if the space is not available, return NULL. Do the
- // repairs on the linear allocation block as appropriate.
- HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
- inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);
-
- // Helper function for getChunkFromIndexedFreeList.
- // Replenish the indexed free list for this "size". Do not take from an
- // underpopulated size.
- FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
-
- // Get a chunk from the indexed free list. If the indexed free list
- // does not have a free chunk, try to replenish the indexed free list
- // then get the free chunk from the replenished indexed free list.
- inline FreeChunk* getChunkFromIndexedFreeList(size_t size);
-
- // The returned chunk may be larger than requested (or null).
- FreeChunk* getChunkFromDictionary(size_t size);
- // The returned chunk is the exact size requested (or null).
- FreeChunk* getChunkFromDictionaryExact(size_t size);
-
- // Find a chunk in the indexed free list that is the best
- // fit for size "numWords".
- FreeChunk* bestFitSmall(size_t numWords);
- // For free list "fl" of chunks of size > numWords,
- // remove a chunk, split off a chunk of size numWords
- // and return it. The split off remainder is returned to
- // the free lists. The old name for getFromListGreater
- // was lookInListGreater.
- FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
- // Get a chunk in the indexed free list or dictionary,
- // by considering a larger chunk and splitting it.
- FreeChunk* getChunkFromGreater(size_t numWords);
- // Verify that the given chunk is in the indexed free lists.
- bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
- // Remove the specified chunk from the indexed free lists.
- void removeChunkFromIndexedFreeList(FreeChunk* fc);
- // Remove the specified chunk from the dictionary.
- void removeChunkFromDictionary(FreeChunk* fc);
- // Split a free chunk into a smaller free chunk of size "new_size".
- // Return the smaller free chunk and return the remainder to the
- // free lists.
- FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
- // Add a chunk to the free lists.
- void addChunkToFreeLists(HeapWord* chunk, size_t size);
- // Add a chunk to the free lists, preferring to suffix it
- // to the last free chunk at end of space if possible, and
- // updating the block census stats as well as block offset table.
- // Take any locks as appropriate if we are multithreaded.
- void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
- // Add a free chunk to the indexed free lists.
- void returnChunkToFreeList(FreeChunk* chunk);
- // Add a free chunk to the dictionary.
- void returnChunkToDictionary(FreeChunk* chunk);
-
- // Functions for maintaining the linear allocation buffers (LinAB).
- // Repairing a linear allocation block refers to operations
- // performed on the remainder of a LinAB after an allocation
- // has been made from it.
- void repairLinearAllocationBlocks();
- void repairLinearAllocBlock(LinearAllocBlock* blk);
- void refillLinearAllocBlock(LinearAllocBlock* blk);
- void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
- void refillLinearAllocBlocksIfNeeded();
-
- void verify_objects_initialized() const;
-
- // Statistics reporting helper functions
- void reportFreeListStatistics(const char* title) const;
- void reportIndexedFreeListStatistics(outputStream* st) const;
- size_t maxChunkSizeInIndexedFreeLists() const;
- size_t numFreeBlocksInIndexedFreeLists() const;
- // Accessor
- HeapWord* unallocated_block() const {
- if (BlockOffsetArrayUseUnallocatedBlock) {
- HeapWord* ub = _bt.unallocated_block();
- assert(ub >= bottom() &&
- ub <= end(), "space invariant");
- return ub;
- } else {
- return end();
- }
- }
- void freed(HeapWord* start, size_t size) {
- _bt.freed(start, size);
- }
-
- // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
- // See comments for CompactibleSpace for more information.
- inline HeapWord* scan_limit() const {
- return end();
- }
-
- inline bool scanned_block_is_obj(const HeapWord* addr) const {
- return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
- }
-
- inline size_t scanned_block_size(const HeapWord* addr) const {
- return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
- }
-
- inline size_t adjust_obj_size(size_t size) const {
- return adjustObjectSize(size);
- }
-
- inline size_t obj_size(const HeapWord* addr) const;
-
- protected:
- // Reset the indexed free list to its initial empty condition.
- void resetIndexedFreeListArray();
- // Reset to an initial state with a single free block described
- // by the MemRegion parameter.
- void reset(MemRegion mr);
- // Return the total number of words in the indexed free lists.
- size_t totalSizeInIndexedFreeLists() const;
-
- public:
- // Constructor
- CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
- // Accessors
- bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
- AFLBinaryTreeDictionary* dictionary() const { return _dictionary; }
- HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
- void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
-
- // Set CMS global values.
- static void set_cms_values();
-
- // Return the free chunk at the end of the space. If no such
- // chunk exists, return NULL.
- FreeChunk* find_chunk_at_end();
-
- void set_collector(CMSCollector* collector) { _collector = collector; }
-
- // Support for parallelization of rescan and marking.
- const size_t rescan_task_size() const { return _rescan_task_size; }
- const size_t marking_task_size() const { return _marking_task_size; }
- // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple.
- const size_t max_flag_size_for_task_size() const;
- SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
- void initialize_sequential_subtasks_for_rescan(int n_threads);
- void initialize_sequential_subtasks_for_marking(int n_threads,
- HeapWord* low = NULL);
-
- virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
- return _preconsumptionDirtyCardClosure;
- }
-
- void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
- _preconsumptionDirtyCardClosure = cl;
- }
-
- // Space enquiries
- size_t used() const;
- size_t free() const;
- size_t max_alloc_in_words() const;
- // XXX: should have a less conservative used_region() than that of
- // Space; we could consider keeping track of highest allocated
- // address and correcting that at each sweep, as the sweeper
- // goes through the entire allocated part of the generation. We
- // could also use that information to keep the sweeper from
- // sweeping more than is necessary. The allocator and sweeper will
- // of course need to synchronize on this, since the sweeper will
- // try to bump down the address and the allocator will try to bump it up.
- // For now, however, we'll just use the default used_region()
- // which overestimates the region by returning the entire
- // committed region (this is safe, but inefficient).
-
- // Returns monotonically increasing stable used space bytes for CMS.
- // This is required for jstat and other memory monitoring tools
- // that might otherwise see inconsistent used space values during a garbage
- // collection, promotion or allocation into compactibleFreeListSpace.
- // The value returned by this function might be smaller than the
- // actual value.
- size_t used_stable() const;
- // Recalculate and cache the current stable used() value. Only to be called
- // in places where we can be sure that the result is stable.
- void recalculate_used_stable();
-
- // Returns a subregion of the space containing all the objects in
- // the space.
- MemRegion used_region() const {
- return MemRegion(bottom(),
- BlockOffsetArrayUseUnallocatedBlock ?
- unallocated_block() : end());
- }
-
- virtual bool is_free_block(const HeapWord* p) const;
-
- // Resizing support
- void set_end(HeapWord* value); // override
-
- // Never mangle CompactibleFreeListSpace
- void mangle_unused_area() {}
- void mangle_unused_area_complete() {}
-
- // Mutual exclusion support
- Mutex* freelistLock() const { return &_freelistLock; }
-
- // Iteration support
- void oop_iterate(OopIterateClosure* cl);
-
- void object_iterate(ObjectClosure* blk);
- // Apply the closure to each object in the space whose references
- // point to objects in the heap. The usage of CompactibleFreeListSpace
-  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
-  // objects in the space to contain references to objects that are no
-  // longer valid. For example, an object may reference another object
-  // that has already been swept up (collected). This method uses
-  // obj_is_alive() to determine whether it is safe to iterate over
- // an object.
- void safe_object_iterate(ObjectClosure* blk);
-
- // Iterate over all objects that intersect with mr, calling "cl->do_object"
- // on each. There is an exception to this: if this closure has already
- // been invoked on an object, it may skip such objects in some cases. This is
-  // most likely to happen in an "upwards" (ascending address) iteration of
- // MemRegions.
- void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
- // Requires that "mr" be entirely within the space.
- // Apply "cl->do_object" to all objects that intersect with "mr".
- // If the iteration encounters an unparseable portion of the region,
- // terminate the iteration and return the address of the start of the
- // subregion that isn't done. Return of "NULL" indicates that the
- // iteration completed.
- HeapWord* object_iterate_careful_m(MemRegion mr,
- ObjectClosureCareful* cl);
-
- // Override: provides a DCTO_CL specific to this kind of space.
- DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
- CardTable::PrecisionStyle precision,
- HeapWord* boundary,
- bool parallel);
-
- void blk_iterate(BlkClosure* cl);
- void blk_iterate_careful(BlkClosureCareful* cl);
- HeapWord* block_start_const(const void* p) const;
- HeapWord* block_start_careful(const void* p) const;
- size_t block_size(const HeapWord* p) const;
- size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
- bool block_is_obj(const HeapWord* p) const;
- bool obj_is_alive(const HeapWord* p) const;
- size_t block_size_nopar(const HeapWord* p) const;
- bool block_is_obj_nopar(const HeapWord* p) const;
-
- // Iteration support for promotion
- void save_marks();
- bool no_allocs_since_save_marks();
-
- // Iteration support for sweeping
- void save_sweep_limit() {
- _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
- unallocated_block() : end();
- log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
- " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
- p2i(_sweep_limit), p2i(bottom()), p2i(end()));
- }
- NOT_PRODUCT(
- void clear_sweep_limit() { _sweep_limit = NULL; }
- )
- HeapWord* sweep_limit() { return _sweep_limit; }
-
- // Apply "blk->do_oop" to the addresses of all reference fields in objects
- // promoted into this generation since the most recent save_marks() call.
- // Fields in objects allocated by applications of the closure
- // *are* included in the iteration. Thus, when the iteration completes
- // there should be no further such objects remaining.
- template <typename OopClosureType>
- void oop_since_save_marks_iterate(OopClosureType* blk);
-
- // Allocation support
- HeapWord* allocate(size_t size);
- HeapWord* par_allocate(size_t size);
-
- oop promote(oop obj, size_t obj_size);
- void gc_prologue();
- void gc_epilogue();
-
- // This call is used by a containing CMS generation / collector
- // to inform the CFLS space that a sweep has been completed
- // and that the space can do any related house-keeping functions.
- void sweep_completed();
-
- // For an object in this space, the mark-word's two
- // LSB's having the value [11] indicates that it has been
- // promoted since the most recent call to save_marks() on
- // this generation and has not subsequently been iterated
- // over (using oop_since_save_marks_iterate() above).
- // This property holds only for single-threaded collections,
- // and is typically used for Cheney scans; for MT scavenges,
- // the property holds for all objects promoted during that
- // scavenge for the duration of the scavenge and is used
- // by card-scanning to avoid scanning objects (being) promoted
- // during that scavenge.
- bool obj_allocated_since_save_marks(const oop obj) const {
- assert(is_in_reserved(obj), "Wrong space?");
- return ((PromotedObject*)obj)->hasPromotedMark();
- }
-
- // A worst-case estimate of the space required (in HeapWords) to expand the
- // heap when promoting an obj of size obj_size.
- size_t expansionSpaceRequired(size_t obj_size) const;
-
- FreeChunk* allocateScratch(size_t size);
-
- // Returns true if either the small or large linear allocation buffer is empty.
- bool linearAllocationWouldFail() const;
-
- // Adjust the chunk for the minimum size. This version is called in
- // most cases in CompactibleFreeListSpace methods.
- inline static size_t adjustObjectSize(size_t size) {
- return align_object_size(MAX2(size, (size_t)MinChunkSize));
- }
- // This is a virtual version of adjustObjectSize() that is called
- // only occasionally when the compaction space changes and the type
-  // of the new compaction space is only known to be CompactibleSpace.
- size_t adjust_object_size_v(size_t size) const {
- return adjustObjectSize(size);
- }
- // Minimum size of a free block.
- virtual size_t minimum_free_block_size() const { return MinChunkSize; }
- void removeFreeChunkFromFreeLists(FreeChunk* chunk);
- void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
- bool coalesced);
-
- // Support for compaction.
- void prepare_for_compaction(CompactPoint* cp);
- void adjust_pointers();
- void compact();
- // Reset the space to reflect the fact that a compaction of the
- // space has been done.
- virtual void reset_after_compaction();
-
- // Debugging support.
- void print() const;
- void print_on(outputStream* st) const;
- void prepare_for_verify();
- void verify() const;
- void verifyFreeLists() const PRODUCT_RETURN;
- void verifyIndexedFreeLists() const;
- void verifyIndexedFreeList(size_t size) const;
- // Verify that the given chunk is in the free lists:
- // i.e. either the binary tree dictionary, the indexed free lists
- // or the linear allocation block.
- bool verify_chunk_in_free_list(FreeChunk* fc) const;
- // Verify that the given chunk is the linear allocation block.
- bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
-  // Do some basic checks on the free lists.
- void check_free_list_consistency() const PRODUCT_RETURN;
-
- // Printing support
- void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
- void print_indexed_free_lists(outputStream* st) const;
- void print_dictionary_free_lists(outputStream* st) const;
- void print_promo_info_blocks(outputStream* st) const;
-
- NOT_PRODUCT (
- void initializeIndexedFreeListArrayReturnedBytes();
- size_t sumIndexedFreeListArrayReturnedBytes();
- // Return the total number of chunks in the indexed free lists.
- size_t totalCountInIndexedFreeLists() const;
- // Return the total number of chunks in the space.
- size_t totalCount();
- )
-
- // The census consists of counts of the quantities such as
- // the current count of the free chunks, number of chunks
- // created as a result of the split of a larger chunk or
-  // coalescing of smaller chunks, etc. The counts in the
-  // census are used to make decisions on splitting and
- // coalescing of chunks during the sweep of garbage.
-
- // Print the statistics for the free lists.
- void printFLCensus(size_t sweep_count) const;
-
- // Statistics functions
- // Initialize census for lists before the sweep.
- void beginSweepFLCensus(float inter_sweep_current,
- float inter_sweep_estimate,
- float intra_sweep_estimate);
- // Set the surplus for each of the free lists.
- void setFLSurplus();
- // Set the hint for each of the free lists.
- void setFLHints();
- // Clear the census for each of the free lists.
- void clearFLCensus();
- // Perform functions for the census after the end of the sweep.
- void endSweepFLCensus(size_t sweep_count);
- // Return true if the count of free chunks is greater
- // than the desired number of free chunks.
- bool coalOverPopulated(size_t size);
-
-// Record (for each size):
-//
-// split-births = #chunks added due to splits in (prev-sweep-end,
-// this-sweep-start)
-// split-deaths = #chunks removed for splits in (prev-sweep-end,
-// this-sweep-start)
-// num-curr = #chunks at start of this sweep
-// num-prev = #chunks at end of previous sweep
-//
-// The above are quantities that are measured. Now define:
-//
-// num-desired := num-prev + split-births - split-deaths - num-curr
-//
-// Roughly, num-prev + split-births is the supply,
-// split-deaths is demand due to other sizes
-// and num-curr is what we have left.
-//
-// Thus, num-desired is roughly speaking the "legitimate demand"
-// for blocks of this size and what we are striving to reach at the
-// end of the current sweep.
-//
-// For a given list, let num-len be its current population.
-// Define, for a free list of a given size:
-//
-// coal-overpopulated := num-len >= num-desired * coal-surplus
-// (coal-surplus is set to 1.05, i.e. we allow a little slop when
-// coalescing -- we do not coalesce unless we think that the current
-// supply has exceeded the estimated demand by more than 5%).
-//
-// For the set of sizes in the binary tree, which is neither dense nor
-// closed, it may be the case that for a particular size we have never
-// had, or do not now have, or did not have at the previous sweep,
-// chunks of that size. We need to extend the definition of
-// coal-overpopulated to such sizes as well:
-//
-// For a chunk in/not in the binary tree, extend coal-overpopulated
-// defined above to include all sizes as follows:
-//
-// . a size that is non-existent is coal-overpopulated
-// . a size that has a num-desired <= 0 as defined above is
-// coal-overpopulated.
-//
-// Also define, for a chunk heap-offset C and mountain heap-offset M:
-//
-// close-to-mountain := C >= 0.99 * M
-//
-// Now, the coalescing strategy is:
-//
-// Coalesce left-hand chunk with right-hand chunk if and
-// only if:
-//
-// EITHER
-// . left-hand chunk is of a size that is coal-overpopulated
-// OR
-// . right-hand chunk is close-to-mountain
- void smallCoalBirth(size_t size);
- void smallCoalDeath(size_t size);
- void coalBirth(size_t size);
- void coalDeath(size_t size);
- void smallSplitBirth(size_t size);
- void smallSplitDeath(size_t size);
- void split_birth(size_t size);
- void splitDeath(size_t size);
- void split(size_t from, size_t to1);
-
- double flsFrag() const;
-};
-
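The census comment above defines num-desired and the coal-overpopulated test that drives coalescing decisions during the sweep. A small sketch of those two formulas, with plain integers standing in for the AdaptiveFreeList census fields (illustrative only, not the removed HotSpot accessors).

#include <cstdio>

struct SizeCensus {
  long num_prev;      // chunks of this size at the end of the previous sweep
  long split_births;  // chunks of this size created by splits since then
  long split_deaths;  // chunks of this size consumed by splits since then
  long num_curr;      // chunks of this size at the start of this sweep
};

// num-desired := num-prev + split-births - split-deaths - num-curr
long num_desired(const SizeCensus& c) {
  return c.num_prev + c.split_births - c.split_deaths - c.num_curr;
}

// coal-overpopulated := current population >= num-desired * coal-surplus (1.05);
// non-existent or non-demanded sizes (num-desired <= 0) count as overpopulated.
bool coal_overpopulated(const SizeCensus& c, double coal_surplus = 1.05) {
  long desired = num_desired(c);
  return desired <= 0 || c.num_curr >= desired * coal_surplus;
}

int main() {
  SizeCensus in_demand = {100, 50, 120, 10};  // desired = 20, population 10: keep
  SizeCensus surplus   = {100, 10,   5, 150}; // desired = -45: coalesce freely
  printf("in_demand overpopulated: %d\n", coal_overpopulated(in_demand)); // 0
  printf("surplus   overpopulated: %d\n", coal_overpopulated(surplus));   // 1
  return 0;
}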
-// A parallel-GC-thread-local allocation buffer for allocation into a
-// CompactibleFreeListSpace.
-class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
- // The space that this buffer allocates into.
- CompactibleFreeListSpace* _cfls;
-
- // Our local free lists.
- AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
-
- // Initialized from a command-line arg.
-
- // Allocation statistics in support of dynamic adjustment of
- // #blocks to claim per get_from_global_pool() call below.
- static AdaptiveWeightedAverage
- _blocks_to_claim [CompactibleFreeListSpace::IndexSetSize];
- static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
- static uint _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
- size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
-
- // Internal work method
- void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
-
-public:
- static const int _default_dynamic_old_plab_size = 16;
- static const int _default_static_old_plab_size = 50;
-
- CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
-
- // Allocate and return a block of the given size, or else return NULL.
- HeapWord* alloc(size_t word_sz);
-
- // Return any unused portions of the buffer to the global pool.
- void retire(int tid);
-
- // Dynamic OldPLABSize sizing
- static void compute_desired_plab_size();
- // When the settings are modified from default static initialization
- static void modify_initialization(size_t n, unsigned wt);
-};
-
-size_t PromotionInfo::refillSize() const {
- const size_t CMSSpoolBlockSize = 256;
- const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
- * CMSSpoolBlockSize);
- return CompactibleFreeListSpace::adjustObjectSize(sz);
-}
-
-#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
-#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
-
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/promotionInfo.inline.hpp"
-
-template <typename OopClosureType>
-void CompactibleFreeListSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
- _promoInfo.promoted_oops_iterate(blk);
-
- // This also restores any displaced headers and removes the elements from
- // the iteration set as they are processed, so that we have a clean slate
- // at the end of the iteration. Note, thus, that if new objects are
- // promoted as a result of the iteration they are iterated over as well.
- assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-}
-
-#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,8145 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/cms/cmsGCStats.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsOopClosures.inline.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/cms/promotionInfo.inline.hpp"
-#include "gc/serial/genMarkSweep.hpp"
-#include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/padded.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/memoryService.hpp"
-#include "services/runtimeService.hpp"
-#include "utilities/align.hpp"
-#include "utilities/stack.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-// statics
-CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
-bool CMSCollector::_full_gc_requested = false;
-GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
-
-//////////////////////////////////////////////////////////////////
-// In support of CMS/VM thread synchronization
-//////////////////////////////////////////////////////////////////
-// We split use of the CGC_lock into 2 "levels".
-// The low-level locking is of the usual CGC_lock monitor. We introduce
-// a higher level "token" (hereafter "CMS token") built on top of the
-// low level monitor (hereafter "CGC lock").
-// The token-passing protocol gives priority to the VM thread. The
-// CMS-lock doesn't provide any fairness guarantees, but clients
-// should ensure that it is only held for very short, bounded
-// durations.
-//
-// When either of the CMS thread or the VM thread is involved in
-// collection operations during which it does not want the other
-// thread to interfere, it obtains the CMS token.
-//
-// If either thread tries to get the token while the other has
-// it, that thread waits. However, if the VM thread and CMS thread
-// both want the token, then the VM thread gets priority while the
-// CMS thread waits. This ensures, for instance, that the "concurrent"
-// phases of the CMS thread's work do not block out the VM thread
-// for long periods of time as the CMS thread continues to hog
-// the token. (See bug 4616232).
-//
-// The baton-passing functions are, however, controlled by the
-// flags _foregroundGCShouldWait and _foregroundGCIsActive,
-// and here the low-level CMS lock, not the high level token,
-// ensures mutual exclusion.
-//
-// Two important conditions that we have to satisfy:
-// 1. if a thread does a low-level wait on the CMS lock, then it
-// relinquishes the CMS token if it were holding that token
-// when it acquired the low-level CMS lock.
-// 2. any low-level notifications on the low-level lock
-// should only be sent when a thread has relinquished the token.
-//
-// In the absence of either property, we'd have potential deadlock.
-//
-// We protect each of the CMS (concurrent and sequential) phases
-// with the CMS _token_, not the CMS _lock_.
-//
-// The only code protected by CMS lock is the token acquisition code
-// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
-// baton-passing code.
-//
-// Unfortunately, I couldn't come up with a good abstraction to factor and
-// hide the naked CGC_lock manipulation in the baton-passing code
-// further below. That's something we should try to do. Also, the proof
-// of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy suspicion, for instance,
-// that there may be a theoretical possibility of delay/starvation in the
-// low-level lock/wait/notify scheme used for the baton-passing because of
-// potential interference with the priority scheme embodied in the
-// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
-// invocation further below and marked with "XXX 20011219YSR".
-// Indeed, as we note elsewhere, this may become yet more slippery
-// in the presence of multiple CMS and/or multiple VM threads. XXX
-
-class CMSTokenSync: public StackObj {
- private:
- bool _is_cms_thread;
- public:
- CMSTokenSync(bool is_cms_thread):
- _is_cms_thread(is_cms_thread) {
- assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
- "Incorrect argument to constructor");
- ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
- }
-
- ~CMSTokenSync() {
- assert(_is_cms_thread ?
- ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
- ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
- "Incorrect state");
- ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
- }
-};
-
-// Convenience class that does a CMSTokenSync, and then acquires
-// up to three locks.
-class CMSTokenSyncWithLocks: public CMSTokenSync {
- private:
- // Note: locks are acquired in textual declaration order
- // and released in the opposite order
- MutexLocker _locker1, _locker2, _locker3;
- public:
- CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
- Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
- CMSTokenSync(is_cms_thread),
- _locker1(mutex1, Mutex::_no_safepoint_check_flag),
- _locker2(mutex2, Mutex::_no_safepoint_check_flag),
- _locker3(mutex3, Mutex::_no_safepoint_check_flag)
- { }
-};
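A minimal usage sketch of the RAII helpers above; the braces and the phase body are hypothetical, while CMSTokenSyncWithLocks and the free list lock are the names used in this file:

// Illustrative sketch only: scope one bounded chunk of concurrent work
// under the CMS token plus the old gen's free list lock.
{
  CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
                           _cmsGen->cmsSpace()->freelistLock());
  // ... do a short, bounded piece of the concurrent phase ...
}   // member lockers unlock first, then ~CMSTokenSync passes the token back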
-
-
-//////////////////////////////////////////////////////////////////
-// Concurrent Mark-Sweep Generation /////////////////////////////
-//////////////////////////////////////////////////////////////////
-
-NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
-
-// This struct contains per-thread things necessary to support parallel
-// young-gen collection.
-class CMSParGCThreadState: public CHeapObj<mtGC> {
- public:
- CompactibleFreeListSpaceLAB lab;
- PromotionInfo promo;
-
- // Constructor.
- CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
- promo.setSpace(cfls);
- }
-};
-
-ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
- ReservedSpace rs,
- size_t initial_byte_size,
- size_t min_byte_size,
- size_t max_byte_size,
- CardTableRS* ct) :
- CardGeneration(rs, initial_byte_size, ct),
- _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
- _did_compact(false)
-{
- HeapWord* bottom = (HeapWord*) _virtual_space.low();
- HeapWord* end = (HeapWord*) _virtual_space.high();
-
- _direct_allocated_words = 0;
- NOT_PRODUCT(
- _numObjectsPromoted = 0;
- _numWordsPromoted = 0;
- _numObjectsAllocated = 0;
- _numWordsAllocated = 0;
- )
-
- _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
- NOT_PRODUCT(debug_cms_space = _cmsSpace;)
- _cmsSpace->_old_gen = this;
-
- _gc_stats = new CMSGCStats();
-
- // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
- // offsets match. The ability to tell free chunks from objects
- // depends on this property.
- debug_only(
- FreeChunk* junk = NULL;
- assert(UseCompressedClassPointers ||
- junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
- "Offset of FreeChunk::_prev within FreeChunk must match"
- " that of OopDesc::_klass within OopDesc");
- )
-
- _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
- }
-
- _incremental_collection_failed = false;
- // The "dilatation_factor" is the expansion that can occur on
- // account of the fact that the minimum object size in the CMS
- // generation may be larger than that in, say, a contiguous young
- // generation.
- // Ideally, in the calculation below, we'd compute the dilatation
- // factor as: MinChunkSize/(promoting_gen's min object size)
- // Since we do not have such a general query interface for the
- // promoting generation, we'll instead just use the minimum
- // object size (which today is a header's worth of space);
- // note that all arithmetic is in units of HeapWords.
- assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
- assert(_dilatation_factor >= 1.0, "from previous assert");
-
- initialize_performance_counters(min_byte_size, max_byte_size);
-}
-
-
-// The field "_initiating_occupancy" represents the occupancy percentage
-// at which we trigger a new collection cycle. Unless explicitly specified
-// via CMSInitiatingOccupancyFraction (argument "io" below), it
-// is calculated by:
-//
-// Let "f" be MinHeapFreeRatio in
-//
-// _initiating_occupancy = 100-f +
-// f * (CMSTriggerRatio/100)
-// where CMSTriggerRatio is the argument "tr" below.
-//
-// That is, if we assume the heap is at its desired maximum occupancy at the
-// end of a collection, we let CMSTriggerRatio of the (purported) free
-// space be allocated before initiating a new collection cycle.
-//
-void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
- assert(io <= 100 && tr <= 100, "Check the arguments");
- if (io >= 0) {
- _initiating_occupancy = (double)io / 100.0;
- } else {
- _initiating_occupancy = ((100 - MinHeapFreeRatio) +
- (double)(tr * MinHeapFreeRatio) / 100.0)
- / 100.0;
- }
-}
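A worked instance of the formula above, assuming the default flag values (CMSInitiatingOccupancyFraction = -1, MinHeapFreeRatio = 40, CMSTriggerRatio = 80):

//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
// i.e. by default a concurrent cycle is considered once the old
// generation is about 92% occupied.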
-
-void ConcurrentMarkSweepGeneration::ref_processor_init() {
- assert(collector() != NULL, "no collector");
- collector()->ref_processor_init();
-}
-
-void CMSCollector::ref_processor_init() {
- if (_ref_processor == NULL) {
- // Allocate and initialize a reference processor
- _ref_processor =
- new ReferenceProcessor(&_span_based_discoverer,
- (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
- ParallelGCThreads, // mt processing degree
- _cmsGen->refs_discovery_is_mt(), // mt discovery
- MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
- _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
- &_is_alive_closure, // closure for liveness info
- false); // disable adjusting number of processing threads
- // Initialize the _ref_processor field of CMSGen
- _cmsGen->set_ref_processor(_ref_processor);
-
- }
-}
-
-AdaptiveSizePolicy* CMSCollector::size_policy() {
- return CMSHeap::heap()->size_policy();
-}
-
-void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
- size_t max_old_size) {
-
- const char* gen_name = "old";
- // Generation Counters - generation 1, 1 subspace
- _gen_counters = new GenerationCounters(gen_name, 1, 1,
- min_old_size, max_old_size, &_virtual_space);
-
- _space_counters = new GSpaceCounters(gen_name, 0,
- _virtual_space.reserved_size(),
- this, _gen_counters);
-}
-
-CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
- _cms_gen(cms_gen)
-{
- assert(alpha <= 100, "bad value");
- _saved_alpha = alpha;
-
- // Initialize the alphas to the bootstrap value of 100.
- _gc0_alpha = _cms_alpha = 100;
-
- _cms_begin_time.update();
- _cms_end_time.update();
-
- _gc0_duration = 0.0;
- _gc0_period = 0.0;
- _gc0_promoted = 0;
-
- _cms_duration = 0.0;
- _cms_period = 0.0;
- _cms_allocated = 0;
-
- _cms_used_at_gc0_begin = 0;
- _cms_used_at_gc0_end = 0;
- _allow_duty_cycle_reduction = false;
- _valid_bits = 0;
-}
-
-double CMSStats::cms_free_adjustment_factor(size_t free) const {
- // TBD: CR 6909490
- return 1.0;
-}
-
-void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
-}
-
-// If promotion failure handling is on use
-// the padded average size of the promotion for each
-// young generation collection.
-double CMSStats::time_until_cms_gen_full() const {
- size_t cms_free = _cms_gen->cmsSpace()->free();
- CMSHeap* heap = CMSHeap::heap();
- size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
- (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
- if (cms_free > expected_promotion) {
- // Start a cms collection if there isn't enough space to promote
- // for the next young collection. Use the padded average as
- // a safety factor.
- cms_free -= expected_promotion;
-
- // Adjust by the safety factor.
- double cms_free_dbl = (double)cms_free;
- double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
- // Apply a further correction factor which tries to adjust
-    // for recent occurrence of concurrent mode failures.
- cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
- cms_free_dbl = cms_free_dbl * cms_adjustment;
-
- log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
- cms_free, expected_promotion);
- log_trace(gc)(" cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
- // Add 1 in case the consumption rate goes to zero.
- return cms_free_dbl / (cms_consumption_rate() + 1.0);
- }
- return 0.0;
-}
-
-// Compare the duration of the cms collection to the
-// time remaining before the cms generation is empty.
-// Note that the time from the start of the cms collection
-// to the start of the cms sweep (less than the total
-// duration of the cms collection) can be used. This
-// has been tried and some applications experienced
-// promotion failures early in execution. This was
-// possibly because the averages were not accurate
-// enough at the beginning.
-double CMSStats::time_until_cms_start() const {
- // We add "gc0_period" to the "work" calculation
- // below because this query is done (mostly) at the
- // end of a scavenge, so we need to conservatively
- // account for that much possible delay
- // in the query so as to avoid concurrent mode failures
- // due to starting the collection just a wee bit too
- // late.
- double work = cms_duration() + gc0_period();
- double deadline = time_until_cms_gen_full();
- // If a concurrent mode failure occurred recently, we want to be
- // more conservative and halve our expected time_until_cms_gen_full()
- if (work > deadline) {
- log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
- cms_duration(), gc0_period(), time_until_cms_gen_full());
- return 0.0;
- }
-  return deadline - work;
-}
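An illustrative calculation with made-up numbers, showing how the two estimates interact:

// With cms_duration() = 4.0 s, gc0_period() = 1.0 s and
// time_until_cms_gen_full() = 12.5 s: work = 5.0 s, deadline = 12.5 s,
// so roughly 7.5 s remain before a background cycle has to start.
// Once deadline shrinks to 5.0 s or less (work >= deadline), the
// function returns 0.0 and shouldConcurrentCollect() starts a cycle.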
-
-#ifndef PRODUCT
-void CMSStats::print_on(outputStream *st) const {
- st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
- st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
- gc0_duration(), gc0_period(), gc0_promoted());
- st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
- cms_duration(), cms_period(), cms_allocated());
- st->print(",cms_since_beg=%g,cms_since_end=%g",
- cms_time_since_begin(), cms_time_since_end());
- st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
- _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-
- if (valid()) {
- st->print(",promo_rate=%g,cms_alloc_rate=%g",
- promotion_rate(), cms_allocation_rate());
- st->print(",cms_consumption_rate=%g,time_until_full=%g",
- cms_consumption_rate(), time_until_cms_gen_full());
- }
- st->cr();
-}
-#endif // #ifndef PRODUCT
-
-CMSCollector::CollectorState CMSCollector::_collectorState =
- CMSCollector::Idling;
-bool CMSCollector::_foregroundGCIsActive = false;
-bool CMSCollector::_foregroundGCShouldWait = false;
-
-CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
- CardTableRS* ct):
- _overflow_list(NULL),
- _conc_workers(NULL), // may be set later
- _completed_initialization(false),
- _collection_count_start(0),
- _should_unload_classes(CMSClassUnloadingEnabled),
- _concurrent_cycles_since_last_unload(0),
- _roots_scanning_options(GenCollectedHeap::SO_None),
- _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
- _verifying(false),
- _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
- _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
- _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
- _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
- _cms_start_registered(false),
- _cmsGen(cmsGen),
- // Adjust span to cover old (cms) gen
- _span(cmsGen->reserved()),
- _ct(ct),
- _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
- _modUnionTable((CardTable::card_shift - LogHeapWordSize),
- -1 /* lock-free */, "No_lock" /* dummy */),
- _restart_addr(NULL),
- _ser_pmc_preclean_ovflw(0),
- _ser_pmc_remark_ovflw(0),
- _par_pmc_remark_ovflw(0),
- _ser_kac_preclean_ovflw(0),
- _ser_kac_ovflw(0),
- _par_kac_ovflw(0),
-#ifndef PRODUCT
- _num_par_pushes(0),
-#endif
- _span_based_discoverer(_span),
- _ref_processor(NULL), // will be set later
- // Construct the is_alive_closure with _span & markBitMap
- _is_alive_closure(_span, &_markBitMap),
- _modUnionClosurePar(&_modUnionTable),
- _between_prologue_and_epilogue(false),
- _abort_preclean(false),
- _start_sampling(false),
- _stats(cmsGen),
- _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
- //verify that this lock should be acquired with safepoint check.
- Monitor::_safepoint_check_never)),
- _eden_chunk_array(NULL), // may be set in ctor body
- _eden_chunk_index(0), // -- ditto --
- _eden_chunk_capacity(0), // -- ditto --
- _survivor_chunk_array(NULL), // -- ditto --
- _survivor_chunk_index(0), // -- ditto --
- _survivor_chunk_capacity(0), // -- ditto --
- _survivor_plab_array(NULL) // -- ditto --
-{
- // Now expand the span and allocate the collection support structures
- // (MUT, marking bit map etc.) to cover both generations subject to
- // collection.
-
- // For use by dirty card to oop closures.
- _cmsGen->cmsSpace()->set_collector(this);
-
- // Allocate MUT and marking bit map
- {
- MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
- if (!_markBitMap.allocate(_span)) {
- log_warning(gc)("Failed to allocate CMS Bit Map");
- return;
- }
- assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
- }
- {
- _modUnionTable.allocate(_span);
- assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
- }
-
- if (!_markStack.allocate(MarkStackSize)) {
- log_warning(gc)("Failed to allocate CMS Marking Stack");
- return;
- }
-
- // Support for multi-threaded concurrent phases
- if (CMSConcurrentMTEnabled) {
- if (FLAG_IS_DEFAULT(ConcGCThreads)) {
- // just for now
- FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
- }
- if (ConcGCThreads > 1) {
- _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
- ConcGCThreads, true);
- if (_conc_workers == NULL) {
- log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
- CMSConcurrentMTEnabled = false;
- } else {
- _conc_workers->initialize_workers();
- }
- } else {
- CMSConcurrentMTEnabled = false;
- }
- }
- if (!CMSConcurrentMTEnabled) {
- ConcGCThreads = 0;
- } else {
- // Turn off CMSCleanOnEnter optimization temporarily for
- // the MT case where it's not fixed yet; see 6178663.
- CMSCleanOnEnter = false;
- }
- assert((_conc_workers != NULL) == (ConcGCThreads > 1),
- "Inconsistency");
- log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
- log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
-
- // Parallel task queues; these are shared for the
- // concurrent and stop-world phases of CMS, but
- // are not shared with parallel scavenge (ParNew).
- {
- uint i;
- uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
-
- if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
- || ParallelRefProcEnabled)
- && num_queues > 0) {
- _task_queues = new OopTaskQueueSet(num_queues);
- if (_task_queues == NULL) {
- log_warning(gc)("task_queues allocation failure.");
- return;
- }
- typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
- for (i = 0; i < num_queues; i++) {
- PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
- if (q == NULL) {
- log_warning(gc)("work_queue allocation failure.");
- return;
- }
- _task_queues->register_queue(i, q);
- }
- for (i = 0; i < num_queues; i++) {
- _task_queues->queue(i)->initialize();
- }
- }
- }
-
- _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
-
- // Clip CMSBootstrapOccupancy between 0 and 100.
- _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
-
- // Now tell CMS generations the identity of their collector
- ConcurrentMarkSweepGeneration::set_collector(this);
-
- // Create & start a CMS thread for this CMS collector
- _cmsThread = ConcurrentMarkSweepThread::start(this);
- assert(cmsThread() != NULL, "CMS Thread should have been created");
- assert(cmsThread()->collector() == this,
- "CMS Thread should refer to this gen");
- assert(CGC_lock != NULL, "Where's the CGC_lock?");
-
- // Support for parallelizing young gen rescan
- CMSHeap* heap = CMSHeap::heap();
- _young_gen = heap->young_gen();
- if (heap->supports_inline_contig_alloc()) {
- _top_addr = heap->top_addr();
- _end_addr = heap->end_addr();
- assert(_young_gen != NULL, "no _young_gen");
- _eden_chunk_index = 0;
- _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
- _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
- }
-
- // Support for parallelizing survivor space rescan
- if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
- const size_t max_plab_samples =
- _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
-
- _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
- _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
- _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
- _survivor_chunk_capacity = max_plab_samples;
- for (uint i = 0; i < ParallelGCThreads; i++) {
- HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
- ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
- assert(cur->end() == 0, "Should be 0");
- assert(cur->array() == vec, "Should be vec");
- assert(cur->capacity() == max_plab_samples, "Error");
- }
- }
-
- NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
- _gc_counters = new CollectorCounters("CMS full collection pauses", 1);
- _cgc_counters = new CollectorCounters("CMS concurrent cycle pauses", 2);
- _completed_initialization = true;
- _inter_sweep_timer.start(); // start of time
-}
-
-const char* ConcurrentMarkSweepGeneration::name() const {
- return "concurrent mark-sweep generation";
-}
-void ConcurrentMarkSweepGeneration::update_counters() {
- if (UsePerfData) {
- _space_counters->update_all();
- _gen_counters->update_all();
- }
-}
-
-// this is an optimized version of update_counters(). it takes the
-// used value as a parameter rather than computing it.
-//
-void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
- if (UsePerfData) {
- _space_counters->update_used(used);
- _space_counters->update_capacity();
- _gen_counters->update_all();
- }
-}
-
-void ConcurrentMarkSweepGeneration::print() const {
- Generation::print();
- cmsSpace()->print();
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepGeneration::print_statistics() {
- cmsSpace()->printFLCensus(0);
-}
-#endif
-
-size_t
-ConcurrentMarkSweepGeneration::contiguous_available() const {
- // dld proposes an improvement in precision here. If the committed
- // part of the space ends in a free block we should add that to
- // uncommitted size in the calculation below. Will make this
- // change later, staying with the approximation below for the
- // time being. -- ysr.
- return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
-}
-
-size_t
-ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
- return _cmsSpace->max_alloc_in_words() * HeapWordSize;
-}
-
-size_t ConcurrentMarkSweepGeneration::used_stable() const {
- return cmsSpace()->used_stable();
-}
-
-size_t ConcurrentMarkSweepGeneration::max_available() const {
- return free() + _virtual_space.uncommitted_size();
-}
-
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
- size_t available = max_available();
- size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
- bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
- log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
- res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
- return res;
-}
-
-// At a promotion failure dump information on block layout in heap
-// (cms old generation).
-void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
- Log(gc, promotion) log;
- if (log.is_trace()) {
- LogStream ls(log.trace());
- cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
- }
-}
-
-void ConcurrentMarkSweepGeneration::reset_after_compaction() {
- // Clear the promotion information. These pointers can be adjusted
- // along with all the other pointers into the heap but
- // compaction is expected to be a rare event with
- // a heap using cms so don't do it without seeing the need.
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _par_gc_thread_states[i]->promo.reset();
- }
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size() {
- assert_locked_or_safepoint(Heap_lock);
-
- // If incremental collection failed, we just want to expand
- // to the limit.
- if (incremental_collection_failed()) {
- clear_incremental_collection_failed();
- grow_to_reserved();
- return;
- }
-
- // The heap has been compacted but not reset yet.
- // Any metric such as free() or used() will be incorrect.
-
- CardGeneration::compute_new_size();
-
- // Reset again after a possible resizing
- if (did_compact()) {
- cmsSpace()->reset_after_compaction();
- }
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
- assert_locked_or_safepoint(Heap_lock);
-
- // If incremental collection failed, we just want to expand
- // to the limit.
- if (incremental_collection_failed()) {
- clear_incremental_collection_failed();
- grow_to_reserved();
- return;
- }
-
- double free_percentage = ((double) free()) / capacity();
- double desired_free_percentage = (double) MinHeapFreeRatio / 100;
- double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
-
- // compute expansion delta needed for reaching desired free percentage
- if (free_percentage < desired_free_percentage) {
- size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
- assert(desired_capacity >= capacity(), "invalid expansion size");
- size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
- Log(gc) log;
- if (log.is_trace()) {
- size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
- log.trace("From compute_new_size: ");
- log.trace(" Free fraction %f", free_percentage);
- log.trace(" Desired free fraction %f", desired_free_percentage);
- log.trace(" Maximum free fraction %f", maximum_free_percentage);
- log.trace(" Capacity " SIZE_FORMAT, capacity() / 1000);
- log.trace(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
- CMSHeap* heap = CMSHeap::heap();
- size_t young_size = heap->young_gen()->capacity();
- log.trace(" Young gen size " SIZE_FORMAT, young_size / 1000);
- log.trace(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
- log.trace(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
- log.trace(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
- }
- // safe if expansion fails
- expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
- log.trace(" Expanded free fraction %f", ((double) free()) / capacity());
- } else {
- size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
- assert(desired_capacity <= capacity(), "invalid expansion size");
- size_t shrink_bytes = capacity() - desired_capacity;
- // Don't shrink unless the delta is greater than the minimum shrink we want
- if (shrink_bytes >= MinHeapDeltaBytes) {
- shrink_free_list_by(shrink_bytes);
- }
- }
-}
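A worked instance of the expansion branch above, with illustrative sizes:

// With MinHeapFreeRatio = 40, used() = 600 MB and capacity() = 800 MB,
// free_percentage = 0.25 < 0.40, so
//   desired_capacity = 600 MB / (1 - 0.40) = 1000 MB
// and the generation expands by MAX2(1000 MB - 800 MB, MinHeapDeltaBytes),
// i.e. 200 MB for any reasonable MinHeapDeltaBytes.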
-
-Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
- return cmsSpace()->freelistLock();
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
- CMSSynchronousYieldRequest yr;
- MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
- return have_lock_and_allocate(size, tlab);
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
- bool tlab /* ignored */) {
- assert_lock_strong(freelistLock());
- size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
- HeapWord* res = cmsSpace()->allocate(adjustedSize);
- // Allocate the object live (grey) if the background collector has
- // started marking. This is necessary because the marker may
- // have passed this address and consequently this object will
- // not otherwise be greyed and would be incorrectly swept up.
- // Note that if this object contains references, the writing
- // of those references will dirty the card containing this object
- // allowing the object to be blackened (and its references scanned)
- // either during a preclean phase or at the final checkpoint.
- if (res != NULL) {
- // We may block here with an uninitialized object with
- // its mark-bit or P-bits not yet set. Such objects need
- // to be safely navigable by block_start().
- assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
- assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
- collector()->direct_allocated(res, adjustedSize);
- _direct_allocated_words += adjustedSize;
- // allocation counters
- NOT_PRODUCT(
- _numObjectsAllocated++;
- _numWordsAllocated += (int)adjustedSize;
- )
- }
- return res;
-}
-
-// In the case of direct allocation by mutators in a generation that
-// is being concurrently collected, the object must be allocated
-// live (grey) if the background collector has started marking.
-// This is necessary because the marker may
-// have passed this address and consequently this object will
-// not otherwise be greyed and would be incorrectly swept up.
-// Note that if this object contains references, the writing
-// of those references will dirty the card containing this object
-// allowing the object to be blackened (and its references scanned)
-// either during a preclean phase or at the final checkpoint.
-void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
- assert(_markBitMap.covers(start, size), "Out of bounds");
- if (_collectorState >= Marking) {
- MutexLocker y(_markBitMap.lock(),
- Mutex::_no_safepoint_check_flag);
- // [see comments preceding SweepClosure::do_blk() below for details]
- //
- // Can the P-bits be deleted now? JJJ
- //
- // 1. need to mark the object as live so it isn't collected
- // 2. need to mark the 2nd bit to indicate the object may be uninitialized
- // 3. need to mark the end of the object so marking, precleaning or sweeping
- // can skip over uninitialized or unparsable objects. An allocated
- // object is considered uninitialized for our purposes as long as
- // its klass word is NULL. All old gen objects are parsable
- // as soon as they are initialized.)
- _markBitMap.mark(start); // object is live
- _markBitMap.mark(start + 1); // object is potentially uninitialized?
- _markBitMap.mark(start + size - 1);
- // mark end of object
- }
- // check that oop looks uninitialized
- assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
-}
-
-void CMSCollector::promoted(bool par, HeapWord* start,
- bool is_obj_array, size_t obj_size) {
- assert(_markBitMap.covers(start), "Out of bounds");
- // See comment in direct_allocated() about when objects should
- // be allocated live.
- if (_collectorState >= Marking) {
- // we already hold the marking bit map lock, taken in
- // the prologue
- if (par) {
- _markBitMap.par_mark(start);
- } else {
- _markBitMap.mark(start);
- }
- // We don't need to mark the object as uninitialized (as
- // in direct_allocated above) because this is being done with the
- // world stopped and the object will be initialized by the
- // time the marking, precleaning or sweeping get to look at it.
- // But see the code for copying objects into the CMS generation,
- // where we need to ensure that concurrent readers of the
- // block offset table are able to safely navigate a block that
- // is in flux from being free to being allocated (and in
- // transition while being copied into) and subsequently
- // becoming a bona-fide object when the copy/promotion is complete.
- assert(SafepointSynchronize::is_at_safepoint(),
- "expect promotion only at safepoints");
-
- if (_collectorState < Sweeping) {
- // Mark the appropriate cards in the modUnionTable, so that
- // this object gets scanned before the sweep. If this is
- // not done, CMS generation references in the object might
- // not get marked.
- // For the case of arrays, which are otherwise precisely
- // marked, we need to dirty the entire array, not just its head.
- if (is_obj_array) {
- // The [par_]mark_range() method expects mr.end() below to
- // be aligned to the granularity of a bit's representation
- // in the heap. In the case of the MUT below, that's a
- // card size.
- MemRegion mr(start,
- align_up(start + obj_size,
- CardTable::card_size /* bytes */));
- if (par) {
- _modUnionTable.par_mark_range(mr);
- } else {
- _modUnionTable.mark_range(mr);
- }
- } else { // not an obj array; we can just mark the head
- if (par) {
- _modUnionTable.par_mark(start);
- } else {
- _modUnionTable.mark(start);
- }
- }
- }
- }
-}
-
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
- assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
- // allocate, copy and if necessary update promoinfo --
- // delegate to underlying space.
- assert_lock_strong(freelistLock());
-
-#ifndef PRODUCT
- if (CMSHeap::heap()->promotion_should_fail()) {
- return NULL;
- }
-#endif // #ifndef PRODUCT
-
- oop res = _cmsSpace->promote(obj, obj_size);
- if (res == NULL) {
- // expand and retry
- size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
- expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
- // Since this is the old generation, we don't try to promote
- // into a more senior generation.
- res = _cmsSpace->promote(obj, obj_size);
- }
- if (res != NULL) {
- // See comment in allocate() about when objects should
- // be allocated live.
- assert(oopDesc::is_oop(obj), "Will dereference klass pointer below");
- collector()->promoted(false, // Not parallel
- (HeapWord*)res, obj->is_objArray(), obj_size);
- // promotion counters
- NOT_PRODUCT(
- _numObjectsPromoted++;
- _numWordsPromoted +=
- (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
- )
- }
- return res;
-}
-
-
-// IMPORTANT: Notes on object size recognition in CMS.
-// ---------------------------------------------------
-// A block of storage in the CMS generation is always in
-// one of three states. A free block (FREE), an allocated
-// object (OBJECT) whose size() method reports the correct size,
-// and an intermediate state (TRANSIENT) in which its size cannot
-// be accurately determined.
-// STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
-// -----------------------------------------------------
-// FREE: klass_word & 1 == 1; mark_word holds block size
-//
-// OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
-// obj->size() computes correct size
-//
-// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
-//
-// STATE IDENTIFICATION: (64 bit+COOPS)
-// ------------------------------------
-// FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
-//
-// OBJECT: klass_word installed; klass_word != 0;
-// obj->size() computes correct size
-//
-// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
-//
-//
-// STATE TRANSITION DIAGRAM
-//
-// mut / parnew mut / parnew
-// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
-// ^ |
-// |------------------------ DEAD <------------------------------------|
-// sweep mut
-//
-// While a block is in TRANSIENT state its size cannot be determined
-// so readers will either need to come back later or stall until
-// the size can be determined. Note that for the case of direct
-// allocation, P-bits, when available, may be used to determine the
-// size of an object that may not yet have been initialized.
-
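A sketch of the state test described above for the 32-bit / 64-bit-without-compressed-oops encoding; the enum and helper names are hypothetical, only the bit tests come from the comment:

enum CmsBlockState { cms_block_free, cms_block_object, cms_block_transient };

// Classify a block from its raw klass word, per the encoding above.
static CmsBlockState classify_block(intptr_t klass_word) {
  if (klass_word == 0) {
    return cms_block_transient; // size indeterminate until the klass word is installed
  } else if ((klass_word & 1) != 0) {
    return cms_block_free;      // a FreeChunk; its mark word holds the block size
  } else {
    return cms_block_object;    // fully initialized; obj->size() is reliable
  }
}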
-// Things to support parallel young-gen collection.
-oop
-ConcurrentMarkSweepGeneration::par_promote(int thread_num,
- oop old, markWord m,
- size_t word_sz) {
-#ifndef PRODUCT
- if (CMSHeap::heap()->promotion_should_fail()) {
- return NULL;
- }
-#endif // #ifndef PRODUCT
-
- CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
- PromotionInfo* promoInfo = &ps->promo;
- // if we are tracking promotions, then first ensure space for
- // promotion (including spooling space for saving header if necessary).
- // then allocate and copy, then track promoted info if needed.
- // When tracking (see PromotionInfo::track()), the mark word may
- // be displaced and in this case restoration of the mark word
- // occurs in the (oop_since_save_marks_)iterate phase.
- if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
- // Out of space for allocating spooling buffers;
- // try expanding and allocating spooling buffers.
- if (!expand_and_ensure_spooling_space(promoInfo)) {
- return NULL;
- }
- }
- assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
- const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
- HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
- if (obj_ptr == NULL) {
- obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
- if (obj_ptr == NULL) {
- return NULL;
- }
- }
- oop obj = oop(obj_ptr);
- OrderAccess::storestore();
- assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
- assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
- // IMPORTANT: See note on object initialization for CMS above.
- // Otherwise, copy the object. Here we must be careful to insert the
- // klass pointer last, since this marks the block as an allocated object.
- // Except with compressed oops it's the mark word.
- HeapWord* old_ptr = (HeapWord*)old;
- // Restore the mark word copied above.
- obj->set_mark_raw(m);
- assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
- assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
- OrderAccess::storestore();
-
- if (UseCompressedClassPointers) {
- // Copy gap missed by (aligned) header size calculation below
- obj->set_klass_gap(old->klass_gap());
- }
- if (word_sz > (size_t)oopDesc::header_size()) {
- Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
- obj_ptr + oopDesc::header_size(),
- word_sz - oopDesc::header_size());
- }
-
- // Now we can track the promoted object, if necessary. We take care
- // to delay the transition from uninitialized to full object
- // (i.e., insertion of klass pointer) until after, so that it
- // atomically becomes a promoted object.
- if (promoInfo->tracking()) {
- promoInfo->track((PromotedObject*)obj, old->klass());
- }
- assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
- assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
- assert(oopDesc::is_oop(old), "Will use and dereference old klass ptr below");
-
- // Finally, install the klass pointer (this should be volatile).
- OrderAccess::storestore();
- obj->set_klass(old->klass());
- // We should now be able to calculate the right size for this object
- assert(oopDesc::is_oop(obj) && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
-
- collector()->promoted(true, // parallel
- obj_ptr, old->is_objArray(), word_sz);
-
- NOT_PRODUCT(
- Atomic::inc(&_numObjectsPromoted);
- Atomic::add(alloc_sz, &_numWordsPromoted);
- )
-
- return obj;
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_promote_alloc_done(int thread_num) {
- CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
- ps->lab.retire(thread_num);
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_oop_since_save_marks_iterate_done(int thread_num) {
- CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
- ParScanWithoutBarrierClosure* dummy_cl = NULL;
- ps->promo.promoted_oops_iterate(dummy_cl);
-
- // Because card-scanning has been completed, subsequent phases
- // (e.g., reference processing) will not need to recognize which
- // objects have been promoted during this GC. So, we can now disable
- // promotion tracking.
- ps->promo.stopTrackingPromotions();
-}
-
-bool ConcurrentMarkSweepGeneration::should_collect(bool full,
- size_t size,
- bool tlab)
-{
- // We allow a STW collection only if a full
- // collection was requested.
- return full || should_allocate(size, tlab); // FIX ME !!!
- // This and promotion failure handling are connected at the
- // hip and should be fixed by untying them.
-}
-
-bool CMSCollector::shouldConcurrentCollect() {
- LogTarget(Trace, gc) log;
-
- if (_full_gc_requested) {
- log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
- return true;
- }
-
- FreelistLocker x(this);
- // ------------------------------------------------------------------
- // Print out lots of information which affects the initiation of
- // a collection.
- if (log.is_enabled() && stats().valid()) {
- log.print("CMSCollector shouldConcurrentCollect: ");
-
- LogStream out(log);
- stats().print_on(&out);
-
- log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
- log.print("free=" SIZE_FORMAT, _cmsGen->free());
- log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
- log.print("promotion_rate=%g", stats().promotion_rate());
- log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
- log.print("occupancy=%3.7f", _cmsGen->occupancy());
- log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
- log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
- log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
- log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
- }
- // ------------------------------------------------------------------
-
- // If the estimated time to complete a cms collection (cms_duration())
- // is less than the estimated time remaining until the cms generation
- // is full, start a collection.
- if (!UseCMSInitiatingOccupancyOnly) {
- if (stats().valid()) {
- if (stats().time_until_cms_start() == 0.0) {
- return true;
- }
- } else {
- // We want to conservatively collect somewhat early in order
- // to try and "bootstrap" our CMS/promotion statistics;
- // this branch will not fire after the first successful CMS
- // collection because the stats should then be valid.
- if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
- log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
- _cmsGen->occupancy(), _bootstrap_occupancy);
- return true;
- }
- }
- }
-
- // Otherwise, we start a collection cycle if
-  // the old gen wants a collection cycle started. It may use
- // an appropriate criterion for making this decision.
- // XXX We need to make sure that the gen expansion
- // criterion dovetails well with this. XXX NEED TO FIX THIS
- if (_cmsGen->should_concurrent_collect()) {
- log.print("CMS old gen initiated");
- return true;
- }
-
- // We start a collection if we believe an incremental collection may fail;
- // this is not likely to be productive in practice because it's probably too
- // late anyway.
- CMSHeap* heap = CMSHeap::heap();
- if (heap->incremental_collection_will_fail(true /* consult_young */)) {
- log.print("CMSCollector: collect because incremental collection will fail ");
- return true;
- }
-
- if (MetaspaceGC::should_concurrent_collect()) {
- log.print("CMSCollector: collect for metadata allocation ");
- return true;
- }
-
- // CMSTriggerInterval starts a CMS cycle if enough time has passed.
- if (CMSTriggerInterval >= 0) {
- if (CMSTriggerInterval == 0) {
- // Trigger always
- return true;
- }
-
- // Check the CMS time since begin (we do not check the stats validity
- // as we want to be able to trigger the first CMS cycle as well)
- if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
- if (stats().valid()) {
- log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
- stats().cms_time_since_begin());
- } else {
- log.print("CMSCollector: collect because of trigger interval (first collection)");
- }
- return true;
- }
- }
-
- return false;
-}
-
-void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
-
-// Clear _expansion_cause fields of constituent generations
-void CMSCollector::clear_expansion_cause() {
- _cmsGen->clear_expansion_cause();
-}
-
-// We should be conservative in starting a collection cycle. To
-// start too eagerly runs the risk of collecting too often in the
-// extreme. To collect too rarely falls back on full collections,
-// which works, even if not optimum in terms of concurrent work.
-// As a work around for too eagerly collecting, use the flag
-// UseCMSInitiatingOccupancyOnly. This also has the advantage of
-// giving the user an easily understandable way of controlling the
-// collections.
-// We want to start a new collection cycle if any of the following
-// conditions hold:
-// . our current occupancy exceeds the configured initiating occupancy
-// for this generation, or
-// . we recently needed to expand this space and have not, since that
-// expansion, done a collection of this generation, or
-// . the underlying space believes that it may be a good idea to initiate
-// a concurrent collection (this may be based on criteria such as the
-// following: the space uses linear allocation and linear allocation is
-// going to fail, or there is believed to be excessive fragmentation in
-// the generation, etc... or ...
-// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
-// the case of the old generation; see CR 6543076):
-// we may be approaching a point at which allocation requests may fail because
-// we will be out of sufficient free space given allocation rate estimates.]
-bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
-
- assert_lock_strong(freelistLock());
- if (occupancy() > initiating_occupancy()) {
- log_trace(gc)(" %s: collect because of occupancy %f / %f ",
- short_name(), occupancy(), initiating_occupancy());
- return true;
- }
- if (UseCMSInitiatingOccupancyOnly) {
- return false;
- }
- if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
- log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
- return true;
- }
- return false;
-}
-
-void ConcurrentMarkSweepGeneration::collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool tlab)
-{
- collector()->collect(full, clear_all_soft_refs, size, tlab);
-}
-
-void CMSCollector::collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool tlab)
-{
- // The following "if" branch is present for defensive reasons.
- // In the current uses of this interface, it can be replaced with:
- // assert(!GCLocker.is_active(), "Can't be called otherwise");
- // But I am not placing that assert here to allow future
- // generality in invoking this interface.
- if (GCLocker::is_active()) {
- // A consistency test for GCLocker
- assert(GCLocker::needs_gc(), "Should have been set already");
- // Skip this foreground collection, instead
- // expanding the heap if necessary.
- // Need the free list locks for the call to free() in compute_new_size()
- compute_new_size();
- return;
- }
- acquire_control_and_collect(full, clear_all_soft_refs);
-}
-
-void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
- CMSHeap* heap = CMSHeap::heap();
- unsigned int gc_count = heap->total_full_collections();
- if (gc_count == full_gc_count) {
- MutexLocker y(CGC_lock, Mutex::_no_safepoint_check_flag);
- _full_gc_requested = true;
- _full_gc_cause = cause;
- CGC_lock->notify(); // nudge CMS thread
- } else {
- assert(gc_count > full_gc_count, "Error: causal loop");
- }
-}
-
-bool CMSCollector::is_external_interruption() {
- GCCause::Cause cause = CMSHeap::heap()->gc_cause();
- return GCCause::is_user_requested_gc(cause) ||
- GCCause::is_serviceability_requested_gc(cause);
-}
-
-void CMSCollector::report_concurrent_mode_interruption() {
- if (is_external_interruption()) {
- log_debug(gc)("Concurrent mode interrupted");
- } else {
- log_debug(gc)("Concurrent mode failure");
- _gc_tracer_cm->report_concurrent_mode_failure();
- }
-}
-
-
-// The foreground and background collectors need to coordinate in order
-// to make sure that they do not mutually interfere with CMS collections.
-// When a background collection is active,
-// the foreground collector may need to take over (preempt) and
-// synchronously complete an ongoing collection. Depending on the
-// frequency of the background collections and the heap usage
-// of the application, this preemption can be seldom or frequent.
-// There are only certain
-// points in the background collection that the "collection-baton"
-// can be passed to the foreground collector.
-//
-// The foreground collector will wait for the baton before
-// starting any part of the collection. The foreground collector
-// will only wait at one location.
-//
-// The background collector will yield the baton before starting a new
-// phase of the collection (e.g., before initial marking, marking from roots,
-// precleaning, final re-mark, sweep etc.) This is normally done at the head
-// of the loop which switches the phases. The background collector does some
-// of the phases (initial mark, final re-mark) with the world stopped.
-// Because of locking involved in stopping the world,
-// the foreground collector should not block waiting for the background
-// collector when it is doing a stop-the-world phase. The background
-// collector will yield the baton at an additional point just before
-// it enters a stop-the-world phase. Once the world is stopped, the
-// background collector checks the phase of the collection. If the
-// phase has not changed, it proceeds with the collection. If the
-// phase has changed, it skips that phase of the collection. See
-// the comments on the use of the Heap_lock in collect_in_background().
-//
-// Variable used in baton passing.
-// _foregroundGCIsActive - Set to true by the foreground collector when
-// it wants the baton. The foreground clears it when it has finished
-// the collection.
-// _foregroundGCShouldWait - Set to true by the background collector
-// when it is running. The foreground collector waits while
-// _foregroundGCShouldWait is true.
-// CGC_lock - monitor used to protect access to the above variables
-// and to notify the foreground and background collectors.
-// _collectorState - current state of the CMS collection.
-//
-// The foreground collector
-// acquires the CGC_lock
-// sets _foregroundGCIsActive
-// waits on the CGC_lock for _foregroundGCShouldWait to be false
-// various locks acquired in preparation for the collection
-// are released so as not to block the background collector
-// that is in the midst of a collection
-// proceeds with the collection
-// clears _foregroundGCIsActive
-// returns
-//
-// The background collector in a loop iterating on the phases of the
-// collection
-// acquires the CGC_lock
-// sets _foregroundGCShouldWait
-// if _foregroundGCIsActive is set
-// clears _foregroundGCShouldWait, notifies _CGC_lock
-// waits on _CGC_lock for _foregroundGCIsActive to become false
-// and exits the loop.
-// otherwise
-// proceed with that phase of the collection
-// if the phase is a stop-the-world phase,
-// yield the baton once more just before enqueueing
-// the stop-world CMS operation (executed by the VM thread).
-// returns after all phases of the collection are done
-//
-
-void CMSCollector::acquire_control_and_collect(bool full,
- bool clear_all_soft_refs) {
- assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
- assert(!Thread::current()->is_ConcurrentGC_thread(),
- "shouldn't try to acquire control from self!");
-
- // Start the protocol for acquiring control of the
- // collection from the background collector (aka CMS thread).
- assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
- "VM thread should have CMS token");
- // Remember the possibly interrupted state of an ongoing
- // concurrent collection
- CollectorState first_state = _collectorState;
-
- // Signal to a possibly ongoing concurrent collection that
- // we want to do a foreground collection.
- _foregroundGCIsActive = true;
-
- // release locks and wait for a notify from the background collector
-  // releasing the locks is only necessary for phases which
- // do yields to improve the granularity of the collection.
- assert_lock_strong(bitMapLock());
- // We need to lock the Free list lock for the space that we are
- // currently collecting.
- assert(haveFreelistLocks(), "Must be holding free list locks");
- bitMapLock()->unlock();
- releaseFreelistLocks();
- {
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- if (_foregroundGCShouldWait) {
- // We are going to be waiting for action for the CMS thread;
- // it had better not be gone (for instance at shutdown)!
- assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
- "CMS thread must be running");
- // Wait here until the background collector gives us the go-ahead
- ConcurrentMarkSweepThread::clear_CMS_flag(
- ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
- // Get a possibly blocked CMS thread going:
- // Note that we set _foregroundGCIsActive true above,
- // without protection of the CGC_lock.
- CGC_lock->notify();
- assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
- "Possible deadlock");
- while (_foregroundGCShouldWait) {
- // wait for notification
- CGC_lock->wait_without_safepoint_check();
- // Possibility of delay/starvation here, since CMS token does
-      // not know to give priority to VM thread? Actually, I think
- // there wouldn't be any delay/starvation, but the proof of
- // that "fact" (?) appears non-trivial. XXX 20011219YSR
- }
- ConcurrentMarkSweepThread::set_CMS_flag(
- ConcurrentMarkSweepThread::CMS_vm_has_token);
- }
- }
- // The CMS_token is already held. Get back the other locks.
- assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
- "VM thread should have CMS token");
- getFreelistLocks();
- bitMapLock()->lock_without_safepoint_check();
- log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
- p2i(Thread::current()), first_state);
- log_debug(gc, state)(" gets control with state %d", _collectorState);
-
- // Inform cms gen if this was due to partial collection failing.
- // The CMS gen may use this fact to determine its expansion policy.
- CMSHeap* heap = CMSHeap::heap();
- if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
- assert(!_cmsGen->incremental_collection_failed(),
- "Should have been noticed, reacted to and cleared");
- _cmsGen->set_incremental_collection_failed();
- }
-
- if (first_state > Idling) {
- report_concurrent_mode_interruption();
- }
-
- set_did_compact(true);
-
- // If the collection is being acquired from the background
- // collector, there may be references on the discovered
- // references lists. Abandon those references, since some
- // of them may have become unreachable after concurrent
- // discovery; the STW compacting collector will redo discovery
- // more precisely, without being subject to floating garbage.
- // Leaving otherwise unreachable references in the discovered
- // lists would require special handling.
- ref_processor()->disable_discovery();
- ref_processor()->abandon_partial_discovery();
- ref_processor()->verify_no_references_recorded();
-
- if (first_state > Idling) {
- save_heap_summary();
- }
-
- do_compaction_work(clear_all_soft_refs);
-
- // Has the GC time limit been exceeded?
- size_t max_eden_size = _young_gen->max_eden_size();
- GCCause::Cause gc_cause = heap->gc_cause();
- size_policy()->check_gc_overhead_limit(_young_gen->eden()->used(),
- _cmsGen->max_capacity(),
- max_eden_size,
- full,
- gc_cause,
- heap->soft_ref_policy());
-
- // Reset the expansion cause, now that we just completed
- // a collection cycle.
- clear_expansion_cause();
- _foregroundGCIsActive = false;
- return;
-}
-
-// Resize the tenured generation
-// after obtaining the free list locks for the
-// two generations.
-void CMSCollector::compute_new_size() {
- assert_locked_or_safepoint(Heap_lock);
- FreelistLocker z(this);
- MetaspaceGC::compute_new_size();
- _cmsGen->compute_new_size_free_list();
- // recalculate CMS used space after CMS collection
- _cmsGen->cmsSpace()->recalculate_used_stable();
-}
-
-// A work method used by the foreground collector to do
-// a mark-sweep-compact.
-void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
- CMSHeap* heap = CMSHeap::heap();
-
- STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
- gc_timer->register_gc_start();
-
- SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
- gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
-
- heap->pre_full_gc_dump(gc_timer);
-
- GCTraceTime(Trace, gc, phases) t("CMS:MSC");
-
- // Temporarily widen the span of the weak reference processing to
- // the entire heap.
- MemRegion new_span(CMSHeap::heap()->reserved_region());
- ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
- // Temporarily, clear the "is_alive_non_header" field of the
- // reference processor.
- ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
- // Temporarily make reference _processing_ single threaded (non-MT).
- ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
- // Temporarily make refs discovery atomic
- ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
- // Temporarily make reference _discovery_ single threaded (non-MT)
- ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
-
- ref_processor()->set_enqueuing_is_done(false);
- ref_processor()->enable_discovery();
- ref_processor()->setup_policy(clear_all_soft_refs);
- // If an asynchronous collection finishes, the _modUnionTable is
- // all clear. If we are assuming the collection from an asynchronous
- // collection, clear the _modUnionTable.
- assert(_collectorState != Idling || _modUnionTable.isAllClear(),
- "_modUnionTable should be clear if the baton was not passed");
- _modUnionTable.clear_all();
- assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
-         "mod union for klasses should be clear if the baton was not passed");
- _ct->cld_rem_set()->clear_mod_union();
-
-
- // We must adjust the allocation statistics being maintained
- // in the free list space. We do so by reading and clearing
- // the sweep timer and updating the block flux rate estimates below.
- assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
- if (_inter_sweep_timer.is_active()) {
- _inter_sweep_timer.stop();
- // Note that we do not use this sample to update the _inter_sweep_estimate.
- _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
- _inter_sweep_estimate.padded_average(),
- _intra_sweep_estimate.padded_average());
- }
-
- GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
- #ifdef ASSERT
- CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
- size_t free_size = cms_space->free();
- assert(free_size ==
- pointer_delta(cms_space->end(), cms_space->compaction_top())
- * HeapWordSize,
- "All the free space should be compacted into one chunk at top");
- assert(cms_space->dictionary()->total_chunk_size(
- debug_only(cms_space->freelistLock())) == 0 ||
- cms_space->totalSizeInIndexedFreeLists() == 0,
- "All the free space should be in a single chunk");
- size_t num = cms_space->totalCount();
- assert((free_size == 0 && num == 0) ||
- (free_size > 0 && (num == 1 || num == 2)),
- "There should be at most 2 free chunks after compaction");
- #endif // ASSERT
- _collectorState = Resetting;
- assert(_restart_addr == NULL,
- "Should have been NULL'd before baton was passed");
- reset_stw();
- _cmsGen->reset_after_compaction();
- _concurrent_cycles_since_last_unload = 0;
-
- // Clear any data recorded in the PLAB chunk arrays.
- if (_survivor_plab_array != NULL) {
- reset_survivor_plab_arrays();
- }
-
- // Adjust the per-size allocation stats for the next epoch.
- _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
- // Restart the "inter sweep timer" for the next epoch.
- _inter_sweep_timer.reset();
- _inter_sweep_timer.start();
-
- // No longer a need to do a concurrent collection for Metaspace.
- MetaspaceGC::set_should_concurrent_collect(false);
-
- heap->post_full_gc_dump(gc_timer);
-
- gc_timer->register_gc_end();
-
- gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
-
- // For a mark-sweep-compact, compute_new_size() will be called
- // in the heap's do_collection() method.
-}
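
The ReferenceProcessor*Mutator objects used in do_compaction_work() follow a save-and-restore pattern: the constructor overrides one setting on the reference processor and the destructor restores the saved value when the stack object goes out of scope. A minimal sketch of that pattern, using a hypothetical ScopedOverride template rather than the HotSpot classes:

// Illustrative save-and-restore helper; ScopedOverride is a hypothetical name,
// standing in for the ReferenceProcessor*Mutator stack objects used above.
#include <iostream>

template <typename T>
class ScopedOverride {
  T* _slot;
  T  _saved;
 public:
  ScopedOverride(T* slot, T temporary) : _slot(slot), _saved(*slot) { *slot = temporary; }
  ~ScopedOverride() { *_slot = _saved; }              // restore on scope exit
};

int main() {
  bool mt_processing = true;
  {
    ScopedOverride<bool> single_threaded(&mt_processing, false);  // like rp_mut_mt_processing
    std::cout << "inside scope: " << mt_processing << "\n";       // prints 0
  }
  std::cout << "after scope:  " << mt_processing << "\n";         // prints 1
}
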
-
-void CMSCollector::print_eden_and_survivor_chunk_arrays() {
- Log(gc, heap) log;
- if (!log.is_trace()) {
- return;
- }
-
- ContiguousSpace* eden_space = _young_gen->eden();
- ContiguousSpace* from_space = _young_gen->from();
- ContiguousSpace* to_space = _young_gen->to();
- // Eden
- if (_eden_chunk_array != NULL) {
- log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
- p2i(eden_space->bottom()), p2i(eden_space->top()),
- p2i(eden_space->end()), eden_space->capacity());
- log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
- _eden_chunk_index, _eden_chunk_capacity);
- for (size_t i = 0; i < _eden_chunk_index; i++) {
- log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
- }
- }
- // Survivor
- if (_survivor_chunk_array != NULL) {
- log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
- p2i(from_space->bottom()), p2i(from_space->top()),
- p2i(from_space->end()), from_space->capacity());
- log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
- _survivor_chunk_index, _survivor_chunk_capacity);
- for (size_t i = 0; i < _survivor_chunk_index; i++) {
- log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
- }
- }
-}
-
-void CMSCollector::getFreelistLocks() const {
- // Get locks for all free lists in all generations that this
- // collector is responsible for
- _cmsGen->freelistLock()->lock_without_safepoint_check();
-}
-
-void CMSCollector::releaseFreelistLocks() const {
- // Release locks for all free lists in all generations that this
- // collector is responsible for
- _cmsGen->freelistLock()->unlock();
-}
-
-bool CMSCollector::haveFreelistLocks() const {
- // Check locks for all free lists in all generations that this
- // collector is responsible for
- assert_lock_strong(_cmsGen->freelistLock());
- PRODUCT_ONLY(ShouldNotReachHere());
- return true;
-}
-
-// A utility class that is used by the CMS collector to
-// temporarily "release" the foreground collector from its
-// usual obligation to wait for the background collector to
-// complete an ongoing phase before proceeding.
-class ReleaseForegroundGC: public StackObj {
- private:
- CMSCollector* _c;
- public:
- ReleaseForegroundGC(CMSCollector* c) : _c(c) {
- assert(_c->_foregroundGCShouldWait, "Else should not need to call");
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- // allow a potentially blocked foreground collector to proceed
- _c->_foregroundGCShouldWait = false;
- if (_c->_foregroundGCIsActive) {
- CGC_lock->notify();
- }
- assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Possible deadlock");
- }
-
- ~ReleaseForegroundGC() {
- assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- _c->_foregroundGCShouldWait = true;
- }
-};
-
-void CMSCollector::collect_in_background(GCCause::Cause cause) {
- assert(Thread::current()->is_ConcurrentGC_thread(),
- "A CMS asynchronous collection is only allowed on a CMS thread.");
-
- CMSHeap* heap = CMSHeap::heap();
- {
- MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
- FreelistLocker fll(this);
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- if (_foregroundGCIsActive) {
-      // The foreground collector is active. Skip this
- // background collection.
- assert(!_foregroundGCShouldWait, "Should be clear");
- return;
- } else {
- assert(_collectorState == Idling, "Should be idling before start.");
- _collectorState = InitialMarking;
- register_gc_start(cause);
- // Reset the expansion cause, now that we are about to begin
- // a new cycle.
- clear_expansion_cause();
-
- // Clear the MetaspaceGC flag since a concurrent collection
- // is starting but also clear it after the collection.
- MetaspaceGC::set_should_concurrent_collect(false);
- }
- // Decide if we want to enable class unloading as part of the
- // ensuing concurrent GC cycle.
- update_should_unload_classes();
- _full_gc_requested = false; // acks all outstanding full gc requests
- _full_gc_cause = GCCause::_no_gc;
- // Signal that we are about to start a collection
- heap->increment_total_full_collections(); // ... starting a collection cycle
- _collection_count_start = heap->total_full_collections();
- }
-
- size_t prev_used = _cmsGen->used();
-
- // The change of the collection state is normally done at this level;
- // the exceptions are phases that are executed while the world is
- // stopped. For those phases the change of state is done while the
- // world is stopped. For baton passing purposes this allows the
- // background collector to finish the phase and change state atomically.
- // The foreground collector cannot wait on a phase that is done
- // while the world is stopped because the foreground collector already
- // has the world stopped and would deadlock.
- while (_collectorState != Idling) {
- log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
- p2i(Thread::current()), _collectorState);
- // The foreground collector
- // holds the Heap_lock throughout its collection.
- // holds the CMS token (but not the lock)
- // except while it is waiting for the background collector to yield.
- //
- // The foreground collector should be blocked (not for long)
- // if the background collector is about to start a phase
- // executed with world stopped. If the background
- // collector has already started such a phase, the
- // foreground collector is blocked waiting for the
- // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
- // are executed in the VM thread.
- //
- // The locking order is
- // PendingListLock (PLL) -- if applicable (FinalMarking)
- // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
- // CMS token (claimed in
- // stop_world_and_do() -->
- // safepoint_synchronize() -->
- // CMSThread::synchronize())
-
- {
- // Check if the FG collector wants us to yield.
- CMSTokenSync x(true); // is cms thread
- if (waitForForegroundGC()) {
- // We yielded to a foreground GC, nothing more to be
- // done this round.
- assert(_foregroundGCShouldWait == false, "We set it to false in "
- "waitForForegroundGC()");
- log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
- p2i(Thread::current()), _collectorState);
- return;
- } else {
- // The background collector can run but check to see if the
- // foreground collector has done a collection while the
- // background collector was waiting to get the CGC_lock
- // above. If yes, break so that _foregroundGCShouldWait
- // is cleared before returning.
- if (_collectorState == Idling) {
- break;
- }
- }
- }
-
- assert(_foregroundGCShouldWait, "Foreground collector, if active, "
- "should be waiting");
-
- switch (_collectorState) {
- case InitialMarking:
- {
- ReleaseForegroundGC x(this);
- stats().record_cms_begin();
- VM_CMS_Initial_Mark initial_mark_op(this);
- VMThread::execute(&initial_mark_op);
- }
- // The collector state may be any legal state at this point
- // since the background collector may have yielded to the
- // foreground collector.
- break;
- case Marking:
- // initial marking in checkpointRootsInitialWork has been completed
- if (markFromRoots()) { // we were successful
- assert(_collectorState == Precleaning, "Collector state should "
- "have changed");
- } else {
- assert(_foregroundGCIsActive, "Internal state inconsistency");
- }
- break;
- case Precleaning:
- // marking from roots in markFromRoots has been completed
- preclean();
- assert(_collectorState == AbortablePreclean ||
- _collectorState == FinalMarking,
- "Collector state should have changed");
- break;
- case AbortablePreclean:
- abortable_preclean();
- assert(_collectorState == FinalMarking, "Collector state should "
- "have changed");
- break;
- case FinalMarking:
- {
- ReleaseForegroundGC x(this);
-
- VM_CMS_Final_Remark final_remark_op(this);
- VMThread::execute(&final_remark_op);
- }
- assert(_foregroundGCShouldWait, "block post-condition");
- break;
- case Sweeping:
- // final marking in checkpointRootsFinal has been completed
- sweep();
- assert(_collectorState == Resizing, "Collector state change "
- "to Resizing must be done under the free_list_lock");
-
- case Resizing: {
- // Sweeping has been completed...
- // At this point the background collection has completed.
- // Don't move the call to compute_new_size() down
- // into code that might be executed if the background
- // collection was preempted.
- {
- ReleaseForegroundGC x(this); // unblock FG collection
- MutexLocker y(Heap_lock, Mutex::_no_safepoint_check_flag);
- CMSTokenSync z(true); // not strictly needed.
- if (_collectorState == Resizing) {
- compute_new_size();
- save_heap_summary();
- _collectorState = Resetting;
- } else {
- assert(_collectorState == Idling, "The state should only change"
- " because the foreground collector has finished the collection");
- }
- }
- break;
- }
- case Resetting:
- // CMS heap resizing has been completed
- reset_concurrent();
- assert(_collectorState == Idling, "Collector state should "
- "have changed");
-
- MetaspaceGC::set_should_concurrent_collect(false);
-
- stats().record_cms_end();
- // Don't move the concurrent_phases_end() and compute_new_size()
- // calls to here because a preempted background collection
-        // has its state set to "Resetting".
- break;
- case Idling:
- default:
- ShouldNotReachHere();
- break;
- }
- log_debug(gc, state)(" Thread " INTPTR_FORMAT " done - next CMS state %d",
- p2i(Thread::current()), _collectorState);
- assert(_foregroundGCShouldWait, "block post-condition");
- }
-
- // Should this be in gc_epilogue?
- heap->counters()->update_counters();
-
- {
- // Clear _foregroundGCShouldWait and, in the event that the
- // foreground collector is waiting, notify it, before
- // returning.
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- _foregroundGCShouldWait = false;
- if (_foregroundGCIsActive) {
- CGC_lock->notify();
- }
- assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Possible deadlock");
- }
- log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
- p2i(Thread::current()), _collectorState);
- log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
- prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
-}
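
The switch above drives the background cycle through its phases. Setting aside the yield-to-foreground and abort paths, and the fact that precleaning may jump straight to FinalMarking, the nominal phase order can be sketched as a simple state machine; the snippet is illustrative only, reusing the state names from the code above:

// Illustrative only: the nominal phase order of the background cycle.
// The real loop also yields to a foreground collection at each step.
#include <iostream>

enum CollectorState {
  Idling, InitialMarking, Marking, Precleaning, AbortablePreclean,
  FinalMarking, Sweeping, Resizing, Resetting
};

static CollectorState next_state(CollectorState s) {
  switch (s) {
    case InitialMarking:    return Marking;      // after the initial-mark pause
    case Marking:           return Precleaning;  // after concurrent marking
    case Precleaning:       return AbortablePreclean;
    case AbortablePreclean: return FinalMarking;
    case FinalMarking:      return Sweeping;     // after the remark pause
    case Sweeping:          return Resizing;
    case Resizing:          return Resetting;
    default:                return Idling;       // Resetting (and anything else) ends the cycle
  }
}

int main() {
  for (CollectorState s = InitialMarking; s != Idling; s = next_state(s)) {
    std::cout << "state " << s << "\n";
  }
}
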
-
-void CMSCollector::register_gc_start(GCCause::Cause cause) {
- _cms_start_registered = true;
- _gc_timer_cm->register_gc_start();
- _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
-}
-
-void CMSCollector::register_gc_end() {
- if (_cms_start_registered) {
- report_heap_summary(GCWhen::AfterGC);
-
- _gc_timer_cm->register_gc_end();
- _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
- _cms_start_registered = false;
- }
-}
-
-void CMSCollector::save_heap_summary() {
- CMSHeap* heap = CMSHeap::heap();
- _last_heap_summary = heap->create_heap_summary();
- _last_metaspace_summary = heap->create_metaspace_summary();
-}
-
-void CMSCollector::report_heap_summary(GCWhen::Type when) {
- _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
- _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
-}
-
-bool CMSCollector::waitForForegroundGC() {
- bool res = false;
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should have CMS token");
- // Block the foreground collector until the
-  // background collector decides whether to
- // yield.
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- _foregroundGCShouldWait = true;
- if (_foregroundGCIsActive) {
- // The background collector yields to the
- // foreground collector and returns a value
- // indicating that it has yielded. The foreground
- // collector can proceed.
- res = true;
- _foregroundGCShouldWait = false;
- ConcurrentMarkSweepThread::clear_CMS_flag(
- ConcurrentMarkSweepThread::CMS_cms_has_token);
- ConcurrentMarkSweepThread::set_CMS_flag(
- ConcurrentMarkSweepThread::CMS_cms_wants_token);
- // Get a possibly blocked foreground thread going
- CGC_lock->notify();
- log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
- p2i(Thread::current()), _collectorState);
- while (_foregroundGCIsActive) {
- CGC_lock->wait_without_safepoint_check();
- }
- ConcurrentMarkSweepThread::set_CMS_flag(
- ConcurrentMarkSweepThread::CMS_cms_has_token);
- ConcurrentMarkSweepThread::clear_CMS_flag(
- ConcurrentMarkSweepThread::CMS_cms_wants_token);
- }
- log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
- p2i(Thread::current()), _collectorState);
- return res;
-}
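
waitForForegroundGC() and ReleaseForegroundGC together implement a handshake over CGC_lock using the two flags _foregroundGCIsActive and _foregroundGCShouldWait. A stripped-down sketch of the same flag-plus-condition-variable handshake, with std::mutex and std::condition_variable as stand-ins for CGC_lock (names are illustrative and the CMS-token bookkeeping is omitted):

// Stripped-down illustration of the CGC_lock handshake; std::mutex and
// std::condition_variable stand in for HotSpot's Mutex.  Illustrative only.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex              cgc_lock;
std::condition_variable cgc_cv;
bool foreground_is_active   = false;   // ~ _foregroundGCIsActive
bool foreground_should_wait = false;   // ~ _foregroundGCShouldWait

void background_cycle() {
  std::unique_lock<std::mutex> lk(cgc_lock);
  foreground_should_wait = true;       // any foreground GC must block for now
  // ... one unit of background work would run here (lock held only for this sketch) ...
  foreground_should_wait = false;      // done: let a blocked foreground GC proceed
  if (foreground_is_active) {
    cgc_cv.notify_all();               // wake the waiting foreground collector
  }
}

void foreground_collect() {
  std::unique_lock<std::mutex> lk(cgc_lock);
  foreground_is_active = true;         // tell the background collector we want control
  cgc_cv.notify_all();                 // wake a background thread deciding whether to yield
  cgc_cv.wait(lk, [] { return !foreground_should_wait; });
  // ... the stop-the-world collection would run here ...
  foreground_is_active = false;
}

int main() {
  std::thread bg(background_cycle);
  std::thread fg(foreground_collect);
  bg.join();
  fg.join();
  std::cout << "handshake completed\n";
}
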
-
-// Because of the need to lock the free lists and other structures in
-// the collector, common to all the generations that the collector is
-// collecting, we need the gc_prologues of individual CMS generations
-// to delegate to their collector. It may have been simpler had the
-// current infrastructure allowed one to call a prologue on a
-// collector. In the absence of that we have the generation's
-// prologue delegate to the collector, which delegates back
-// some "local" work to a worker method in the individual generations
-// that it's responsible for collecting, while itself doing any
-// work common to all generations it's responsible for. A similar
-// comment applies to the gc_epilogue()'s.
-// The role of the variable _between_prologue_and_epilogue is to
-// enforce the invocation protocol.
-void CMSCollector::gc_prologue(bool full) {
- // Call gc_prologue_work() for the CMSGen
- // we are responsible for.
-
- // The following locking discipline assumes that we are only called
- // when the world is stopped.
- assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
-
- // The CMSCollector prologue must call the gc_prologues for the
- // "generations" that it's responsible
- // for.
-
- assert( Thread::current()->is_VM_thread()
- || ( CMSScavengeBeforeRemark
- && Thread::current()->is_ConcurrentGC_thread()),
- "Incorrect thread type for prologue execution");
-
- if (_between_prologue_and_epilogue) {
- // We have already been invoked; this is a gc_prologue delegation
- // from yet another CMS generation that we are responsible for, just
- // ignore it since all relevant work has already been done.
- return;
- }
-
- // set a bit saying prologue has been called; cleared in epilogue
- _between_prologue_and_epilogue = true;
- // Claim locks for common data structures, then call gc_prologue_work()
- // for each CMSGen.
-
- getFreelistLocks(); // gets free list locks on constituent spaces
- bitMapLock()->lock_without_safepoint_check();
-
- // Should call gc_prologue_work() for all cms gens we are responsible for
- bool duringMarking = _collectorState >= Marking
- && _collectorState < Sweeping;
-
- // The young collections clear the modified oops state, which tells if
- // there are any modified oops in the class. The remark phase also needs
- // that information. Tell the young collection to save the union of all
- // modified klasses.
- if (duringMarking) {
- _ct->cld_rem_set()->set_accumulate_modified_oops(true);
- }
-
- bool registerClosure = duringMarking;
-
- _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
-
- if (!full) {
- stats().record_gc0_begin();
- }
-}
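
As the comment block before gc_prologue() explains, each CMS generation delegates its prologue to the collector, and _between_prologue_and_epilogue makes the shared part idempotent so only the first delegation does the common work. A minimal sketch of that guard pattern (hypothetical names, not the original API):

// Illustrative sketch of the "first delegation does the shared work" guard
// described above; not the original HotSpot code.
#include <cassert>
#include <iostream>

class Collector {
  bool _between_prologue_and_epilogue = false;
 public:
  void gc_prologue() {
    if (_between_prologue_and_epilogue) {
      return;                       // a later generation delegated again; shared work is done
    }
    _between_prologue_and_epilogue = true;
    std::cout << "acquire shared locks, set up shared state\n";
  }
  void gc_epilogue() {
    assert(_between_prologue_and_epilogue && "epilogue without matching prologue");
    std::cout << "release shared locks\n";
    _between_prologue_and_epilogue = false;   // ready for the next cycle
  }
};

int main() {
  Collector c;
  c.gc_prologue();   // first generation delegates: does the shared work
  c.gc_prologue();   // second generation delegates: no-op
  c.gc_epilogue();
}
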
-
-void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
-
- _capacity_at_prologue = capacity();
- _used_at_prologue = used();
- _cmsSpace->recalculate_used_stable();
-
- // We enable promotion tracking so that card-scanning can recognize
- // which objects have been promoted during this GC and skip them.
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _par_gc_thread_states[i]->promo.startTrackingPromotions();
- }
-
- // Delegate to CMScollector which knows how to coordinate between
- // this and any other CMS generations that it is responsible for
- // collecting.
- collector()->gc_prologue(full);
-}
-
-// This is a "private" interface for use by this generation's CMSCollector.
-// Not to be called directly by any other entity (for instance,
-// GenCollectedHeap, which calls the "public" gc_prologue method above).
-void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
- bool registerClosure, ModUnionClosure* modUnionClosure) {
- assert(!incremental_collection_failed(), "Shouldn't be set yet");
- assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
- "Should be NULL");
- if (registerClosure) {
- cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
- }
- cmsSpace()->gc_prologue();
- // Clear stat counters
- NOT_PRODUCT(
- assert(_numObjectsPromoted == 0, "check");
- assert(_numWordsPromoted == 0, "check");
- log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
- _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
- _numObjectsAllocated = 0;
- _numWordsAllocated = 0;
- )
-}
-
-void CMSCollector::gc_epilogue(bool full) {
- // The following locking discipline assumes that we are only called
- // when the world is stopped.
- assert(SafepointSynchronize::is_at_safepoint(),
- "world is stopped assumption");
-
- // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
- // if linear allocation blocks need to be appropriately marked to allow the
-  // blocks to be parsable. We also check here whether we need to nudge the
- // CMS collector thread to start a new cycle (if it's not already active).
- assert( Thread::current()->is_VM_thread()
- || ( CMSScavengeBeforeRemark
- && Thread::current()->is_ConcurrentGC_thread()),
- "Incorrect thread type for epilogue execution");
-
- if (!_between_prologue_and_epilogue) {
- // We have already been invoked; this is a gc_epilogue delegation
- // from yet another CMS generation that we are responsible for, just
- // ignore it since all relevant work has already been done.
- return;
- }
- assert(haveFreelistLocks(), "must have freelist locks");
- assert_lock_strong(bitMapLock());
-
- _ct->cld_rem_set()->set_accumulate_modified_oops(false);
-
- _cmsGen->gc_epilogue_work(full);
-
- if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
- // in case sampling was not already enabled, enable it
- _start_sampling = true;
- }
- // reset _eden_chunk_array so sampling starts afresh
- _eden_chunk_index = 0;
-
- size_t cms_used = _cmsGen->cmsSpace()->used();
- _cmsGen->cmsSpace()->recalculate_used_stable();
-
- // update performance counters - this uses a special version of
- // update_counters() that allows the utilization to be passed as a
- // parameter, avoiding multiple calls to used().
- //
- _cmsGen->update_counters(cms_used);
-
- bitMapLock()->unlock();
- releaseFreelistLocks();
-
- if (!CleanChunkPoolAsync) {
- Chunk::clean_chunk_pool();
- }
-
- set_did_compact(false);
- _between_prologue_and_epilogue = false; // ready for next cycle
-}
-
-void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
- collector()->gc_epilogue(full);
-
- // When using ParNew, promotion tracking should have already been
- // disabled. However, the prologue (which enables promotion
- // tracking) and epilogue are called irrespective of the type of
- // GC. So they will also be called before and after Full GCs, during
- // which promotion tracking will not be explicitly disabled. So,
-  // it's safer to disable it here too (to be symmetric with
- // enabling it in the prologue).
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _par_gc_thread_states[i]->promo.stopTrackingPromotions();
- }
-}
-
-void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
- assert(!incremental_collection_failed(), "Should have been cleared");
- cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
- cmsSpace()->gc_epilogue();
- // Print stat counters
- NOT_PRODUCT(
- assert(_numObjectsAllocated == 0, "check");
- assert(_numWordsAllocated == 0, "check");
- log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
- _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
- _numObjectsPromoted = 0;
- _numWordsPromoted = 0;
- )
-
-  // The call down the chain in contiguous_available() needs the freelistLock,
-  // so print this out before releasing the freelistLock.
- log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
-}
-
-#ifndef PRODUCT
-bool CMSCollector::have_cms_token() {
- Thread* thr = Thread::current();
- if (thr->is_VM_thread()) {
- return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
- } else if (thr->is_ConcurrentGC_thread()) {
- return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
- } else if (thr->is_GC_task_thread()) {
- return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
- ParGCRareEvent_lock->owned_by_self();
- }
- return false;
-}
-
-// Check reachability of the given heap address in CMS generation,
-// treating all other generations as roots.
-bool CMSCollector::is_cms_reachable(HeapWord* addr) {
- // We could "guarantee" below, rather than assert, but I'll
- // leave these as "asserts" so that an adventurous debugger
- // could try this in the product build provided some subset of
- // the conditions were met, provided they were interested in the
- // results and knew that the computation below wouldn't interfere
- // with other concurrent computations mutating the structures
- // being read or written.
- assert(SafepointSynchronize::is_at_safepoint(),
- "Else mutations in object graph will make answer suspect");
- assert(have_cms_token(), "Should hold cms token");
- assert(haveFreelistLocks(), "must hold free list locks");
- assert_lock_strong(bitMapLock());
-
- // Clear the marking bit map array before starting, but, just
- // for kicks, first report if the given address is already marked
- tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
- _markBitMap.isMarked(addr) ? "" : " not");
-
- if (verify_after_remark()) {
- MutexLocker x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
- bool result = verification_mark_bm()->isMarked(addr);
- tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
- result ? "IS" : "is NOT");
- return result;
- } else {
- tty->print_cr("Could not compute result");
- return false;
- }
-}
-#endif
-
-void
-CMSCollector::print_on_error(outputStream* st) {
- CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
- if (collector != NULL) {
- CMSBitMap* bitmap = &collector->_markBitMap;
- st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
- bitmap->print_on_error(st, " Bits: ");
-
- st->cr();
-
- CMSBitMap* mut_bitmap = &collector->_modUnionTable;
- st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
- mut_bitmap->print_on_error(st, " Bits: ");
- }
-}
-
-////////////////////////////////////////////////////////
-// CMS Verification Support
-////////////////////////////////////////////////////////
-// Following the remark phase, the following invariant
-// should hold -- each object in the CMS heap which is
-// marked in the verification_mark_bm() should also be marked in markBitMap().
-
-class VerifyMarkedClosure: public BitMapClosure {
- CMSBitMap* _marks;
- bool _failed;
-
- public:
- VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
-
- bool do_bit(size_t offset) {
- HeapWord* addr = _marks->offsetToHeapWord(offset);
- if (!_marks->isMarked(addr)) {
- Log(gc, verify) log;
- ResourceMark rm;
- LogStream ls(log.error());
- oop(addr)->print_on(&ls);
- log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
- _failed = true;
- }
- return true;
- }
-
- bool failed() { return _failed; }
-};
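
VerifyMarkedClosure enforces the subset property checked after remark: every bit set in verification_mark_bm() must also be set in markBitMap(), while extra bits in markBitMap() (floating garbage) are tolerated. The same check over two plain bit vectors, purely as an illustration:

// Illustrative subset check over two bit vectors, mirroring what
// VerifyMarkedClosure asserts for verification_mark_bm() vs. markBitMap().
#include <cstddef>
#include <iostream>
#include <vector>

static bool is_subset(const std::vector<bool>& verification_bm,
                      const std::vector<bool>& cms_mark_bm) {
  for (size_t i = 0; i < verification_bm.size(); i++) {
    if (verification_bm[i] && !cms_mark_bm[i]) {
      std::cout << "bit " << i << " should have been marked\n";
      return false;
    }
  }
  return true;  // extra bits in cms_mark_bm (floating garbage) are allowed
}

int main() {
  std::vector<bool> verification = {1, 0, 1, 0};
  std::vector<bool> cms_marks    = {1, 1, 1, 0};  // superset of verification: OK
  std::cout << (is_subset(verification, cms_marks) ? "ok" : "failed") << "\n";
}
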
-
-bool CMSCollector::verify_after_remark() {
- GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
- MutexLocker ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
- static bool init = false;
-
- assert(SafepointSynchronize::is_at_safepoint(),
- "Else mutations in object graph will make answer suspect");
- assert(have_cms_token(),
- "Else there may be mutual interference in use of "
- " verification data structures");
- assert(_collectorState > Marking && _collectorState <= Sweeping,
- "Else marking info checked here may be obsolete");
- assert(haveFreelistLocks(), "must hold free list locks");
- assert_lock_strong(bitMapLock());
-
-
- // Allocate marking bit map if not already allocated
- if (!init) { // first time
- if (!verification_mark_bm()->allocate(_span)) {
- return false;
- }
- init = true;
- }
-
- assert(verification_mark_stack()->isEmpty(), "Should be empty");
-
- // Turn off refs discovery -- so we will be tracing through refs.
- // This is as intended, because by this time
- // GC must already have cleared any refs that need to be cleared,
- // and traced those that need to be marked; moreover,
- // the marking done here is not going to interfere in any
- // way with the marking information used by GC.
- NoRefDiscovery no_discovery(ref_processor());
-
-#if COMPILER2_OR_JVMCI
- DerivedPointerTableDeactivate dpt_deact;
-#endif
-
- // Clear any marks from a previous round
- verification_mark_bm()->clear_all();
- assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
- verify_work_stacks_empty();
-
- CMSHeap* heap = CMSHeap::heap();
- heap->ensure_parsability(false); // fill TLABs, but no need to retire them
- // Update the saved marks which may affect the root scans.
- heap->save_marks();
-
- if (CMSRemarkVerifyVariant == 1) {
- // In this first variant of verification, we complete
- // all marking, then check if the new marks-vector is
- // a subset of the CMS marks-vector.
- verify_after_remark_work_1();
- } else {
- guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
- // In this second variant of verification, we flag an error
- // (i.e. an object reachable in the new marks-vector not reachable
- // in the CMS marks-vector) immediately, also indicating the
-    // identity of an object (A) that references the unmarked object (B) --
- // presumably, a mutation to A failed to be picked up by preclean/remark?
- verify_after_remark_work_2();
- }
-
- return true;
-}
-
-void CMSCollector::verify_after_remark_work_1() {
- ResourceMark rm;
- HandleMark hm;
- CMSHeap* heap = CMSHeap::heap();
-
- // Get a clear set of claim bits for the roots processing to work with.
- ClassLoaderDataGraph::clear_claimed_marks();
-
- // Mark from roots one level into CMS
- MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
- heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
- {
- StrongRootsScope srs(1);
-
- heap->cms_process_roots(&srs,
- true, // young gen as roots
- GenCollectedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- ¬Older,
- NULL);
- }
-
- // Now mark from the roots
- MarkFromRootsClosure markFromRootsClosure(this, _span,
- verification_mark_bm(), verification_mark_stack(),
- false /* don't yield */, true /* verifying */);
- assert(_restart_addr == NULL, "Expected pre-condition");
- verification_mark_bm()->iterate(&markFromRootsClosure);
- while (_restart_addr != NULL) {
- // Deal with stack overflow: by restarting at the indicated
- // address.
- HeapWord* ra = _restart_addr;
- markFromRootsClosure.reset(ra);
- _restart_addr = NULL;
- verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
- }
- assert(verification_mark_stack()->isEmpty(), "Should have been drained");
- verify_work_stacks_empty();
-
- // Marking completed -- now verify that each bit marked in
- // verification_mark_bm() is also marked in markBitMap(); flag all
- // errors by printing corresponding objects.
- VerifyMarkedClosure vcl(markBitMap());
- verification_mark_bm()->iterate(&vcl);
- if (vcl.failed()) {
- Log(gc, verify) log;
- log.error("Failed marking verification after remark");
- ResourceMark rm;
- LogStream ls(log.error());
- heap->print_on(&ls);
- fatal("CMS: failed marking verification after remark");
- }
-}
-
-class VerifyCLDOopsCLDClosure : public CLDClosure {
- class VerifyCLDOopsClosure : public OopClosure {
- CMSBitMap* _bitmap;
- public:
- VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
- void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
- void do_oop(narrowOop* p) { ShouldNotReachHere(); }
- } _oop_closure;
- public:
- VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
- void do_cld(ClassLoaderData* cld) {
- cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, false);
- }
-};
-
-void CMSCollector::verify_after_remark_work_2() {
- ResourceMark rm;
- HandleMark hm;
- CMSHeap* heap = CMSHeap::heap();
-
- // Get a clear set of claim bits for the roots processing to work with.
- ClassLoaderDataGraph::clear_claimed_marks();
-
- // Mark from roots one level into CMS
- MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
- markBitMap());
- CLDToOopClosure cld_closure(¬Older, ClassLoaderData::_claim_strong);
-
- heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
- {
- StrongRootsScope srs(1);
-
- heap->cms_process_roots(&srs,
- true, // young gen as roots
- GenCollectedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- ¬Older,
- &cld_closure);
- }
-
- // Now mark from the roots
- MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
- verification_mark_bm(), markBitMap(), verification_mark_stack());
- assert(_restart_addr == NULL, "Expected pre-condition");
- verification_mark_bm()->iterate(&markFromRootsClosure);
- while (_restart_addr != NULL) {
- // Deal with stack overflow: by restarting at the indicated
- // address.
- HeapWord* ra = _restart_addr;
- markFromRootsClosure.reset(ra);
- _restart_addr = NULL;
- verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
- }
- assert(verification_mark_stack()->isEmpty(), "Should have been drained");
- verify_work_stacks_empty();
-
- VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
- ClassLoaderDataGraph::cld_do(&verify_cld_oops);
-
- // Marking completed -- now verify that each bit marked in
- // verification_mark_bm() is also marked in markBitMap(); flag all
- // errors by printing corresponding objects.
- VerifyMarkedClosure vcl(markBitMap());
- verification_mark_bm()->iterate(&vcl);
- assert(!vcl.failed(), "Else verification above should not have succeeded");
-}
-
-void ConcurrentMarkSweepGeneration::save_marks() {
- // delegate to CMS space
- cmsSpace()->save_marks();
-}
-
-bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
- return cmsSpace()->no_allocs_since_save_marks();
-}
-
-void
-ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
- if (freelistLock()->owned_by_self()) {
- Generation::oop_iterate(cl);
- } else {
- MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
- Generation::oop_iterate(cl);
- }
-}
-
-void
-ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
- if (freelistLock()->owned_by_self()) {
- Generation::object_iterate(cl);
- } else {
- MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
- Generation::object_iterate(cl);
- }
-}
-
-void
-ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
- if (freelistLock()->owned_by_self()) {
- Generation::safe_object_iterate(cl);
- } else {
- MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
- Generation::safe_object_iterate(cl);
- }
-}
-
-void
-ConcurrentMarkSweepGeneration::post_compact() {
-}
-
-void
-ConcurrentMarkSweepGeneration::prepare_for_verify() {
- // Fix the linear allocation blocks to look like free blocks.
-
- // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
- // are not called when the heap is verified during universe initialization and
- // at vm shutdown.
- if (freelistLock()->owned_by_self()) {
- cmsSpace()->prepare_for_verify();
- } else {
- MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
- cmsSpace()->prepare_for_verify();
- }
-}
-
-void
-ConcurrentMarkSweepGeneration::verify() {
- // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
- // are not called when the heap is verified during universe initialization and
- // at vm shutdown.
- if (freelistLock()->owned_by_self()) {
- cmsSpace()->verify();
- } else {
- MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
- cmsSpace()->verify();
- }
-}
-
-void CMSCollector::verify() {
- _cmsGen->verify();
-}
-
-#ifndef PRODUCT
-bool CMSCollector::overflow_list_is_empty() const {
- assert(_num_par_pushes >= 0, "Inconsistency");
- if (_overflow_list == NULL) {
- assert(_num_par_pushes == 0, "Inconsistency");
- }
- return _overflow_list == NULL;
-}
-
-// The methods verify_work_stacks_empty() and verify_overflow_empty()
-// merely consolidate assertion checks that appear to occur together frequently.
-void CMSCollector::verify_work_stacks_empty() const {
- assert(_markStack.isEmpty(), "Marking stack should be empty");
- assert(overflow_list_is_empty(), "Overflow list should be empty");
-}
-
-void CMSCollector::verify_overflow_empty() const {
- assert(overflow_list_is_empty(), "Overflow list should be empty");
- assert(no_preserved_marks(), "No preserved marks");
-}
-#endif // PRODUCT
-
-// Decide if we want to enable class unloading as part of the
-// ensuing concurrent GC cycle. We will collect and
-// unload classes if it's the case that:
-// (a) class unloading is enabled at the command line, and
-// (b) old gen is getting really full
-// NOTE: Provided there is no change in the state of the heap between
-// calls to this method, it should have idempotent results. Moreover,
-// its results should be monotonically increasing (i.e. going from 0 to 1,
-// but not 1 to 0) between successive calls between which the heap was
-// not collected. For the implementation below, it must thus rely on
-// the property that concurrent_cycles_since_last_unload()
-// will not decrease unless a collection cycle happened and that
-// _cmsGen->is_too_full() is
-// itself also monotonic in that sense. See check_monotonicity()
-// below.
-void CMSCollector::update_should_unload_classes() {
- _should_unload_classes = false;
- if (CMSClassUnloadingEnabled) {
- _should_unload_classes = (concurrent_cycles_since_last_unload() >=
- CMSClassUnloadingMaxInterval)
- || _cmsGen->is_too_full();
- }
-}
-
-bool ConcurrentMarkSweepGeneration::is_too_full() const {
- bool res = should_concurrent_collect();
- res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
- return res;
-}
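
A small numeric illustration of the monotonicity argument above: both inputs to the decision, the number of concurrent cycles since the last unload and the old-generation occupancy, only grow between collections, so the disjunction can flip from false to true but not back. Values and names below are hypothetical:

// Illustrative only: the class-unloading decision as a monotone predicate of
// two inputs that do not decrease between collections.
#include <iostream>

static bool should_unload_classes(int cycles_since_last_unload,
                                  int max_interval,
                                  double occupancy,
                                  double too_full_threshold) {
  return cycles_since_last_unload >= max_interval || occupancy > too_full_threshold;
}

int main() {
  // Between two collections the cycle count and occupancy only grow,
  // so once this returns true it keeps returning true.
  std::cout << should_unload_classes(2, 5, 0.60, 0.90) << "\n";  // 0: not yet
  std::cout << should_unload_classes(5, 5, 0.60, 0.90) << "\n";  // 1: interval reached
}
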
-
-void CMSCollector::setup_cms_unloading_and_verification_state() {
- const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
- || VerifyBeforeExit;
- const int rso = GenCollectedHeap::SO_AllCodeCache;
-
- // We set the proper root for this CMS cycle here.
- if (should_unload_classes()) { // Should unload classes this cycle
- remove_root_scanning_option(rso); // Shrink the root set appropriately
- set_verifying(should_verify); // Set verification state for this cycle
- return; // Nothing else needs to be done at this time
- }
-
- // Not unloading classes this cycle
- assert(!should_unload_classes(), "Inconsistency!");
-
- // If we are not unloading classes then add SO_AllCodeCache to root
- // scanning options.
- add_root_scanning_option(rso);
-
- if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
- set_verifying(true);
- } else if (verifying() && !should_verify) {
- // We were verifying, but some verification flags got disabled.
- set_verifying(false);
- // Exclude symbols, strings and code cache elements from root scanning to
- // reduce IM and RM pauses.
- remove_root_scanning_option(rso);
- }
-}
-
-
-#ifndef PRODUCT
-HeapWord* CMSCollector::block_start(const void* p) const {
- const HeapWord* addr = (HeapWord*)p;
- if (_span.contains(p)) {
- if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
- return _cmsGen->cmsSpace()->block_start(p);
- }
- }
- return NULL;
-}
-#endif
-
-HeapWord*
-ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
- bool tlab,
- bool parallel) {
- CMSSynchronousYieldRequest yr;
- assert(!tlab, "Can't deal with TLAB allocation");
- MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
- expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
- if (GCExpandToAllocateDelayMillis > 0) {
- os::naked_sleep(GCExpandToAllocateDelayMillis);
- }
- return have_lock_and_allocate(word_size, tlab);
-}
-
-void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
- size_t bytes,
- size_t expand_bytes,
- CMSExpansionCause::Cause cause)
-{
-
- bool success = expand(bytes, expand_bytes);
-
- // remember why we expanded; this information is used
- // by shouldConcurrentCollect() when making decisions on whether to start
- // a new CMS cycle.
- if (success) {
- set_expansion_cause(cause);
- log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause));
- }
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
- HeapWord* res = NULL;
- MutexLocker x(ParGCRareEvent_lock);
- while (true) {
- // Expansion by some other thread might make alloc OK now:
- res = ps->lab.alloc(word_sz);
- if (res != NULL) return res;
- // If there's not enough expansion space available, give up.
- if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
- return NULL;
- }
- // Otherwise, we try expansion.
- expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
- // Now go around the loop and try alloc again;
- // A competing par_promote might beat us to the expansion space,
- // so we may go around the loop again if promotion fails again.
- if (GCExpandToAllocateDelayMillis > 0) {
- os::naked_sleep(GCExpandToAllocateDelayMillis);
- }
- }
-}
-
-
-bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
- PromotionInfo* promo) {
- MutexLocker x(ParGCRareEvent_lock);
- size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
- while (true) {
- // Expansion by some other thread might make alloc OK now:
- if (promo->ensure_spooling_space()) {
- assert(promo->has_spooling_space(),
- "Post-condition of successful ensure_spooling_space()");
- return true;
- }
- // If there's not enough expansion space available, give up.
- if (_virtual_space.uncommitted_size() < refill_size_bytes) {
- return false;
- }
- // Otherwise, we try expansion.
- expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
- // Now go around the loop and try alloc again;
- // A competing allocation might beat us to the expansion space,
- // so we may go around the loop again if allocation fails again.
- if (GCExpandToAllocateDelayMillis > 0) {
- os::naked_sleep(GCExpandToAllocateDelayMillis);
- }
- }
-}
-
-void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
- // Only shrink if a compaction was done so that all the free space
- // in the generation is in a contiguous block at the end.
- if (did_compact()) {
- CardGeneration::shrink(bytes);
- }
-}
-
-void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
- assert_locked_or_safepoint(Heap_lock);
-}
-
-void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
- assert_locked_or_safepoint(Heap_lock);
- assert_lock_strong(freelistLock());
- log_trace(gc)("Shrinking of CMS not yet implemented");
- return;
-}
-
-
-// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
-// phases.
-class CMSPhaseAccounting: public StackObj {
- public:
- CMSPhaseAccounting(CMSCollector *collector,
- const char *title);
- ~CMSPhaseAccounting();
-
- private:
- CMSCollector *_collector;
- const char *_title;
- GCTraceConcTime(Info, gc) _trace_time;
-
- public:
- // Not MT-safe; so do not pass around these StackObj's
- // where they may be accessed by other threads.
- double wallclock_millis() {
- return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
- }
-};
-
-CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
- const char *title) :
- _collector(collector), _title(title), _trace_time(title) {
-
- _collector->resetYields();
- _collector->resetTimer();
- _collector->startTimer();
- _collector->gc_timer_cm()->register_gc_concurrent_start(title);
-}
-
-CMSPhaseAccounting::~CMSPhaseAccounting() {
- _collector->gc_timer_cm()->register_gc_concurrent_end();
- _collector->stopTimer();
- log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
- log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
-}
-
-// CMS work
-
-// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
-class CMSParMarkTask : public AbstractGangTask {
- protected:
- CMSCollector* _collector;
- uint _n_workers;
- CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
- AbstractGangTask(name),
- _collector(collector),
- _n_workers(n_workers) {}
- // Work method in support of parallel rescan ... of young gen spaces
- void do_young_space_rescan(OopsInGenClosure* cl,
- ContiguousSpace* space,
- HeapWord** chunk_array, size_t chunk_top);
- void work_on_young_gen_roots(OopsInGenClosure* cl);
-};
-
-// Parallel initial mark task
-class CMSParInitialMarkTask: public CMSParMarkTask {
- StrongRootsScope* _strong_roots_scope;
- public:
- CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
- CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
- _strong_roots_scope(strong_roots_scope) {}
- void work(uint worker_id);
-};
-
-// Checkpoint the roots into this generation from outside
-// this generation. [Note this initial checkpoint need only
-// be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial() {
- assert(_collectorState == InitialMarking, "Wrong collector state");
- check_correct_thread_executing();
- TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
-
- save_heap_summary();
- report_heap_summary(GCWhen::BeforeGC);
-
- ReferenceProcessor* rp = ref_processor();
- assert(_restart_addr == NULL, "Control point invariant");
- {
- // acquire locks for subsequent manipulations
- MutexLocker x(bitMapLock(),
- Mutex::_no_safepoint_check_flag);
- checkpointRootsInitialWork();
- // enable ("weak") refs discovery
- rp->enable_discovery();
- _collectorState = Marking;
- }
-
- _cmsGen->cmsSpace()->recalculate_used_stable();
-}
-
-void CMSCollector::checkpointRootsInitialWork() {
- assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
- assert(_collectorState == InitialMarking, "just checking");
-
- // Already have locks.
- assert_lock_strong(bitMapLock());
- assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
-
- // Setup the verification and class unloading state for this
- // CMS collection cycle.
- setup_cms_unloading_and_verification_state();
-
- GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
-
- // Reset all the PLAB chunk arrays if necessary.
- if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
- reset_survivor_plab_arrays();
- }
-
- ResourceMark rm;
- HandleMark hm;
-
- MarkRefsIntoClosure notOlder(_span, &_markBitMap);
- CMSHeap* heap = CMSHeap::heap();
-
- verify_work_stacks_empty();
- verify_overflow_empty();
-
- heap->ensure_parsability(false); // fill TLABs, but no need to retire them
- // Update the saved marks which may affect the root scans.
- heap->save_marks();
-
- // weak reference processing has not started yet.
- ref_processor()->set_enqueuing_is_done(false);
-
- // Need to remember all newly created CLDs,
- // so that we can guarantee that the remark finds them.
- ClassLoaderDataGraph::remember_new_clds(true);
-
- // Whenever a CLD is found, it will be claimed before proceeding to mark
- // the klasses. The claimed marks need to be cleared before marking starts.
- ClassLoaderDataGraph::clear_claimed_marks();
-
- print_eden_and_survivor_chunk_arrays();
-
- {
-#if COMPILER2_OR_JVMCI
- DerivedPointerTableDeactivate dpt_deact;
-#endif
- if (CMSParallelInitialMarkEnabled) {
- // The parallel version.
- WorkGang* workers = heap->workers();
- assert(workers != NULL, "Need parallel worker threads.");
- uint n_workers = workers->active_workers();
-
- StrongRootsScope srs(n_workers);
-
- CMSParInitialMarkTask tsk(this, &srs, n_workers);
- initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
-      // If the total number of workers is greater than 1, then multiple workers
- // may be used at some time and the initialization has been set
- // such that the single threaded path cannot be used.
- if (workers->total_workers() > 1) {
- workers->run_task(&tsk);
- } else {
- tsk.work(0);
- }
- } else {
- // The serial version.
- CLDToOopClosure cld_closure(¬Older, ClassLoaderData::_claim_strong);
- heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
- StrongRootsScope srs(1);
-
- heap->cms_process_roots(&srs,
- true, // young gen as roots
- GenCollectedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- ¬Older,
- &cld_closure);
- }
- }
-
- // Clear mod-union table; it will be dirtied in the prologue of
- // CMS generation per each young generation collection.
-
- assert(_modUnionTable.isAllClear(),
- "Was cleared in most recent final checkpoint phase"
- " or no bits are set in the gc_prologue before the start of the next "
- "subsequent marking phase.");
-
- assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
-
- // Save the end of the used_region of the constituent generations
- // to be used to limit the extent of sweep in each generation.
- save_sweep_limits();
- verify_overflow_empty();
-}
-
-bool CMSCollector::markFromRoots() {
- // we might be tempted to assert that:
- // assert(!SafepointSynchronize::is_at_safepoint(),
- // "inconsistent argument?");
- // However that wouldn't be right, because it's possible that
- // a safepoint is indeed in progress as a young generation
- // stop-the-world GC happens even as we mark in this generation.
- assert(_collectorState == Marking, "inconsistent state?");
- check_correct_thread_executing();
- verify_overflow_empty();
-
- // Weak ref discovery note: We may be discovering weak
- // refs in this generation concurrent (but interleaved) with
- // weak ref discovery by the young generation collector.
-
- CMSTokenSyncWithLocks ts(true, bitMapLock());
- GCTraceCPUTime tcpu;
- CMSPhaseAccounting pa(this, "Concurrent Mark");
- bool res = markFromRootsWork();
- if (res) {
- _collectorState = Precleaning;
- } else { // We failed and a foreground collection wants to take over
- assert(_foregroundGCIsActive, "internal state inconsistency");
- assert(_restart_addr == NULL, "foreground will restart from scratch");
- log_debug(gc)("bailing out to foreground collection");
- }
- verify_overflow_empty();
- return res;
-}
-
-bool CMSCollector::markFromRootsWork() {
- // iterate over marked bits in bit map, doing a full scan and mark
- // from these roots using the following algorithm:
- // . if oop is to the right of the current scan pointer,
- // mark corresponding bit (we'll process it later)
- // . else (oop is to left of current scan pointer)
- // push oop on marking stack
- // . drain the marking stack
-
- // Note that when we do a marking step we need to hold the
- // bit map lock -- recall that direct allocation (by mutators)
- // and promotion (by the young generation collector) is also
- // marking the bit map. [the so-called allocate live policy.]
- // Because the implementation of bit map marking is not
- // robust wrt simultaneous marking of bits in the same word,
- // we need to make sure that there is no such interference
- // between concurrent such updates.
-
- // already have locks
- assert_lock_strong(bitMapLock());
-
- verify_work_stacks_empty();
- verify_overflow_empty();
- bool result = false;
- if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
- result = do_marking_mt();
- } else {
- result = do_marking_st();
- }
- return result;
-}
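
The marking scheme described at the top of markFromRootsWork() scans the bitmap with a moving finger: a reference that points ahead of the finger is only marked (the scan will reach it later), while a reference that points behind the finger must be pushed on the marking stack and drained, or it would never be revisited. A toy, self-contained simulation of that rule over an integer "heap" (entirely illustrative):

// Illustrative finger-based marking over a toy object graph; indices stand in
// for heap addresses and a vector<bool> stands in for the mark bitmap.
#include <cstddef>
#include <iostream>
#include <stack>
#include <vector>

int main() {
  // refs[i] lists the objects referenced by object i.
  std::vector<std::vector<size_t>> refs = { {2, 4}, {}, {1, 3}, {}, {0} };
  std::vector<bool> marked(refs.size(), false);
  std::stack<size_t> work;

  marked[0] = true;                      // object 0 is the root
  for (size_t finger = 0; finger < refs.size(); finger++) {
    if (!marked[finger]) continue;       // scan only marked objects
    for (size_t target : refs[finger]) {
      if (marked[target]) continue;
      marked[target] = true;
      if (target <= finger) work.push(target);   // behind the finger: must trace now
      // ahead of the finger: the outer scan will reach it later
    }
    while (!work.empty()) {              // drain the stack before advancing the finger
      size_t obj = work.top(); work.pop();
      for (size_t target : refs[obj]) {
        if (!marked[target]) {
          marked[target] = true;
          if (target <= finger) work.push(target);
        }
      }
    }
  }
  for (size_t i = 0; i < marked.size(); i++)
    std::cout << "object " << i << (marked[i] ? " marked\n" : " unmarked\n");
}
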
-
-// Forward decl
-class CMSConcMarkingTask;
-
-class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator {
- CMSCollector* _collector;
- CMSConcMarkingTask* _task;
- public:
- virtual void yield();
-
- // "n_threads" is the number of threads to be terminated.
- // "queue_set" is a set of work queues of other threads.
- // "collector" is the CMS collector associated with this task terminator.
- // "yield" indicates whether we need the gang as a whole to yield.
- CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
- ParallelTaskTerminator(n_threads, queue_set),
- _collector(collector) { }
-
- void set_task(CMSConcMarkingTask* task) {
- _task = task;
- }
-};
-
-class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator {
- CMSCollector* _collector;
- CMSConcMarkingTask* _task;
- public:
- virtual void yield();
-
- // "n_threads" is the number of threads to be terminated.
- // "queue_set" is a set of work queues of other threads.
- // "collector" is the CMS collector associated with this task terminator.
- // "yield" indicates whether we need the gang as a whole to yield.
- CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
- OWSTTaskTerminator(n_threads, queue_set),
- _collector(collector) { }
-
- void set_task(CMSConcMarkingTask* task) {
- _task = task;
- }
-};
-
-class CMSConcMarkingTaskTerminator {
- private:
- ParallelTaskTerminator* _term;
- public:
- CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) {
- if (UseOWSTTaskTerminator) {
- _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector);
- } else {
- _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector);
- }
- }
- ~CMSConcMarkingTaskTerminator() {
- assert(_term != NULL, "Must not be NULL");
- delete _term;
- }
-
- void set_task(CMSConcMarkingTask* task);
- ParallelTaskTerminator* terminator() const { return _term; }
-};
-
-class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
- CMSConcMarkingTask* _task;
- public:
- bool should_exit_termination();
- void set_task(CMSConcMarkingTask* task) {
- _task = task;
- }
-};
-
-// MT Concurrent Marking Task
-class CMSConcMarkingTask: public YieldingFlexibleGangTask {
- CMSCollector* _collector;
- uint _n_workers; // requested/desired # workers
- bool _result;
- CompactibleFreeListSpace* _cms_space;
- char _pad_front[64]; // padding to ...
- HeapWord* volatile _global_finger; // ... avoid sharing cache line
- char _pad_back[64];
- HeapWord* _restart_addr;
-
- // Exposed here for yielding support
- Mutex* const _bit_map_lock;
-
- // The per thread work queues, available here for stealing
- OopTaskQueueSet* _task_queues;
-
- // Termination (and yielding) support
- CMSConcMarkingTaskTerminator _term;
- CMSConcMarkingTerminatorTerminator _term_term;
-
- public:
- CMSConcMarkingTask(CMSCollector* collector,
- CompactibleFreeListSpace* cms_space,
- YieldingFlexibleWorkGang* workers,
- OopTaskQueueSet* task_queues):
- YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
- _collector(collector),
- _n_workers(0),
- _result(true),
- _cms_space(cms_space),
- _bit_map_lock(collector->bitMapLock()),
- _task_queues(task_queues),
- _term(_n_workers, task_queues, _collector)
- {
- _requested_size = _n_workers;
- _term.set_task(this);
- _term_term.set_task(this);
- _restart_addr = _global_finger = _cms_space->bottom();
- }
-
-
- OopTaskQueueSet* task_queues() { return _task_queues; }
-
- OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
- HeapWord* volatile* global_finger_addr() { return &_global_finger; }
-
- ParallelTaskTerminator* terminator() { return _term.terminator(); }
-
- virtual void set_for_termination(uint active_workers) {
- terminator()->reset_for_reuse(active_workers);
- }
-
- void work(uint worker_id);
- bool should_yield() {
- return ConcurrentMarkSweepThread::should_yield()
- && !_collector->foregroundGCIsActive();
- }
-
- virtual void coordinator_yield(); // stuff done by coordinator
- bool result() { return _result; }
-
- void reset(HeapWord* ra) {
- assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
- _restart_addr = _global_finger = ra;
- _term.terminator()->reset_for_reuse();
- }
-
- static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
- OopTaskQueue* work_q);
-
- private:
- void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
- void do_work_steal(int i);
- void bump_global_finger(HeapWord* f);
-};
-
-bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
- assert(_task != NULL, "Error");
- return _task->yielding();
- // Note that we do not need the disjunct || _task->should_yield() above
- // because we want terminating threads to yield only if the task
- // is already in the midst of yielding, which happens only after at least one
- // thread has yielded.
-}
-
-void CMSConcMarkingParallelTerminator::yield() {
- if (_task->should_yield()) {
- _task->yield();
- } else {
- ParallelTaskTerminator::yield();
- }
-}
-
-void CMSConcMarkingOWSTTerminator::yield() {
- if (_task->should_yield()) {
- _task->yield();
- } else {
- OWSTTaskTerminator::yield();
- }
-}
-
-void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) {
- if (UseOWSTTaskTerminator) {
- ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task);
- } else {
- ((CMSConcMarkingParallelTerminator*)_term)->set_task(task);
- }
-}
-
-////////////////////////////////////////////////////////////////
-// Concurrent Marking Algorithm Sketch
-////////////////////////////////////////////////////////////////
-// Until all tasks exhausted (both spaces):
-// -- claim next available chunk
-// -- bump global finger via CAS
-// -- find first object that starts in this chunk
-// and start scanning bitmap from that position
-// -- scan marked objects for oops
-// -- CAS-mark target, and if successful:
-// . if target oop is above global finger (volatile read)
-// nothing to do
-// . if target oop is in chunk and above local finger
-// then nothing to do
-// . else push on work-queue
-// -- Deal with possible overflow issues:
-// . local work-queue overflow causes stuff to be pushed on
-// global (common) overflow queue
-// . always first empty local work queue
-// . then get a batch of oops from global work queue if any
-// . then do work stealing
-// -- When all tasks claimed (both spaces)
-// and local work queue empty,
-// then in a loop do:
-// . check global overflow stack; steal a batch of oops and trace
-//        . try to steal from other threads if GOS is empty
-// . if neither is available, offer termination
-// -- Terminate and return result
-//
-void CMSConcMarkingTask::work(uint worker_id) {
- elapsedTimer _timer;
- ResourceMark rm;
- HandleMark hm;
-
- DEBUG_ONLY(_collector->verify_overflow_empty();)
-
- // Before we begin work, our work queue should be empty
- assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
- // Scan the bitmap covering _cms_space, tracing through grey objects.
- _timer.start();
- do_scan_and_mark(worker_id, _cms_space);
- _timer.stop();
- log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-
- // ... do work stealing
- _timer.reset();
- _timer.start();
- do_work_steal(worker_id);
- _timer.stop();
- log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
- assert(_collector->_markStack.isEmpty(), "Should have been emptied");
- assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
- // Note that under the current task protocol, the
-  // following assertion is true even if the spaces
- // expanded since the completion of the concurrent
- // marking. XXX This will likely change under a strict
- // ABORT semantics.
- // After perm removal the comparison was changed to
- // greater than or equal to from strictly greater than.
- // Before perm removal the highest address sweep would
- // have been at the end of perm gen but now is at the
- // end of the tenured gen.
- assert(_global_finger >= _cms_space->end(),
- "All tasks have been completed");
- DEBUG_ONLY(_collector->verify_overflow_empty();)
-}
-
-void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
- HeapWord* read = _global_finger;
- HeapWord* cur = read;
- while (f > read) {
- cur = read;
- read = Atomic::cmpxchg(f, &_global_finger, cur);
- if (cur == read) {
- // our cas succeeded
- assert(_global_finger >= f, "protocol consistency");
- break;
- }
- }
-}
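// The finger bump above is the classic "advance a shared pointer monotonically
// via CAS" idiom. A minimal standalone sketch of the same idea, using
// std::atomic in place of HotSpot's Atomic::cmpxchg; the function and
// parameter names below are illustrative only, not part of the patch.
#include <atomic>

static void bump_finger(std::atomic<char*>& finger, char* f) {
  char* cur = finger.load();
  while (f > cur) {
    // On failure compare_exchange_weak reloads 'cur' with the newer value,
    // so the loop re-tests whether 'f' is still ahead of the published finger.
    if (finger.compare_exchange_weak(cur, f)) {
      break;   // we published 'f'; the finger never moves backwards
    }
  }
}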
-
-// This is really inefficient, and should be redone by
-// using (not yet available) block-read and -write interfaces to the
-// stack and the work_queue. XXX FIX ME !!!
-bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
- OopTaskQueue* work_q) {
- // Fast lock-free check
- if (ovflw_stk->length() == 0) {
- return false;
- }
- assert(work_q->size() == 0, "Shouldn't steal");
- MutexLocker ml(ovflw_stk->par_lock(),
- Mutex::_no_safepoint_check_flag);
- // Grab up to 1/4 the size of the work queue
- size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
- num = MIN2(num, ovflw_stk->length());
- for (int i = (int) num; i > 0; i--) {
- oop cur = ovflw_stk->pop();
- assert(cur != NULL, "Counted wrong?");
- work_q->push(cur);
- }
- return num > 0;
-}
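// A hedged sketch of the batching policy above, with standard-library
// stand-ins (std::vector for the overflow stack, std::deque for the bounded
// per-worker queue). The original performs a lock-free length check before
// taking the lock and also caps the batch at ParGCDesiredObjsFromOverflowList;
// both are folded into the locked region here for simplicity. All names are
// illustrative.
#include <algorithm>
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

static bool take_from_overflow(std::vector<int>& overflow, std::mutex& overflow_lock,
                               std::deque<int>& local, std::size_t local_capacity) {
  std::lock_guard<std::mutex> guard(overflow_lock);
  if (overflow.empty()) {
    return false;
  }
  // Grab up to 1/4 of the free space in the local queue, as above.
  std::size_t room = (local_capacity - local.size()) / 4;
  std::size_t num = std::min(room, overflow.size());
  for (std::size_t i = 0; i < num; i++) {
    local.push_back(overflow.back());
    overflow.pop_back();
  }
  return num > 0;
}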
-
-void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
- SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
- int n_tasks = pst->n_tasks();
- // We allow that there may be no tasks to do here because
- // we are restarting after a stack overflow.
- assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
- uint nth_task = 0;
-
- HeapWord* aligned_start = sp->bottom();
- if (sp->used_region().contains(_restart_addr)) {
- // Align down to a card boundary for the start of 0th task
- // for this space.
- aligned_start = align_down(_restart_addr, CardTable::card_size);
- }
-
- size_t chunk_size = sp->marking_task_size();
- while (pst->try_claim_task(/* reference */ nth_task)) {
- // Having claimed the nth task in this space,
- // compute the chunk that it corresponds to:
- MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
- aligned_start + (nth_task+1)*chunk_size);
- // Try and bump the global finger via a CAS;
- // note that we need to do the global finger bump
- // _before_ taking the intersection below, because
- // the task corresponding to that region will be
- // deemed done even if the used_region() expands
- // because of allocation -- as it almost certainly will
- // during start-up while the threads yield in the
- // closure below.
- HeapWord* finger = span.end();
- bump_global_finger(finger); // atomically
- // There are null tasks here corresponding to chunks
- // beyond the "top" address of the space.
- span = span.intersection(sp->used_region());
- if (!span.is_empty()) { // Non-null task
- HeapWord* prev_obj;
- assert(!span.contains(_restart_addr) || nth_task == 0,
- "Inconsistency");
- if (nth_task == 0) {
- // For the 0th task, we'll not need to compute a block_start.
- if (span.contains(_restart_addr)) {
- // In the case of a restart because of stack overflow,
- // we might additionally skip a chunk prefix.
- prev_obj = _restart_addr;
- } else {
- prev_obj = span.start();
- }
- } else {
- // We want to skip the first object because
- // the protocol is to scan any object in its entirety
- // that _starts_ in this span; a fortiori, any
- // object starting in an earlier span is scanned
- // as part of an earlier claimed task.
- // Below we use the "careful" version of block_start
- // so we do not try to navigate uninitialized objects.
- prev_obj = sp->block_start_careful(span.start());
- // Below we use a variant of block_size that uses the
- // Printezis bits to avoid waiting for allocated
- // objects to become initialized/parsable.
- while (prev_obj < span.start()) {
- size_t sz = sp->block_size_no_stall(prev_obj, _collector);
- if (sz > 0) {
- prev_obj += sz;
- } else {
- // In this case we may end up doing a bit of redundant
- // scanning, but that appears unavoidable, short of
- // locking the free list locks; see bug 6324141.
- break;
- }
- }
- }
- if (prev_obj < span.end()) {
- MemRegion my_span = MemRegion(prev_obj, span.end());
- // Do the marking work within a non-empty span --
- // the last argument to the constructor indicates whether the
- // iteration should be incremental with periodic yields.
- ParMarkFromRootsClosure cl(this, _collector, my_span,
- &_collector->_markBitMap,
- work_queue(i),
- &_collector->_markStack);
- _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
- } // else nothing to do for this task
- } // else nothing to do for this task
- }
- // We'd be tempted to assert here that since there are no
- // more tasks left to claim in this space, the global_finger
- // must exceed space->top() and a fortiori space->end(). However,
- // that would not quite be correct because the bumping of
- // global_finger occurs strictly after the claiming of a task,
- // so by the time we reach here the global finger may not yet
- // have been bumped up by the thread that claimed the last
- // task.
- pst->all_tasks_completed();
-}
-
-class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure {
- private:
- CMSCollector* _collector;
- CMSConcMarkingTask* _task;
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSMarkStack* _overflow_stack;
- OopTaskQueue* _work_queue;
- protected:
- DO_OOP_WORK_DEFN
- public:
- ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
- CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
- MetadataVisitingOopIterateClosure(collector->ref_processor()),
- _collector(collector),
- _task(task),
- _span(collector->_span),
- _bit_map(bit_map),
- _overflow_stack(overflow_stack),
- _work_queue(work_queue)
- { }
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-
- void trim_queue(size_t max);
- void handle_stack_overflow(HeapWord* lost);
- void do_yield_check() {
- if (_task->should_yield()) {
- _task->yield();
- }
- }
-};
-
-DO_OOP_WORK_IMPL(ParConcMarkingClosure)
-
-// Grey object scanning during work stealing phase --
-// the salient assumption here is that any references
-// that are in these stolen objects being scanned must
-// already have been initialized (else they would not have
-// been published), so we do not need to check for
-// uninitialized objects before pushing here.
-void ParConcMarkingClosure::do_oop(oop obj) {
- assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
- HeapWord* addr = (HeapWord*)obj;
- // Check if oop points into the CMS generation
- // and is not marked
- if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
- // a white object ...
- // If we manage to "claim" the object, by being the
- // first thread to mark it, then we push it on our
- // marking stack
- if (_bit_map->par_mark(addr)) { // ... now grey
- // push on work queue (grey set)
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow ||
- !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
- // stack overflow
- log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
- // We cannot assert that the overflow stack is full because
- // it may have been emptied since.
- assert(simulate_overflow ||
- _work_queue->size() == _work_queue->max_elems(),
- "Else push should have succeeded");
- handle_stack_overflow(addr);
- }
- } // Else, some other thread got there first
- do_yield_check();
- }
-}
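// The push path above tries the bounded per-worker queue first, then the
// shared overflow stack, and reports failure of both so the caller can treat
// it as a (benign) overflow. A small sketch with standard containers; a mutex
// stands in for the overflow stack's internal synchronization, and the real
// work queues are lock-free. Names are illustrative only.
#include <cstddef>
#include <deque>
#include <mutex>
#include <vector>

static bool push_grey(int obj,
                      std::deque<int>& local, std::size_t local_capacity,
                      std::vector<int>& overflow, std::size_t overflow_capacity,
                      std::mutex& overflow_lock) {
  if (local.size() < local_capacity) {       // bounded per-worker queue first
    local.push_back(obj);
    return true;
  }
  std::lock_guard<std::mutex> guard(overflow_lock);
  if (overflow.size() < overflow_capacity) { // shared overflow stack as fallback
    overflow.push_back(obj);
    return true;
  }
  return false;                              // caller handles the (benign) overflow
}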
-
-void ParConcMarkingClosure::trim_queue(size_t max) {
- while (_work_queue->size() > max) {
- oop new_oop;
- if (_work_queue->pop_local(new_oop)) {
- assert(oopDesc::is_oop(new_oop), "Should be an oop");
- assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
- assert(_span.contains((HeapWord*)new_oop), "Not in span");
- new_oop->oop_iterate(this); // do_oop() above
- do_yield_check();
- }
- }
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
- // We need to do this under a mutex to prevent other
- // workers from interfering with the work done below.
- MutexLocker ml(_overflow_stack->par_lock(),
- Mutex::_no_safepoint_check_flag);
- // Remember the least grey address discarded
- HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
- _collector->lower_restart_addr(ra);
- _overflow_stack->reset(); // discard stack contents
- _overflow_stack->expand(); // expand the stack if possible
-}
-
-
-void CMSConcMarkingTask::do_work_steal(int i) {
- OopTaskQueue* work_q = work_queue(i);
- oop obj_to_scan;
- CMSBitMap* bm = &(_collector->_markBitMap);
- CMSMarkStack* ovflw = &(_collector->_markStack);
- ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
- while (true) {
- cl.trim_queue(0);
- assert(work_q->size() == 0, "Should have been emptied above");
- if (get_work_from_overflow_stack(ovflw, work_q)) {
- // Can't assert below because the work obtained from the
- // overflow stack may already have been stolen from us.
- // assert(work_q->size() > 0, "Work from overflow stack");
- continue;
- } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
- assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
- assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
- obj_to_scan->oop_iterate(&cl);
- } else if (terminator()->offer_termination(&_term_term)) {
- assert(work_q->size() == 0, "Impossible!");
- break;
- } else if (yielding() || should_yield()) {
- yield();
- }
- }
-}
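// The offer_termination()/should_exit_termination() handshake used above
// follows the usual idle-worker counting scheme. A much-simplified stand-in
// (not the HotSpot terminator): a worker that runs out of work registers as
// idle and spins; it terminates once every worker is idle, or withdraws its
// offer when the supplied predicate asks it to exit (e.g. because the task is
// yielding). Names are illustrative.
#include <atomic>
#include <functional>
#include <thread>

class SimpleTerminator {
  std::atomic<unsigned> _idle{0};
  const unsigned _n_workers;
 public:
  explicit SimpleTerminator(unsigned n_workers) : _n_workers(n_workers) {}

  bool offer_termination(const std::function<bool()>& should_exit) {
    unsigned idle = _idle.fetch_add(1) + 1;
    while (idle < _n_workers) {
      if (should_exit()) {
        _idle.fetch_sub(1);            // withdraw the offer and resume work
        return false;
      }
      std::this_thread::yield();
      idle = _idle.load();
    }
    return true;                        // all workers idle: terminate
  }
};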
-
-// This is run by the CMS (coordinator) thread.
-void CMSConcMarkingTask::coordinator_yield() {
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- // First give up the locks, then yield, then re-lock
- // We should probably use a constructor/destructor idiom to
- // do this unlock/lock or modify the MutexUnlocker class to
- // serve our purpose. XXX
- assert_lock_strong(_bit_map_lock);
- _bit_map_lock->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- _collector->stopTimer();
- _collector->incrementYields();
-
- // It is possible for whichever thread initiated the yield request
- // not to get a chance to wake up and take the bitmap lock between
- // this thread releasing it and reacquiring it. So, while the
- // should_yield() flag is on, let's sleep for a bit to give the
- // other thread a chance to wake up. The limit imposed on the number
-  // of iterations is defensive, to avoid any unforeseen circumstances
- // putting us into an infinite loop. Since it's always been this
- // (coordinator_yield()) method that was observed to cause the
- // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
- // which is by default non-zero. For the other seven methods that
-  // also perform the yield operation, we are using a different
- // parameter (CMSYieldSleepCount) which is by default zero. This way we
- // can enable the sleeping for those methods too, if necessary.
- // See 6442774.
- //
- // We really need to reconsider the synchronization between the GC
- // thread and the yield-requesting threads in the future and we
- // should really use wait/notify, which is the recommended
- // way of doing this type of interaction. Additionally, we should
-  // consolidate the eight methods that do the yield operation, which are
-  // almost identical, into one for better maintainability and
- // readability. See 6445193.
- //
- // Tony 2006.06.29
- for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- _bit_map_lock->lock_without_safepoint_check();
- _collector->startTimer();
-}
-
-bool CMSCollector::do_marking_mt() {
- assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
- uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
- conc_workers()->active_workers(),
- Threads::number_of_non_daemon_threads());
- num_workers = conc_workers()->update_active_workers(num_workers);
- log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
-
- CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
-
- CMSConcMarkingTask tsk(this,
- cms_space,
- conc_workers(),
- task_queues());
-
- // Since the actual number of workers we get may be different
- // from the number we requested above, do we need to do anything different
-  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
- // class?? XXX
- cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
-
- // Refs discovery is already non-atomic.
- assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
- assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
- conc_workers()->start_task(&tsk);
- while (tsk.yielded()) {
- tsk.coordinator_yield();
- conc_workers()->continue_task(&tsk);
- }
- // If the task was aborted, _restart_addr will be non-NULL
- assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
- while (_restart_addr != NULL) {
- // XXX For now we do not make use of ABORTED state and have not
- // yet implemented the right abort semantics (even in the original
- // single-threaded CMS case). That needs some more investigation
- // and is deferred for now; see CR# TBF. 07252005YSR. XXX
- assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
- // If _restart_addr is non-NULL, a marking stack overflow
- // occurred; we need to do a fresh marking iteration from the
- // indicated restart address.
- if (_foregroundGCIsActive) {
- // We may be running into repeated stack overflows, having
- // reached the limit of the stack size, while making very
- // slow forward progress. It may be best to bail out and
- // let the foreground collector do its job.
- // Clear _restart_addr, so that foreground GC
- // works from scratch. This avoids the headache of
- // a "rescan" which would otherwise be needed because
- // of the dirty mod union table & card table.
- _restart_addr = NULL;
- return false;
- }
- // Adjust the task to restart from _restart_addr
- tsk.reset(_restart_addr);
- cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
- _restart_addr);
- _restart_addr = NULL;
- // Get the workers going again
- conc_workers()->start_task(&tsk);
- while (tsk.yielded()) {
- tsk.coordinator_yield();
- conc_workers()->continue_task(&tsk);
- }
- }
- assert(tsk.completed(), "Inconsistency");
- assert(tsk.result() == true, "Inconsistency");
- return true;
-}
-
-bool CMSCollector::do_marking_st() {
- ResourceMark rm;
- HandleMark hm;
-
- // Temporarily make refs discovery single threaded (non-MT)
- ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
- MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
- &_markStack, CMSYield);
- // the last argument to iterate indicates whether the iteration
- // should be incremental with periodic yields.
- _markBitMap.iterate(&markFromRootsClosure);
- // If _restart_addr is non-NULL, a marking stack overflow
- // occurred; we need to do a fresh iteration from the
- // indicated restart address.
- while (_restart_addr != NULL) {
- if (_foregroundGCIsActive) {
- // We may be running into repeated stack overflows, having
- // reached the limit of the stack size, while making very
- // slow forward progress. It may be best to bail out and
- // let the foreground collector do its job.
- // Clear _restart_addr, so that foreground GC
- // works from scratch. This avoids the headache of
- // a "rescan" which would otherwise be needed because
- // of the dirty mod union table & card table.
- _restart_addr = NULL;
- return false; // indicating failure to complete marking
- }
- // Deal with stack overflow:
- // we restart marking from _restart_addr
- HeapWord* ra = _restart_addr;
- markFromRootsClosure.reset(ra);
- _restart_addr = NULL;
- _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
- }
- return true;
-}
-
-void CMSCollector::preclean() {
- check_correct_thread_executing();
- assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
- verify_work_stacks_empty();
- verify_overflow_empty();
- _abort_preclean = false;
- if (CMSPrecleaningEnabled) {
- if (!CMSEdenChunksRecordAlways) {
- _eden_chunk_index = 0;
- }
- size_t used = get_eden_used();
- size_t capacity = get_eden_capacity();
- // Don't start sampling unless we will get sufficiently
- // many samples.
- if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
- * CMSScheduleRemarkEdenPenetration)) {
- _start_sampling = true;
- } else {
- _start_sampling = false;
- }
- GCTraceCPUTime tcpu;
- CMSPhaseAccounting pa(this, "Concurrent Preclean");
- preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
- }
- CMSTokenSync x(true); // is cms thread
- if (CMSPrecleaningEnabled) {
- sample_eden();
- _collectorState = AbortablePreclean;
- } else {
- _collectorState = FinalMarking;
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
-}
-
-// Try and schedule the remark such that young gen
-// occupancy is CMSScheduleRemarkEdenPenetration %.
-void CMSCollector::abortable_preclean() {
- check_correct_thread_executing();
- assert(CMSPrecleaningEnabled, "Inconsistent control state");
- assert(_collectorState == AbortablePreclean, "Inconsistent control state");
-
- // If Eden's current occupancy is below this threshold,
- // immediately schedule the remark; else preclean
- // past the next scavenge in an effort to
- // schedule the pause as described above. By choosing
- // CMSScheduleRemarkEdenSizeThreshold >= max eden size
- // we will never do an actual abortable preclean cycle.
- if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
- GCTraceCPUTime tcpu;
- CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
- // We need more smarts in the abortable preclean
- // loop below to deal with cases where allocation
- // in young gen is very very slow, and our precleaning
- // is running a losing race against a horde of
- // mutators intent on flooding us with CMS updates
- // (dirty cards).
- // One, admittedly dumb, strategy is to give up
- // after a certain number of abortable precleaning loops
- // or after a certain maximum time. We want to make
- // this smarter in the next iteration.
- // XXX FIX ME!!! YSR
- size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
- while (!(should_abort_preclean() ||
- ConcurrentMarkSweepThread::cmst()->should_terminate())) {
- workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
- cumworkdone += workdone;
- loops++;
- // Voluntarily terminate abortable preclean phase if we have
- // been at it for too long.
- if ((CMSMaxAbortablePrecleanLoops != 0) &&
- loops >= CMSMaxAbortablePrecleanLoops) {
- log_debug(gc)(" CMS: abort preclean due to loops ");
- break;
- }
- if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
- log_debug(gc)(" CMS: abort preclean due to time ");
- break;
- }
- // If we are doing little work each iteration, we should
- // take a short break.
- if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
- // Sleep for some time, waiting for work to accumulate
- stopTimer();
- cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
- startTimer();
- waited++;
- }
- }
- log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
- loops, waited, cumworkdone);
- }
- CMSTokenSync x(true); // is cms thread
- if (_collectorState != Idling) {
- assert(_collectorState == AbortablePreclean,
- "Spontaneous state transition?");
- _collectorState = FinalMarking;
- } // Else, a foreground collection completed this CMS cycle.
- return;
-}
-
-// Respond to an Eden sampling opportunity
-void CMSCollector::sample_eden() {
- // Make sure a young gc cannot sneak in between our
- // reading and recording of a sample.
- assert(Thread::current()->is_ConcurrentGC_thread(),
- "Only the cms thread may collect Eden samples");
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Should collect samples while holding CMS token");
- if (!_start_sampling) {
- return;
- }
- // When CMSEdenChunksRecordAlways is true, the eden chunk array
- // is populated by the young generation.
- if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
- if (_eden_chunk_index < _eden_chunk_capacity) {
- _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
- assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
- "Unexpected state of Eden");
- // We'd like to check that what we just sampled is an oop-start address;
- // however, we cannot do that here since the object may not yet have been
- // initialized. So we'll instead do the check when we _use_ this sample
- // later.
- if (_eden_chunk_index == 0 ||
- (pointer_delta(_eden_chunk_array[_eden_chunk_index],
- _eden_chunk_array[_eden_chunk_index-1])
- >= CMSSamplingGrain)) {
- _eden_chunk_index++; // commit sample
- }
- }
- }
- if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
- size_t used = get_eden_used();
- size_t capacity = get_eden_capacity();
- assert(used <= capacity, "Unexpected state of Eden");
- if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
- _abort_preclean = true;
- }
- }
-}
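// The sampling-grain rule above only commits a new eden-top sample when it is
// far enough past the previously committed one, so the chunk array stays
// coarse. A minimal sketch of that rule with illustrative names, assuming the
// sampled addresses grow monotonically between young collections (as eden
// allocation does):
#include <cstddef>

static void record_sample(char** samples, std::size_t& index, std::size_t capacity,
                          char* top, std::ptrdiff_t grain_bytes) {
  if (index >= capacity) {
    return;                                         // sample array is full
  }
  samples[index] = top;                             // take the sample
  if (index == 0 || top - samples[index - 1] >= grain_bytes) {
    index++;                                        // commit it
  }                                                 // else too close: overwrite next time
}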
-
-size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
- assert(_collectorState == Precleaning ||
- _collectorState == AbortablePreclean, "incorrect state");
- ResourceMark rm;
- HandleMark hm;
-
- // Precleaning is currently not MT but the reference processor
- // may be set for MT. Disable it temporarily here.
- ReferenceProcessor* rp = ref_processor();
- ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
-
- // Do one pass of scrubbing the discovered reference lists
- // to remove any reference objects with strongly-reachable
- // referents.
- if (clean_refs) {
- CMSPrecleanRefsYieldClosure yield_cl(this);
- assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
- CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
- &_markStack, true /* preclean */);
- CMSDrainMarkingStackClosure complete_trace(this,
- _span, &_markBitMap, &_markStack,
- &keep_alive, true /* preclean */);
-
- // We don't want this step to interfere with a young
- // collection because we don't want to take CPU
- // or memory bandwidth away from the young GC threads
- // (which may be as many as there are CPUs).
- // Note that we don't need to protect ourselves from
- // interference with mutators because they can't
- // manipulate the discovered reference lists nor affect
- // the computed reachability of the referents, the
- // only properties manipulated by the precleaning
- // of these reference lists.
- stopTimer();
- CMSTokenSyncWithLocks x(true /* is cms thread */,
- bitMapLock());
- startTimer();
- sample_eden();
-
- // The following will yield to allow foreground
- // collection to proceed promptly. XXX YSR:
- // The code in this method may need further
- // tweaking for better performance and some restructuring
- // for cleaner interfaces.
- GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
- rp->preclean_discovered_references(
- rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
- gc_timer);
- }
-
- if (clean_survivor) { // preclean the active survivor space(s)
- PushAndMarkClosure pam_cl(this, _span, ref_processor(),
- &_markBitMap, &_modUnionTable,
- &_markStack, true /* precleaning phase */);
- stopTimer();
- CMSTokenSyncWithLocks ts(true /* is cms thread */,
- bitMapLock());
- startTimer();
- unsigned int before_count =
- CMSHeap::heap()->total_collections();
- SurvivorSpacePrecleanClosure
- sss_cl(this, _span, &_markBitMap, &_markStack,
- &pam_cl, before_count, CMSYield);
- _young_gen->from()->object_iterate_careful(&sss_cl);
- _young_gen->to()->object_iterate_careful(&sss_cl);
- }
- MarkRefsIntoAndScanClosure
- mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
- &_markStack, this, CMSYield,
- true /* precleaning phase */);
- // CAUTION: The following closure has persistent state that may need to
- // be reset upon a decrease in the sequence of addresses it
- // processes.
- ScanMarkedObjectsAgainCarefullyClosure
- smoac_cl(this, _span,
- &_markBitMap, &_markStack, &mrias_cl, CMSYield);
-
- // Preclean dirty cards in ModUnionTable and CardTable using
- // appropriate convergence criterion;
- // repeat CMSPrecleanIter times unless we find that
- // we are losing.
- assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
- assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
- "Bad convergence multiplier");
- assert(CMSPrecleanThreshold >= 100,
- "Unreasonably low CMSPrecleanThreshold");
-
- size_t numIter, cumNumCards, lastNumCards, curNumCards;
- for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
- numIter < CMSPrecleanIter;
- numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
- curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
- log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
- // Either there are very few dirty cards, so re-mark
- // pause will be small anyway, or our pre-cleaning isn't
- // that much faster than the rate at which cards are being
- // dirtied, so we might as well stop and re-mark since
- // precleaning won't improve our re-mark time by much.
- if (curNumCards <= CMSPrecleanThreshold ||
- (numIter > 0 &&
- (curNumCards * CMSPrecleanDenominator >
- lastNumCards * CMSPrecleanNumerator))) {
- numIter++;
- cumNumCards += curNumCards;
- break;
- }
- }
-
- preclean_cld(&mrias_cl, _cmsGen->freelistLock());
-
- curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
- cumNumCards += curNumCards;
- log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
- curNumCards, cumNumCards, numIter);
- return cumNumCards; // as a measure of useful work done
-}
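// The loop above stops precleaning either when the work per pass is already
// small or when it is no longer shrinking fast enough. A tiny sketch of that
// convergence test; the parameter names stand in for the CMSPreclean* flags
// and are illustrative only.
#include <cstddef>

static bool preclean_converged(std::size_t cur_cards, std::size_t last_cards,
                               std::size_t threshold, std::size_t numerator,
                               std::size_t denominator, bool first_pass) {
  if (cur_cards <= threshold) {
    return true;                        // remark pause will be short anyway
  }
  // Converged if this pass did not shrink the dirty-card count to below
  // numerator/denominator of the previous pass, i.e. precleaning is losing
  // the race against card dirtying.
  return !first_pass && cur_cards * denominator > last_cards * numerator;
}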
-
-// PRECLEANING NOTES:
-// Precleaning involves:
-// . reading the bits of the modUnionTable and clearing the set bits.
-// . For the cards corresponding to the set bits, we scan the
-// objects on those cards. This means we need the free_list_lock
-// so that we can safely iterate over the CMS space when scanning
-// for oops.
-// . When we scan the objects, we'll be both reading and setting
-// marks in the marking bit map, so we'll need the marking bit map.
-// . For protecting _collector_state transitions, we take the CGC_lock.
-// Note that any races in the reading of card table entries by the
-// CMS thread on the one hand and the clearing of those entries by the
-// VM thread or the setting of those entries by the mutator threads on the
-// other are quite benign. However, for efficiency it makes sense to keep
-// the VM thread from racing with the CMS thread while the latter is
-// transferring dirty card info to the modUnionTable. We therefore also use the
-// CGC_lock to protect the reading of the card table and the mod union
-// table by the CMS thread.
-// . We run concurrently with mutator updates, so scanning
-// needs to be done carefully -- we should not try to scan
-// potentially uninitialized objects.
-//
-// Locking strategy: While holding the CGC_lock, we scan over and
-// reset a maximal dirty range of the mod union / card tables, then lock
-// the free_list_lock and bitmap lock to do a full marking, then
-// release these locks; and repeat the cycle. This allows for a
-// certain amount of fairness in the sharing of these locks between
-// the CMS collector on the one hand, and the VM thread and the
-// mutators on the other.
-
-// NOTE: preclean_mod_union_table() and preclean_card_table()
-// further below are largely identical; if you need to modify
-// one of these methods, please check the other method too.
-
-size_t CMSCollector::preclean_mod_union_table(
- ConcurrentMarkSweepGeneration* old_gen,
- ScanMarkedObjectsAgainCarefullyClosure* cl) {
- verify_work_stacks_empty();
- verify_overflow_empty();
-
- // strategy: starting with the first card, accumulate contiguous
- // ranges of dirty cards; clear these cards, then scan the region
- // covered by these cards.
-
- // Since all of the MUT is committed ahead, we can just use
- // that, in case the generations expand while we are precleaning.
- // It might also be fine to just use the committed part of the
- // generation, but we might potentially miss cards when the
- // generation is rapidly expanding while we are in the midst
- // of precleaning.
- HeapWord* startAddr = old_gen->reserved().start();
- HeapWord* endAddr = old_gen->reserved().end();
-
- cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
-
- size_t numDirtyCards, cumNumDirtyCards;
- HeapWord *nextAddr, *lastAddr;
- for (cumNumDirtyCards = numDirtyCards = 0,
- nextAddr = lastAddr = startAddr;
- nextAddr < endAddr;
- nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
-
- ResourceMark rm;
- HandleMark hm;
-
- MemRegion dirtyRegion;
- {
- stopTimer();
- // Potential yield point
- CMSTokenSync ts(true);
- startTimer();
- sample_eden();
- // Get dirty region starting at nextOffset (inclusive),
- // simultaneously clearing it.
- dirtyRegion =
- _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
- assert(dirtyRegion.start() >= nextAddr,
- "returned region inconsistent?");
- }
- // Remember where the next search should begin.
- // The returned region (if non-empty) is a right open interval,
- // so lastOffset is obtained from the right end of that
- // interval.
- lastAddr = dirtyRegion.end();
- // Should do something more transparent and less hacky XXX
- numDirtyCards =
- _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
-
- // We'll scan the cards in the dirty region (with periodic
- // yields for foreground GC as needed).
- if (!dirtyRegion.is_empty()) {
- assert(numDirtyCards > 0, "consistency check");
- HeapWord* stop_point = NULL;
- stopTimer();
- // Potential yield point
- CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
- bitMapLock());
- startTimer();
- {
- verify_work_stacks_empty();
- verify_overflow_empty();
- sample_eden();
- stop_point =
- old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
- }
- if (stop_point != NULL) {
- // The careful iteration stopped early either because it found an
- // uninitialized object, or because we were in the midst of an
- // "abortable preclean", which should now be aborted. Redirty
- // the bits corresponding to the partially-scanned or unscanned
- // cards. We'll either restart at the next block boundary or
- // abort the preclean.
- assert((_collectorState == AbortablePreclean && should_abort_preclean()),
- "Should only be AbortablePreclean.");
- _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
- if (should_abort_preclean()) {
- break; // out of preclean loop
- } else {
- // Compute the next address at which preclean should pick up;
- // might need bitMapLock in order to read P-bits.
- lastAddr = next_card_start_after_block(stop_point);
- }
- }
- } else {
- assert(lastAddr == endAddr, "consistency check");
- assert(numDirtyCards == 0, "consistency check");
- break;
- }
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
- return cumNumDirtyCards;
-}
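// The scan loop above repeatedly asks for the next dirty range starting at a
// cursor, clears it, and then scans the objects it covers. A tiny sketch of
// the "get-and-clear next dirty run" step over a plain bit vector; purely
// illustrative, the real table works on card-sized MemRegions.
#include <cstddef>
#include <utility>
#include <vector>

static std::pair<std::size_t, std::size_t>
get_and_clear_dirty_run(std::vector<bool>& dirty, std::size_t from) {
  std::size_t n = dirty.size();
  std::size_t start = from;
  while (start < n && !dirty[start]) start++;        // skip clean cards
  std::size_t end = start;
  while (end < n && dirty[end]) {                    // accumulate the dirty run
    dirty[end] = false;                              // clear as we go
    end++;
  }
  return {start, end};                               // empty when start == end
}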
-
-// NOTE: preclean_mod_union_table() above and preclean_card_table()
-// below are largely identical; if you need to modify
-// one of these methods, please check the other method too.
-
-size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
- ScanMarkedObjectsAgainCarefullyClosure* cl) {
-  // strategy: it's similar to preclean_mod_union_table above, in that
- // we accumulate contiguous ranges of dirty cards, mark these cards
- // precleaned, then scan the region covered by these cards.
- HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high());
- HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
-
- cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding
-
- size_t numDirtyCards, cumNumDirtyCards;
- HeapWord *lastAddr, *nextAddr;
-
- for (cumNumDirtyCards = numDirtyCards = 0,
- nextAddr = lastAddr = startAddr;
- nextAddr < endAddr;
- nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
-
- ResourceMark rm;
- HandleMark hm;
-
- MemRegion dirtyRegion;
- {
- // See comments in "Precleaning notes" above on why we
- // do this locking. XXX Could the locking overheads be
- // too high when dirty cards are sparse? [I don't think so.]
- stopTimer();
- CMSTokenSync x(true); // is cms thread
- startTimer();
- sample_eden();
- // Get and clear dirty region from card table
- dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
- true,
- CardTable::precleaned_card_val());
-
- assert(dirtyRegion.start() >= nextAddr,
- "returned region inconsistent?");
- }
- lastAddr = dirtyRegion.end();
- numDirtyCards =
- dirtyRegion.word_size()/CardTable::card_size_in_words;
-
- if (!dirtyRegion.is_empty()) {
- stopTimer();
- CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
- startTimer();
- sample_eden();
- verify_work_stacks_empty();
- verify_overflow_empty();
- HeapWord* stop_point =
- old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
- if (stop_point != NULL) {
- assert((_collectorState == AbortablePreclean && should_abort_preclean()),
- "Should only be AbortablePreclean.");
- _ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
- if (should_abort_preclean()) {
- break; // out of preclean loop
- } else {
- // Compute the next address at which preclean should pick up.
- lastAddr = next_card_start_after_block(stop_point);
- }
- }
- } else {
- break;
- }
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
- return cumNumDirtyCards;
-}
-
-class PrecleanCLDClosure : public CLDClosure {
- MetadataVisitingOopsInGenClosure* _cm_closure;
- public:
- PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
- void do_cld(ClassLoaderData* cld) {
- if (cld->has_accumulated_modified_oops()) {
- cld->clear_accumulated_modified_oops();
-
- _cm_closure->do_cld(cld);
- }
- }
-};
-
-// The freelist lock is needed to prevent asserts, is it really needed?
-void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
- // Needed to walk CLDG
- MutexLocker ml(ClassLoaderDataGraph_lock);
-
- cl->set_freelistLock(freelistLock);
-
- CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
-
- // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
- // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
- PrecleanCLDClosure preclean_closure(cl);
- ClassLoaderDataGraph::cld_do(&preclean_closure);
-
- verify_work_stacks_empty();
- verify_overflow_empty();
-}
-
-void CMSCollector::checkpointRootsFinal() {
- assert(_collectorState == FinalMarking, "incorrect state transition?");
- check_correct_thread_executing();
- // world is stopped at this checkpoint
- assert(SafepointSynchronize::is_at_safepoint(),
- "world should be stopped");
- TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
-
- verify_work_stacks_empty();
- verify_overflow_empty();
-
- log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
- _young_gen->used() / K, _young_gen->capacity() / K);
- {
- if (CMSScavengeBeforeRemark) {
- CMSHeap* heap = CMSHeap::heap();
- // Temporarily set flag to false, GCH->do_collection will
- // expect it to be false and set to true
- FlagSetting fl(heap->_is_gc_active, false);
-
- heap->do_collection(true, // full (i.e. force, see below)
- false, // !clear_all_soft_refs
- 0, // size
- false, // is_tlab
- GenCollectedHeap::YoungGen // type
- );
- }
- FreelistLocker x(this);
- MutexLocker y(bitMapLock(),
- Mutex::_no_safepoint_check_flag);
- checkpointRootsFinalWork();
- _cmsGen->cmsSpace()->recalculate_used_stable();
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
-}
-
-void CMSCollector::checkpointRootsFinalWork() {
- GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
-
- assert(haveFreelistLocks(), "must have free list locks");
- assert_lock_strong(bitMapLock());
-
- ResourceMark rm;
- HandleMark hm;
-
- CMSHeap* heap = CMSHeap::heap();
-
- assert(haveFreelistLocks(), "must have free list locks");
- assert_lock_strong(bitMapLock());
-
- // We might assume that we need not fill TLAB's when
- // CMSScavengeBeforeRemark is set, because we may have just done
- // a scavenge which would have filled all TLAB's -- and besides
- // Eden would be empty. This however may not always be the case --
- // for instance although we asked for a scavenge, it may not have
- // happened because of a JNI critical section. We probably need
- // a policy for deciding whether we can in that case wait until
- // the critical section releases and then do the remark following
- // the scavenge, and skip it here. In the absence of that policy,
- // or of an indication of whether the scavenge did indeed occur,
- // we cannot rely on TLAB's having been filled and must do
- // so here just in case a scavenge did not happen.
- heap->ensure_parsability(false); // fill TLAB's, but no need to retire them
- // Update the saved marks which may affect the root scans.
- heap->save_marks();
-
- print_eden_and_survivor_chunk_arrays();
-
- {
-#if COMPILER2_OR_JVMCI
- DerivedPointerTableDeactivate dpt_deact;
-#endif
-
- // Note on the role of the mod union table:
- // Since the marker in "markFromRoots" marks concurrently with
- // mutators, it is possible for some reachable objects not to have been
- // scanned. For instance, an only reference to an object A was
- // placed in object B after the marker scanned B. Unless B is rescanned,
- // A would be collected. Such updates to references in marked objects
- // are detected via the mod union table which is the set of all cards
- // dirtied since the first checkpoint in this GC cycle and prior to
- // the most recent young generation GC, minus those cleaned up by the
- // concurrent precleaning.
- if (CMSParallelRemarkEnabled) {
- GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
- do_remark_parallel();
- } else {
- GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
- do_remark_non_parallel();
- }
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
-
- {
- GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
- refProcessingWork();
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
-
- if (should_unload_classes()) {
- heap->prune_scavengable_nmethods();
- }
-
- // If we encountered any (marking stack / work queue) overflow
- // events during the current CMS cycle, take appropriate
- // remedial measures, where possible, so as to try and avoid
- // recurrence of that condition.
- assert(_markStack.isEmpty(), "No grey objects");
- size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
- _ser_kac_ovflw + _ser_kac_preclean_ovflw;
- if (ser_ovflw > 0) {
- log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
- _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
- _markStack.expand();
- _ser_pmc_remark_ovflw = 0;
- _ser_pmc_preclean_ovflw = 0;
- _ser_kac_preclean_ovflw = 0;
- _ser_kac_ovflw = 0;
- }
- if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
- log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
- _par_pmc_remark_ovflw, _par_kac_ovflw);
- _par_pmc_remark_ovflw = 0;
- _par_kac_ovflw = 0;
- }
- if (_markStack._hit_limit > 0) {
- log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
- _markStack._hit_limit);
- }
- if (_markStack._failed_double > 0) {
- log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
- _markStack._failed_double, _markStack.capacity());
- }
- _markStack._hit_limit = 0;
- _markStack._failed_double = 0;
-
- if ((VerifyAfterGC || VerifyDuringGC) &&
- CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
- verify_after_remark();
- }
-
- _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
-
- // Change under the freelistLocks.
- _collectorState = Sweeping;
- // Call isAllClear() under bitMapLock
- assert(_modUnionTable.isAllClear(),
- "Should be clear by end of the final marking");
- assert(_ct->cld_rem_set()->mod_union_is_clear(),
- "Should be clear by end of the final marking");
-}
-
-void CMSParInitialMarkTask::work(uint worker_id) {
- elapsedTimer _timer;
- ResourceMark rm;
- HandleMark hm;
-
- // ---------- scan from roots --------------
- _timer.start();
- CMSHeap* heap = CMSHeap::heap();
- ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
-
- // ---------- young gen roots --------------
- {
- work_on_young_gen_roots(&par_mri_cl);
- _timer.stop();
- log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
- }
-
- // ---------- remaining roots --------------
- _timer.reset();
- _timer.start();
-
- CLDToOopClosure cld_closure(&par_mri_cl, ClassLoaderData::_claim_strong);
-
- heap->cms_process_roots(_strong_roots_scope,
- false, // yg was scanned above
- GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
- _collector->should_unload_classes(),
- &par_mri_cl,
- &cld_closure);
-
- assert(_collector->should_unload_classes()
- || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
- "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
- _timer.stop();
- log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-}
-
-// Parallel remark task
-class CMSParRemarkTask: public CMSParMarkTask {
- CompactibleFreeListSpace* _cms_space;
-
- // The per-thread work queues, available here for stealing.
- OopTaskQueueSet* _task_queues;
- TaskTerminator _term;
- StrongRootsScope* _strong_roots_scope;
-
- public:
- // A value of 0 passed to n_workers will cause the number of
- // workers to be taken from the active workers in the work gang.
- CMSParRemarkTask(CMSCollector* collector,
- CompactibleFreeListSpace* cms_space,
- uint n_workers, WorkGang* workers,
- OopTaskQueueSet* task_queues,
- StrongRootsScope* strong_roots_scope):
- CMSParMarkTask("Rescan roots and grey objects in parallel",
- collector, n_workers),
- _cms_space(cms_space),
- _task_queues(task_queues),
- _term(n_workers, task_queues),
- _strong_roots_scope(strong_roots_scope) { }
-
- OopTaskQueueSet* task_queues() { return _task_queues; }
-
- OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
- ParallelTaskTerminator* terminator() { return _term.terminator(); }
- uint n_workers() { return _n_workers; }
-
- void work(uint worker_id);
-
- private:
- // ... of dirty cards in old space
- void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
- ParMarkRefsIntoAndScanClosure* cl);
-
- // ... work stealing for the above
- void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl);
-};
-
-class RemarkCLDClosure : public CLDClosure {
- CLDToOopClosure _cm_closure;
- public:
- RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure, ClassLoaderData::_claim_strong) {}
- void do_cld(ClassLoaderData* cld) {
- // Check if we have modified any oops in the CLD during the concurrent marking.
- if (cld->has_accumulated_modified_oops()) {
- cld->clear_accumulated_modified_oops();
-
-      // We could have transferred the current modified marks to the accumulated marks,
- // like we do with the Card Table to Mod Union Table. But it's not really necessary.
- } else if (cld->has_modified_oops()) {
- // Don't clear anything, this info is needed by the next young collection.
- } else {
- // No modified oops in the ClassLoaderData.
- return;
- }
-
- // The klass has modified fields, need to scan the klass.
- _cm_closure.do_cld(cld);
- }
-};
-
-void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
- ParNewGeneration* young_gen = _collector->_young_gen;
- ContiguousSpace* eden_space = young_gen->eden();
- ContiguousSpace* from_space = young_gen->from();
- ContiguousSpace* to_space = young_gen->to();
-
- HeapWord** eca = _collector->_eden_chunk_array;
- size_t ect = _collector->_eden_chunk_index;
- HeapWord** sca = _collector->_survivor_chunk_array;
- size_t sct = _collector->_survivor_chunk_index;
-
- assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
- assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
-
- do_young_space_rescan(cl, to_space, NULL, 0);
- do_young_space_rescan(cl, from_space, sca, sct);
- do_young_space_rescan(cl, eden_space, eca, ect);
-}
-
-// work_queue(i) is passed to the closure
-// ParMarkRefsIntoAndScanClosure. The "i" parameter
-// also is passed to do_dirty_card_rescan_tasks() and to
-// do_work_steal() to select the i-th task_queue.
-
-void CMSParRemarkTask::work(uint worker_id) {
- elapsedTimer _timer;
- ResourceMark rm;
- HandleMark hm;
-
- // ---------- rescan from roots --------------
- _timer.start();
- CMSHeap* heap = CMSHeap::heap();
- ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
- _collector->_span, _collector->ref_processor(),
- &(_collector->_markBitMap),
- work_queue(worker_id));
-
- // Rescan young gen roots first since these are likely
- // coarsely partitioned and may, on that account, constitute
- // the critical path; thus, it's best to start off that
- // work first.
- // ---------- young gen roots --------------
- {
- work_on_young_gen_roots(&par_mrias_cl);
- _timer.stop();
- log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
- }
-
- // ---------- remaining roots --------------
- _timer.reset();
- _timer.start();
- heap->cms_process_roots(_strong_roots_scope,
- false, // yg was scanned above
- GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
- _collector->should_unload_classes(),
- &par_mrias_cl,
- NULL); // The dirty klasses will be handled below
-
- assert(_collector->should_unload_classes()
- || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
- "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
- _timer.stop();
- log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-
- // ---------- unhandled CLD scanning ----------
- if (worker_id == 0) { // Single threaded at the moment.
- _timer.reset();
- _timer.start();
-
- // Scan all new class loader data objects and new dependencies that were
- // introduced during concurrent marking.
- ResourceMark rm;
- GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
- for (int i = 0; i < array->length(); i++) {
- Devirtualizer::do_cld(&par_mrias_cl, array->at(i));
- }
-
- // We don't need to keep track of new CLDs anymore.
- ClassLoaderDataGraph::remember_new_clds(false);
-
- _timer.stop();
- log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
- }
-
- // We might have added oops to ClassLoaderData::_handles during the
- // concurrent marking phase. These oops do not always point to newly allocated objects
- // that are guaranteed to be kept alive. Hence,
- // we do have to revisit the _handles block during the remark phase.
-
- // ---------- dirty CLD scanning ----------
- if (worker_id == 0) { // Single threaded at the moment.
- _timer.reset();
- _timer.start();
-
-    // Scan all classes that were dirtied during the concurrent marking phase.
- RemarkCLDClosure remark_closure(&par_mrias_cl);
- ClassLoaderDataGraph::cld_do(&remark_closure);
-
- _timer.stop();
- log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
- }
-
-
- // ---------- rescan dirty cards ------------
- _timer.reset();
- _timer.start();
-
- // Do the rescan tasks for each of the two spaces
- // (cms_space) in turn.
- // "worker_id" is passed to select the task_queue for "worker_id"
- do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
- _timer.stop();
- log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-
- // ---------- steal work from other threads ...
- // ---------- ... and drain overflow list.
- _timer.reset();
- _timer.start();
- do_work_steal(worker_id, &par_mrias_cl);
- _timer.stop();
- log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-}
-
-void
-CMSParMarkTask::do_young_space_rescan(
- OopsInGenClosure* cl, ContiguousSpace* space,
- HeapWord** chunk_array, size_t chunk_top) {
- // Until all tasks completed:
- // . claim an unclaimed task
- // . compute region boundaries corresponding to task claimed
- // using chunk_array
- // . par_oop_iterate(cl) over that region
-
- ResourceMark rm;
- HandleMark hm;
-
- SequentialSubTasksDone* pst = space->par_seq_tasks();
-
- uint nth_task = 0;
- uint n_tasks = pst->n_tasks();
-
- if (n_tasks > 0) {
- assert(pst->valid(), "Uninitialized use?");
- HeapWord *start, *end;
- while (pst->try_claim_task(/* reference */ nth_task)) {
- // We claimed task # nth_task; compute its boundaries.
- if (chunk_top == 0) { // no samples were taken
- assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
- start = space->bottom();
- end = space->top();
- } else if (nth_task == 0) {
- start = space->bottom();
- end = chunk_array[nth_task];
- } else if (nth_task < (uint)chunk_top) {
- assert(nth_task >= 1, "Control point invariant");
- start = chunk_array[nth_task - 1];
- end = chunk_array[nth_task];
- } else {
- assert(nth_task == (uint)chunk_top, "Control point invariant");
- start = chunk_array[chunk_top - 1];
- end = space->top();
- }
- MemRegion mr(start, end);
- // Verify that mr is in space
- assert(mr.is_empty() || space->used_region().contains(mr),
- "Should be in space");
- // Verify that "start" is an object boundary
- assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())),
- "Should be an oop");
- space->par_oop_iterate(mr, cl);
- }
- pst->all_tasks_completed();
- }
-}
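// A sketch of how a claimed task index maps to a [start, end) range using the
// sampled chunk boundaries, mirroring the three cases above (no samples,
// first task, middle task, last task). Names and the use of raw char* are
// illustrative only.
#include <cassert>
#include <cstddef>
#include <utility>

static std::pair<char*, char*> task_bounds(std::size_t nth_task,
                                           char* bottom, char* top,
                                           char* const* chunks, std::size_t chunk_top) {
  if (chunk_top == 0) {                 // no samples were taken: one task, whole space
    assert(nth_task == 0);
    return {bottom, top};
  }
  char* start = (nth_task == 0) ? bottom : chunks[nth_task - 1];
  char* end   = (nth_task < chunk_top) ? chunks[nth_task] : top;
  return {start, end};
}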
-
-void
-CMSParRemarkTask::do_dirty_card_rescan_tasks(
- CompactibleFreeListSpace* sp, int i,
- ParMarkRefsIntoAndScanClosure* cl) {
- // Until all tasks completed:
- // . claim an unclaimed task
- // . compute region boundaries corresponding to task claimed
- // . transfer dirty bits ct->mut for that region
- // . apply rescanclosure to dirty mut bits for that region
-
- ResourceMark rm;
- HandleMark hm;
-
- OopTaskQueue* work_q = work_queue(i);
- ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
- // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
- // CAUTION: This closure has state that persists across calls to
- // the work method dirty_range_iterate_clear() in that it has
- // embedded in it a (subtype of) UpwardsObjectClosure. The
- // use of that state in the embedded UpwardsObjectClosure instance
- // assumes that the cards are always iterated (even if in parallel
-  // by several threads) in monotonically increasing order per
- // thread. This is true of the implementation below which picks
- // card ranges (chunks) in monotonically increasing order globally
- // and, a-fortiori, in monotonically increasing order per thread
- // (the latter order being a subsequence of the former).
- // If the work code below is ever reorganized into a more chaotic
- // work-partitioning form than the current "sequential tasks"
- // paradigm, the use of that persistent state will have to be
- // revisited and modified appropriately. See also related
- // bug 4756801 work on which should examine this code to make
- // sure that the changes there do not run counter to the
- // assumptions made here and necessary for correctness and
- // efficiency. Note also that this code might yield inefficient
- // behavior in the case of very large objects that span one or
- // more work chunks. Such objects would potentially be scanned
- // several times redundantly. Work on 4756801 should try and
- // address that performance anomaly if at all possible. XXX
- MemRegion full_span = _collector->_span;
- CMSBitMap* bm = &(_collector->_markBitMap); // shared
- MarkFromDirtyCardsClosure
- greyRescanClosure(_collector, full_span, // entire span of interest
- sp, bm, work_q, cl);
-
- SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
- assert(pst->valid(), "Uninitialized use?");
- uint nth_task = 0;
- const int alignment = CardTable::card_size * BitsPerWord;
- MemRegion span = sp->used_region();
- HeapWord* start_addr = span.start();
- HeapWord* end_addr = align_up(span.end(), alignment);
- const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
- assert(is_aligned(start_addr, alignment), "Check alignment");
- assert(is_aligned(chunk_size, alignment), "Check alignment");
-
- while (pst->try_claim_task(/* reference */ nth_task)) {
- // Having claimed the nth_task, compute corresponding mem-region,
- // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
- // The alignment restriction ensures that we do not need any
- // synchronization with other gang-workers while setting or
-    // clearing bits in this chunk of the MUT.
- MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
- start_addr + (nth_task+1)*chunk_size);
-    // The last chunk's end might be way beyond the end of the
- // used region. In that case pull back appropriately.
- if (this_span.end() > end_addr) {
- this_span.set_end(end_addr);
- assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
- }
- // Iterate over the dirty cards covering this chunk, marking them
- // precleaned, and setting the corresponding bits in the mod union
- // table. Since we have been careful to partition at Card and MUT-word
- // boundaries no synchronization is needed between parallel threads.
- _collector->_ct->dirty_card_iterate(this_span,
- &modUnionClosure);
-
- // Having transferred these marks into the modUnionTable,
- // rescan the marked objects on the dirty cards in the modUnionTable.
- // Even if this is at a synchronous collection, the initial marking
- // may have been done during an asynchronous collection so there
- // may be dirty bits in the mod-union table.
- _collector->_modUnionTable.dirty_range_iterate_clear(
- this_span, &greyRescanClosure);
- _collector->_modUnionTable.verifyNoOneBitsInRange(
- this_span.start(),
- this_span.end());
- }
-  pst->all_tasks_completed(); // declare that I am done
-}
-
-// . see if we can share work_queues with ParNew? XXX
-void
-CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) {
- OopTaskQueue* work_q = work_queue(i);
- NOT_PRODUCT(int num_steals = 0;)
- oop obj_to_scan;
- CMSBitMap* bm = &(_collector->_markBitMap);
-
- while (true) {
- // Completely finish any left over work from (an) earlier round(s)
- cl->trim_queue(0);
- size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
- // Now check if there's any work in the overflow list
- // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
- // only affects the number of attempts made to get work from the
- // overflow list and does not affect the number of workers. Just
- // pass ParallelGCThreads so this behavior is unchanged.
- if (_collector->par_take_from_overflow_list(num_from_overflow_list,
- work_q,
- ParallelGCThreads)) {
- // found something in global overflow list;
- // not yet ready to go stealing work from others.
- // We'd like to assert(work_q->size() != 0, ...)
- // because we just took work from the overflow list,
- // but of course we can't since all of that could have
- // been already stolen from us.
- // "He giveth and He taketh away."
- continue;
- }
- // Verify that we have no work before we resort to stealing
- assert(work_q->size() == 0, "Have work, shouldn't steal");
- // Try to steal from other queues that have work
- if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
- NOT_PRODUCT(num_steals++;)
- assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
- assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
- // Do scanning work
- obj_to_scan->oop_iterate(cl);
- // Loop around, finish this work, and try to steal some more
- } else if (terminator()->offer_termination()) {
- break; // nirvana from the infinite cycle
- }
- }
- log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
- assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
- "Else our work is not yet done");
-}
-
-// Record object boundaries in _eden_chunk_array by sampling the eden
-// top in the slow-path eden object allocation code path and record
-// the boundaries, if CMSEdenChunksRecordAlways is true. If
-// CMSEdenChunksRecordAlways is false, we use the other asynchronous
-// sampling in sample_eden() that activates during part of the
-// preclean phase.
-void CMSCollector::sample_eden_chunk() {
- if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
- if (_eden_chunk_lock->try_lock()) {
- // Record a sample. This is the critical section. The contents
- // of the _eden_chunk_array have to be non-decreasing in the
- // address order.
- _eden_chunk_array[_eden_chunk_index] = *_top_addr;
- assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
- "Unexpected state of Eden");
- if (_eden_chunk_index == 0 ||
- ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
- (pointer_delta(_eden_chunk_array[_eden_chunk_index],
- _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
- _eden_chunk_index++; // commit sample
- }
- _eden_chunk_lock->unlock();
- }
- }
-}
-
-// Return a thread-local PLAB recording array, as appropriate.
-void* CMSCollector::get_data_recorder(int thr_num) {
- if (_survivor_plab_array != NULL &&
- (CMSPLABRecordAlways ||
- (_collectorState > Marking && _collectorState < FinalMarking))) {
- assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
- ChunkArray* ca = &_survivor_plab_array[thr_num];
- ca->reset(); // clear it so that fresh data is recorded
- return (void*) ca;
- } else {
- return NULL;
- }
-}
-
-// Reset all the thread-local PLAB recording arrays
-void CMSCollector::reset_survivor_plab_arrays() {
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _survivor_plab_array[i].reset();
- }
-}
-
-// Merge the per-thread plab arrays into the global survivor chunk
-// array which will provide the partitioning of the survivor space
-// for CMS initial scan and rescan.
-void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
- int no_of_gc_threads) {
- assert(_survivor_plab_array != NULL, "Error");
- assert(_survivor_chunk_array != NULL, "Error");
- assert(_collectorState == FinalMarking ||
- (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
- for (int j = 0; j < no_of_gc_threads; j++) {
- _cursor[j] = 0;
- }
- HeapWord* top = surv->top();
- size_t i;
- for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
- HeapWord* min_val = top; // Higher than any PLAB address
- uint min_tid = 0; // position of min_val this round
- for (int j = 0; j < no_of_gc_threads; j++) {
- ChunkArray* cur_sca = &_survivor_plab_array[j];
- if (_cursor[j] == cur_sca->end()) {
- continue;
- }
- assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
- HeapWord* cur_val = cur_sca->nth(_cursor[j]);
- assert(surv->used_region().contains(cur_val), "Out of bounds value");
- if (cur_val < min_val) {
- min_tid = j;
- min_val = cur_val;
- } else {
- assert(cur_val < top, "All recorded addresses should be less");
- }
- }
- // At this point min_val and min_tid are respectively
- // the least address in _survivor_plab_array[j]->nth(_cursor[j])
- // and the thread (j) that witnesses that address.
- // We record this address in the _survivor_chunk_array[i]
- // and increment _cursor[min_tid] prior to the next round i.
- if (min_val == top) {
- break;
- }
- _survivor_chunk_array[i] = min_val;
- _cursor[min_tid]++;
- }
- // We are all done; record the size of the _survivor_chunk_array
- _survivor_chunk_index = i; // exclusive: [0, i)
-  log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
- // Verify that we used up all the recorded entries
- #ifdef ASSERT
- size_t total = 0;
- for (int j = 0; j < no_of_gc_threads; j++) {
- assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
- total += _cursor[j];
- }
- assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
- // Check that the merged array is in sorted order
- if (total > 0) {
- for (size_t i = 0; i < total - 1; i++) {
- log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
- i, p2i(_survivor_chunk_array[i]));
- assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
- "Not sorted");
- }
- }
- #endif // ASSERT
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel initial scan and rescan of young gen.
-// See ParRescanTask where this is currently used.
-void
-CMSCollector::
-initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
- assert(n_threads > 0, "Unexpected n_threads argument");
-
- // Eden space
- if (!_young_gen->eden()->is_empty()) {
- SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
- assert(!pst->valid(), "Clobbering existing data?");
- // Each valid entry in [0, _eden_chunk_index) represents a task.
- size_t n_tasks = _eden_chunk_index + 1;
- assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks((int)n_tasks);
- }
-
- // Merge the survivor plab arrays into _survivor_chunk_array
- if (_survivor_plab_array != NULL) {
- merge_survivor_plab_arrays(_young_gen->from(), n_threads);
- } else {
- assert(_survivor_chunk_index == 0, "Error");
- }
-
- // To space
- {
- SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
- assert(!pst->valid(), "Clobbering existing data?");
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks(1);
- assert(pst->valid(), "Error");
- }
-
- // From space
- {
- SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
- assert(!pst->valid(), "Clobbering existing data?");
- size_t n_tasks = _survivor_chunk_index + 1;
- assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks((int)n_tasks);
- assert(pst->valid(), "Error");
- }
-}
-
-// Parallel version of remark
-void CMSCollector::do_remark_parallel() {
- CMSHeap* heap = CMSHeap::heap();
- WorkGang* workers = heap->workers();
- assert(workers != NULL, "Need parallel worker threads.");
- // Choose to use the number of GC workers most recently set
- // into "active_workers".
- uint n_workers = workers->active_workers();
-
- CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
-
- StrongRootsScope srs(n_workers);
-
- CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
-
- // We won't be iterating over the cards in the card table updating
-  // the younger_gen cards, so we shouldn't call the following; otherwise
-  // the verification code, as well as the subsequent younger_refs_iterate
-  // code, would get confused. XXX
- // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
-
- // The young gen rescan work will not be done as part of
- // process_roots (which currently doesn't know how to
- // parallelize such a scan), but rather will be broken up into
- // a set of parallel tasks (via the sampling that the [abortable]
- // preclean phase did of eden, plus the [two] tasks of
-  // scanning the [two] survivor spaces). Further fine-grain
- // parallelization of the scanning of the survivor spaces
- // themselves, and of precleaning of the young gen itself
- // is deferred to the future.
- initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
-
- // The dirty card rescan work is broken up into a "sequence"
- // of parallel tasks (per constituent space) that are dynamically
- // claimed by the parallel threads.
- cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
-
- // It turns out that even when we're using 1 thread, doing the work in a
- // separate thread causes wide variance in run times. We can't help this
- // in the multi-threaded case, but we special-case n=1 here to get
- // repeatable measurements of the 1-thread overhead of the parallel code.
- if (n_workers > 1) {
- // Make refs discovery MT-safe, if it isn't already: it may not
- // necessarily be so, since it's possible that we are doing
- // ST marking.
- ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
- workers->run_task(&tsk);
- } else {
- ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
- tsk.work(0);
- }
-
- // restore, single-threaded for now, any preserved marks
- // as a result of work_q overflow
- restore_preserved_marks_if_any();
-}
-
-// Non-parallel version of remark
-void CMSCollector::do_remark_non_parallel() {
- ResourceMark rm;
- HandleMark hm;
- CMSHeap* heap = CMSHeap::heap();
- ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
-
- MarkRefsIntoAndScanClosure
- mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
- &_markStack, this,
- false /* should_yield */, false /* not precleaning */);
- MarkFromDirtyCardsClosure
- markFromDirtyCardsClosure(this, _span,
- NULL, // space is set further below
- &_markBitMap, &_markStack, &mrias_cl);
- {
- GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
- // Iterate over the dirty cards, setting the corresponding bits in the
- // mod union table.
- {
- ModUnionClosure modUnionClosure(&_modUnionTable);
- _ct->dirty_card_iterate(_cmsGen->used_region(),
- &modUnionClosure);
- }
- // Having transferred these marks into the modUnionTable, we just need
- // to rescan the marked objects on the dirty cards in the modUnionTable.
- // The initial marking may have been done during an asynchronous
- // collection so there may be dirty bits in the mod-union table.
- const int alignment = CardTable::card_size * BitsPerWord;
- {
- // ... First handle dirty cards in CMS gen
- markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
- MemRegion ur = _cmsGen->used_region();
- HeapWord* lb = ur.start();
- HeapWord* ub = align_up(ur.end(), alignment);
- MemRegion cms_span(lb, ub);
- _modUnionTable.dirty_range_iterate_clear(cms_span,
- &markFromDirtyCardsClosure);
- verify_work_stacks_empty();
- log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
- }
- }
- if (VerifyDuringGC &&
- CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
- HandleMark hm; // Discard invalid handles created during verification
- Universe::verify();
- }
- {
- GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
-
- verify_work_stacks_empty();
-
- heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
- StrongRootsScope srs(1);
-
- heap->cms_process_roots(&srs,
- true, // young gen as roots
- GenCollectedHeap::ScanningOption(roots_scanning_options()),
- should_unload_classes(),
- &mrias_cl,
- NULL); // The dirty klasses will be handled below
-
- assert(should_unload_classes()
- || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
- "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
- }
-
- {
- GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
-
- verify_work_stacks_empty();
-
- // Scan all class loader data objects that might have been introduced
- // during concurrent marking.
- ResourceMark rm;
- GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
- for (int i = 0; i < array->length(); i++) {
- Devirtualizer::do_cld(&mrias_cl, array->at(i));
- }
-
- // We don't need to keep track of new CLDs anymore.
- ClassLoaderDataGraph::remember_new_clds(false);
-
- verify_work_stacks_empty();
- }
-
- // We might have added oops to ClassLoaderData::_handles during the
- // concurrent marking phase. These oops do not point to newly allocated objects
- // that are guaranteed to be kept alive. Hence,
- // we do have to revisit the _handles block during the remark phase.
- {
- GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
-
- verify_work_stacks_empty();
-
- RemarkCLDClosure remark_closure(&mrias_cl);
- ClassLoaderDataGraph::cld_do(&remark_closure);
-
- verify_work_stacks_empty();
- }
-
- verify_work_stacks_empty();
- // Restore evacuated mark words, if any, used for overflow list links
- restore_preserved_marks_if_any();
-
- verify_overflow_empty();
-}
-
-////////////////////////////////////////////////////////
-// Parallel Reference Processing Task Proxy Class
-////////////////////////////////////////////////////////
-class AbstractGangTaskWOopQueues : public AbstractGangTask {
- OopTaskQueueSet* _queues;
- TaskTerminator _terminator;
- public:
- AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
- AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
- ParallelTaskTerminator* terminator() { return _terminator.terminator(); }
- OopTaskQueueSet* queues() { return _queues; }
-};
-
-class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
- typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
- CMSCollector* _collector;
- CMSBitMap* _mark_bit_map;
- const MemRegion _span;
- ProcessTask& _task;
-
-public:
- CMSRefProcTaskProxy(ProcessTask& task,
- CMSCollector* collector,
- const MemRegion& span,
- CMSBitMap* mark_bit_map,
- AbstractWorkGang* workers,
- OopTaskQueueSet* task_queues):
- AbstractGangTaskWOopQueues("Process referents by policy in parallel",
- task_queues,
- workers->active_workers()),
- _collector(collector),
- _mark_bit_map(mark_bit_map),
- _span(span),
- _task(task)
- {
- assert(_collector->_span.equals(_span) && !_span.is_empty(),
- "Inconsistency in _span");
- }
-
- OopTaskQueueSet* task_queues() { return queues(); }
-
- OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
- void do_work_steal(int i,
- CMSParDrainMarkingStackClosure* drain,
- CMSParKeepAliveClosure* keep_alive);
-
- virtual void work(uint worker_id);
-};
-
-void CMSRefProcTaskProxy::work(uint worker_id) {
- ResourceMark rm;
- HandleMark hm;
- assert(_collector->_span.equals(_span), "Inconsistency in _span");
- CMSParKeepAliveClosure par_keep_alive(_collector, _span,
- _mark_bit_map,
- work_queue(worker_id));
- CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
- _mark_bit_map,
- work_queue(worker_id));
- CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
- _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
- if (_task.marks_oops_alive()) {
- do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
- }
- assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
- assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
-}
-
-CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
- MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
- _span(span),
- _work_queue(work_queue),
- _bit_map(bit_map),
- _mark_and_push(collector, span, bit_map, work_queue),
- _low_water_mark(MIN2((work_queue->max_elems()/4),
- ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
-{ }
-
-// . see if we can share work_queues with ParNew? XXX
-void CMSRefProcTaskProxy::do_work_steal(int i,
- CMSParDrainMarkingStackClosure* drain,
- CMSParKeepAliveClosure* keep_alive) {
- OopTaskQueue* work_q = work_queue(i);
- NOT_PRODUCT(int num_steals = 0;)
- oop obj_to_scan;
-
- while (true) {
- // Completely finish any left over work from (an) earlier round(s)
- drain->trim_queue(0);
- size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
- // Now check if there's any work in the overflow list
- // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
- // only affects the number of attempts made to get work from the
- // overflow list and does not affect the number of workers. Just
- // pass ParallelGCThreads so this behavior is unchanged.
- if (_collector->par_take_from_overflow_list(num_from_overflow_list,
- work_q,
- ParallelGCThreads)) {
- // Found something in global overflow list;
- // not yet ready to go stealing work from others.
- // We'd like to assert(work_q->size() != 0, ...)
- // because we just took work from the overflow list,
- // but of course we can't, since all of that might have
- // been already stolen from us.
- continue;
- }
- // Verify that we have no work before we resort to stealing
- assert(work_q->size() == 0, "Have work, shouldn't steal");
- // Try to steal from other queues that have work
- if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
- NOT_PRODUCT(num_steals++;)
- assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
- assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
- // Do scanning work
- obj_to_scan->oop_iterate(keep_alive);
- // Loop around, finish this work, and try to steal some more
- } else if (terminator()->offer_termination()) {
- break; // nirvana from the infinite cycle
- }
- }
- log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
-}
-
-void CMSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
- CMSHeap* heap = CMSHeap::heap();
- WorkGang* workers = heap->workers();
- assert(workers != NULL, "Need parallel worker threads.");
- assert(workers->active_workers() == ergo_workers,
- "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
- ergo_workers, workers->active_workers());
- CMSRefProcTaskProxy rp_task(task, &_collector,
- _collector.ref_processor_span(),
- _collector.markBitMap(),
- workers, _collector.task_queues());
- workers->run_task(&rp_task, workers->active_workers());
-}
-
-void CMSCollector::refProcessingWork() {
- ResourceMark rm;
- HandleMark hm;
-
- ReferenceProcessor* rp = ref_processor();
- assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
- assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
- // Process weak references.
- rp->setup_policy(false);
- verify_work_stacks_empty();
-
- ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
- {
- GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
-
- // Setup keep_alive and complete closures.
- CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
- &_markStack, false /* !preclean */);
- CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
- _span, &_markBitMap, &_markStack,
- &cmsKeepAliveClosure, false /* !preclean */);
-
- ReferenceProcessorStats stats;
- if (rp->processing_is_mt()) {
- // Set the degree of MT here. If the discovery is done MT, there
- // may have been a different number of threads doing the discovery
- // and a different number of discovered lists may have Ref objects.
- // That is OK as long as the Reference lists are balanced (see
- // balance_all_queues() and balance_queues()).
- CMSHeap* heap = CMSHeap::heap();
- uint active_workers = ParallelGCThreads;
- WorkGang* workers = heap->workers();
- if (workers != NULL) {
- active_workers = workers->active_workers();
- // The expectation is that active_workers will have already
- // been set to a reasonable value. If it has not been set,
- // investigate.
- assert(active_workers > 0, "Should have been set during scavenge");
- }
- rp->set_active_mt_degree(active_workers);
- CMSRefProcTaskExecutor task_executor(*this);
- stats = rp->process_discovered_references(&_is_alive_closure,
- &cmsKeepAliveClosure,
- &cmsDrainMarkingStackClosure,
- &task_executor,
- &pt);
- } else {
- stats = rp->process_discovered_references(&_is_alive_closure,
- &cmsKeepAliveClosure,
- &cmsDrainMarkingStackClosure,
- NULL,
- &pt);
- }
- _gc_tracer_cm->report_gc_reference_stats(stats);
- pt.print_all_references();
- }
-
- // This is the point where the entire marking should have completed.
- verify_work_stacks_empty();
-
- {
- GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
- WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl);
- }
-
- if (should_unload_classes()) {
- {
- GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
-
- // Unload classes and purge the SystemDictionary.
- bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm);
-
- // Unload nmethods.
- CodeCache::do_unloading(&_is_alive_closure, purged_class);
-
- // Prune dead klasses from subklass/sibling/implementor lists.
- Klass::clean_weak_klass_links(purged_class);
-
- // Clean JVMCI metadata handles.
- JVMCI_ONLY(JVMCI::do_unloading(purged_class));
- }
- }
-
- // Restore any preserved marks as a result of mark stack or
- // work queue overflow
- restore_preserved_marks_if_any(); // done single-threaded for now
-
- rp->set_enqueuing_is_done(true);
- rp->verify_no_references_recorded();
-}
-
-#ifndef PRODUCT
-void CMSCollector::check_correct_thread_executing() {
- Thread* t = Thread::current();
- // Only the VM thread or the CMS thread should be here.
- assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
- "Unexpected thread type");
- // If this is the vm thread, the foreground process
- // should not be waiting. Note that _foregroundGCIsActive is
- // true while the foreground collector is waiting.
- if (_foregroundGCShouldWait) {
- // We cannot be the VM thread
- assert(t->is_ConcurrentGC_thread(),
- "Should be CMS thread");
- } else {
- // We can be the CMS thread only if we are in a stop-world
- // phase of CMS collection.
- if (t->is_ConcurrentGC_thread()) {
- assert(_collectorState == InitialMarking ||
- _collectorState == FinalMarking,
- "Should be a stop-world phase");
- // The CMS thread should be holding the CMS_token.
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Potential interference with concurrently "
- "executing VM thread");
- }
- }
-}
-#endif
-
-void CMSCollector::sweep() {
- assert(_collectorState == Sweeping, "just checking");
- check_correct_thread_executing();
- verify_work_stacks_empty();
- verify_overflow_empty();
- increment_sweep_count();
- TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
-
- _inter_sweep_timer.stop();
- _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
-
- assert(!_intra_sweep_timer.is_active(), "Should not be active");
- _intra_sweep_timer.reset();
- _intra_sweep_timer.start();
- {
- GCTraceCPUTime tcpu;
- CMSPhaseAccounting pa(this, "Concurrent Sweep");
- // First sweep the old gen
- {
- CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
- bitMapLock());
- sweepWork(_cmsGen);
- }
-
- // Update Universe::_heap_*_at_gc figures.
- // We need all the free list locks to make the abstract state
- // transition from Sweeping to Resetting. See detailed note
- // further below.
- {
- CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
-
- // Update heap occupancy information which is used as
- // input to soft ref clearing policy at the next gc.
- Universe::update_heap_info_at_gc();
-
- // recalculate CMS used space after CMS collection
- _cmsGen->cmsSpace()->recalculate_used_stable();
-
- _collectorState = Resizing;
- }
- }
- verify_work_stacks_empty();
- verify_overflow_empty();
-
- if (should_unload_classes()) {
- // Delay purge to the beginning of the next safepoint. Metaspace::contains
- // requires that the virtual spaces are stable and not deleted.
- ClassLoaderDataGraph::set_should_purge(true);
- }
-
- _intra_sweep_timer.stop();
- _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
-
- _inter_sweep_timer.reset();
- _inter_sweep_timer.start();
-
- // We need to use a monotonically non-decreasing time in ms
-  // or we will see time-warp warnings; os::javaTimeMillis()
-  // does not guarantee monotonicity.
- jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
- update_time_of_last_gc(now);
-
- // NOTE on abstract state transitions:
- // Mutators allocate-live and/or mark the mod-union table dirty
- // based on the state of the collection. The former is done in
- // the interval [Marking, Sweeping] and the latter in the interval
- // [Marking, Sweeping). Thus the transitions into the Marking state
- // and out of the Sweeping state must be synchronously visible
- // globally to the mutators.
- // The transition into the Marking state happens with the world
- // stopped so the mutators will globally see it. Sweeping is
- // done asynchronously by the background collector so the transition
- // from the Sweeping state to the Resizing state must be done
- // under the freelistLock (as is the check for whether to
- // allocate-live and whether to dirty the mod-union table).
- assert(_collectorState == Resizing, "Change of collector state to"
- " Resizing must be done under the freelistLocks (plural)");
-
- // Now that sweeping has been completed, we clear
- // the incremental_collection_failed flag,
- // thus inviting a younger gen collection to promote into
- // this generation. If such a promotion may still fail,
- // the flag will be set again when a young collection is
- // attempted.
- CMSHeap* heap = CMSHeap::heap();
- heap->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
- heap->update_full_collections_completed(_collection_count_start);
-}
-
-// FIX ME!!! Looks like this belongs in CFLSpace, with
-// CMSGen merely delegating to it.
-void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
- double nearLargestPercent = FLSLargestBlockCoalesceProximity;
- HeapWord* minAddr = _cmsSpace->bottom();
- HeapWord* largestAddr =
- (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
- if (largestAddr == NULL) {
- // The dictionary appears to be empty. In this case
- // try to coalesce at the end of the heap.
- largestAddr = _cmsSpace->end();
- }
- size_t largestOffset = pointer_delta(largestAddr, minAddr);
- size_t nearLargestOffset =
- (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
- log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
- p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
- _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
-}
-
-bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
- return addr >= _cmsSpace->nearLargestChunk();
-}
-
-FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
- return _cmsSpace->find_chunk_at_end();
-}
-
-void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
- bool full) {
- // If the young generation has been collected, gather any statistics
- // that are of interest at this point.
- bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
- if (!full && current_is_young) {
- // Gather statistics on the young generation collection.
- collector()->stats().record_gc0_end(used());
- }
- _cmsSpace->recalculate_used_stable();
-}
-
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
- // We iterate over the space(s) underlying this generation,
- // checking the mark bit map to see if the bits corresponding
- // to specific blocks are marked or not. Blocks that are
- // marked are live and are not swept up. All remaining blocks
- // are swept up, with coalescing on-the-fly as we sweep up
- // contiguous free and/or garbage blocks:
- // We need to ensure that the sweeper synchronizes with allocators
- // and stop-the-world collectors. In particular, the following
- // locks are used:
- // . CMS token: if this is held, a stop the world collection cannot occur
-  // . freelistLock: if this is held, no allocation can occur from this
-  //                 generation by another thread
-  // . bitMapLock: if this is held, no other thread can access or update
-  //               the marking bit map
-
- // Note that we need to hold the freelistLock if we use
- // block iterate below; else the iterator might go awry if
- // a mutator (or promotion) causes block contents to change
- // (for instance if the allocator divvies up a block).
- // If we hold the free list lock, for all practical purposes
- // young generation GC's can't occur (they'll usually need to
- // promote), so we might as well prevent all young generation
- // GC's while we do a sweeping step. For the same reason, we might
-  // as well take the bit map lock for the entire duration.
-
- // check that we hold the requisite locks
- assert(have_cms_token(), "Should hold cms token");
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
- assert_lock_strong(old_gen->freelistLock());
- assert_lock_strong(bitMapLock());
-
- assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
- assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
- old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
- _inter_sweep_estimate.padded_average(),
- _intra_sweep_estimate.padded_average());
- old_gen->setNearLargestChunk();
-
- {
- SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
- old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
- // We need to free-up/coalesce garbage/blocks from a
- // co-terminal free run. This is done in the SweepClosure
- // destructor; so, do not remove this scope, else the
- // end-of-sweep-census below will be off by a little bit.
- }
- old_gen->cmsSpace()->sweep_completed();
- old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
- if (should_unload_classes()) { // unloaded classes this cycle,
- _concurrent_cycles_since_last_unload = 0; // ... reset count
- } else { // did not unload classes,
- _concurrent_cycles_since_last_unload++; // ... increment count
- }
-}
-
-// Reset CMS data structures (for now just the marking bit map)
-// preparatory for the next cycle.
-void CMSCollector::reset_concurrent() {
- CMSTokenSyncWithLocks ts(true, bitMapLock());
-
- // If the state is not "Resetting", the foreground thread
- // has done a collection and the resetting.
- if (_collectorState != Resetting) {
- assert(_collectorState == Idling, "The state should only change"
- " because the foreground collector has finished the collection");
- return;
- }
-
- {
- // Clear the mark bitmap (no grey objects to start with)
- // for the next cycle.
- GCTraceCPUTime tcpu;
- CMSPhaseAccounting cmspa(this, "Concurrent Reset");
-
- HeapWord* curAddr = _markBitMap.startWord();
- while (curAddr < _markBitMap.endWord()) {
- size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
- MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
- _markBitMap.clear_large_range(chunk);
- if (ConcurrentMarkSweepThread::should_yield() &&
- !foregroundGCIsActive() &&
- CMSYield) {
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- assert_lock_strong(bitMapLock());
- bitMapLock()->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- stopTimer();
- incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- bitMapLock()->lock_without_safepoint_check();
- startTimer();
- }
- curAddr = chunk.end();
- }
- // A successful mostly concurrent collection has been done.
- // Because only the full (i.e., concurrent mode failure) collections
- // are being measured for gc overhead limits, clean the "near" flag
- // and count.
- size_policy()->reset_gc_overhead_limit_count();
- _collectorState = Idling;
- }
-
- register_gc_end();
-}
-
-// Same as above but for STW paths
-void CMSCollector::reset_stw() {
- // already have the lock
- assert(_collectorState == Resetting, "just checking");
- assert_lock_strong(bitMapLock());
- GCIdMark gc_id_mark(_cmsThread->gc_id());
- _markBitMap.clear_all();
- _collectorState = Idling;
- register_gc_end();
-}
-
-void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
- GCTraceCPUTime tcpu;
- TraceCollectorStats tcs_cgc(cgc_counters());
-
- switch (op) {
- case CMS_op_checkpointRootsInitial: {
- GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
- SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
- checkpointRootsInitial();
- break;
- }
- case CMS_op_checkpointRootsFinal: {
- GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
- SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
- checkpointRootsFinal();
- break;
- }
- default:
- fatal("No such CMS_op");
- }
-}
-
-#ifndef PRODUCT
-size_t const CMSCollector::skip_header_HeapWords() {
- return FreeChunk::header_size();
-}
-
-// Try to collect here the conditions that should hold when
-// the CMS thread is exiting. The idea is that the foreground GC
-// thread should not be blocked if it wants to terminate
-// the CMS thread and yet continue to run the VM for a while
-// after that.
-void CMSCollector::verify_ok_to_terminate() const {
- assert(Thread::current()->is_ConcurrentGC_thread(),
- "should be called by CMS thread");
- assert(!_foregroundGCShouldWait, "should be false");
- // We could check here that all the various low-level locks
- // are not held by the CMS thread, but that is overkill; see
- // also CMSThread::verify_ok_to_terminate() where the CGC_lock
- // is checked.
-}
-#endif
-
-size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
- assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
- "missing Printezis mark?");
- HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
- size_t size = pointer_delta(nextOneAddr + 1, addr);
- assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
- "alignment problem");
- assert(size >= 3, "Necessary for Printezis marks to work");
- return size;
-}
-
-// A variant of the above (block_size_using_printezis_bits()) except
-// that we return 0 if the P-bits are not yet set.
-size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
- if (_markBitMap.isMarked(addr + 1)) {
- assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
- HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
- size_t size = pointer_delta(nextOneAddr + 1, addr);
- assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
- "alignment problem");
- assert(size >= 3, "Necessary for Printezis marks to work");
- return size;
- }
- return 0;
-}
-
-HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
- size_t sz = 0;
- oop p = (oop)addr;
- if (p->klass_or_null_acquire() != NULL) {
- sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
- } else {
- sz = block_size_using_printezis_bits(addr);
- }
- assert(sz > 0, "size must be nonzero");
- HeapWord* next_block = addr + sz;
- HeapWord* next_card = align_up(next_block, CardTable::card_size);
- assert(align_down((uintptr_t)addr, CardTable::card_size) <
- align_down((uintptr_t)next_card, CardTable::card_size),
- "must be different cards");
- return next_card;
-}
-
-
-// CMS Bit Map Wrapper /////////////////////////////////////////
-
-// Construct a CMS bit map infrastructure, but don't create the
-// bit vector itself. That is done by a separate call CMSBitMap::allocate()
-// further below.
-CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
- _shifter(shifter),
- _bm(),
- _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
- Monitor::_safepoint_check_never) : NULL)
-{
- _bmStartWord = 0;
- _bmWordSize = 0;
-}
-
-bool CMSBitMap::allocate(MemRegion mr) {
- _bmStartWord = mr.start();
- _bmWordSize = mr.word_size();
- ReservedSpace brs(ReservedSpace::allocation_align_size_up(
- (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
- if (!brs.is_reserved()) {
- log_warning(gc)("CMS bit map allocation failure");
- return false;
- }
- // For now we'll just commit all of the bit map up front.
- // Later on we'll try to be more parsimonious with swap.
- if (!_virtual_space.initialize(brs, brs.size())) {
- log_warning(gc)("CMS bit map backing store failure");
- return false;
- }
- assert(_virtual_space.committed_size() == brs.size(),
- "didn't reserve backing store for all of CMS bit map?");
- assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
- _bmWordSize, "inconsistency in bit map sizing");
- _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
-
- // bm.clear(); // can we rely on getting zero'd memory? verify below
- assert(isAllClear(),
- "Expected zero'd memory from ReservedSpace constructor");
- assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
- "consistency check");
- return true;
-}
-
-void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
- HeapWord *next_addr, *end_addr, *last_addr;
- assert_locked();
- assert(covers(mr), "out-of-range error");
- // XXX assert that start and end are appropriately aligned
- for (next_addr = mr.start(), end_addr = mr.end();
- next_addr < end_addr; next_addr = last_addr) {
- MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
- last_addr = dirty_region.end();
- if (!dirty_region.is_empty()) {
- cl->do_MemRegion(dirty_region);
- } else {
- assert(last_addr == end_addr, "program logic");
- return;
- }
- }
-}
-
-void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
- _bm.print_on_error(st, prefix);
-}
-
-#ifndef PRODUCT
-void CMSBitMap::assert_locked() const {
- CMSLockVerifier::assert_locked(lock());
-}
-
-bool CMSBitMap::covers(MemRegion mr) const {
- // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
- assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
- "size inconsistency");
- return (mr.start() >= _bmStartWord) &&
- (mr.end() <= endWord());
-}
-
-bool CMSBitMap::covers(HeapWord* start, size_t size) const {
- return (start >= _bmStartWord && (start + size) <= endWord());
-}
-
-void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
- // verify that there are no 1 bits in the interval [left, right)
- FalseBitMapClosure falseBitMapClosure;
- iterate(&falseBitMapClosure, left, right);
-}
-
-void CMSBitMap::region_invariant(MemRegion mr)
-{
- assert_locked();
- // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
- assert(!mr.is_empty(), "unexpected empty region");
- assert(covers(mr), "mr should be covered by bit map");
- // convert address range into offset range
- size_t start_ofs = heapWordToOffset(mr.start());
- // Make sure that end() is appropriately aligned
- assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
- "Misaligned mr.end()");
- size_t end_ofs = heapWordToOffset(mr.end());
- assert(end_ofs > start_ofs, "Should mark at least one bit");
-}
-
-#endif
-
-bool CMSMarkStack::allocate(size_t size) {
- // allocate a stack of the requisite depth
- ReservedSpace rs(ReservedSpace::allocation_align_size_up(
- size * sizeof(oop)));
- if (!rs.is_reserved()) {
- log_warning(gc)("CMSMarkStack allocation failure");
- return false;
- }
- if (!_virtual_space.initialize(rs, rs.size())) {
- log_warning(gc)("CMSMarkStack backing store failure");
- return false;
- }
- assert(_virtual_space.committed_size() == rs.size(),
- "didn't reserve backing store for all of CMS stack?");
- _base = (oop*)(_virtual_space.low());
- _index = 0;
- _capacity = size;
- NOT_PRODUCT(_max_depth = 0);
- return true;
-}
-
-// XXX FIX ME !!! In the MT case we come in here holding a
-// leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recalibrate the two
-// lock-ranks involved in order to be able to print the
-// messages below. (Or defer the printing to the caller.
-// For now we take the expedient path of just disabling the
-// messages for the problematic case.)
-void CMSMarkStack::expand() {
- assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
- if (_capacity == MarkStackSizeMax) {
- if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
- // We print a warning message only once per CMS cycle.
- log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
- }
- return;
- }
- // Double capacity if possible
- size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
- // Do not give up existing stack until we have managed to
- // get the double capacity that we desired.
- ReservedSpace rs(ReservedSpace::allocation_align_size_up(
- new_capacity * sizeof(oop)));
- if (rs.is_reserved()) {
- // Release the backing store associated with old stack
- _virtual_space.release();
- // Reinitialize virtual space for new stack
- if (!_virtual_space.initialize(rs, rs.size())) {
- fatal("Not enough swap for expanded marking stack");
- }
- _base = (oop*)(_virtual_space.low());
- _index = 0;
- _capacity = new_capacity;
- } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
- // Failed to double capacity, continue;
- // we print a detail message only once per CMS cycle.
- log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
- _capacity / K, new_capacity / K);
- }
-}
-
-
-// Closures
-// XXX: there seems to be a lot of code duplication here;
-// should refactor and consolidate common code.
-
-// This closure is used to mark refs into the CMS generation in
-// the CMS bit map. Called at the first checkpoint. This closure
-// assumes that we do not need to re-mark dirty cards; if the CMS
-// generation on which this is used is not the oldest
-// generation, then this will lose younger_gen cards!
-
-MarkRefsIntoClosure::MarkRefsIntoClosure(
- MemRegion span, CMSBitMap* bitMap):
- _span(span),
- _bitMap(bitMap)
-{
- assert(ref_discoverer() == NULL, "deliberately left NULL");
- assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
-}
-
-void MarkRefsIntoClosure::do_oop(oop obj) {
- // if p points into _span, then mark corresponding bit in _markBitMap
- assert(oopDesc::is_oop(obj), "expected an oop");
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr)) {
- // this should be made more efficient
- _bitMap->mark(addr);
- }
-}
-
-ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
- MemRegion span, CMSBitMap* bitMap):
- _span(span),
- _bitMap(bitMap)
-{
- assert(ref_discoverer() == NULL, "deliberately left NULL");
- assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
-}
-
-void ParMarkRefsIntoClosure::do_oop(oop obj) {
- // if p points into _span, then mark corresponding bit in _markBitMap
- assert(oopDesc::is_oop(obj), "expected an oop");
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr)) {
- // this should be made more efficient
- _bitMap->par_mark(addr);
- }
-}
-
-// A variant of the above, used for CMS marking verification.
-MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
- MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
- _span(span),
- _verification_bm(verification_bm),
- _cms_bm(cms_bm)
-{
- assert(ref_discoverer() == NULL, "deliberately left NULL");
- assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
-}
-
-void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
- // if p points into _span, then mark corresponding bit in _markBitMap
- assert(oopDesc::is_oop(obj), "expected an oop");
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr)) {
- _verification_bm->mark(addr);
- if (!_cms_bm->isMarked(addr)) {
- Log(gc, verify) log;
- ResourceMark rm;
- LogStream ls(log.error());
- oop(addr)->print_on(&ls);
- log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
- fatal("... aborting");
- }
- }
-}
-
-//////////////////////////////////////////////////
-// MarkRefsIntoAndScanClosure
-//////////////////////////////////////////////////
-
-MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- CMSCollector* collector,
- bool should_yield,
- bool concurrent_precleaning):
- _span(span),
- _bit_map(bit_map),
- _mark_stack(mark_stack),
- _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
- mark_stack, concurrent_precleaning),
- _collector(collector),
- _freelistLock(NULL),
- _yield(should_yield),
- _concurrent_precleaning(concurrent_precleaning)
-{
- // FIXME: Should initialize in base class constructor.
- assert(rd != NULL, "ref_discoverer shouldn't be NULL");
- set_ref_discoverer_internal(rd);
-}
-
-// This closure is used to mark refs into the CMS generation at the
-// second (final) checkpoint, and to scan and transitively follow
-// the unmarked oops. It is also used during the concurrent precleaning
-// phase while scanning objects on dirty cards in the CMS generation.
-// The marks are made in the marking bit map and the marking stack is
-// used for keeping the (newly) grey objects during the scan.
-// The parallel version (Par_...) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
- if (obj != NULL) {
- assert(oopDesc::is_oop(obj), "expected an oop");
- HeapWord* addr = (HeapWord*)obj;
- assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
- assert(_collector->overflow_list_is_empty(),
- "overflow list should be empty");
- if (_span.contains(addr) &&
- !_bit_map->isMarked(addr)) {
- // mark bit map (object is now grey)
- _bit_map->mark(addr);
- // push on marking stack (stack should be empty), and drain the
- // stack by applying this closure to the oops in the oops popped
- // from the stack (i.e. blacken the grey objects)
- bool res = _mark_stack->push(obj);
- assert(res, "Should have space to push on empty stack");
- do {
- oop new_oop = _mark_stack->pop();
- assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
- assert(_bit_map->isMarked((HeapWord*)new_oop),
- "only grey objects on this stack");
- // iterate over the oops in this oop, marking and pushing
- // the ones in CMS heap (i.e. in _span).
- new_oop->oop_iterate(&_pushAndMarkClosure);
- // check if it's time to yield
- do_yield_check();
- } while (!_mark_stack->isEmpty() ||
- (!_concurrent_precleaning && take_from_overflow_list()));
- // if marking stack is empty, and we are not doing this
- // during precleaning, then check the overflow list
- }
- assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
- assert(_collector->overflow_list_is_empty(),
- "overflow list was drained above");
-
- assert(_collector->no_preserved_marks(),
- "All preserved marks should have been restored above");
- }
-}
-
-void MarkRefsIntoAndScanClosure::do_yield_work() {
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- assert_lock_strong(_freelistLock);
- assert_lock_strong(_bit_map->lock());
- // relinquish the free_list_lock and bitMaplock()
- _bit_map->lock()->unlock();
- _freelistLock->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- _collector->stopTimer();
- _collector->incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0;
- i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive();
- ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- _freelistLock->lock_without_safepoint_check();
- _bit_map->lock()->lock_without_safepoint_check();
- _collector->startTimer();
-}
-
-///////////////////////////////////////////////////////////
-// ParMarkRefsIntoAndScanClosure: a parallel version of
-// MarkRefsIntoAndScanClosure
-///////////////////////////////////////////////////////////
-ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
- CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
- CMSBitMap* bit_map, OopTaskQueue* work_queue):
- _span(span),
- _bit_map(bit_map),
- _work_queue(work_queue),
- _low_water_mark(MIN2((work_queue->max_elems()/4),
- ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
- _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
-{
- // FIXME: Should initialize in base class constructor.
- assert(rd != NULL, "ref_discoverer shouldn't be NULL");
- set_ref_discoverer_internal(rd);
-}
-
-// This closure is used to mark refs into the CMS generation at the
-// second (final) checkpoint, and to scan and transitively follow
-// the unmarked oops. The marks are made in the marking bit map and
-// the work_queue is used for keeping the (newly) grey objects during
-// the scan phase whence they are also available for stealing by parallel
-// threads. Since the marking bit map is shared, updates are
-// synchronized (via CAS).
-void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
- if (obj != NULL) {
- // Ignore mark word because this could be an already marked oop
- // that may be chained at the end of the overflow list.
- assert(oopDesc::is_oop(obj, true), "expected an oop");
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr) &&
- !_bit_map->isMarked(addr)) {
- // mark bit map (object will become grey):
- // It is possible for several threads to be
- // trying to "claim" this object concurrently;
- // the unique thread that succeeds in marking the
- // object first will do the subsequent push on
- // to the work queue (or overflow list).
- if (_bit_map->par_mark(addr)) {
- // push on work_queue (which may not be empty), and trim the
- // queue to an appropriate length by applying this closure to
- // the oops in the oops popped from the stack (i.e. blacken the
- // grey objects)
- bool res = _work_queue->push(obj);
- assert(res, "Low water mark should be less than capacity?");
- trim_queue(_low_water_mark);
- } // Else, another thread claimed the object
- }
- }
-}
-
-// This closure is used to rescan the marked objects on the dirty cards
-// in the mod union table and the card table proper.
-size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
- oop p, MemRegion mr) {
-
- size_t size = 0;
- HeapWord* addr = (HeapWord*)p;
- DEBUG_ONLY(_collector->verify_work_stacks_empty();)
- assert(_span.contains(addr), "we are scanning the CMS generation");
- // check if it's time to yield
- if (do_yield_check()) {
- // We yielded for some foreground stop-world work,
- // and we have been asked to abort this ongoing preclean cycle.
- return 0;
- }
- if (_bitMap->isMarked(addr)) {
- // it's marked; is it potentially uninitialized?
- if (p->klass_or_null_acquire() != NULL) {
- // an initialized object; ignore mark word in verification below
- // since we are running concurrent with mutators
- assert(oopDesc::is_oop(p, true), "should be an oop");
- if (p->is_objArray()) {
- // objArrays are precisely marked; restrict scanning
- // to dirty cards only.
- size = CompactibleFreeListSpace::adjustObjectSize(
- p->oop_iterate_size(_scanningClosure, mr));
- } else {
- // A non-array may have been imprecisely marked; we need
- // to scan object in its entirety.
- size = CompactibleFreeListSpace::adjustObjectSize(
- p->oop_iterate_size(_scanningClosure));
- }
- #ifdef ASSERT
- size_t direct_size =
- CompactibleFreeListSpace::adjustObjectSize(p->size());
- assert(size == direct_size, "Inconsistency in size");
- assert(size >= 3, "Necessary for Printezis marks to work");
- HeapWord* start_pbit = addr + 1;
- HeapWord* end_pbit = addr + size - 1;
- assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
- "inconsistent Printezis mark");
- // Verify inner mark bits (between Printezis bits) are clear,
- // but don't repeat if there are multiple dirty regions for
- // the same object, to avoid potential O(N^2) performance.
- if (addr != _last_scanned_object) {
- _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
- _last_scanned_object = addr;
- }
- #endif // ASSERT
- } else {
- // An uninitialized object.
- assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
- HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
- size = pointer_delta(nextOneAddr + 1, addr);
- assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
- "alignment problem");
- // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
- // will dirty the card when the klass pointer is installed in the
- // object (signaling the completion of initialization).
- }
- } else {
- // Either a not yet marked object or an uninitialized object
- if (p->klass_or_null_acquire() == NULL) {
- // An uninitialized object, skip to the next card, since
- // we may not be able to read its P-bits yet.
- assert(size == 0, "Initial value");
- } else {
- // An object not (yet) reached by marking: we merely need to
- // compute its size so as to go look at the next block.
- assert(oopDesc::is_oop(p, true), "should be an oop");
- size = CompactibleFreeListSpace::adjustObjectSize(p->size());
- }
- }
- DEBUG_ONLY(_collector->verify_work_stacks_empty();)
- return size;
-}
-
-void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- assert_lock_strong(_freelistLock);
- assert_lock_strong(_bitMap->lock());
- // relinquish the free_list_lock and bitMaplock()
- _bitMap->lock()->unlock();
- _freelistLock->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- _collector->stopTimer();
- _collector->incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- _freelistLock->lock_without_safepoint_check();
- _bitMap->lock()->lock_without_safepoint_check();
- _collector->startTimer();
-}
-
-
-//////////////////////////////////////////////////////////////////
-// SurvivorSpacePrecleanClosure
-//////////////////////////////////////////////////////////////////
-// This (single-threaded) closure is used to preclean the oops in
-// the survivor spaces.
-size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
-
- HeapWord* addr = (HeapWord*)p;
- DEBUG_ONLY(_collector->verify_work_stacks_empty();)
- assert(!_span.contains(addr), "we are scanning the survivor spaces");
- assert(p->klass_or_null() != NULL, "object should be initialized");
- // an initialized object; ignore mark word in verification below
- // since we are running concurrent with mutators
- assert(oopDesc::is_oop(p, true), "should be an oop");
- // Note that we do not yield while we iterate over
- // the interior oops of p, pushing the relevant ones
- // on our marking stack.
- size_t size = p->oop_iterate_size(_scanning_closure);
- do_yield_check();
- // Observe that below, we do not abandon the preclean
- // phase as soon as we should; rather we empty the
- // marking stack before returning. This is to satisfy
- // some existing assertions. In general, it may be a
- // good idea to abort immediately and complete the marking
- // from the grey objects at a later time.
- while (!_mark_stack->isEmpty()) {
- oop new_oop = _mark_stack->pop();
- assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
- assert(_bit_map->isMarked((HeapWord*)new_oop),
- "only grey objects on this stack");
- // iterate over the oops in this oop, marking and pushing
- // the ones in CMS heap (i.e. in _span).
- new_oop->oop_iterate(_scanning_closure);
- // check if it's time to yield
- do_yield_check();
- }
- unsigned int after_count =
- CMSHeap::heap()->total_collections();
- bool abort = (_before_count != after_count) ||
- _collector->should_abort_preclean();
- return abort ? 0 : size;
-}
-
-void SurvivorSpacePrecleanClosure::do_yield_work() {
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- assert_lock_strong(_bit_map->lock());
- // Relinquish the bit map lock
- _bit_map->lock()->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- _collector->stopTimer();
- _collector->incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- _bit_map->lock()->lock_without_safepoint_check();
- _collector->startTimer();
-}
-
-// This closure is used to rescan the marked objects on the dirty cards
-// in the mod union table and the card table proper. In the parallel
-// case, although the bitMap is shared, we do a single read so the
-// isMarked() query is "safe".
-bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
- // Ignore mark word because we are running concurrent with mutators
- assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
- HeapWord* addr = (HeapWord*)p;
- assert(_span.contains(addr), "we are scanning the CMS generation");
- bool is_obj_array = false;
- #ifdef ASSERT
- if (!_parallel) {
- assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
- assert(_collector->overflow_list_is_empty(),
- "overflow list should be empty");
-
- }
- #endif // ASSERT
- if (_bit_map->isMarked(addr)) {
- // Obj arrays are precisely marked, non-arrays are not;
- // so we scan objArrays precisely and non-arrays in their
- // entirety.
- if (p->is_objArray()) {
- is_obj_array = true;
- if (_parallel) {
- p->oop_iterate(_par_scan_closure, mr);
- } else {
- p->oop_iterate(_scan_closure, mr);
- }
- } else {
- if (_parallel) {
- p->oop_iterate(_par_scan_closure);
- } else {
- p->oop_iterate(_scan_closure);
- }
- }
- }
- #ifdef ASSERT
- if (!_parallel) {
- assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
- assert(_collector->overflow_list_is_empty(),
- "overflow list should be empty");
-
- }
- #endif // ASSERT
- return is_obj_array;
-}
-
-MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bitMap, CMSMarkStack* markStack,
- bool should_yield, bool verifying):
- _collector(collector),
- _span(span),
- _bitMap(bitMap),
- _mut(&collector->_modUnionTable),
- _markStack(markStack),
- _yield(should_yield),
- _skipBits(0)
-{
- assert(_markStack->isEmpty(), "stack should be empty");
- _finger = _bitMap->startWord();
- _threshold = _finger;
- assert(_collector->_restart_addr == NULL, "Sanity check");
- assert(_span.contains(_finger), "Out of bounds _finger?");
- DEBUG_ONLY(_verifying = verifying;)
-}
-
-void MarkFromRootsClosure::reset(HeapWord* addr) {
- assert(_markStack->isEmpty(), "would cause duplicates on stack");
- assert(_span.contains(addr), "Out of bounds _finger?");
- _finger = addr;
- _threshold = align_up(_finger, CardTable::card_size);
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-bool MarkFromRootsClosure::do_bit(size_t offset) {
- if (_skipBits > 0) {
- _skipBits--;
- return true;
- }
- // convert offset into a HeapWord*
- HeapWord* addr = _bitMap->startWord() + offset;
- assert(_bitMap->endWord() && addr < _bitMap->endWord(),
- "address out of range");
- assert(_bitMap->isMarked(addr), "tautology");
- if (_bitMap->isMarked(addr+1)) {
- // this is an allocated but not yet initialized object
- assert(_skipBits == 0, "tautology");
- _skipBits = 2; // skip next two marked bits ("Printezis-marks")
- oop p = oop(addr);
- if (p->klass_or_null_acquire() == NULL) {
- DEBUG_ONLY(if (!_verifying) {)
- // We re-dirty the cards on which this object lies and increase
- // the _threshold so that we'll come back to scan this object
- // during the preclean or remark phase. (CMSCleanOnEnter)
- if (CMSCleanOnEnter) {
- size_t sz = _collector->block_size_using_printezis_bits(addr);
- HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
- MemRegion redirty_range = MemRegion(addr, end_card_addr);
- assert(!redirty_range.is_empty(), "Arithmetical tautology");
- // Bump _threshold to end_card_addr; note that
- // _threshold cannot possibly exceed end_card_addr, anyhow.
- // This prevents future clearing of the card as the scan proceeds
- // to the right.
- assert(_threshold <= end_card_addr,
- "Because we are just scanning into this object");
- if (_threshold < end_card_addr) {
- _threshold = end_card_addr;
- }
- if (p->klass_or_null_acquire() != NULL) {
- // Redirty the range of cards...
- _mut->mark_range(redirty_range);
- } // ...else the setting of klass will dirty the card anyway.
- }
- DEBUG_ONLY(})
- return true;
- }
- }
- scanOopsInOop(addr);
- return true;
-}
-
-// We take a break if we've been at this for a while,
-// so as to avoid monopolizing the locks involved.
-void MarkFromRootsClosure::do_yield_work() {
- // First give up the locks, then yield, then re-lock
- // We should probably use a constructor/destructor idiom to
- // do this unlock/lock or modify the MutexUnlocker class to
- // serve our purpose. XXX
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- assert_lock_strong(_bitMap->lock());
- _bitMap->lock()->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- _collector->stopTimer();
- _collector->incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- _bitMap->lock()->lock_without_safepoint_check();
- _collector->startTimer();
-}
-
-void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
- assert(_bitMap->isMarked(ptr), "expected bit to be set");
- assert(_markStack->isEmpty(),
- "should drain stack to limit stack usage");
- // convert ptr to an oop preparatory to scanning
- oop obj = oop(ptr);
- // Ignore mark word in verification below, since we
- // may be running concurrent with mutators.
- assert(oopDesc::is_oop(obj, true), "should be an oop");
- assert(_finger <= ptr, "_finger runneth ahead");
- // advance the finger to right end of this object
- _finger = ptr + obj->size();
- assert(_finger > ptr, "we just incremented it above");
- // On large heaps, it may take us some time to get through
- // the marking phase. During
- // this time it's possible that a lot of mutations have
- // accumulated in the card table and the mod union table --
- // these mutation records are redundant until we have
- // actually traced into the corresponding card.
- // Here, we check whether advancing the finger would make
- // us cross into a new card, and if so clear corresponding
- // cards in the MUT (preclean them in the card-table in the
- // future).
-
- DEBUG_ONLY(if (!_verifying) {)
- // The clean-on-enter optimization is disabled by default,
- // until we fix 6178663.
- if (CMSCleanOnEnter && (_finger > _threshold)) {
- // [_threshold, _finger) represents the interval
- // of cards to be cleared in MUT (or precleaned in card table).
- // The set of cards to be cleared is all those that overlap
- // with the interval [_threshold, _finger); note that
- // _threshold is always kept card-aligned but _finger isn't
- // always card-aligned.
- HeapWord* old_threshold = _threshold;
- assert(is_aligned(old_threshold, CardTable::card_size),
- "_threshold should always be card-aligned");
- _threshold = align_up(_finger, CardTable::card_size);
- MemRegion mr(old_threshold, _threshold);
- assert(!mr.is_empty(), "Control point invariant");
- assert(_span.contains(mr), "Should clear within span");
- _mut->clear_range(mr);
- }
- DEBUG_ONLY(})
- // Note: the finger doesn't advance while we drain
- // the stack below.
- PushOrMarkClosure pushOrMarkClosure(_collector,
- _span, _bitMap, _markStack,
- _finger, this);
- bool res = _markStack->push(obj);
- assert(res, "Empty non-zero size stack should have space for single push");
- while (!_markStack->isEmpty()) {
- oop new_oop = _markStack->pop();
- // Skip verifying header mark word below because we are
- // running concurrent with mutators.
- assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
- // now scan this oop's oops
- new_oop->oop_iterate(&pushOrMarkClosure);
- do_yield_check();
- }
- assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
-}
-
-ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
- CMSCollector* collector, MemRegion span,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- CMSMarkStack* overflow_stack):
- _collector(collector),
- _whole_span(collector->_span),
- _span(span),
- _bit_map(bit_map),
- _mut(&collector->_modUnionTable),
- _work_queue(work_queue),
- _overflow_stack(overflow_stack),
- _skip_bits(0),
- _task(task)
-{
- assert(_work_queue->size() == 0, "work_queue should be empty");
- _finger = span.start();
- _threshold = _finger; // XXX Defer clear-on-enter optimization for now
- assert(_span.contains(_finger), "Out of bounds _finger?");
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-bool ParMarkFromRootsClosure::do_bit(size_t offset) {
- if (_skip_bits > 0) {
- _skip_bits--;
- return true;
- }
- // convert offset into a HeapWord*
- HeapWord* addr = _bit_map->startWord() + offset;
- assert(_bit_map->endWord() && addr < _bit_map->endWord(),
- "address out of range");
- assert(_bit_map->isMarked(addr), "tautology");
- if (_bit_map->isMarked(addr+1)) {
- // this is an allocated object that might not yet be initialized
- assert(_skip_bits == 0, "tautology");
- _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
- oop p = oop(addr);
- if (p->klass_or_null_acquire() == NULL) {
- // in the case of Clean-on-Enter optimization, redirty card
- // and avoid clearing card by increasing the threshold.
- return true;
- }
- }
- scan_oops_in_oop(addr);
- return true;
-}
-
-void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
- assert(_bit_map->isMarked(ptr), "expected bit to be set");
- // Should we assert that our work queue is empty or
- // below some drain limit?
- assert(_work_queue->size() == 0,
- "should drain stack to limit stack usage");
- // convert ptr to an oop preparatory to scanning
- oop obj = oop(ptr);
- // Ignore mark word in verification below, since we
- // may be running concurrent with mutators.
- assert(oopDesc::is_oop(obj, true), "should be an oop");
- assert(_finger <= ptr, "_finger runneth ahead");
- // advance the finger to right end of this object
- _finger = ptr + obj->size();
- assert(_finger > ptr, "we just incremented it above");
- // On large heaps, it may take us some time to get through
- // the marking phase. During
- // this time it's possible that a lot of mutations have
- // accumulated in the card table and the mod union table --
- // these mutation records are redundant until we have
- // actually traced into the corresponding card.
- // Here, we check whether advancing the finger would make
- // us cross into a new card, and if so clear corresponding
- // cards in the MUT (preclean them in the card-table in the
- // future).
-
- // The clean-on-enter optimization is disabled by default,
- // until we fix 6178663.
- if (CMSCleanOnEnter && (_finger > _threshold)) {
- // [_threshold, _finger) represents the interval
- // of cards to be cleared in MUT (or precleaned in card table).
- // The set of cards to be cleared is all those that overlap
- // with the interval [_threshold, _finger); note that
- // _threshold is always kept card-aligned but _finger isn't
- // always card-aligned.
- HeapWord* old_threshold = _threshold;
- assert(is_aligned(old_threshold, CardTable::card_size),
- "_threshold should always be card-aligned");
- _threshold = align_up(_finger, CardTable::card_size);
- MemRegion mr(old_threshold, _threshold);
- assert(!mr.is_empty(), "Control point invariant");
- assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
- _mut->clear_range(mr);
- }
-
- // Note: the local finger doesn't advance while we drain
- // the stack below, but the global finger sure can and will.
- HeapWord* volatile* gfa = _task->global_finger_addr();
- ParPushOrMarkClosure pushOrMarkClosure(_collector,
- _span, _bit_map,
- _work_queue,
- _overflow_stack,
- _finger,
- gfa, this);
- bool res = _work_queue->push(obj); // overflow could occur here
- assert(res, "Will hold once we use workqueues");
- while (true) {
- oop new_oop;
- if (!_work_queue->pop_local(new_oop)) {
- // We emptied our work_queue; check if there's stuff that can
- // be gotten from the overflow stack.
- if (CMSConcMarkingTask::get_work_from_overflow_stack(
- _overflow_stack, _work_queue)) {
- do_yield_check();
- continue;
- } else { // done
- break;
- }
- }
- // Skip verifying header mark word below because we are
- // running concurrent with mutators.
- assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
- // now scan this oop's oops
- new_oop->oop_iterate(&pushOrMarkClosure);
- do_yield_check();
- }
- assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
-}
-
-// Yield in response to a request from VM Thread or
-// from mutators.
-void ParMarkFromRootsClosure::do_yield_work() {
- assert(_task != NULL, "sanity");
- _task->yield();
-}
-
-// A variant of the above used for verifying CMS marking work.
-MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
- MemRegion span,
- CMSBitMap* verification_bm, CMSBitMap* cms_bm,
- CMSMarkStack* mark_stack):
- _collector(collector),
- _span(span),
- _verification_bm(verification_bm),
- _cms_bm(cms_bm),
- _mark_stack(mark_stack),
- _pam_verify_closure(collector, span, verification_bm, cms_bm,
- mark_stack)
-{
- assert(_mark_stack->isEmpty(), "stack should be empty");
- _finger = _verification_bm->startWord();
- assert(_collector->_restart_addr == NULL, "Sanity check");
- assert(_span.contains(_finger), "Out of bounds _finger?");
-}
-
-void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
- assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
- assert(_span.contains(addr), "Out of bounds _finger?");
- _finger = addr;
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
- // convert offset into a HeapWord*
- HeapWord* addr = _verification_bm->startWord() + offset;
- assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
- "address out of range");
- assert(_verification_bm->isMarked(addr), "tautology");
- assert(_cms_bm->isMarked(addr), "tautology");
-
- assert(_mark_stack->isEmpty(),
- "should drain stack to limit stack usage");
- // convert addr to an oop preparatory to scanning
- oop obj = oop(addr);
- assert(oopDesc::is_oop(obj), "should be an oop");
- assert(_finger <= addr, "_finger runneth ahead");
- // advance the finger to right end of this object
- _finger = addr + obj->size();
- assert(_finger > addr, "we just incremented it above");
- // Note: the finger doesn't advance while we drain
- // the stack below.
- bool res = _mark_stack->push(obj);
- assert(res, "Empty non-zero size stack should have space for single push");
- while (!_mark_stack->isEmpty()) {
- oop new_oop = _mark_stack->pop();
- assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop");
- // now scan this oop's oops
- new_oop->oop_iterate(&_pam_verify_closure);
- }
- assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
- return true;
-}
-
-PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
- CMSCollector* collector, MemRegion span,
- CMSBitMap* verification_bm, CMSBitMap* cms_bm,
- CMSMarkStack* mark_stack):
- MetadataVisitingOopIterateClosure(collector->ref_processor()),
- _collector(collector),
- _span(span),
- _verification_bm(verification_bm),
- _cms_bm(cms_bm),
- _mark_stack(mark_stack)
-{ }
-
-template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
- oop obj = RawAccess<>::oop_load(p);
- do_oop(obj);
-}
-
-void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
-void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
- // Remember the least grey address discarded
- HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
- _collector->lower_restart_addr(ra);
- _mark_stack->reset(); // discard stack contents
- _mark_stack->expand(); // expand the stack if possible
-}
-
-void PushAndMarkVerifyClosure::do_oop(oop obj) {
- assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
- // Oop lies in _span and isn't yet grey or black
- _verification_bm->mark(addr); // now grey
- if (!_cms_bm->isMarked(addr)) {
- Log(gc, verify) log;
- ResourceMark rm;
- LogStream ls(log.error());
- oop(addr)->print_on(&ls);
- log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
- fatal("... aborting");
- }
-
- if (!_mark_stack->push(obj)) { // stack overflow
- log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
- assert(_mark_stack->isFull(), "Else push should have succeeded");
- handle_stack_overflow(addr);
- }
- // anything including and to the right of _finger
- // will be scanned as we iterate over the remainder of the
- // bit map
- }
-}
-
-PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bitMap, CMSMarkStack* markStack,
- HeapWord* finger, MarkFromRootsClosure* parent) :
- MetadataVisitingOopIterateClosure(collector->ref_processor()),
- _collector(collector),
- _span(span),
- _bitMap(bitMap),
- _markStack(markStack),
- _finger(finger),
- _parent(parent)
-{ }
-
-ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- CMSMarkStack* overflow_stack,
- HeapWord* finger,
- HeapWord* volatile* global_finger_addr,
- ParMarkFromRootsClosure* parent) :
- MetadataVisitingOopIterateClosure(collector->ref_processor()),
- _collector(collector),
- _whole_span(collector->_span),
- _span(span),
- _bit_map(bit_map),
- _work_queue(work_queue),
- _overflow_stack(overflow_stack),
- _finger(finger),
- _global_finger_addr(global_finger_addr),
- _parent(parent)
-{ }
-
-// Assumes thread-safe access by callers, who are
-// responsible for mutual exclusion.
-void CMSCollector::lower_restart_addr(HeapWord* low) {
- assert(_span.contains(low), "Out of bounds addr");
- if (_restart_addr == NULL) {
- _restart_addr = low;
- } else {
- _restart_addr = MIN2(_restart_addr, low);
- }
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
- // Remember the least grey address discarded
- HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
- _collector->lower_restart_addr(ra);
- _markStack->reset(); // discard stack contents
- _markStack->expand(); // expand the stack if possible
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
- // We need to do this under a mutex to prevent other
- // workers from interfering with the work done below.
- MutexLocker ml(_overflow_stack->par_lock(),
- Mutex::_no_safepoint_check_flag);
- // Remember the least grey address discarded
- HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
- _collector->lower_restart_addr(ra);
- _overflow_stack->reset(); // discard stack contents
- _overflow_stack->expand(); // expand the stack if possible
-}
-
-void PushOrMarkClosure::do_oop(oop obj) {
- // Ignore mark word because we are running concurrent with mutators.
- assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
- // Oop lies in _span and isn't yet grey or black
- _bitMap->mark(addr); // now grey
- if (addr < _finger) {
- // the bit map iteration has already either passed, or
- // sampled, this bit in the bit map; we'll need to
- // use the marking stack to scan this oop's oops.
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
- log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
- assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
- handle_stack_overflow(addr);
- }
- }
- // anything including and to the right of _finger
- // will be scanned as we iterate over the remainder of the
- // bit map
- do_yield_check();
- }
-}
-
-void ParPushOrMarkClosure::do_oop(oop obj) {
- // Ignore mark word because we are running concurrent with mutators.
- assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
- HeapWord* addr = (HeapWord*)obj;
- if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
- // Oop lies in _span and isn't yet grey or black
- // We read the global_finger (volatile read) strictly after marking oop
- bool res = _bit_map->par_mark(addr); // now grey
- volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
- // Should we push this marked oop on our stack?
- // -- if someone else marked it, nothing to do
- // -- if target oop is above global finger nothing to do
- // -- if target oop is in chunk and above local finger
- // then nothing to do
- // -- else push on work queue
- if ( !res // someone else marked it, they will deal with it
- || (addr >= *gfa) // will be scanned in a later task
- || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
- return;
- }
- // the bit map iteration has already either passed, or
- // sampled, this bit in the bit map; we'll need to
- // use the marking stack to scan this oop's oops.
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow ||
- !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
- // stack overflow
- log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
- // We cannot assert that the overflow stack is full because
- // it may have been emptied since.
- assert(simulate_overflow ||
- _work_queue->size() == _work_queue->max_elems(),
- "Else push should have succeeded");
- handle_stack_overflow(addr);
- }
- do_yield_check();
- }
-}
-
-PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- CMSBitMap* mod_union_table,
- CMSMarkStack* mark_stack,
- bool concurrent_precleaning):
- MetadataVisitingOopIterateClosure(rd),
- _collector(collector),
- _span(span),
- _bit_map(bit_map),
- _mod_union_table(mod_union_table),
- _mark_stack(mark_stack),
- _concurrent_precleaning(concurrent_precleaning)
-{
- assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
-}
-
-// Grey object rescan during pre-cleaning and second checkpoint phases --
-// the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop obj) {
- // Ignore mark word verification. If during concurrent precleaning,
- // the object monitor may be locked. If during the checkpoint
- // phases, the object may already have been reached by a different
- // path and may be at the end of the global overflow list (so
- // the mark word may be NULL).
- assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
- "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
- HeapWord* addr = (HeapWord*)obj;
- // Check if oop points into the CMS generation
- // and is not marked
- if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
- // a white object ...
- _bit_map->mark(addr); // ... now grey
- // push on the marking stack (grey set)
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow || !_mark_stack->push(obj)) {
- if (_concurrent_precleaning) {
- // During precleaning we can just dirty the appropriate card(s)
- // in the mod union table, thus ensuring that the object remains
- // in the grey set and continue. In the case of object arrays
- // we need to dirty all of the cards that the object spans,
- // since the rescan of object arrays will be limited to the
- // dirty cards.
- // Note that no one can be interfering with us in this action
- // of dirtying the mod union table, so no locking or atomics
- // are required.
- if (obj->is_objArray()) {
- size_t sz = obj->size();
- HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
- MemRegion redirty_range = MemRegion(addr, end_card_addr);
- assert(!redirty_range.is_empty(), "Arithmetical tautology");
- _mod_union_table->mark_range(redirty_range);
- } else {
- _mod_union_table->mark(addr);
- }
- _collector->_ser_pmc_preclean_ovflw++;
- } else {
- // During the remark phase, we need to remember this oop
- // in the overflow list.
- _collector->push_on_overflow_list(obj);
- _collector->_ser_pmc_remark_ovflw++;
- }
- }
- }
-}
-
-ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceDiscoverer* rd,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue):
- MetadataVisitingOopIterateClosure(rd),
- _collector(collector),
- _span(span),
- _bit_map(bit_map),
- _work_queue(work_queue)
-{
- assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
-}
-
-// Grey object rescan during second checkpoint phase --
-// the parallel version.
-void ParPushAndMarkClosure::do_oop(oop obj) {
- // In the assert below, we ignore the mark word because
- // this oop may point to an already visited object that is
- // on the overflow stack (in which case the mark word has
- // been hijacked for chaining into the overflow stack --
- // if this is the last object in the overflow stack then
- // its mark word will be NULL). Because this object may
- // have been subsequently popped off the global overflow
- // stack, and the mark word possibly restored to the prototypical
- // value, by the time we get to examine this failing assert in
- // the debugger, is_oop_or_null(false) may subsequently start
- // to hold.
- assert(oopDesc::is_oop_or_null(obj, true),
- "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
- HeapWord* addr = (HeapWord*)obj;
- // Check if oop points into the CMS generation
- // and is not marked
- if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
- // a white object ...
- // If we manage to "claim" the object, by being the
- // first thread to mark it, then we push it on our
- // marking stack
- if (_bit_map->par_mark(addr)) { // ... now grey
- // push on work queue (grey set)
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->par_simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow || !_work_queue->push(obj)) {
- _collector->par_push_on_overflow_list(obj);
- _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
- }
- } // Else, some other thread got there first
- }
-}
-
-void CMSPrecleanRefsYieldClosure::do_yield_work() {
- Mutex* bml = _collector->bitMapLock();
- assert_lock_strong(bml);
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
-
- bml->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
-
- _collector->stopTimer();
- _collector->incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- bml->lock_without_safepoint_check();
-
- _collector->startTimer();
-}
-
-bool CMSPrecleanRefsYieldClosure::should_return() {
- if (ConcurrentMarkSweepThread::should_yield()) {
- do_yield_work();
- }
- return _collector->foregroundGCIsActive();
-}
-
-void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
- assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
- "mr should be aligned to start at a card boundary");
- // We'd like to assert:
- // assert(mr.word_size()%CardTable::card_size_in_words == 0,
- // "mr should be a range of cards");
- // However, that would be too strong in one case -- the last
- // partition ends at _unallocated_block which, in general, can be
- // an arbitrary boundary, not necessarily card aligned.
- _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
- _space->object_iterate_mem(mr, &_scan_cl);
-}
-
-SweepClosure::SweepClosure(CMSCollector* collector,
- ConcurrentMarkSweepGeneration* g,
- CMSBitMap* bitMap, bool should_yield) :
- _collector(collector),
- _g(g),
- _sp(g->cmsSpace()),
- _limit(_sp->sweep_limit()),
- _freelistLock(_sp->freelistLock()),
- _bitMap(bitMap),
- _inFreeRange(false), // No free range at beginning of sweep
- _freeRangeInFreeLists(false), // No free range at beginning of sweep
- _lastFreeRangeCoalesced(false),
- _yield(should_yield),
- _freeFinger(g->used_region().start())
-{
- NOT_PRODUCT(
- _numObjectsFreed = 0;
- _numWordsFreed = 0;
- _numObjectsLive = 0;
- _numWordsLive = 0;
- _numObjectsAlreadyFree = 0;
- _numWordsAlreadyFree = 0;
- _last_fc = NULL;
-
- _sp->initializeIndexedFreeListArrayReturnedBytes();
- _sp->dictionary()->initialize_dict_returned_bytes();
- )
- assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
- "sweep _limit out of bounds");
- log_develop_trace(gc, sweep)("====================");
- log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
-}
-
-void SweepClosure::print_on(outputStream* st) const {
- st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(_sp->bottom()), p2i(_sp->end()));
- st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
- st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
- NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
- st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
- _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
-}
-
-#ifndef PRODUCT
-// Assertion checking only: no useful work in product mode --
-// however, if any of the flags below become product flags,
-// you may need to review this code to see if it needs to be
-// enabled in product mode.
-SweepClosure::~SweepClosure() {
- assert_lock_strong(_freelistLock);
- assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
- "sweep _limit out of bounds");
- if (inFreeRange()) {
- Log(gc, sweep) log;
- log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
- ResourceMark rm;
- LogStream ls(log.error());
- print_on(&ls);
- ShouldNotReachHere();
- }
-
- if (log_is_enabled(Debug, gc, sweep)) {
- log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
- _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
- log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
- _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
- size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
- log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
- }
-
- if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
- size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
- size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
- size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
- log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes",
- returned_bytes, indexListReturnedBytes, dict_returned_bytes);
- }
- log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
- log_develop_trace(gc, sweep)("================");
-}
-#endif // PRODUCT
-
-void SweepClosure::initialize_free_range(HeapWord* freeFinger,
- bool freeRangeInFreeLists) {
- log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
- p2i(freeFinger), freeRangeInFreeLists);
- assert(!inFreeRange(), "Trampling existing free range");
- set_inFreeRange(true);
- set_lastFreeRangeCoalesced(false);
-
- set_freeFinger(freeFinger);
- set_freeRangeInFreeLists(freeRangeInFreeLists);
- if (CMSTestInFreeList) {
- if (freeRangeInFreeLists) {
- FreeChunk* fc = (FreeChunk*) freeFinger;
- assert(fc->is_free(), "A chunk on the free list should be free.");
- assert(fc->size() > 0, "Free range should have a size");
- assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
- }
- }
-}
-
-// Note that the sweeper runs concurrently with mutators. Thus,
-// it is possible for direct allocation in this generation to happen
-// in the middle of the sweep. Note that the sweeper also coalesces
-// contiguous free blocks. Thus, unless the sweeper and the allocator
- // synchronize appropriately, freshly allocated blocks may get swept up.
-// This is accomplished by the sweeper locking the free lists while
-// it is sweeping. Thus blocks that are determined to be free are
-// indeed free. There is however one additional complication:
-// blocks that have been allocated since the final checkpoint and
-// mark, will not have been marked and so would be treated as
-// unreachable and swept up. To prevent this, the allocator marks
-// the bit map when allocating during the sweep phase. This leads,
-// however, to a further complication -- objects may have been allocated
-// but not yet initialized -- in the sense that the header isn't yet
-// installed. The sweeper can not then determine the size of the block
-// in order to skip over it. To deal with this case, we use a technique
-// (due to Printezis) to encode such uninitialized block sizes in the
-// bit map. Since the bit map uses a bit per every HeapWord, but the
-// CMS generation has a minimum object size of 3 HeapWords, it follows
-// that "normal marks" won't be adjacent in the bit map (there will
-// always be at least two 0 bits between successive 1 bits). We make use
-// of these "unused" bits to represent uninitialized blocks -- the bit
-// corresponding to the start of the uninitialized object and the next
-// bit are both set. Finally, a 1 bit marks the end of the object that
-// started with the two consecutive 1 bits to indicate its potentially
-// uninitialized state.
-
-size_t SweepClosure::do_blk_careful(HeapWord* addr) {
- FreeChunk* fc = (FreeChunk*)addr;
- size_t res;
-
- // Check if we are done sweeping. Below we check "addr >= _limit" rather
- // than "addr == _limit" because although _limit was a block boundary when
- // we started the sweep, it may no longer be one because heap expansion
- // may have caused us to coalesce the block ending at the address _limit
- // with a newly expanded chunk (this happens when _limit was set to the
- // previous _end of the space), so we may have stepped past _limit:
- // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
- if (addr >= _limit) { // we have swept up to or past the limit: finish up
- assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
- "sweep _limit out of bounds");
- assert(addr < _sp->end(), "addr out of bounds");
- // Flush any free range we might be holding as a single
- // coalesced chunk to the appropriate free list.
- if (inFreeRange()) {
- assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
- "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger()));
- flush_cur_free_chunk(freeFinger(),
- pointer_delta(addr, freeFinger()));
- log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
- p2i(freeFinger()), pointer_delta(addr, freeFinger()),
- lastFreeRangeCoalesced() ? 1 : 0);
- }
-
- // help the iterator loop finish
- return pointer_delta(_sp->end(), addr);
- }
-
- assert(addr < _limit, "sweep invariant");
- // check if we should yield
- do_yield_check(addr);
- if (fc->is_free()) {
- // Chunk that is already free
- res = fc->size();
- do_already_free_chunk(fc);
- debug_only(_sp->verifyFreeLists());
- // If we flush the chunk at hand in lookahead_and_flush()
- // and it's coalesced with a preceding chunk, then the
- // process of "mangling" the payload of the coalesced block
- // will cause erasure of the size information from the
- // (erstwhile) header of all the coalesced blocks but the
- // first, so the first disjunct in the assert will not hold
- // in that specific case (in which case the second disjunct
- // will hold).
- assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
- "Otherwise the size info doesn't change at this step");
- NOT_PRODUCT(
- _numObjectsAlreadyFree++;
- _numWordsAlreadyFree += res;
- )
- NOT_PRODUCT(_last_fc = fc;)
- } else if (!_bitMap->isMarked(addr)) {
- // Chunk is fresh garbage
- res = do_garbage_chunk(fc);
- debug_only(_sp->verifyFreeLists());
- NOT_PRODUCT(
- _numObjectsFreed++;
- _numWordsFreed += res;
- )
- } else {
- // Chunk that is alive.
- res = do_live_chunk(fc);
- debug_only(_sp->verifyFreeLists());
- NOT_PRODUCT(
- _numObjectsLive++;
- _numWordsLive += res;
- )
- }
- return res;
-}
-
-// For the smart allocation, record following
-// split deaths - a free chunk is removed from its free list because
-// it is being split into two or more chunks.
-// split birth - a free chunk is being added to its free list because
-// a larger free chunk has been split and resulted in this free chunk.
-// coal death - a free chunk is being removed from its free list because
-// it is being coalesced into a large free chunk.
-// coal birth - a free chunk is being added to its free list because
- // it was created when two or more free chunks were coalesced into
-// this free chunk.
-//
-// These statistics are used to determine the desired number of free
-// chunks of a given size. The desired number is chosen to be relative
-// to the end of a CMS sweep. The desired number at the end of a sweep
-// is the
-// count-at-end-of-previous-sweep (an amount that was enough)
-// - count-at-beginning-of-current-sweep (the excess)
-// + split-births (gains in this size during interval)
-// - split-deaths (demands on this size during interval)
-// where the interval is from the end of one sweep to the end of the
-// next.
-//
-// When sweeping the sweeper maintains an accumulated chunk which is
-// the chunk that is made up of chunks that have been coalesced. That
-// will be termed the left-hand chunk. A new chunk of garbage that
-// is being considered for coalescing will be referred to as the
-// right-hand chunk.
-//
-// When making a decision on whether to coalesce a right-hand chunk with
-// the current left-hand chunk, the current count vs. the desired count
-// of the left-hand chunk is considered. Also if the right-hand chunk
-// is near the large chunk at the end of the heap (see
-// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
-// left-hand chunk is coalesced.
-//
-// When making a decision about whether to split a chunk, the desired count
-// vs. the current count of the candidate to be split is also considered.
-// If the candidate is underpopulated (currently fewer chunks than desired)
-// a chunk of an overpopulated (currently more chunks than desired) size may
-// be chosen. The "hint" associated with a free list, if non-null, points
-// to a free list which may be overpopulated.
-//
-
-void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
- const size_t size = fc->size();
- // Chunks that cannot be coalesced are not in the
- // free lists.
- if (CMSTestInFreeList && !fc->cantCoalesce()) {
- assert(_sp->verify_chunk_in_free_list(fc),
- "free chunk should be in free lists");
- }
- // a chunk that is already free, should not have been
- // marked in the bit map
- HeapWord* const addr = (HeapWord*) fc;
- assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
- // Verify that the bit map has no bits marked between
- // addr and purported end of this block.
- _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-
- // Some chunks cannot be coalesced under any circumstances.
- // See the definition of cantCoalesce().
- if (!fc->cantCoalesce()) {
- // This chunk can potentially be coalesced.
- // All the work is done in
- do_post_free_or_garbage_chunk(fc, size);
- // Note that if the chunk is not coalescable (the else arm
- // below), we unconditionally flush, without needing to do
- // a "lookahead," as we do below.
- if (inFreeRange()) lookahead_and_flush(fc, size);
- } else {
- // Code path common to both original and adaptive free lists.
-
- // can't coalesce with previous block; this should be treated
- // as the end of a free run if any
- if (inFreeRange()) {
- // we kicked some butt; time to pick up the garbage
- assert(freeFinger() < addr, "freeFinger points too high");
- flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
- }
- // else, nothing to do, just continue
- }
-}
-
-size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
- // This is a chunk of garbage. It is not in any free list.
- // Add it to a free list or let it possibly be coalesced into
- // a larger chunk.
- HeapWord* const addr = (HeapWord*) fc;
- const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
-
- // Verify that the bit map has no bits marked between
- // addr and purported end of just dead object.
- _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
- do_post_free_or_garbage_chunk(fc, size);
-
- assert(_limit >= addr + size,
- "A freshly garbage chunk can't possibly straddle over _limit");
- if (inFreeRange()) lookahead_and_flush(fc, size);
- return size;
-}
-
-size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
- HeapWord* addr = (HeapWord*) fc;
- // The sweeper has just found a live object. Return any accumulated
- // left hand chunk to the free lists.
- if (inFreeRange()) {
- assert(freeFinger() < addr, "freeFinger points too high");
- flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
- }
-
- // This object is live: we'd normally expect this to be
- // an oop, and like to assert the following:
- // assert(oopDesc::is_oop(oop(addr)), "live block should be an oop");
- // However, as we commented above, this may be an object whose
- // header hasn't yet been initialized.
- size_t size;
- assert(_bitMap->isMarked(addr), "Tautology for this control point");
- if (_bitMap->isMarked(addr + 1)) {
- // Determine the size from the bit map, rather than trying to
- // compute it from the object header.
- HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
- size = pointer_delta(nextOneAddr + 1, addr);
- assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
- "alignment problem");
-
-#ifdef ASSERT
- if (oop(addr)->klass_or_null_acquire() != NULL) {
- // Ignore mark word because we are running concurrent with mutators
- assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
- assert(size ==
- CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
- "P-mark and computed size do not agree");
- }
-#endif
-
- } else {
- // This should be an initialized object that's alive.
- assert(oop(addr)->klass_or_null_acquire() != NULL,
- "Should be an initialized object");
- // Ignore mark word because we are running concurrent with mutators
- assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
- // Verify that the bit map has no bits marked between
- // addr and purported end of this block.
- size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
- assert(size >= 3, "Necessary for Printezis marks to work");
- assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
- DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
- }
- return size;
-}
-
-void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
- size_t chunkSize) {
- // do_post_free_or_garbage_chunk() should only be called in the case
- // of the adaptive free list allocator.
- const bool fcInFreeLists = fc->is_free();
- assert((HeapWord*)fc <= _limit, "sweep invariant");
- if (CMSTestInFreeList && fcInFreeLists) {
- assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
- }
-
- log_develop_trace(gc, sweep)(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
-
- HeapWord* const fc_addr = (HeapWord*) fc;
-
- bool coalesce = false;
- const size_t left = pointer_delta(fc_addr, freeFinger());
- const size_t right = chunkSize;
- switch (FLSCoalescePolicy) {
- // numeric value forms a coalition aggressiveness metric
- case 0: { // never coalesce
- coalesce = false;
- break;
- }
- case 1: { // coalesce if left & right chunks on overpopulated lists
- coalesce = _sp->coalOverPopulated(left) &&
- _sp->coalOverPopulated(right);
- break;
- }
- case 2: { // coalesce if left chunk on overpopulated list (default)
- coalesce = _sp->coalOverPopulated(left);
- break;
- }
- case 3: { // coalesce if left OR right chunk on overpopulated list
- coalesce = _sp->coalOverPopulated(left) ||
- _sp->coalOverPopulated(right);
- break;
- }
- case 4: { // always coalesce
- coalesce = true;
- break;
- }
- default:
- ShouldNotReachHere();
- }
-
- // Should the current free range be coalesced?
- // If the chunk is in a free range and either we decided to coalesce above
- // or the chunk is near the large block at the end of the heap
- // (isNearLargestChunk() returns true), then coalesce this chunk.
- const bool doCoalesce = inFreeRange()
- && (coalesce || _g->isNearLargestChunk(fc_addr));
- if (doCoalesce) {
- // Coalesce the current free range on the left with the new
- // chunk on the right. If either is on a free list,
- // it must be removed from the list and stashed in the closure.
- if (freeRangeInFreeLists()) {
- FreeChunk* const ffc = (FreeChunk*)freeFinger();
- assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
- "Size of free range is inconsistent with chunk size.");
- if (CMSTestInFreeList) {
- assert(_sp->verify_chunk_in_free_list(ffc),
- "Chunk is not in free lists");
- }
- _sp->coalDeath(ffc->size());
- _sp->removeFreeChunkFromFreeLists(ffc);
- set_freeRangeInFreeLists(false);
- }
- if (fcInFreeLists) {
- _sp->coalDeath(chunkSize);
- assert(fc->size() == chunkSize,
- "The chunk has the wrong size or is not in the free lists");
- _sp->removeFreeChunkFromFreeLists(fc);
- }
- set_lastFreeRangeCoalesced(true);
- print_free_block_coalesced(fc);
- } else { // not in a free range and/or should not coalesce
- // Return the current free range and start a new one.
- if (inFreeRange()) {
- // In a free range but cannot coalesce with the right hand chunk.
- // Put the current free range into the free lists.
- flush_cur_free_chunk(freeFinger(),
- pointer_delta(fc_addr, freeFinger()));
- }
- // Set up for new free range. Pass along whether the right hand
- // chunk is in the free lists.
- initialize_free_range((HeapWord*)fc, fcInFreeLists);
- }
-}
-
-// Lookahead flush:
-// If we are tracking a free range, and this is the last chunk that
-// we'll look at because its end crosses past _limit, we'll preemptively
-// flush it along with any free range we may be holding on to. Note that
-// this can be the case only for an already free or freshly garbage
-// chunk. If this block is an object, it can never straddle
-// over _limit. The "straddling" occurs when _limit is set at
-// the previous end of the space when this cycle started, and
-// a subsequent heap expansion caused the previously co-terminal
-// free block to be coalesced with the newly expanded portion,
-// thus rendering _limit a non-block-boundary making it dangerous
-// for the sweeper to step over and examine.
-void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
- assert(inFreeRange(), "Should only be called if currently in a free range.");
- HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
- assert(_sp->used_region().contains(eob - 1),
- "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
- " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
- " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
- p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
- if (eob >= _limit) {
- assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
- log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
- "[" PTR_FORMAT "," PTR_FORMAT ") in space "
- "[" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
- // Return the storage we are tracking back into the free lists.
- log_develop_trace(gc, sweep)("Flushing ... ");
- assert(freeFinger() < eob, "Error");
- flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
- }
-}
-
-void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
- assert(inFreeRange(), "Should only be called if currently in a free range.");
- assert(size > 0,
- "A zero sized chunk cannot be added to the free lists.");
- if (!freeRangeInFreeLists()) {
- if (CMSTestInFreeList) {
- FreeChunk* fc = (FreeChunk*) chunk;
- fc->set_size(size);
- assert(!_sp->verify_chunk_in_free_list(fc),
- "chunk should not be in free lists yet");
- }
- log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
- // A new free range is going to be starting. The current
- // free range has not been added to the free lists yet or
- // was removed so add it back.
- // If the current free range was coalesced, then the death
- // of the free range was recorded. Record a birth now.
- if (lastFreeRangeCoalesced()) {
- _sp->coalBirth(size);
- }
- _sp->addChunkAndRepairOffsetTable(chunk, size,
- lastFreeRangeCoalesced());
- } else {
- log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
- }
- set_inFreeRange(false);
- set_freeRangeInFreeLists(false);
-}
-
-// We take a break if we've been at this for a while,
-// so as to avoid monopolizing the locks involved.
-void SweepClosure::do_yield_work(HeapWord* addr) {
- // Return current free chunk being used for coalescing (if any)
- // to the appropriate freelist. After yielding, the next
- // free block encountered will start a coalescing range of
- // free blocks. If the next free block is adjacent to the
- // chunk just flushed, they will need to wait for the next
- // sweep to be coalesced.
- if (inFreeRange()) {
- flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
- }
-
- // First give up the locks, then yield, then re-lock.
- // We should probably use a constructor/destructor idiom to
- // do this unlock/lock or modify the MutexUnlocker class to
- // serve our purpose. XXX
- assert_lock_strong(_bitMap->lock());
- assert_lock_strong(_freelistLock);
- assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "CMS thread should hold CMS token");
- _bitMap->lock()->unlock();
- _freelistLock->unlock();
- ConcurrentMarkSweepThread::desynchronize(true);
- _collector->stopTimer();
- _collector->incrementYields();
-
- // See the comment in coordinator_yield()
- for (unsigned i = 0; i < CMSYieldSleepCount &&
- ConcurrentMarkSweepThread::should_yield() &&
- !CMSCollector::foregroundGCIsActive(); ++i) {
- os::naked_short_sleep(1);
- }
-
- ConcurrentMarkSweepThread::synchronize(true);
- _freelistLock->lock_without_safepoint_check();
- _bitMap->lock()->lock_without_safepoint_check();
- _collector->startTimer();
-}
-
-#ifndef PRODUCT
-// This is actually very useful in a product build if it can
-// be called from the debugger. Compile it into the product
-// as needed.
-bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
- return debug_cms_space->verify_chunk_in_free_list(fc);
-}
-#endif
-
-void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
- log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
- p2i(fc), fc->size());
-}
-
-// CMSIsAliveClosure
-bool CMSIsAliveClosure::do_object_b(oop obj) {
- HeapWord* addr = (HeapWord*)obj;
- return addr != NULL &&
- (!_span.contains(addr) || _bit_map->isMarked(addr));
-}
-
-CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bit_map, CMSMarkStack* mark_stack,
- bool cpc):
- _collector(collector),
- _span(span),
- _mark_stack(mark_stack),
- _bit_map(bit_map),
- _concurrent_precleaning(cpc) {
- assert(!_span.is_empty(), "Empty span could spell trouble");
-}
-
-
-// CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop obj) {
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr) &&
- !_bit_map->isMarked(addr)) {
- _bit_map->mark(addr);
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow || !_mark_stack->push(obj)) {
- if (_concurrent_precleaning) {
- // We dirty the overflown object and let the remark
- // phase deal with it.
- assert(_collector->overflow_list_is_empty(), "Error");
- // In the case of object arrays, we need to dirty all of
- // the cards that the object spans. No locking or atomics
- // are needed since no one else can be mutating the mod union
- // table.
- if (obj->is_objArray()) {
- size_t sz = obj->size();
- HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
- MemRegion redirty_range = MemRegion(addr, end_card_addr);
- assert(!redirty_range.is_empty(), "Arithmetical tautology");
- _collector->_modUnionTable.mark_range(redirty_range);
- } else {
- _collector->_modUnionTable.mark(addr);
- }
- _collector->_ser_kac_preclean_ovflw++;
- } else {
- _collector->push_on_overflow_list(obj);
- _collector->_ser_kac_ovflw++;
- }
- }
- }
-}
-
-// CMSParKeepAliveClosure: a parallel version of the above.
-// The work queues are private to each closure (thread),
-// but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop obj) {
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr) &&
- !_bit_map->isMarked(addr)) {
- // In general, during recursive tracing, several threads
- // may be concurrently getting here; the first one to
- // "tag" it, claims it.
- if (_bit_map->par_mark(addr)) {
- bool res = _work_queue->push(obj);
- assert(res, "Low water mark should be much less than capacity");
- // Do a recursive trim in the hope that this will keep
- // stack usage lower, but leave some oops for potential stealers
- trim_queue(_low_water_mark);
- } // Else, another thread got there first
- }
-}
-
-void CMSParKeepAliveClosure::trim_queue(uint max) {
- while (_work_queue->size() > max) {
- oop new_oop;
- if (_work_queue->pop_local(new_oop)) {
- assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
- assert(_bit_map->isMarked((HeapWord*)new_oop),
- "no white objects on this stack!");
- assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
- // iterate over the oops in this oop, marking and pushing
- // the ones in CMS heap (i.e. in _span).
- new_oop->oop_iterate(&_mark_and_push);
- }
- }
-}
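
The trim_queue() helpers above bound local stack usage while deliberately leaving some entries behind for other threads to steal. A self-contained sketch of the same pattern, with std::deque standing in for OopTaskQueue and a callback standing in for oop_iterate (all names are illustrative):

    #include <cstddef>
    #include <deque>
    #include <functional>

    // Drain a thread-local work queue down to 'low_water_mark' entries,
    // processing each popped item. Whatever is left on the queue remains
    // available for stealing by other workers.
    template <typename T>
    void trim_queue(std::deque<T>& work_queue,
                    std::size_t low_water_mark,
                    const std::function<void(const T&)>& process) {
      while (work_queue.size() > low_water_mark) {
        T item = work_queue.back();   // pop from the local (LIFO) end
        work_queue.pop_back();
        process(item);                // may push new work onto the queue
      }
    }
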
-
-CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
- CMSCollector* collector,
- MemRegion span, CMSBitMap* bit_map,
- OopTaskQueue* work_queue):
- _collector(collector),
- _span(span),
- _work_queue(work_queue),
- _bit_map(bit_map) { }
-
-void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
- HeapWord* addr = (HeapWord*)obj;
- if (_span.contains(addr) &&
- !_bit_map->isMarked(addr)) {
- if (_bit_map->par_mark(addr)) {
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (CMSMarkStackOverflowALot &&
- _collector->par_simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow || !_work_queue->push(obj)) {
- _collector->par_push_on_overflow_list(obj);
- _collector->_par_kac_ovflw++;
- }
- } // Else another thread got there already
- }
-}
-
-//////////////////////////////////////////////////////////////////
-// CMSExpansionCause /////////////////////////////
-//////////////////////////////////////////////////////////////////
-const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
- switch (cause) {
- case _no_expansion:
- return "No expansion";
- case _satisfy_free_ratio:
- return "Free ratio";
- case _satisfy_promotion:
- return "Satisfy promotion";
- case _satisfy_allocation:
- return "allocation";
- case _allocate_par_lab:
- return "Par LAB";
- case _allocate_par_spooling_space:
- return "Par Spooling Space";
- case _adaptive_size_policy:
- return "Ergonomics";
- default:
- return "unknown";
- }
-}
-
-void CMSDrainMarkingStackClosure::do_void() {
- // the max number to take from overflow list at a time
- const size_t num = _mark_stack->capacity()/4;
- assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
- "Overflow list should be NULL during concurrent phases");
- while (!_mark_stack->isEmpty() ||
- // if stack is empty, check the overflow list
- _collector->take_from_overflow_list(num, _mark_stack)) {
- oop obj = _mark_stack->pop();
- HeapWord* addr = (HeapWord*)obj;
- assert(_span.contains(addr), "Should be within span");
- assert(_bit_map->isMarked(addr), "Should be marked");
- assert(oopDesc::is_oop(obj), "Should be an oop");
- obj->oop_iterate(_keep_alive);
- }
-}
-
-void CMSParDrainMarkingStackClosure::do_void() {
- // drain queue
- trim_queue(0);
-}
-
-// Trim our work_queue so its length is below max at return
-void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
- while (_work_queue->size() > max) {
- oop new_oop;
- if (_work_queue->pop_local(new_oop)) {
- assert(oopDesc::is_oop(new_oop), "Expected an oop");
- assert(_bit_map->isMarked((HeapWord*)new_oop),
- "no white objects on this stack!");
- assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
- // iterate over the oops in this oop, marking and pushing
- // the ones in CMS heap (i.e. in _span).
- new_oop->oop_iterate(&_mark_and_push);
- }
- }
-}
-
-////////////////////////////////////////////////////////////////////
-// Support for Marking Stack Overflow list handling and related code
-////////////////////////////////////////////////////////////////////
-// Much of the following code is similar in shape and spirit to the
-// code used in ParNewGC. We should try and share that code
-// as much as possible in the future.
-
-#ifndef PRODUCT
-// Debugging support for CMSStackOverflowALot
-
-// It's OK to call this multi-threaded; the worst thing
-// that can happen is that we'll get a bunch of closely
-// spaced simulated overflows, but that's OK, in fact
-// probably good as it would exercise the overflow code
-// under contention.
-bool CMSCollector::simulate_overflow() {
- if (_overflow_counter-- <= 0) { // just being defensive
- _overflow_counter = CMSMarkStackOverflowInterval;
- return true;
- } else {
- return false;
- }
-}
-
-bool CMSCollector::par_simulate_overflow() {
- return simulate_overflow();
-}
-#endif
-
-// Single-threaded
-bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
- assert(stack->isEmpty(), "Expected precondition");
- assert(stack->capacity() > num, "Shouldn't bite more than can chew");
- size_t i = num;
- oop cur = _overflow_list;
- const markWord proto = markWord::prototype();
- NOT_PRODUCT(ssize_t n = 0;)
- for (oop next; i > 0 && cur != NULL; cur = next, i--) {
- next = oop(cur->mark_raw().to_pointer());
- cur->set_mark_raw(proto); // until proven otherwise
- assert(oopDesc::is_oop(cur), "Should be an oop");
- bool res = stack->push(cur);
- assert(res, "Bit off more than can chew?");
- NOT_PRODUCT(n++;)
- }
- _overflow_list = cur;
-#ifndef PRODUCT
- assert(_num_par_pushes >= n, "Too many pops?");
- _num_par_pushes -=n;
-#endif
- return !stack->isEmpty();
-}
-
-#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
-// (MT-safe) Get a prefix of at most "num" from the list.
-// The overflow list is chained through the mark word of
-// each object in the list. We fetch the entire list,
-// break off a prefix of the right size and return the
-// remainder. If other threads try to take objects from
-// the overflow list at that time, they will wait for
-// some time to see if data becomes available. If (and
-// only if) another thread places one or more object(s)
-// on the global list before we have returned the suffix
-// to the global list, we will walk down our local list
-// to find its end and append the global list to
-// our suffix before returning it. This suffix walk can
-// prove to be expensive (quadratic in the amount of traffic)
-// when there are many objects in the overflow list and
-// there is much producer-consumer contention on the list.
-// *NOTE*: The overflow list manipulation code here and
-// in ParNewGeneration:: are very similar in shape,
-// except that in the ParNew case we use the old (from/eden)
-// copy of the object to thread the list via its klass word.
-// Because of the common code, if you make any changes in
-// the code below, please check the ParNew version to see if
-// similar changes might be needed.
-// CR 6797058 has been filed to consolidate the common code.
-bool CMSCollector::par_take_from_overflow_list(size_t num,
- OopTaskQueue* work_q,
- int no_of_gc_threads) {
- assert(work_q->size() == 0, "First empty local work queue");
- assert(num < work_q->max_elems(), "Can't bite more than we can chew");
- if (_overflow_list == NULL) {
- return false;
- }
- // Grab the entire list; we'll put back a suffix
- oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
- // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
- // set to ParallelGCThreads.
- size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
- size_t sleep_time_millis = MAX2((size_t)1, num/100);
- // If the list is busy, we spin for a short while,
- // sleeping between attempts to get the list.
- for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
- os::naked_sleep(sleep_time_millis);
- if (_overflow_list == NULL) {
- // Nothing left to take
- return false;
- } else if (_overflow_list != BUSY) {
- // Try and grab the prefix
- prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
- }
- }
- // If the list was found to be empty, or we spun long
- // enough, we give up and return empty-handed. If we leave
- // the list in the BUSY state below, it must be the case that
- // some other thread holds the overflow list and will set it
- // to a non-BUSY state in the future.
- if (prefix == NULL || prefix == BUSY) {
- // Nothing to take or waited long enough
- if (prefix == NULL) {
- // Write back the NULL in case we overwrote it with BUSY above
- // and it is still the same value.
- Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
- }
- return false;
- }
- assert(prefix != NULL && prefix != BUSY, "Error");
- size_t i = num;
- oop cur = prefix;
- // Walk down the first "num" objects, unless we reach the end.
- for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
- if (cur->mark_raw().to_pointer() == NULL) {
- // We have "num" or fewer elements in the list, so there
- // is nothing to return to the global list.
- // Write back the NULL in lieu of the BUSY we wrote
- // above, if it is still the same value.
- if (_overflow_list == BUSY) {
- Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
- }
- } else {
- // Chop off the suffix and return it to the global list.
- assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
- oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
- cur->set_mark_raw(markWord::from_pointer(NULL)); // break off suffix
- // It's possible that the list is still in the empty(busy) state
- // we left it in a short while ago; in that case we may be
- // able to place back the suffix without incurring the cost
- // of a walk down the list.
- oop observed_overflow_list = _overflow_list;
- oop cur_overflow_list = observed_overflow_list;
- bool attached = false;
- while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
- observed_overflow_list =
- Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
- if (cur_overflow_list == observed_overflow_list) {
- attached = true;
- break;
- } else cur_overflow_list = observed_overflow_list;
- }
- if (!attached) {
- // Too bad, someone else sneaked in (at least) an element; we'll need
- // to do a splice. Find tail of suffix so we can prepend suffix to global
- // list.
- for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
- oop suffix_tail = cur;
- assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
- "Tautology");
- observed_overflow_list = _overflow_list;
- do {
- cur_overflow_list = observed_overflow_list;
- if (cur_overflow_list != BUSY) {
- // Do the splice ...
- suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
- } else { // cur_overflow_list == BUSY
- suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
- }
- // ... and try to place spliced list back on overflow_list ...
- observed_overflow_list =
- Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
- } while (cur_overflow_list != observed_overflow_list);
- // ... until we have succeeded in doing so.
- }
- }
-
- // Push the prefix elements on work_q
- assert(prefix != NULL, "control point invariant");
- const markWord proto = markWord::prototype();
- oop next;
- NOT_PRODUCT(ssize_t n = 0;)
- for (cur = prefix; cur != NULL; cur = next) {
- next = oop(cur->mark_raw().to_pointer());
- cur->set_mark_raw(proto); // until proven otherwise
- assert(oopDesc::is_oop(cur), "Should be an oop");
- bool res = work_q->push(cur);
- assert(res, "Bit off more than we can chew?");
- NOT_PRODUCT(n++;)
- }
-#ifndef PRODUCT
- assert(_num_par_pushes >= n, "Too many pops?");
- Atomic::sub(n, &_num_par_pushes);
-#endif
- return true;
-}
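
The block comment before par_take_from_overflow_list() describes the BUSY-sentinel protocol: claim the whole list with an atomic exchange, cut off a prefix for the caller, and hand the remainder back. A simplified, self-contained sketch of that protocol using std::atomic; to keep it short it returns immediately when another thread holds the claim instead of spinning, and the remainder is re-pushed node by node rather than spliced back in one step, trading extra CASes for not needing the suffix walk discussed above (Node, BUSY and the function names are illustrative only):

    #include <atomic>
    #include <cstddef>

    struct Node { Node* next = nullptr; };

    // Sentinel marking the shared list as "claimed" while one thread splits it.
    static Node* const BUSY = reinterpret_cast<Node*>(0x1);
    std::atomic<Node*> overflow_list{nullptr};

    // Standard lock-free prepend; a BUSY head is treated as an empty list,
    // matching the behaviour of par_push_on_overflow_list() above.
    void push(Node* n) {
      Node* old_head = overflow_list.load();
      do {
        n->next = (old_head == BUSY) ? nullptr : old_head;
      } while (!overflow_list.compare_exchange_weak(old_head, n));
    }

    // Claim the whole list, keep at most 'num' (>= 1) nodes as our prefix,
    // and give the rest back to the shared list.
    Node* take_prefix(std::size_t num) {
      Node* prefix = overflow_list.exchange(BUSY);
      if (prefix == nullptr || prefix == BUSY) {
        if (prefix == nullptr) {
          // We overwrote an empty slot with BUSY; put the nullptr back if
          // nothing has been pushed in the meantime.
          Node* expected = BUSY;
          overflow_list.compare_exchange_strong(expected, nullptr);
        }
        return nullptr;   // nothing to take, or another thread holds the claim
      }
      Node* cur = prefix;
      for (std::size_t i = 1; i < num && cur->next != nullptr; ++i) {
        cur = cur->next;
      }
      Node* suffix = cur->next;
      cur->next = nullptr;                                       // cut the prefix
      Node* expected = BUSY;
      overflow_list.compare_exchange_strong(expected, nullptr);  // release claim
      while (suffix != nullptr) {                                // return remainder
        Node* next = suffix->next;
        push(suffix);
        suffix = next;
      }
      return prefix;
    }
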
-
-// Single-threaded
-void CMSCollector::push_on_overflow_list(oop p) {
- NOT_PRODUCT(_num_par_pushes++;)
- assert(oopDesc::is_oop(p), "Not an oop");
- preserve_mark_if_necessary(p);
- p->set_mark_raw(markWord::from_pointer(_overflow_list));
- _overflow_list = p;
-}
-
-// Multi-threaded; use CAS to prepend to overflow list
-void CMSCollector::par_push_on_overflow_list(oop p) {
- NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
- assert(oopDesc::is_oop(p), "Not an oop");
- par_preserve_mark_if_necessary(p);
- oop observed_overflow_list = _overflow_list;
- oop cur_overflow_list;
- do {
- cur_overflow_list = observed_overflow_list;
- if (cur_overflow_list != BUSY) {
- p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
- } else {
- p->set_mark_raw(markWord::from_pointer(NULL));
- }
- observed_overflow_list =
- Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
- } while (cur_overflow_list != observed_overflow_list);
-}
-#undef BUSY
-
-// Single threaded
-// General Note on GrowableArray: pushes may silently fail
-// because we are (temporarily) out of C-heap for expanding
-// the stack. The problem is quite ubiquitous and affects
-// a lot of code in the JVM. The prudent thing for GrowableArray
-// to do (for now) is to exit with an error. However, that may
-// be too draconian in some cases because the caller may be
-// able to recover without much harm. For such cases, we
-// should probably introduce a "soft_push" method which returns
-// an indication of success or failure with the assumption that
-// the caller may be able to recover from a failure; code in
-// the VM can then be changed, incrementally, to deal with such
-// failures where possible, thus, incrementally hardening the VM
-// in such low resource situations.
-void CMSCollector::preserve_mark_work(oop p, markWord m) {
- _preserved_oop_stack.push(p);
- _preserved_mark_stack.push(m);
- assert(m == p->mark_raw(), "Mark word changed");
- assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
- "bijection");
-}
-
-// Single threaded
-void CMSCollector::preserve_mark_if_necessary(oop p) {
- markWord m = p->mark_raw();
- if (p->mark_must_be_preserved(m)) {
- preserve_mark_work(p, m);
- }
-}
-
-void CMSCollector::par_preserve_mark_if_necessary(oop p) {
- markWord m = p->mark_raw();
- if (p->mark_must_be_preserved(m)) {
- MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
- // Even though we read the mark word without holding
- // the lock, we are assured that it will not change
- // because we "own" this oop, so no other thread can
- // be trying to push it on the overflow list; see
- // the assertion in preserve_mark_work() that checks
- // that m == p->mark_raw().
- preserve_mark_work(p, m);
- }
-}
-
-// We should be able to do this multi-threaded,
-// a chunk of stack being a task (this is
-// correct because each oop only ever appears
-// once in the overflow list. However, it's
-// not very easy to completely overlap this with
-// other operations, so will generally not be done
-// until all work's been completed. Because we
-// expect the preserved oop stack (set) to be small,
-// it's probably fine to do this single-threaded.
-// We can explore cleverer concurrent/overlapped/parallel
-// processing of preserved marks if we feel the
-// need for this in the future. Stack overflow should
-// be so rare in practice and, when it happens, its
-// effect on performance so great that this will
-// likely just be in the noise anyway.
-void CMSCollector::restore_preserved_marks_if_any() {
- assert(SafepointSynchronize::is_at_safepoint(),
- "world should be stopped");
- assert(Thread::current()->is_ConcurrentGC_thread() ||
- Thread::current()->is_VM_thread(),
- "should be single-threaded");
- assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
- "bijection");
-
- while (!_preserved_oop_stack.is_empty()) {
- oop p = _preserved_oop_stack.pop();
- assert(oopDesc::is_oop(p), "Should be an oop");
- assert(_span.contains(p), "oop should be in _span");
- assert(p->mark_raw() == markWord::prototype(),
- "Set when taken from overflow list");
- markWord m = _preserved_mark_stack.pop();
- p->set_mark_raw(m);
- }
- assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
- "stacks were cleared above");
-}
-
-#ifndef PRODUCT
-bool CMSCollector::no_preserved_marks() const {
- return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
-}
-#endif
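
preserve_mark_work() and restore_preserved_marks_if_any() above keep two stacks in lockstep: the objects whose header words were displaced by the overflow list, and the original mark words to put back at the next safepoint. A minimal sketch of that save/restore shape, with std::vector and a caller-supplied setter standing in for the oop/markWord stacks (all names are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Parallel stacks of objects and their saved header ("mark") words.
    struct PreservedMarks {
      std::vector<void*>    objects;
      std::vector<uint64_t> marks;

      void preserve(void* obj, uint64_t mark) {
        objects.push_back(obj);
        marks.push_back(mark);
        assert(objects.size() == marks.size());   // the "bijection" invariant
      }

      // Pop both stacks together, writing each saved mark back via set_mark.
      template <typename SetMark>
      void restore_all(SetMark set_mark) {
        while (!objects.empty()) {
          set_mark(objects.back(), marks.back());
          objects.pop_back();
          marks.pop_back();
        }
      }
    };
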
-
-// Transfer some number of overflown objects to usual marking
-// stack. Return true if some objects were transferred.
-bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
- size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
-
- bool res = _collector->take_from_overflow_list(num, _mark_stack);
- assert(_collector->overflow_list_is_empty() || res,
- "If list is not empty, we should have taken something");
- assert(!res || !_mark_stack->isEmpty(),
- "If we took something, it should now be on our stack");
- return res;
-}
-
-size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
- size_t res = _sp->block_size_no_stall(addr, _collector);
- if (_sp->block_is_obj(addr)) {
- if (_live_bit_map->isMarked(addr)) {
- // It can't have been dead in a previous cycle
- guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
- } else {
- _dead_bit_map->mark(addr); // mark the dead object
- }
- }
- // Could be 0, if the block size could not be computed without stalling.
- return res;
-}
-
-TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
- GCMemoryManager* manager = CMSHeap::heap()->old_manager();
- switch (phase) {
- case CMSCollector::InitialMarking:
- initialize(manager /* GC manager */ ,
- cause /* cause of the GC */,
- true /* allMemoryPoolsAffected */,
- true /* recordGCBeginTime */,
- true /* recordPreGCUsage */,
- false /* recordPeakUsage */,
- false /* recordPostGCusage */,
- true /* recordAccumulatedGCTime */,
- false /* recordGCEndTime */,
- false /* countCollection */ );
- break;
-
- case CMSCollector::FinalMarking:
- initialize(manager /* GC manager */ ,
- cause /* cause of the GC */,
- true /* allMemoryPoolsAffected */,
- false /* recordGCBeginTime */,
- false /* recordPreGCUsage */,
- false /* recordPeakUsage */,
- false /* recordPostGCusage */,
- true /* recordAccumulatedGCTime */,
- false /* recordGCEndTime */,
- false /* countCollection */ );
- break;
-
- case CMSCollector::Sweeping:
- initialize(manager /* GC manager */ ,
- cause /* cause of the GC */,
- true /* allMemoryPoolsAffected */,
- false /* recordGCBeginTime */,
- false /* recordPreGCUsage */,
- true /* recordPeakUsage */,
- true /* recordPostGCusage */,
- false /* recordAccumulatedGCTime */,
- true /* recordGCEndTime */,
- true /* countCollection */ );
- break;
-
- default:
- ShouldNotReachHere();
- }
-}
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1796 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
-#define SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
-
-#include "gc/cms/cmsOopClosures.hpp"
-#include "gc/cms/gSpaceCounters.hpp"
-#include "gc/cms/yieldingWorkgroup.hpp"
-#include "gc/shared/cardGeneration.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcStats.hpp"
-#include "gc/shared/gcWhen.hpp"
-#include "gc/shared/generationCounters.hpp"
-#include "gc/shared/space.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "logging/log.hpp"
-#include "memory/iterator.hpp"
-#include "memory/virtualspace.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "services/memoryService.hpp"
-#include "utilities/bitMap.hpp"
-#include "utilities/stack.hpp"
-
-// ConcurrentMarkSweepGeneration is in support of a concurrent
-// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
-// style. We assume, for now, that this generation is always the
-// seniormost generation and for simplicity
-// in the first implementation, that this generation is a single compactible
-// space. Neither of these restrictions appears essential, and will be
-// relaxed in the future when more time is available to implement the
-// greater generality (and there's a need for it).
-//
-// Concurrent mode failures are currently handled by
-// means of a sliding mark-compact.
-
-class AdaptiveSizePolicy;
-class CMSCollector;
-class CMSConcMarkingTask;
-class CMSGCAdaptivePolicyCounters;
-class CMSTracer;
-class ConcurrentGCTimer;
-class ConcurrentMarkSweepGeneration;
-class ConcurrentMarkSweepPolicy;
-class ConcurrentMarkSweepThread;
-class CompactibleFreeListSpace;
-class FreeChunk;
-class ParNewGeneration;
-class PromotionInfo;
-class ScanMarkedObjectsAgainCarefullyClosure;
-class SerialOldTracer;
-
-// A generic CMS bit map. It's the basis for both the CMS marking bit map
-// as well as for the mod union table (in each case only a subset of the
-// methods are used). This is essentially a wrapper around the BitMap class,
-// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
-// we have _shifter == 0. and for the mod union table we have
-// shifter == CardTable::card_shift - LogHeapWordSize.)
-// XXX 64-bit issues in BitMap?
-class CMSBitMap {
- friend class VMStructs;
-
- HeapWord* _bmStartWord; // base address of range covered by map
- size_t _bmWordSize; // map size (in #HeapWords covered)
- const int _shifter; // shifts to convert HeapWord to bit position
- VirtualSpace _virtual_space; // underlying the bit map
- BitMapView _bm; // the bit map itself
- Mutex* const _lock; // mutex protecting _bm;
-
- public:
- // constructor
- CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
-
- // allocates the actual storage for the map
- bool allocate(MemRegion mr);
- // field getter
- Mutex* lock() const { return _lock; }
- // locking verifier convenience function
- void assert_locked() const PRODUCT_RETURN;
-
- // inquiries
- HeapWord* startWord() const { return _bmStartWord; }
- size_t sizeInWords() const { return _bmWordSize; }
- size_t sizeInBits() const { return _bm.size(); }
- // the following is one past the last word in space
- HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
-
- // reading marks
- bool isMarked(HeapWord* addr) const;
- bool par_isMarked(HeapWord* addr) const; // do not lock checks
- bool isUnmarked(HeapWord* addr) const;
- bool isAllClear() const;
-
- // writing marks
- void mark(HeapWord* addr);
- // For marking by parallel GC threads;
- // returns true if we did, false if another thread did
- bool par_mark(HeapWord* addr);
-
- void mark_range(MemRegion mr);
- void par_mark_range(MemRegion mr);
- void mark_large_range(MemRegion mr);
- void par_mark_large_range(MemRegion mr);
- void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
- void clear_range(MemRegion mr);
- void par_clear_range(MemRegion mr);
- void clear_large_range(MemRegion mr);
- void par_clear_large_range(MemRegion mr);
- void clear_all();
- void clear_all_incrementally(); // Not yet implemented!!
-
- NOT_PRODUCT(
- // checks the memory region for validity
- void region_invariant(MemRegion mr);
- )
-
- // iteration
- void iterate(BitMapClosure* cl) {
- _bm.iterate(cl);
- }
- void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
- void dirty_range_iterate_clear(MemRegionClosure* cl);
- void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
-
- // auxiliary support for iteration
- HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
- HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
- HeapWord* end_addr) const;
- HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
- HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
- HeapWord* end_addr) const;
- MemRegion getAndClearMarkedRegion(HeapWord* addr);
- MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
- HeapWord* end_addr);
-
- // conversion utilities
- HeapWord* offsetToHeapWord(size_t offset) const;
- size_t heapWordToOffset(HeapWord* addr) const;
- size_t heapWordDiffToOffsetDiff(size_t diff) const;
-
- void print_on_error(outputStream* st, const char* prefix) const;
-
- // debugging
- // is this address range covered by the bit-map?
- NOT_PRODUCT(
- bool covers(MemRegion mr) const;
- bool covers(HeapWord* start, size_t size = 0) const;
- )
- void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
-};
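
CMSBitMap stores one bit per (1 << _shifter) HeapWords, and its conversion utilities translate between heap addresses and bit offsets. A small sketch of that geometry, modelling a HeapWord as a pointer-sized word (the struct and its members are illustrative, not HotSpot's):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct MarkBitMapGeometry {
      uintptr_t start;     // base address covered by the map
      int       shifter;   // 0 for the marking bits; larger for the mod union table

      // Equivalent in spirit to heapWordToOffset(): address -> bit index.
      std::size_t addr_to_bit(uintptr_t addr) const {
        assert(addr >= start);
        return ((addr - start) / sizeof(void*)) >> shifter;
      }

      // Equivalent in spirit to offsetToHeapWord(): bit index -> address.
      uintptr_t bit_to_addr(std::size_t bit) const {
        return start + (bit << shifter) * sizeof(void*);
      }
    };
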
-
-// Represents a marking stack used by the CMS collector.
-// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
-class CMSMarkStack: public CHeapObj<mtGC> {
- friend class CMSCollector; // To get at expansion stats further below.
-
- VirtualSpace _virtual_space; // Space for the stack
- oop* _base; // Bottom of stack
- size_t _index; // One more than last occupied index
- size_t _capacity; // Max #elements
- Mutex _par_lock; // An advisory lock used in case of parallel access
- NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run
-
- protected:
- size_t _hit_limit; // We hit max stack size limit
- size_t _failed_double; // We failed expansion before hitting limit
-
- public:
- CMSMarkStack():
- _par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
- Monitor::_safepoint_check_never),
- _hit_limit(0),
- _failed_double(0) {}
-
- bool allocate(size_t size);
-
- size_t capacity() const { return _capacity; }
-
- oop pop() {
- if (!isEmpty()) {
- return _base[--_index] ;
- }
- return NULL;
- }
-
- bool push(oop ptr) {
- if (isFull()) {
- return false;
- } else {
- _base[_index++] = ptr;
- NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
- return true;
- }
- }
-
- bool isEmpty() const { return _index == 0; }
- bool isFull() const {
- assert(_index <= _capacity, "buffer overflow");
- return _index == _capacity;
- }
-
- size_t length() { return _index; }
-
- // "Parallel versions" of some of the above
- oop par_pop() {
- // lock and pop
- MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag);
- return pop();
- }
-
- bool par_push(oop ptr) {
- // lock and push
- MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag);
- return push(ptr);
- }
-
- // Forcibly reset the stack, losing all of its contents.
- void reset() {
- _index = 0;
- }
-
- // Expand the stack, typically in response to an overflow condition.
- void expand();
-
- // Compute the least valued stack element.
- oop least_value(HeapWord* low) {
- HeapWord* least = low;
- for (size_t i = 0; i < _index; i++) {
- least = MIN2(least, (HeapWord*)_base[i]);
- }
- return (oop)least;
- }
-
- // Exposed here to allow stack expansion in || case.
- Mutex* par_lock() { return &_par_lock; }
-};
-
-class CardTableRS;
-class CMSParGCThreadState;
-
-class ModUnionClosure: public MemRegionClosure {
- protected:
- CMSBitMap* _t;
- public:
- ModUnionClosure(CMSBitMap* t): _t(t) { }
- void do_MemRegion(MemRegion mr);
-};
-
-class ModUnionClosurePar: public ModUnionClosure {
- public:
- ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
- void do_MemRegion(MemRegion mr);
-};
-
-// Survivor Chunk Array in support of parallelization of
-// Survivor Space rescan.
-class ChunkArray: public CHeapObj<mtGC> {
- size_t _index;
- size_t _capacity;
- size_t _overflows;
- HeapWord** _array; // storage for array
-
- public:
- ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
- ChunkArray(HeapWord** a, size_t c):
- _index(0), _capacity(c), _overflows(0), _array(a) {}
-
- HeapWord** array() { return _array; }
- void set_array(HeapWord** a) { _array = a; }
-
- size_t capacity() { return _capacity; }
- void set_capacity(size_t c) { _capacity = c; }
-
- size_t end() {
- assert(_index <= capacity(),
- "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
- _index, _capacity);
- return _index;
- } // exclusive
-
- HeapWord* nth(size_t n) {
- assert(n < end(), "Out of bounds access");
- return _array[n];
- }
-
- void reset() {
- _index = 0;
- if (_overflows > 0) {
- log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows);
- }
- _overflows = 0;
- }
-
- void record_sample(HeapWord* p, size_t sz) {
- // For now we do not do anything with the size
- if (_index < _capacity) {
- _array[_index++] = p;
- } else {
- ++_overflows;
- assert(_index == _capacity,
- "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
- "): out of bounds at overflow#" SIZE_FORMAT,
- _index, _capacity, _overflows);
- }
- }
-};
-
-//
-// Timing, allocation and promotion statistics for gc scheduling and incremental
-// mode pacing. Most statistics are exponential averages.
-//
-class CMSStats {
- private:
- ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.
-
- // The following are exponential averages with factor alpha:
- // avg = (100 - alpha) * avg + alpha * cur_sample
- //
- // The durations measure: end_time[n] - start_time[n]
- // The periods measure: start_time[n] - start_time[n-1]
- //
- // The cms period and duration include only concurrent collections; time spent
- // in foreground cms collections due to System.gc() or because of a failure to
- // keep up are not included.
- //
- // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
- // real value, but is used only after the first period. A value of 100 is
- // used for the first sample so it gets the entire weight.
- unsigned int _saved_alpha; // 0-100
- unsigned int _gc0_alpha;
- unsigned int _cms_alpha;
-
- double _gc0_duration;
- double _gc0_period;
- size_t _gc0_promoted; // bytes promoted per gc0
- double _cms_duration;
- double _cms_duration_pre_sweep; // time from initiation to start of sweep
- double _cms_period;
- size_t _cms_allocated; // bytes of direct allocation per gc0 period
-
- // Timers.
- elapsedTimer _cms_timer;
- TimeStamp _gc0_begin_time;
- TimeStamp _cms_begin_time;
- TimeStamp _cms_end_time;
-
- // Snapshots of the amount used in the CMS generation.
- size_t _cms_used_at_gc0_begin;
- size_t _cms_used_at_gc0_end;
- size_t _cms_used_at_cms_begin;
-
- // Used to prevent the duty cycle from being reduced in the middle of a cms
- // cycle.
- bool _allow_duty_cycle_reduction;
-
- enum {
- _GC0_VALID = 0x1,
- _CMS_VALID = 0x2,
- _ALL_VALID = _GC0_VALID | _CMS_VALID
- };
-
- unsigned int _valid_bits;
-
- protected:
- // In support of adjusting of cms trigger ratios based on history
- // of concurrent mode failure.
- double cms_free_adjustment_factor(size_t free) const;
- void adjust_cms_free_adjustment_factor(bool fail, size_t free);
-
- public:
- CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
- unsigned int alpha = CMSExpAvgFactor);
-
- // Whether or not the statistics contain valid data; higher level statistics
- // cannot be called until this returns true (they require at least one young
- // gen and one cms cycle to have completed).
- bool valid() const;
-
- // Record statistics.
- void record_gc0_begin();
- void record_gc0_end(size_t cms_gen_bytes_used);
- void record_cms_begin();
- void record_cms_end();
-
- // Allow management of the cms timer, which must be stopped/started around
- // yield points.
- elapsedTimer& cms_timer() { return _cms_timer; }
- void start_cms_timer() { _cms_timer.start(); }
- void stop_cms_timer() { _cms_timer.stop(); }
-
- // Basic statistics; units are seconds or bytes.
- double gc0_period() const { return _gc0_period; }
- double gc0_duration() const { return _gc0_duration; }
- size_t gc0_promoted() const { return _gc0_promoted; }
- double cms_period() const { return _cms_period; }
- double cms_duration() const { return _cms_duration; }
- size_t cms_allocated() const { return _cms_allocated; }
-
- size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
-
- // Seconds since the last background cms cycle began or ended.
- double cms_time_since_begin() const;
- double cms_time_since_end() const;
-
- // Higher level statistics--caller must check that valid() returns true before
- // calling.
-
- // Returns bytes promoted per second of wall clock time.
- double promotion_rate() const;
-
- // Returns bytes directly allocated per second of wall clock time.
- double cms_allocation_rate() const;
-
- // Rate at which space in the cms generation is being consumed (sum of the
- // above two).
- double cms_consumption_rate() const;
-
- // Returns an estimate of the number of seconds until the cms generation will
- // fill up, assuming no collection work is done.
- double time_until_cms_gen_full() const;
-
- // Returns an estimate of the number of seconds remaining until
- // the cms generation collection should start.
- double time_until_cms_start() const;
-
- // End of higher level statistics.
-
- // Debugging.
- void print_on(outputStream* st) const PRODUCT_RETURN;
- void print() const { print_on(tty); }
-};
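
The CMSStats comment above defines its exponential averages as avg = (100 - alpha) * avg + alpha * cur_sample, with alpha an integer weight in [0, 100] and the first sample taken with alpha = 100 so it receives the entire weight. A compact sketch of that update, assuming the sum is normalized by 100 as the integer weights suggest (class and variable names are illustrative):

    #include <cstdio>
    #include <initializer_list>

    class ExpAverage {
      unsigned _saved_alpha;   // weight used once the statistic is bootstrapped
      unsigned _alpha;         // current weight; 100 for the very first sample
      double   _avg;
    public:
      explicit ExpAverage(unsigned saved_alpha)
          : _saved_alpha(saved_alpha), _alpha(100), _avg(0.0) {}

      void sample(double cur) {
        _avg = ((100.0 - _alpha) * _avg + _alpha * cur) / 100.0;
        _alpha = _saved_alpha;   // after bootstrapping, decay with the real alpha
      }
      double value() const { return _avg; }
    };

    int main() {
      ExpAverage gc0_period(25);                  // a CMSExpAvgFactor-style weight
      for (double s : {2.0, 4.0, 3.0}) {
        gc0_period.sample(s);                     // 2.0 -> 2.5 -> 2.625
      }
      std::printf("avg = %.3f\n", gc0_period.value());
      return 0;
    }
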
-
-// A closure related to weak references processing which
-// we embed in the CMSCollector, since we need to pass
-// it to the reference processor for secondary filtering
-// of references based on reachability of referent;
-// see role of _is_alive_non_header closure in the
-// ReferenceProcessor class.
-// For objects in the CMS generation, this closure checks
-// if the object is "live" (reachable). Used in weak
-// reference processing.
-class CMSIsAliveClosure: public BoolObjectClosure {
- const MemRegion _span;
- const CMSBitMap* _bit_map;
-
- friend class CMSCollector;
- public:
- CMSIsAliveClosure(MemRegion span,
- CMSBitMap* bit_map):
- _span(span),
- _bit_map(bit_map) {
- assert(!span.is_empty(), "Empty span could spell trouble");
- }
-
- bool do_object_b(oop obj);
-};
-
-
-// Implements AbstractRefProcTaskExecutor for CMS.
-class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
- CMSRefProcTaskExecutor(CMSCollector& collector)
- : _collector(collector)
- { }
-
- // Executes a task using worker threads.
- virtual void execute(ProcessTask& task, uint ergo_workers);
-private:
- CMSCollector& _collector;
-};
-
-
-class CMSCollector: public CHeapObj<mtGC> {
- friend class VMStructs;
- friend class ConcurrentMarkSweepThread;
- friend class ConcurrentMarkSweepGeneration;
- friend class CompactibleFreeListSpace;
- friend class CMSParMarkTask;
- friend class CMSParInitialMarkTask;
- friend class CMSParRemarkTask;
- friend class CMSConcMarkingTask;
- friend class CMSRefProcTaskProxy;
- friend class CMSRefProcTaskExecutor;
- friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
- friend class SurvivorSpacePrecleanClosure; // --- ditto -------
- friend class PushOrMarkClosure; // to access _restart_addr
- friend class ParPushOrMarkClosure; // to access _restart_addr
- friend class MarkFromRootsClosure; // -- ditto --
- // ... and for clearing cards
- friend class ParMarkFromRootsClosure; // to access _restart_addr
- // ... and for clearing cards
- friend class ParConcMarkingClosure; // to access _restart_addr etc.
- friend class MarkFromRootsVerifyClosure; // to access _restart_addr
- friend class PushAndMarkVerifyClosure; // -- ditto --
- friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
- friend class PushAndMarkClosure; // -- ditto --
- friend class ParPushAndMarkClosure; // -- ditto --
- friend class CMSKeepAliveClosure; // -- ditto --
- friend class CMSDrainMarkingStackClosure; // -- ditto --
- friend class CMSInnerParMarkAndPushClosure; // -- ditto --
- NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
- friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait
- friend class VM_CMS_Operation;
- friend class VM_CMS_Initial_Mark;
- friend class VM_CMS_Final_Remark;
- friend class TraceCMSMemoryManagerStats;
-
- private:
- jlong _time_of_last_gc;
- void update_time_of_last_gc(jlong now) {
- _time_of_last_gc = now;
- }
-
- OopTaskQueueSet* _task_queues;
-
- // Overflow list of grey objects, threaded through mark-word
- // Manipulated with CAS in the parallel/multi-threaded case.
- oopDesc* volatile _overflow_list;
- // The following array-pair keeps track of mark words
- // displaced for accommodating overflow list above.
- // This code will likely be revisited under RFE#4922830.
- Stack<oop, mtGC> _preserved_oop_stack;
- Stack<markWord, mtGC> _preserved_mark_stack;
-
- // In support of multi-threaded concurrent phases
- YieldingFlexibleWorkGang* _conc_workers;
-
- // Performance Counters
- CollectorCounters* _gc_counters;
- CollectorCounters* _cgc_counters;
-
- // Initialization Errors
- bool _completed_initialization;
-
- // In support of ExplicitGCInvokesConcurrent
- static bool _full_gc_requested;
- static GCCause::Cause _full_gc_cause;
- unsigned int _collection_count_start;
-
- // Should we unload classes this concurrent cycle?
- bool _should_unload_classes;
- unsigned int _concurrent_cycles_since_last_unload;
- unsigned int concurrent_cycles_since_last_unload() const {
- return _concurrent_cycles_since_last_unload;
- }
- // Did we (allow) unload classes in the previous concurrent cycle?
- bool unloaded_classes_last_cycle() const {
- return concurrent_cycles_since_last_unload() == 0;
- }
- // Root scanning options for perm gen
- int _roots_scanning_options;
- int roots_scanning_options() const { return _roots_scanning_options; }
- void add_root_scanning_option(int o) { _roots_scanning_options |= o; }
- void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
-
- // Verification support
- CMSBitMap _verification_mark_bm;
- void verify_after_remark_work_1();
- void verify_after_remark_work_2();
-
- // True if any verification flag is on.
- bool _verifying;
- bool verifying() const { return _verifying; }
- void set_verifying(bool v) { _verifying = v; }
-
- void set_did_compact(bool v);
-
- // XXX Move these to CMSStats ??? FIX ME !!!
- elapsedTimer _inter_sweep_timer; // Time between sweeps
- elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
- // Padded decaying average estimates of the above
- AdaptivePaddedAverage _inter_sweep_estimate;
- AdaptivePaddedAverage _intra_sweep_estimate;
-
- CMSTracer* _gc_tracer_cm;
- ConcurrentGCTimer* _gc_timer_cm;
-
- bool _cms_start_registered;
-
- GCHeapSummary _last_heap_summary;
- MetaspaceSummary _last_metaspace_summary;
-
- void register_gc_start(GCCause::Cause cause);
- void register_gc_end();
- void save_heap_summary();
- void report_heap_summary(GCWhen::Type when);
-
- protected:
- ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
- MemRegion _span; // Span covering above
- CardTableRS* _ct; // Card table
-
- // CMS marking support structures
- CMSBitMap _markBitMap;
- CMSBitMap _modUnionTable;
- CMSMarkStack _markStack;
-
- HeapWord* _restart_addr; // In support of marking stack overflow
- void lower_restart_addr(HeapWord* low);
-
- // Counters in support of marking stack / work queue overflow handling:
- // a non-zero value indicates certain types of overflow events during
- // the current CMS cycle and could lead to stack resizing efforts at
- // an opportune future time.
- size_t _ser_pmc_preclean_ovflw;
- size_t _ser_pmc_remark_ovflw;
- size_t _par_pmc_remark_ovflw;
- size_t _ser_kac_preclean_ovflw;
- size_t _ser_kac_ovflw;
- size_t _par_kac_ovflw;
- NOT_PRODUCT(ssize_t _num_par_pushes;)
-
- // ("Weak") Reference processing support.
- SpanSubjectToDiscoveryClosure _span_based_discoverer;
- ReferenceProcessor* _ref_processor;
- CMSIsAliveClosure _is_alive_closure;
- // Keep this textually after _markBitMap and _span; c'tor dependency.
-
- ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
- ModUnionClosurePar _modUnionClosurePar;
-
- // CMS abstract state machine
- // initial_state: Idling
- // next_state(Idling) = {Marking}
- // next_state(Marking) = {Precleaning, Sweeping}
- // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
- // next_state(AbortablePreclean) = {FinalMarking}
- // next_state(FinalMarking) = {Sweeping}
- // next_state(Sweeping) = {Resizing}
- // next_state(Resizing) = {Resetting}
- // next_state(Resetting) = {Idling}
- // The numeric values below are chosen so that:
- // . _collectorState <= Idling == post-sweep && pre-mark
- // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
- // precleaning || abortablePrecleanb
- public:
- enum CollectorState {
- Resizing = 0,
- Resetting = 1,
- Idling = 2,
- InitialMarking = 3,
- Marking = 4,
- Precleaning = 5,
- AbortablePreclean = 6,
- FinalMarking = 7,
- Sweeping = 8
- };
- protected:
- static CollectorState _collectorState;
-
- // State related to prologue/epilogue invocation for my generations
- bool _between_prologue_and_epilogue;
-
- // Signaling/State related to coordination between fore- and background GC
- // Note: When the baton has been passed from background GC to foreground GC,
- // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
- static bool _foregroundGCIsActive; // true iff foreground collector is active or
- // wants to go active
- static bool _foregroundGCShouldWait; // true iff background GC is active and has not
- // yet passed the baton to the foreground GC
-
- // Support for CMSScheduleRemark (abortable preclean)
- bool _abort_preclean;
- bool _start_sampling;
-
- int _numYields;
- size_t _numDirtyCards;
- size_t _sweep_count;
-
- // Occupancy used for bootstrapping stats
- double _bootstrap_occupancy;
-
- // Timer
- elapsedTimer _timer;
-
- // Timing, allocation and promotion statistics, used for scheduling.
- CMSStats _stats;
-
- enum CMS_op_type {
- CMS_op_checkpointRootsInitial,
- CMS_op_checkpointRootsFinal
- };
-
- void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
- bool stop_world_and_do(CMS_op_type op);
-
- OopTaskQueueSet* task_queues() { return _task_queues; }
- YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
-
- // Support for parallelizing Eden rescan in CMS remark phase
- void sample_eden(); // ... sample Eden space top
-
- private:
- // Support for parallelizing young gen rescan in CMS remark phase
- ParNewGeneration* _young_gen;
-
- HeapWord* volatile* _top_addr; // ... Top of Eden
- HeapWord** _end_addr; // ... End of Eden
- Mutex* _eden_chunk_lock;
- HeapWord** _eden_chunk_array; // ... Eden partitioning array
- size_t _eden_chunk_index; // ... top (exclusive) of array
- size_t _eden_chunk_capacity; // ... max entries in array
-
- // Support for parallelizing survivor space rescan
- HeapWord** _survivor_chunk_array;
- size_t _survivor_chunk_index;
- size_t _survivor_chunk_capacity;
- size_t* _cursor;
- ChunkArray* _survivor_plab_array;
-
- // Support for marking stack overflow handling
- bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
- bool par_take_from_overflow_list(size_t num,
- OopTaskQueue* to_work_q,
- int no_of_gc_threads);
- void push_on_overflow_list(oop p);
- void par_push_on_overflow_list(oop p);
- // The following is, obviously, not, in general, "MT-stable"
- bool overflow_list_is_empty() const;
-
- void preserve_mark_if_necessary(oop p);
- void par_preserve_mark_if_necessary(oop p);
- void preserve_mark_work(oop p, markWord m);
- void restore_preserved_marks_if_any();
- NOT_PRODUCT(bool no_preserved_marks() const;)
- // In support of testing overflow code
- NOT_PRODUCT(int _overflow_counter;)
- NOT_PRODUCT(bool simulate_overflow();) // Sequential
- NOT_PRODUCT(bool par_simulate_overflow();) // MT version
-
- // CMS work methods
- void checkpointRootsInitialWork(); // Initial checkpoint work
-
- // A return value of false indicates failure due to stack overflow
- bool markFromRootsWork(); // Concurrent marking work
-
- public: // FIX ME!!! only for testing
- bool do_marking_st(); // Single-threaded marking
- bool do_marking_mt(); // Multi-threaded marking
-
- private:
-
- // Concurrent precleaning work
- size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
- ScanMarkedObjectsAgainCarefullyClosure* cl);
- size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
- ScanMarkedObjectsAgainCarefullyClosure* cl);
- // Does precleaning work, returning a quantity indicative of
- // the amount of "useful work" done.
- size_t preclean_work(bool clean_refs, bool clean_survivors);
- void preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
- void abortable_preclean(); // Preclean while looking for possible abort
- void initialize_sequential_subtasks_for_young_gen_rescan(int i);
- // Helper function for above; merge-sorts the per-thread plab samples
- void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
- // Resets (i.e. clears) the per-thread plab sample vectors
- void reset_survivor_plab_arrays();
-
- // Final (second) checkpoint work
- void checkpointRootsFinalWork();
- // Work routine for parallel version of remark
- void do_remark_parallel();
- // Work routine for non-parallel version of remark
- void do_remark_non_parallel();
- // Reference processing work routine (during second checkpoint)
- void refProcessingWork();
-
- // Concurrent sweeping work
- void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
-
- // Concurrent resetting of support data structures
- void reset_concurrent();
- // Resetting of support data structures from a STW full GC
- void reset_stw();
-
- // Clear _expansion_cause fields of constituent generations
- void clear_expansion_cause();
-
- // An auxiliary method used to record the ends of
- // used regions of each generation to limit the extent of sweep
- void save_sweep_limits();
-
- // A work method used by the foreground collector to do
- // a mark-sweep-compact.
- void do_compaction_work(bool clear_all_soft_refs);
-
- // Work methods for reporting concurrent mode interruption or failure
- bool is_external_interruption();
- void report_concurrent_mode_interruption();
-
- // If the background GC is active, acquire control from the background
- // GC and do the collection.
- void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
-
- // For synchronizing passing of control from background to foreground
- // GC. waitForForegroundGC() is called by the background
- // collector. It if had to wait for a foreground collection,
- // it returns true and the background collection should assume
- // that the collection was finished by the foreground
- // collector.
- bool waitForForegroundGC();
-
- size_t block_size_using_printezis_bits(HeapWord* addr) const;
- size_t block_size_if_printezis_bits(HeapWord* addr) const;
- HeapWord* next_card_start_after_block(HeapWord* addr) const;
-
- void setup_cms_unloading_and_verification_state();
- public:
- CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
- CardTableRS* ct);
- ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
-
- MemRegion ref_processor_span() const { return _span_based_discoverer.span(); }
- ReferenceProcessor* ref_processor() { return _ref_processor; }
- void ref_processor_init();
-
- Mutex* bitMapLock() const { return _markBitMap.lock(); }
- static CollectorState abstract_state() { return _collectorState; }
-
- bool should_abort_preclean() const; // Whether preclean should be aborted.
- size_t get_eden_used() const;
- size_t get_eden_capacity() const;
-
- ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
-
- // Locking checks
- NOT_PRODUCT(static bool have_cms_token();)
-
- bool shouldConcurrentCollect();
-
- void collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool tlab);
- void collect_in_background(GCCause::Cause cause);
-
- // In support of ExplicitGCInvokesConcurrent
- static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
- // Should we unload classes in a particular concurrent cycle?
- bool should_unload_classes() const {
- return _should_unload_classes;
- }
- void update_should_unload_classes();
-
- void direct_allocated(HeapWord* start, size_t size);
-
- // Object is dead if not marked and current phase is sweeping.
- bool is_dead_obj(oop obj) const;
-
- // After a promotion (of "start"), do any necessary marking.
- // If "par", then it's being done by a parallel GC thread.
- // The last two args indicate if we need precise marking
- // and if so the size of the object so it can be dirtied
- // in its entirety.
- void promoted(bool par, HeapWord* start,
- bool is_obj_array, size_t obj_size);
-
- void getFreelistLocks() const;
- void releaseFreelistLocks() const;
- bool haveFreelistLocks() const;
-
- // Adjust size of underlying generation
- void compute_new_size();
-
- // GC prologue and epilogue
- void gc_prologue(bool full);
- void gc_epilogue(bool full);
-
- jlong time_of_last_gc(jlong now) {
- if (_collectorState <= Idling) {
- // gc not in progress
- return _time_of_last_gc;
- } else {
- // collection in progress
- return now;
- }
- }
-
- // Support for parallel remark of survivor space
- void* get_data_recorder(int thr_num);
- void sample_eden_chunk();
-
- CMSBitMap* markBitMap() { return &_markBitMap; }
- void directAllocated(HeapWord* start, size_t size);
-
- // Main CMS steps and related support
- void checkpointRootsInitial();
- bool markFromRoots(); // a return value of false indicates failure
- // due to stack overflow
- void preclean();
- void checkpointRootsFinal();
- void sweep();
-
- // Check that the currently executing thread is the expected
- // one (foreground collector or background collector).
- static void check_correct_thread_executing() PRODUCT_RETURN;
-
- NOT_PRODUCT(bool is_cms_reachable(HeapWord* addr);)
-
- // Performance Counter Support
- CollectorCounters* counters() { return _gc_counters; }
- CollectorCounters* cgc_counters() { return _cgc_counters; }
-
- // Timer stuff
- void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
- void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
- void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
- jlong timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); }
-
- int yields() { return _numYields; }
- void resetYields() { _numYields = 0; }
- void incrementYields() { _numYields++; }
- void resetNumDirtyCards() { _numDirtyCards = 0; }
- void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
- size_t numDirtyCards() { return _numDirtyCards; }
-
- static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
- static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
- static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
- static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
- size_t sweep_count() const { return _sweep_count; }
- void increment_sweep_count() { _sweep_count++; }
-
- // Timers/stats for gc scheduling and incremental mode pacing.
- CMSStats& stats() { return _stats; }
-
- // Adaptive size policy
- AdaptiveSizePolicy* size_policy();
-
- static void print_on_error(outputStream* st);
-
- // Debugging
- void verify();
- bool verify_after_remark();
- void verify_ok_to_terminate() const PRODUCT_RETURN;
- void verify_work_stacks_empty() const PRODUCT_RETURN;
- void verify_overflow_empty() const PRODUCT_RETURN;
-
- // Convenience methods in support of debugging
- static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
- HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
-
- // Accessors
- CMSMarkStack* verification_mark_stack() { return &_markStack; }
- CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
-
- // Initialization errors
- bool completed_initialization() { return _completed_initialization; }
-
- void print_eden_and_survivor_chunk_arrays();
-
- ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
-};
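
The "CMS abstract state machine" comment inside CMSCollector above lists the legal transitions of the background cycle. A checker sketch that encodes exactly those transitions; it is illustrative only, and InitialMarking appears in the enum but, as in the comment, has no explicit transition of its own:

    enum class CollectorState {
      Resizing, Resetting, Idling, InitialMarking, Marking,
      Precleaning, AbortablePreclean, FinalMarking, Sweeping
    };

    // Returns true if 'to' is a legal next state for 'from', following the
    // next_state() table given in the comment above.
    bool is_legal_transition(CollectorState from, CollectorState to) {
      switch (from) {
        case CollectorState::Idling:
          return to == CollectorState::Marking;
        case CollectorState::Marking:
          return to == CollectorState::Precleaning || to == CollectorState::Sweeping;
        case CollectorState::Precleaning:
          return to == CollectorState::AbortablePreclean || to == CollectorState::FinalMarking;
        case CollectorState::AbortablePreclean:
          return to == CollectorState::FinalMarking;
        case CollectorState::FinalMarking:
          return to == CollectorState::Sweeping;
        case CollectorState::Sweeping:
          return to == CollectorState::Resizing;
        case CollectorState::Resizing:
          return to == CollectorState::Resetting;
        case CollectorState::Resetting:
          return to == CollectorState::Idling;
        default:
          return false;
      }
    }
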
-
-class CMSExpansionCause : public AllStatic {
- public:
- enum Cause {
- _no_expansion,
- _satisfy_free_ratio,
- _satisfy_promotion,
- _satisfy_allocation,
- _allocate_par_lab,
- _allocate_par_spooling_space,
- _adaptive_size_policy
- };
- // Return a string describing the cause of the expansion.
- static const char* to_string(CMSExpansionCause::Cause cause);
-};
-
-class ConcurrentMarkSweepGeneration: public CardGeneration {
- friend class VMStructs;
- friend class ConcurrentMarkSweepThread;
- friend class ConcurrentMarkSweep;
- friend class CMSCollector;
- protected:
- static CMSCollector* _collector; // the collector that collects us
- CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now)
-
- // Performance Counters
- GenerationCounters* _gen_counters;
- GSpaceCounters* _space_counters;
-
- // Words directly allocated, used by CMSStats.
- size_t _direct_allocated_words;
-
- // Non-product stat counters
- NOT_PRODUCT(
- size_t _numObjectsPromoted;
- size_t _numWordsPromoted;
- size_t _numObjectsAllocated;
- size_t _numWordsAllocated;
- )
-
- // Used for sizing decisions
- bool _incremental_collection_failed;
- bool incremental_collection_failed() {
- return _incremental_collection_failed;
- }
- void set_incremental_collection_failed() {
- _incremental_collection_failed = true;
- }
- void clear_incremental_collection_failed() {
- _incremental_collection_failed = false;
- }
-
- // accessors
- void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
- CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
-
- // Accessing spaces
- CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }
-
- private:
- // For parallel young-gen GC support.
- CMSParGCThreadState** _par_gc_thread_states;
-
- // Reason generation was expanded
- CMSExpansionCause::Cause _expansion_cause;
-
- // In support of MinChunkSize being larger than min object size
- const double _dilatation_factor;
-
- // True if a compacting collection was done.
- bool _did_compact;
- bool did_compact() { return _did_compact; }
-
- // Fraction of current occupancy at which to start a CMS collection which
- // will collect this generation (at least).
- double _initiating_occupancy;
-
- protected:
- // Shrink generation by specified size (returns false if unable to shrink)
- void shrink_free_list_by(size_t bytes);
-
- // Update statistics for GC
- virtual void update_gc_stats(Generation* current_generation, bool full);
-
- // Maximum available space in the generation (including uncommitted)
- // space.
- size_t max_available() const;
-
- // getter and initializer for _initiating_occupancy field.
- double initiating_occupancy() const { return _initiating_occupancy; }
- void init_initiating_occupancy(intx io, uintx tr);
-
- void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
-
- void assert_correct_size_change_locking();
-
- public:
- ConcurrentMarkSweepGeneration(ReservedSpace rs,
- size_t initial_byte_size,
- size_t min_byte_size,
- size_t max_byte_size,
- CardTableRS* ct);
-
- // Accessors
- CMSCollector* collector() const { return _collector; }
- static void set_collector(CMSCollector* collector) {
- assert(_collector == NULL, "already set");
- _collector = collector;
- }
- CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }
-
- Mutex* freelistLock() const;
-
- virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
-
- void set_did_compact(bool v) { _did_compact = v; }
-
- bool refs_discovery_is_atomic() const { return false; }
- bool refs_discovery_is_mt() const {
- // Note: CMS does MT-discovery during the parallel-remark
- // phases. Use ReferenceProcessorMTMutator to make refs
- // discovery MT-safe during such phases or other parallel
- // discovery phases in the future. This may all go away
- // if/when we decide that refs discovery is sufficiently
- // rare that the cost of the CAS's involved is in the
- // noise. That's a measurement that should be done, and
- // the code simplified if that turns out to be the case.
- return ConcGCThreads > 1;
- }
-
- // Override
- virtual void ref_processor_init();
-
- void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
-
- // Space enquiries
- double occupancy() const { return ((double)used())/((double)capacity()); }
- size_t contiguous_available() const;
- size_t unsafe_max_alloc_nogc() const;
- size_t used_stable() const;
-
- // over-rides
- MemRegion used_region_at_save_marks() const;
-
- // Adjust quantities in the generation affected by
- // the compaction.
- void reset_after_compaction();
-
- // Allocation support
- HeapWord* allocate(size_t size, bool tlab);
- HeapWord* have_lock_and_allocate(size_t size, bool tlab);
- oop promote(oop obj, size_t obj_size);
- HeapWord* par_allocate(size_t size, bool tlab) {
- return allocate(size, tlab);
- }
-
-
- // Used by CMSStats to track direct allocation. The value is sampled and
- // reset after each young gen collection.
- size_t direct_allocated_words() const { return _direct_allocated_words; }
- void reset_direct_allocated_words() { _direct_allocated_words = 0; }
-
- // Overrides for parallel promotion.
- virtual oop par_promote(int thread_num,
- oop obj, markWord m, size_t word_sz);
- virtual void par_promote_alloc_done(int thread_num);
- virtual void par_oop_since_save_marks_iterate_done(int thread_num);
-
- virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
-
- // Inform this (old) generation that a promotion failure was
- // encountered during a collection of the young generation.
- virtual void promotion_failure_occurred();
-
- bool should_collect(bool full, size_t size, bool tlab);
- virtual bool should_concurrent_collect() const;
- virtual bool is_too_full() const;
- void collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool tlab);
-
- HeapWord* expand_and_allocate(size_t word_size,
- bool tlab,
- bool parallel = false);
-
- // GC prologue and epilogue
- void gc_prologue(bool full);
- void gc_prologue_work(bool full, bool registerClosure,
- ModUnionClosure* modUnionClosure);
- void gc_epilogue(bool full);
- void gc_epilogue_work(bool full);
-
- // Time since last GC of this generation
- jlong time_of_last_gc(jlong now) {
- return collector()->time_of_last_gc(now);
- }
- void update_time_of_last_gc(jlong now) {
- collector()->update_time_of_last_gc(now);
- }
-
- // Allocation failure
- void shrink(size_t bytes);
- HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
- bool expand_and_ensure_spooling_space(PromotionInfo* promo);
-
- // Iteration support and related enquiries
- void save_marks();
- bool no_allocs_since_save_marks();
-
- // Iteration support specific to CMS generations
- void save_sweep_limit();
-
- // More iteration support
- virtual void oop_iterate(OopIterateClosure* cl);
- virtual void safe_object_iterate(ObjectClosure* cl);
- virtual void object_iterate(ObjectClosure* cl);
-
- template <typename OopClosureType>
- void oop_since_save_marks_iterate(OopClosureType* cl);
-
- // Smart allocation XXX -- move to CFLSpace?
- void setNearLargestChunk();
- bool isNearLargestChunk(HeapWord* addr);
-
- // Get the chunk at the end of the space. Delegates to
- // the space.
- FreeChunk* find_chunk_at_end();
-
- void post_compact();
-
- // Debugging
- void prepare_for_verify();
- void verify();
- void print_statistics() PRODUCT_RETURN;
-
- // Performance Counters support
- virtual void update_counters();
- virtual void update_counters(size_t used);
- void initialize_performance_counters(size_t min_old_size, size_t max_old_size);
- CollectorCounters* counters() { return collector()->counters(); }
-
- // Support for parallel remark of survivor space
- void* get_data_recorder(int thr_num) {
- //Delegate to collector
- return collector()->get_data_recorder(thr_num);
- }
- void sample_eden_chunk() {
- //Delegate to collector
- return collector()->sample_eden_chunk();
- }
-
- // Printing
- const char* name() const;
- virtual const char* short_name() const { return "CMS"; }
- void print() const;
-
- // Resize the generation after a compacting GC. The
- // generation can be treated as a contiguous space
- // after the compaction.
- virtual void compute_new_size();
- // Resize the generation after a non-compacting
- // collection.
- void compute_new_size_free_list();
-};
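// Illustrative sketch (standalone, simplified): how the initiating-occupancy
// trigger carried by this generation can be expressed. The 0.68 default and
// all names below are assumptions for the example, not the HotSpot values.
#include <cstddef>
#include <iostream>

class OldGenOccupancyTrigger {
  size_t _capacity_bytes;
  size_t _used_bytes;
  double _initiating_occupancy;   // fraction of capacity at which to start a cycle

 public:
  OldGenOccupancyTrigger(size_t capacity_bytes, double initiating_occupancy)
    : _capacity_bytes(capacity_bytes), _used_bytes(0),
      _initiating_occupancy(initiating_occupancy) {}

  void allocate(size_t bytes) { _used_bytes += bytes; }
  double occupancy() const    { return (double)_used_bytes / (double)_capacity_bytes; }

  // Start a concurrent collection once occupancy reaches the trigger fraction,
  // so marking can finish before the generation actually fills up.
  bool should_start_concurrent_cycle() const {
    return occupancy() >= _initiating_occupancy;
  }
};

int main() {
  OldGenOccupancyTrigger old_gen(100u * 1024 * 1024, 0.68);
  old_gen.allocate(50u * 1024 * 1024);
  std::cout << "at 50% occupancy: " << old_gen.should_start_concurrent_cycle() << '\n';  // 0
  old_gen.allocate(20u * 1024 * 1024);
  std::cout << "at 70% occupancy: " << old_gen.should_start_concurrent_cycle() << '\n';  // 1
  return 0;
}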
-
-//
-// Closures of various sorts used by CMS to accomplish its work
-//
-
-// This closure is used to do concurrent marking from the roots
-// following the first checkpoint.
-class MarkFromRootsClosure: public BitMapClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _bitMap;
- CMSBitMap* _mut;
- CMSMarkStack* _markStack;
- bool _yield;
- int _skipBits;
- HeapWord* _finger;
- HeapWord* _threshold;
- DEBUG_ONLY(bool _verifying;)
-
- public:
- MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
- CMSBitMap* bitMap,
- CMSMarkStack* markStack,
- bool should_yield, bool verifying = false);
- bool do_bit(size_t offset);
- void reset(HeapWord* addr);
- inline void do_yield_check();
-
- private:
- void scanOopsInOop(HeapWord* ptr);
- void do_yield_work();
-};
-
-// This closure is used to do concurrent multi-threaded
-// marking from the roots following the first checkpoint.
-// XXX This should really be a subclass of the serial version
-// above, but I have not had the time to refactor things cleanly.
-class ParMarkFromRootsClosure: public BitMapClosure {
- CMSCollector* _collector;
- MemRegion _whole_span;
- MemRegion _span;
- CMSBitMap* _bit_map;
- CMSBitMap* _mut;
- OopTaskQueue* _work_queue;
- CMSMarkStack* _overflow_stack;
- int _skip_bits;
- HeapWord* _finger;
- HeapWord* _threshold;
- CMSConcMarkingTask* _task;
- public:
- ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- CMSMarkStack* overflow_stack);
- bool do_bit(size_t offset);
- inline void do_yield_check();
-
- private:
- void scan_oops_in_oop(HeapWord* ptr);
- void do_yield_work();
- bool get_work_from_overflow_stack();
-};
-
-// The following closures are used to do certain kinds of verification of
-// CMS marking.
-class PushAndMarkVerifyClosure: public MetadataVisitingOopIterateClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _verification_bm;
- CMSBitMap* _cms_bm;
- CMSMarkStack* _mark_stack;
- protected:
- void do_oop(oop p);
- template <class T> void do_oop_work(T *p);
-
- public:
- PushAndMarkVerifyClosure(CMSCollector* cms_collector,
- MemRegion span,
- CMSBitMap* verification_bm,
- CMSBitMap* cms_bm,
- CMSMarkStack* mark_stack);
- void do_oop(oop* p);
- void do_oop(narrowOop* p);
-
- // Deal with a stack overflow condition
- void handle_stack_overflow(HeapWord* lost);
-};
-
-class MarkFromRootsVerifyClosure: public BitMapClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSBitMap* _verification_bm;
- CMSBitMap* _cms_bm;
- CMSMarkStack* _mark_stack;
- HeapWord* _finger;
- PushAndMarkVerifyClosure _pam_verify_closure;
- public:
- MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
- CMSBitMap* verification_bm,
- CMSBitMap* cms_bm,
- CMSMarkStack* mark_stack);
- bool do_bit(size_t offset);
- void reset(HeapWord* addr);
-};
-
-
-// This closure is used to check that a certain set of bits is
-// "empty" (i.e. the bit vector doesn't have any 1-bits).
-class FalseBitMapClosure: public BitMapClosure {
- public:
- bool do_bit(size_t offset) {
- guarantee(false, "Should not have a 1 bit");
- return true;
- }
-};
-
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
- HeapWord* _previous_address;
- public:
- UpwardsObjectClosure() : _previous_address(NULL) { }
- void set_previous(HeapWord* addr) { _previous_address = addr; }
- HeapWord* previous() { return _previous_address; }
- // A return value of "true" can be used by the caller to decide
- // if this object's end should *NOT* be recorded in
- // _previous_address above.
- virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
-// This closure is used during the second checkpointing phase
-// to rescan the marked objects on the dirty cards in the mod
-// union table and the card table proper. It's invoked via
-// MarkFromDirtyCardsClosure below. It uses either
-// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
-// declared in genOopClosures.hpp to accomplish some of its work.
-// In the parallel case the bitMap is shared, so access to
-// it needs to be suitably synchronized for updates by embedded
-// closures that update it; however, this closure itself only
-// reads the bit_map and because it is idempotent, is immune to
-// reading stale values.
-class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
- #ifdef ASSERT
- CMSCollector* _collector;
- MemRegion _span;
- union {
- CMSMarkStack* _mark_stack;
- OopTaskQueue* _work_queue;
- };
- #endif // ASSERT
- bool _parallel;
- CMSBitMap* _bit_map;
- union {
- MarkRefsIntoAndScanClosure* _scan_closure;
- ParMarkRefsIntoAndScanClosure* _par_scan_closure;
- };
-
- public:
- ScanMarkedObjectsAgainClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceProcessor* rp,
- CMSBitMap* bit_map,
- CMSMarkStack* mark_stack,
- MarkRefsIntoAndScanClosure* cl):
- #ifdef ASSERT
- _collector(collector),
- _span(span),
- _mark_stack(mark_stack),
- #endif // ASSERT
- _parallel(false),
- _bit_map(bit_map),
- _scan_closure(cl) { }
-
- ScanMarkedObjectsAgainClosure(CMSCollector* collector,
- MemRegion span,
- ReferenceProcessor* rp,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- ParMarkRefsIntoAndScanClosure* cl):
- #ifdef ASSERT
- _collector(collector),
- _span(span),
- _work_queue(work_queue),
- #endif // ASSERT
- _parallel(true),
- _bit_map(bit_map),
- _par_scan_closure(cl) { }
-
- bool do_object_b(oop obj) {
- guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
- return false;
- }
- bool do_object_bm(oop p, MemRegion mr);
-};
-
-// This closure is used during the second checkpointing phase
-// to rescan the marked objects on the dirty cards in the mod
-// union table and the card table proper. It invokes
-// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
-// In the parallel case, the bit map is shared and requires
-// synchronized access.
-class MarkFromDirtyCardsClosure: public MemRegionClosure {
- CompactibleFreeListSpace* _space;
- ScanMarkedObjectsAgainClosure _scan_cl;
- size_t _num_dirty_cards;
-
- public:
- MarkFromDirtyCardsClosure(CMSCollector* collector,
- MemRegion span,
- CompactibleFreeListSpace* space,
- CMSBitMap* bit_map,
- CMSMarkStack* mark_stack,
- MarkRefsIntoAndScanClosure* cl):
- _space(space),
- _scan_cl(collector, span, collector->ref_processor(), bit_map,
- mark_stack, cl),
- _num_dirty_cards(0) { }
-
- MarkFromDirtyCardsClosure(CMSCollector* collector,
- MemRegion span,
- CompactibleFreeListSpace* space,
- CMSBitMap* bit_map,
- OopTaskQueue* work_queue,
- ParMarkRefsIntoAndScanClosure* cl):
- _space(space),
- _scan_cl(collector, span, collector->ref_processor(), bit_map,
- work_queue, cl),
- _num_dirty_cards(0) { }
-
- void do_MemRegion(MemRegion mr);
- void set_space(CompactibleFreeListSpace* space) { _space = space; }
- size_t num_dirty_cards() { return _num_dirty_cards; }
-};
-
-// This closure is used in the non-product build to check
-// that there are no MemRegions with a certain property.
-class FalseMemRegionClosure: public MemRegionClosure {
- void do_MemRegion(MemRegion mr) {
- guarantee(!mr.is_empty(), "Shouldn't be empty");
- guarantee(false, "Should never be here");
- }
-};
-
-// This closure is used during the precleaning phase
-// to "carefully" rescan marked objects on dirty cards.
-// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
-// to accomplish some of its work.
-class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
- CMSCollector* _collector;
- MemRegion _span;
- bool _yield;
- Mutex* _freelistLock;
- CMSBitMap* _bitMap;
- CMSMarkStack* _markStack;
- MarkRefsIntoAndScanClosure* _scanningClosure;
- DEBUG_ONLY(HeapWord* _last_scanned_object;)
-
- public:
- ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bitMap,
- CMSMarkStack* markStack,
- MarkRefsIntoAndScanClosure* cl,
- bool should_yield):
- _collector(collector),
- _span(span),
- _yield(should_yield),
- _bitMap(bitMap),
- _markStack(markStack),
- _scanningClosure(cl)
- DEBUG_ONLY(COMMA _last_scanned_object(NULL))
- { }
-
- void do_object(oop p) {
- guarantee(false, "call do_object_careful instead");
- }
-
- size_t do_object_careful(oop p) {
- guarantee(false, "Unexpected caller");
- return 0;
- }
-
- size_t do_object_careful_m(oop p, MemRegion mr);
-
- void setFreelistLock(Mutex* m) {
- _freelistLock = m;
- _scanningClosure->set_freelistLock(m);
- }
-
- private:
- inline bool do_yield_check();
-
- void do_yield_work();
-};
-
-class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
- CMSCollector* _collector;
- MemRegion _span;
- bool _yield;
- CMSBitMap* _bit_map;
- CMSMarkStack* _mark_stack;
- PushAndMarkClosure* _scanning_closure;
- unsigned int _before_count;
-
- public:
- SurvivorSpacePrecleanClosure(CMSCollector* collector,
- MemRegion span,
- CMSBitMap* bit_map,
- CMSMarkStack* mark_stack,
- PushAndMarkClosure* cl,
- unsigned int before_count,
- bool should_yield):
- _collector(collector),
- _span(span),
- _yield(should_yield),
- _bit_map(bit_map),
- _mark_stack(mark_stack),
- _scanning_closure(cl),
- _before_count(before_count)
- { }
-
- void do_object(oop p) {
- guarantee(false, "call do_object_careful instead");
- }
-
- size_t do_object_careful(oop p);
-
- size_t do_object_careful_m(oop p, MemRegion mr) {
- guarantee(false, "Unexpected caller");
- return 0;
- }
-
- private:
- inline void do_yield_check();
- void do_yield_work();
-};
-
-// This closure is used to accomplish the sweeping work
-// after the second checkpoint but before the concurrent reset
-// phase.
-//
-// Terminology
-// left hand chunk (LHC) - block of one or more chunks currently being
-// coalesced. The LHC is available for coalescing with a new chunk.
-// right hand chunk (RHC) - block that is currently being swept that is
-// free or garbage that can be coalesced with the LHC.
-// _inFreeRange is true if there is currently a LHC
-// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
-// _freeRangeInFreeLists is true if the LHC is in the free lists.
-// _freeFinger is the address of the current LHC
-class SweepClosure: public BlkClosureCareful {
- CMSCollector* _collector; // collector doing the work
- ConcurrentMarkSweepGeneration* _g; // Generation being swept
- CompactibleFreeListSpace* _sp; // Space being swept
- HeapWord* _limit;// the address at or above which the sweep should stop
- // because we do not expect blocks that are newly garbage and
- // eligible for sweeping to lie past that address.
- Mutex* _freelistLock; // Free list lock (in space)
- CMSBitMap* _bitMap; // Marking bit map (in
- // generation)
- bool _inFreeRange; // Indicates if we are in the
- // midst of a free run
- bool _freeRangeInFreeLists;
- // Often, we have just found
- // a free chunk and started
- // a new free range; we do not
- // eagerly remove this chunk from
- // the free lists unless there is
- // a possibility of coalescing.
- // When true, this flag indicates
- // that the _freeFinger below
- // points to a potentially free chunk
- // that may still be in the free lists
- bool _lastFreeRangeCoalesced;
- // free range contains chunks
- // coalesced
- bool _yield;
- // Whether sweeping should be
- // done with yields. For instance
- // when done by the foreground
- // collector we shouldn't yield.
- HeapWord* _freeFinger; // When _inFreeRange is set, the
- // pointer to the "left hand
- // chunk"
- size_t _freeRangeSize;
- // When _inFreeRange is set, this
- // indicates the accumulated size
- // of the "left hand chunk"
- NOT_PRODUCT(
- size_t _numObjectsFreed;
- size_t _numWordsFreed;
- size_t _numObjectsLive;
- size_t _numWordsLive;
- size_t _numObjectsAlreadyFree;
- size_t _numWordsAlreadyFree;
- FreeChunk* _last_fc;
- )
- private:
- // Code that is common to a free chunk or garbage when
- // encountered during sweeping.
- void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
- // Process a free chunk during sweeping.
- void do_already_free_chunk(FreeChunk *fc);
- // Work method called when processing an already free or a
- // freshly garbage chunk to do a lookahead and possibly a
- // preemptive flush if crossing over _limit.
- void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
- // Process a garbage chunk during sweeping.
- size_t do_garbage_chunk(FreeChunk *fc);
- // Process a live chunk during sweeping.
- size_t do_live_chunk(FreeChunk* fc);
-
- // Accessors.
- HeapWord* freeFinger() const { return _freeFinger; }
- void set_freeFinger(HeapWord* v) { _freeFinger = v; }
- bool inFreeRange() const { return _inFreeRange; }
- void set_inFreeRange(bool v) { _inFreeRange = v; }
- bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
- void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
- bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
- void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
-
- // Initialize a free range.
- void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
- // Return this chunk to the free lists.
- void flush_cur_free_chunk(HeapWord* chunk, size_t size);
-
- // Check if we should yield and do so when necessary.
- inline void do_yield_check(HeapWord* addr);
-
- // Yield
- void do_yield_work(HeapWord* addr);
-
- // Debugging/Printing
- void print_free_block_coalesced(FreeChunk* fc) const;
-
- public:
- SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
- CMSBitMap* bitMap, bool should_yield);
- ~SweepClosure() PRODUCT_RETURN;
-
- size_t do_blk_careful(HeapWord* addr);
- void print() const { print_on(tty); }
- void print_on(outputStream *st) const;
-};
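// Illustrative sketch (standalone, simplified): the left-hand-chunk /
// right-hand-chunk coalescing idea described in the terminology comment
// above. Blocks are modelled as (size, live) pairs; consecutive non-live
// blocks are merged into one free range. All names here are illustrative.
#include <cstddef>
#include <iostream>
#include <vector>

struct Block     { size_t words; bool live; };
struct FreeRange { size_t start_word; size_t words; };

std::vector<FreeRange> sweep(const std::vector<Block>& blocks) {
  std::vector<FreeRange> free_ranges;
  bool in_free_range = false;          // do we currently have a "left hand chunk"?
  FreeRange current = {0, 0};
  size_t addr = 0;

  for (const Block& b : blocks) {
    if (!b.live) {
      if (!in_free_range) {            // start a new left hand chunk
        current = {addr, 0};
        in_free_range = true;
      }
      current.words += b.words;        // coalesce the right hand chunk into it
    } else if (in_free_range) {
      free_ranges.push_back(current);  // a live block ends the free run
      in_free_range = false;
    }
    addr += b.words;
  }
  if (in_free_range) {
    free_ranges.push_back(current);
  }
  return free_ranges;
}

int main() {
  std::vector<Block> heap = {{8, true}, {4, false}, {6, false}, {10, true}, {2, false}};
  for (const FreeRange& fr : sweep(heap)) {
    std::cout << "free range at word " << fr.start_word
              << ", " << fr.words << " words\n";   // words [8,18) coalesced, then [28,30)
  }
  return 0;
}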
-
-// Closures related to weak references processing
-
-// During CMS' weak reference processing, this is a
-// work-routine/closure used to complete transitive
-// marking of objects as live after a certain point
-// in which an initial set has been completely accumulated.
-// This closure is currently used both during the final
-// remark stop-world phase, as well as during the concurrent
-// precleaning of the discovered reference lists.
-class CMSDrainMarkingStackClosure: public VoidClosure {
- CMSCollector* _collector;
- MemRegion _span;
- CMSMarkStack* _mark_stack;
- CMSBitMap* _bit_map;
- CMSKeepAliveClosure* _keep_alive;
- bool _concurrent_precleaning;
- public:
- CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
- CMSBitMap* bit_map, CMSMarkStack* mark_stack,
- CMSKeepAliveClosure* keep_alive,
- bool cpc):
- _collector(collector),
- _span(span),
- _mark_stack(mark_stack),
- _bit_map(bit_map),
- _keep_alive(keep_alive),
- _concurrent_precleaning(cpc) {
- assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
- "Mismatch");
- }
-
- void do_void();
-};
-
-// A parallel version of CMSDrainMarkingStackClosure above.
-class CMSParDrainMarkingStackClosure: public VoidClosure {
- CMSCollector* _collector;
- MemRegion _span;
- OopTaskQueue* _work_queue;
- CMSBitMap* _bit_map;
- CMSInnerParMarkAndPushClosure _mark_and_push;
-
- public:
- CMSParDrainMarkingStackClosure(CMSCollector* collector,
- MemRegion span, CMSBitMap* bit_map,
- OopTaskQueue* work_queue):
- _collector(collector),
- _span(span),
- _work_queue(work_queue),
- _bit_map(bit_map),
- _mark_and_push(collector, span, bit_map, work_queue) { }
-
- public:
- void trim_queue(uint max);
- void do_void();
-};
-
-// Allow yielding or short-circuiting of reference list
-// precleaning work.
-class CMSPrecleanRefsYieldClosure: public YieldClosure {
- CMSCollector* _collector;
- void do_yield_work();
- public:
- CMSPrecleanRefsYieldClosure(CMSCollector* collector):
- _collector(collector) {}
- virtual bool should_return();
-};
-
-
-// Convenience class that locks free list locks for given CMS collector
-class FreelistLocker: public StackObj {
- private:
- CMSCollector* _collector;
- public:
- FreelistLocker(CMSCollector* collector):
- _collector(collector) {
- _collector->getFreelistLocks();
- }
-
- ~FreelistLocker() {
- _collector->releaseFreelistLocks();
- }
-};
-
-// Mark all dead objects in a given space.
-class MarkDeadObjectsClosure: public BlkClosure {
- const CMSCollector* _collector;
- const CompactibleFreeListSpace* _sp;
- CMSBitMap* _live_bit_map;
- CMSBitMap* _dead_bit_map;
-public:
- MarkDeadObjectsClosure(const CMSCollector* collector,
- const CompactibleFreeListSpace* sp,
- CMSBitMap *live_bit_map,
- CMSBitMap *dead_bit_map) :
- _collector(collector),
- _sp(sp),
- _live_bit_map(live_bit_map),
- _dead_bit_map(dead_bit_map) {}
- size_t do_blk(HeapWord* addr);
-};
-
-class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
-
- public:
- TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
-};
-
-
-#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,472 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
-#define SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/compactibleFreeListSpace.inline.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/gcUtil.hpp"
-#include "utilities/align.hpp"
-#include "utilities/bitMap.inline.hpp"
-
-inline void CMSBitMap::clear_all() {
- assert_locked();
- // CMS bitmaps usually cover large memory regions
- _bm.clear_large();
- return;
-}
-
-inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
- return (pointer_delta(addr, _bmStartWord)) >> _shifter;
-}
-
-inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
- return _bmStartWord + (offset << _shifter);
-}
-
-inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
- assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
- return diff >> _shifter;
-}
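// Illustrative sketch (standalone): the address <-> bit-offset mapping used
// by the conversions above. With a shifter of 0 every heap word gets its own
// bit; a larger shifter maps 2^shifter words onto one bit. Values below are
// assumptions for the example.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct MarkBitMapModel {
  uintptr_t start_word;   // first heap word covered by the map
  unsigned  shifter;      // log2(words per bit)

  size_t word_to_bit(uintptr_t word) const {
    return (word - start_word) >> shifter;
  }
  uintptr_t bit_to_word(size_t bit) const {
    return start_word + ((uintptr_t)bit << shifter);
  }
};

int main() {
  MarkBitMapModel bm = {0x1000, 0};       // one bit per word (assumed)
  size_t bit = bm.word_to_bit(0x1007);
  assert(bit == 7);
  assert(bm.bit_to_word(bit) == 0x1007);  // round-trips exactly when shifter == 0
  std::cout << "bit index: " << bit << '\n';
  return 0;
}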
-
-inline void CMSBitMap::mark(HeapWord* addr) {
- assert_locked();
- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
- "outside underlying space?");
- _bm.set_bit(heapWordToOffset(addr));
-}
-
-inline bool CMSBitMap::par_mark(HeapWord* addr) {
- assert_locked();
- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
- "outside underlying space?");
- return _bm.par_at_put(heapWordToOffset(addr), true);
-}
-
-inline void CMSBitMap::par_clear(HeapWord* addr) {
- assert_locked();
- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
- "outside underlying space?");
- _bm.par_at_put(heapWordToOffset(addr), false);
-}
-
-inline void CMSBitMap::mark_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size is usually just 1 bit.
- _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::small_range);
-}
-
-inline void CMSBitMap::clear_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size is usually just 1 bit.
- _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::small_range);
-}
-
-inline void CMSBitMap::par_mark_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size is usually just 1 bit.
- _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::small_range);
-}
-
-inline void CMSBitMap::par_clear_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size is usually just 1 bit.
- _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::small_range);
-}
-
-inline void CMSBitMap::mark_large_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size must be greater than 32 bytes.
- _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::large_range);
-}
-
-inline void CMSBitMap::clear_large_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size must be greater than 32 bytes.
- _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::large_range);
-}
-
-inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size must be greater than 32 bytes.
- _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::large_range);
-}
-
-inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
- NOT_PRODUCT(region_invariant(mr));
- // Range size must be greater than 32 bytes.
- _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
- BitMap::large_range);
-}
-
-// Starting at "addr" (inclusive) return a memory region
-// corresponding to the first maximally contiguous marked ("1") region.
-inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
- return getAndClearMarkedRegion(addr, endWord());
-}
-
-// Starting at "start_addr" (inclusive) return a memory region
-// corresponding to the first maximal contiguous marked ("1") region
-// strictly less than end_addr.
-inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
- HeapWord* end_addr) {
- HeapWord *start, *end;
- assert_locked();
- start = getNextMarkedWordAddress (start_addr, end_addr);
- end = getNextUnmarkedWordAddress(start, end_addr);
- assert(start <= end, "Consistency check");
- MemRegion mr(start, end);
- if (!mr.is_empty()) {
- clear_range(mr);
- }
- return mr;
-}
-
-inline bool CMSBitMap::isMarked(HeapWord* addr) const {
- assert_locked();
- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
- "outside underlying space?");
- return _bm.at(heapWordToOffset(addr));
-}
-
-// The same as isMarked() but without a lock check.
-inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
- "outside underlying space?");
- return _bm.at(heapWordToOffset(addr));
-}
-
-
-inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
- assert_locked();
- assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
- "outside underlying space?");
- return !_bm.at(heapWordToOffset(addr));
-}
-
-// Return the HeapWord address corresponding to next "1" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
- return getNextMarkedWordAddress(addr, endWord());
-}
-
-// Return the least HeapWord address corresponding to next "1" bit
-// starting at start_addr (inclusive) but strictly less than end_addr.
-inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
- HeapWord* start_addr, HeapWord* end_addr) const {
- assert_locked();
- size_t nextOffset = _bm.get_next_one_offset(
- heapWordToOffset(start_addr),
- heapWordToOffset(end_addr));
- HeapWord* nextAddr = offsetToHeapWord(nextOffset);
- assert(nextAddr >= start_addr &&
- nextAddr <= end_addr, "get_next_one postcondition");
- assert((nextAddr == end_addr) ||
- isMarked(nextAddr), "get_next_one postcondition");
- return nextAddr;
-}
-
-
-// Return the HeapWord address corresponding to the next "0" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
- return getNextUnmarkedWordAddress(addr, endWord());
-}
-
-// Return the HeapWord address corresponding to the next "0" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
- HeapWord* start_addr, HeapWord* end_addr) const {
- assert_locked();
- size_t nextOffset = _bm.get_next_zero_offset(
- heapWordToOffset(start_addr),
- heapWordToOffset(end_addr));
- HeapWord* nextAddr = offsetToHeapWord(nextOffset);
- assert(nextAddr >= start_addr &&
- nextAddr <= end_addr, "get_next_zero postcondition");
- assert((nextAddr == end_addr) ||
- isUnmarked(nextAddr), "get_next_zero postcondition");
- return nextAddr;
-}
-
-inline bool CMSBitMap::isAllClear() const {
- assert_locked();
- return getNextMarkedWordAddress(startWord()) >= endWord();
-}
-
-inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
- HeapWord* right) {
- assert_locked();
- left = MAX2(_bmStartWord, left);
- right = MIN2(_bmStartWord + _bmWordSize, right);
- if (right > left) {
- _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
- }
-}
-
-inline void CMSCollector::save_sweep_limits() {
- _cmsGen->save_sweep_limit();
-}
-
-inline bool CMSCollector::is_dead_obj(oop obj) const {
- HeapWord* addr = (HeapWord*)obj;
- assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
- && _cmsGen->cmsSpace()->block_is_obj(addr)),
- "must be object");
- return should_unload_classes() &&
- _collectorState == Sweeping &&
- !_markBitMap.isMarked(addr);
-}
-
-inline bool CMSCollector::should_abort_preclean() const {
- // We are in the midst of an "abortable preclean" and either
- // scavenge is done or foreground GC wants to take over collection
- return _collectorState == AbortablePreclean &&
- (_abort_preclean || _foregroundGCIsActive ||
- CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
-}
-
-inline size_t CMSCollector::get_eden_used() const {
- return _young_gen->eden()->used();
-}
-
-inline size_t CMSCollector::get_eden_capacity() const {
- return _young_gen->eden()->capacity();
-}
-
-inline bool CMSStats::valid() const {
- return _valid_bits == _ALL_VALID;
-}
-
-inline void CMSStats::record_gc0_begin() {
- if (_gc0_begin_time.is_updated()) {
- float last_gc0_period = _gc0_begin_time.seconds();
- _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
- last_gc0_period, _gc0_alpha);
- _gc0_alpha = _saved_alpha;
- _valid_bits |= _GC0_VALID;
- }
- _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
-
- _gc0_begin_time.update();
-}
-
-inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
- float last_gc0_duration = _gc0_begin_time.seconds();
- _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
- last_gc0_duration, _gc0_alpha);
-
- // Amount promoted.
- _cms_used_at_gc0_end = cms_gen_bytes_used;
-
- size_t promoted_bytes = 0;
- if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
- promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
- }
-
- // If the young gen collection was skipped, then the
- // number of promoted bytes will be 0 and adding it to the
- // average will incorrectly lessen the average. It is, however,
- // also possible that no promotion was needed.
- //
- // _gc0_promoted used to be calculated as
- // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
- // promoted_bytes, _gc0_alpha);
- _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
- _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
-
- // Amount directly allocated.
- size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
- _cms_gen->reset_direct_allocated_words();
- _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
- allocated_bytes, _gc0_alpha);
-}
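// Illustrative sketch (standalone): the kind of exponentially weighted
// average used for the _gc0_* and _cms_* statistics above. The 0..1 alpha
// parameterisation here is an assumption, not the HotSpot signature.
#include <iostream>

// new_avg = alpha * sample + (1 - alpha) * old_avg
inline double exp_avg(double old_avg, double sample, double alpha) {
  return alpha * sample + (1.0 - alpha) * old_avg;
}

int main() {
  double avg_pause_ms = 0.0;
  const double alpha = 0.25;             // weight of the newest sample (assumed)
  const double samples[] = {10.0, 12.0, 50.0, 11.0};
  for (double sample : samples) {
    avg_pause_ms = exp_avg(avg_pause_ms, sample, alpha);
    std::cout << "avg after sample " << sample << ": " << avg_pause_ms << '\n';
  }
  // The single 50 ms outlier nudges the average but does not dominate it.
  return 0;
}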
-
-inline void CMSStats::record_cms_begin() {
- _cms_timer.stop();
-
- // This is just an approximate value, but is good enough.
- _cms_used_at_cms_begin = _cms_used_at_gc0_end;
-
- _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
- (float) _cms_timer.seconds(), _cms_alpha);
- _cms_begin_time.update();
-
- _cms_timer.reset();
- _cms_timer.start();
-}
-
-inline void CMSStats::record_cms_end() {
- _cms_timer.stop();
-
- float cur_duration = _cms_timer.seconds();
- _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
- cur_duration, _cms_alpha);
-
- _cms_end_time.update();
- _cms_alpha = _saved_alpha;
- _allow_duty_cycle_reduction = true;
- _valid_bits |= _CMS_VALID;
-
- _cms_timer.start();
-}
-
-inline double CMSStats::cms_time_since_begin() const {
- return _cms_begin_time.seconds();
-}
-
-inline double CMSStats::cms_time_since_end() const {
- return _cms_end_time.seconds();
-}
-
-inline double CMSStats::promotion_rate() const {
- assert(valid(), "statistics not valid yet");
- return gc0_promoted() / gc0_period();
-}
-
-inline double CMSStats::cms_allocation_rate() const {
- assert(valid(), "statistics not valid yet");
- return cms_allocated() / gc0_period();
-}
-
-inline double CMSStats::cms_consumption_rate() const {
- assert(valid(), "statistics not valid yet");
- return (gc0_promoted() + cms_allocated()) / gc0_period();
-}
-
-inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
- cmsSpace()->save_sweep_limit();
-}
-
-inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
- return _cmsSpace->used_region_at_save_marks();
-}
-
-template <typename OopClosureType>
-void ConcurrentMarkSweepGeneration::oop_since_save_marks_iterate(OopClosureType* cl) {
- cl->set_generation(this);
- cmsSpace()->oop_since_save_marks_iterate(cl);
- cl->reset_generation();
- save_marks();
-}
-
-inline void MarkFromRootsClosure::do_yield_check() {
- if (ConcurrentMarkSweepThread::should_yield() &&
- !_collector->foregroundGCIsActive() &&
- _yield) {
- do_yield_work();
- }
-}
-
-inline void ParMarkFromRootsClosure::do_yield_check() {
- if (ConcurrentMarkSweepThread::should_yield() &&
- !_collector->foregroundGCIsActive()) {
- do_yield_work();
- }
-}
-
-inline void PushOrMarkClosure::do_yield_check() {
- _parent->do_yield_check();
-}
-
-inline void ParPushOrMarkClosure::do_yield_check() {
- _parent->do_yield_check();
-}
-
-// Return value of "true" indicates that the on-going preclean
-// should be aborted.
-inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
- if (ConcurrentMarkSweepThread::should_yield() &&
- !_collector->foregroundGCIsActive() &&
- _yield) {
- // Sample young gen size before and after yield
- _collector->sample_eden();
- do_yield_work();
- _collector->sample_eden();
- return _collector->should_abort_preclean();
- }
- return false;
-}
-
-inline void SurvivorSpacePrecleanClosure::do_yield_check() {
- if (ConcurrentMarkSweepThread::should_yield() &&
- !_collector->foregroundGCIsActive() &&
- _yield) {
- // Sample young gen size before and after yield
- _collector->sample_eden();
- do_yield_work();
- _collector->sample_eden();
- }
-}
-
-inline void SweepClosure::do_yield_check(HeapWord* addr) {
- if (ConcurrentMarkSweepThread::should_yield() &&
- !_collector->foregroundGCIsActive() &&
- _yield) {
- do_yield_work(addr);
- }
-}
-
-inline void MarkRefsIntoAndScanClosure::do_yield_check() {
- // The conditions are ordered for the remarking phase
- // when _yield is false.
- if (_yield &&
- !_collector->foregroundGCIsActive() &&
- ConcurrentMarkSweepThread::should_yield()) {
- do_yield_work();
- }
-}
-
-
-inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
- // Align the end of mr so it's at a card boundary.
- // This is superfluous except at the end of the space;
- // we should do better than this XXX
- MemRegion mr2(mr.start(), align_up(mr.end(),
- CardTable::card_size /* bytes */));
- _t->mark_range(mr2);
-}
-
-inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
- // Align the end of mr so it's at a card boundary.
- // This is superfluous except at the end of the space;
- // we should do better than this XXX
- MemRegion mr2(mr.start(), align_up(mr.end(),
- CardTable::card_size /* bytes */));
- _t->par_mark_range(mr2);
-}
-
-#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,311 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/gcId.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/init.hpp"
-#include "runtime/java.hpp"
-#include "runtime/javaCalls.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-#include "runtime/vmThread.hpp"
-
-// ======= Concurrent Mark Sweep Thread ========
-
-ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
-int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
-
-volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
-
-ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
- : ConcurrentGCThread() {
- assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
- assert(_cmst == NULL, "CMS thread already created");
- _cmst = this;
- assert(_collector == NULL, "Collector already set");
- _collector = collector;
-
- set_name("CMS Main Thread");
-
- // An old comment here said: "Priority should be just less
- // than that of VMThread". Since the VMThread runs at
- // NearMaxPriority, the old comment was inaccurate, but
- // changing the default priority to NearMaxPriority-1
- // could change current behavior, so the default of
- // NearMaxPriority stays in place.
- //
- // Note that there's a possibility of the VMThread
- // starving if UseCriticalCMSThreadPriority is on.
- // That won't happen on Solaris for various reasons,
- // but may well happen on non-Solaris platforms.
- create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
-}
-
-void ConcurrentMarkSweepThread::run_service() {
- assert(this == cmst(), "just checking");
-
- if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
- log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
- }
-
- while (!should_terminate()) {
- sleepBeforeNextCycle();
- if (should_terminate()) break;
- GCIdMark gc_id_mark;
- GCCause::Cause cause = _collector->_full_gc_requested ?
- _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
- _collector->collect_in_background(cause);
- }
-
- // Check that the state of any protocol for synchronization
- // between background (CMS) and foreground collector is "clean"
- // (i.e. will not potentially block the foreground collector,
- // requiring action by us).
- verify_ok_to_terminate();
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
- assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
- cms_thread_wants_cms_token()),
- "Must renounce all worldly possessions and desires for nirvana");
- _collector->verify_ok_to_terminate();
-}
-#endif
-
-// create and start a new ConcurrentMarkSweep Thread for given CMS generation
-ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
- guarantee(_cmst == NULL, "start() called twice!");
- ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
- assert(_cmst == th, "Where did the just-created CMS thread go?");
- return th;
-}
-
-void ConcurrentMarkSweepThread::stop_service() {
- // Now post a notify on CGC_lock so as to nudge
- // CMS thread(s) that might be slumbering in
- // sleepBeforeNextCycle.
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- CGC_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
- assert(tc != NULL, "Null ThreadClosure");
- if (cmst() != NULL && !cmst()->has_terminated()) {
- tc->do_thread(cmst());
- }
- assert(Universe::is_fully_initialized(),
- "Called too early, make sure heap is fully initialized");
- if (_collector != NULL) {
- AbstractWorkGang* gang = _collector->conc_workers();
- if (gang != NULL) {
- gang->threads_do(tc);
- }
- }
-}
-
-void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
- if (cmst() != NULL && !cmst()->has_terminated()) {
- cmst()->print_on(st);
- st->cr();
- }
- if (_collector != NULL) {
- AbstractWorkGang* gang = _collector->conc_workers();
- if (gang != NULL) {
- gang->print_worker_threads_on(st);
- }
- }
-}
-
-void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
- assert(UseConcMarkSweepGC, "just checking");
-
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- if (!is_cms_thread) {
- assert(Thread::current()->is_VM_thread(), "Not a VM thread");
- CMSSynchronousYieldRequest yr;
- while (CMS_flag_is_set(CMS_cms_has_token)) {
- // indicate that we want to get the token
- set_CMS_flag(CMS_vm_wants_token);
- CGC_lock->wait_without_safepoint_check();
- }
- // claim the token and proceed
- clear_CMS_flag(CMS_vm_wants_token);
- set_CMS_flag(CMS_vm_has_token);
- } else {
- assert(Thread::current()->is_ConcurrentGC_thread(),
- "Not a CMS thread");
- // The following barrier assumes there's only one CMS thread.
- // This will need to be modified if there is more than one CMS thread.
- while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
- set_CMS_flag(CMS_cms_wants_token);
- CGC_lock->wait_without_safepoint_check();
- }
- // claim the token
- clear_CMS_flag(CMS_cms_wants_token);
- set_CMS_flag(CMS_cms_has_token);
- }
-}
-
-void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
- assert(UseConcMarkSweepGC, "just checking");
-
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- if (!is_cms_thread) {
- assert(Thread::current()->is_VM_thread(), "Not a VM thread");
- assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
- clear_CMS_flag(CMS_vm_has_token);
- if (CMS_flag_is_set(CMS_cms_wants_token)) {
- // wake-up a waiting CMS thread
- CGC_lock->notify();
- }
- assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
- "Should have been cleared");
- } else {
- assert(Thread::current()->is_ConcurrentGC_thread(),
- "Not a CMS thread");
- assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
- clear_CMS_flag(CMS_cms_has_token);
- if (CMS_flag_is_set(CMS_vm_wants_token)) {
- // wake-up a waiting VM thread
- CGC_lock->notify();
- }
- assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
- "Should have been cleared");
- }
-}
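// Illustrative sketch (standalone, simplified): the two-party token handoff
// expressed with std::mutex / std::condition_variable. Only the shape of the
// protocol is shown; the flag names loosely mirror the code above, and the
// rest is an assumption for illustration.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct TokenProtocol {
  std::mutex lock;
  std::condition_variable cv;
  bool vm_has_token = false;
  bool gc_has_token = false;

  void vm_acquire() {
    std::unique_lock<std::mutex> l(lock);
    cv.wait(l, [this] { return !gc_has_token; });  // wait until the GC thread gives it up
    vm_has_token = true;
  }
  void vm_release() {
    { std::lock_guard<std::mutex> l(lock); vm_has_token = false; }
    cv.notify_all();                               // wake a waiting GC thread
  }
  void gc_acquire() {
    std::unique_lock<std::mutex> l(lock);
    cv.wait(l, [this] { return !vm_has_token; });
    gc_has_token = true;
  }
  void gc_release() {
    { std::lock_guard<std::mutex> l(lock); gc_has_token = false; }
    cv.notify_all();
  }
};

int main() {
  TokenProtocol tp;
  std::thread gc([&] {
    tp.gc_acquire();
    std::cout << "gc thread holds the token\n";
    tp.gc_release();
  });
  tp.vm_acquire();
  std::cout << "vm thread holds the token\n";
  tp.vm_release();
  gc.join();
  return 0;
}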
-
-// Wait until any cms_lock event
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
- if (should_terminate() || _collector->_full_gc_requested) {
- return;
- }
- set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
- CGC_lock->wait_without_safepoint_check(t_millis);
- clear_CMS_flag(CMS_cms_wants_token);
- assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
- "Should not be set");
-}
-
-// Wait until the next synchronous GC, a concurrent full gc request,
-// or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
- // Wait time in millis or 0 value representing infinite wait for a scavenge
- assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
-
- CMSHeap* heap = CMSHeap::heap();
- double start_time_secs = os::elapsedTime();
- double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
-
- // Total collections count before waiting loop
- unsigned int before_count;
- {
- MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
- before_count = heap->total_collections();
- }
-
- unsigned int loop_count = 0;
-
- while(!should_terminate()) {
- double now_time = os::elapsedTime();
- long wait_time_millis;
-
- if(t_millis != 0) {
- // New wait limit
- wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
- if(wait_time_millis <= 0) {
- // Wait time is over
- break;
- }
- } else {
- // No wait limit; wait forever if necessary
- wait_time_millis = 0;
- }
-
- // Wait until the next event or the remaining timeout
- {
- MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-
- if (should_terminate() || _collector->_full_gc_requested) {
- return;
- }
- set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
- assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
- CGC_lock->wait_without_safepoint_check(wait_time_millis);
- clear_CMS_flag(CMS_cms_wants_token);
- assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
- "Should not be set");
- }
-
- // Extra wait time check before entering the heap lock to get the collection count
- if(t_millis != 0 && os::elapsedTime() >= end_time_secs) {
- // Wait time is over
- break;
- }
-
- // Total collections count after the event
- unsigned int after_count;
- {
- MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
- after_count = heap->total_collections();
- }
-
- if(before_count != after_count) {
- // There was a collection - success
- break;
- }
-
- // Too many loops warning
- if(++loop_count == 0) {
- log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
- }
- }
-}
-
-void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
- while (!should_terminate()) {
- if(CMSWaitDuration >= 0) {
- // Wait until the next synchronous GC, a concurrent full gc
- // request or a timeout, whichever is earlier.
- wait_on_cms_lock_for_scavenge(CMSWaitDuration);
- } else {
- // Wait until any cms_lock event, or for the check interval, so that shouldConcurrentCollect() is not called continuously
- wait_on_cms_lock(CMSCheckInterval);
- }
- // Check if we should start a CMS collection cycle
- if (_collector->shouldConcurrentCollect()) {
- return;
- }
- // .. collection criterion not yet met, let's go back
- // and wait some more
- }
-}
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPTHREAD_HPP
-#define SHARE_GC_CMS_CONCURRENTMARKSWEEPTHREAD_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-#include "runtime/thread.hpp"
-
-class ConcurrentMarkSweepGeneration;
-class CMSCollector;
-
-// The Concurrent Mark Sweep GC Thread
-class ConcurrentMarkSweepThread: public ConcurrentGCThread {
- friend class VMStructs;
- friend class ConcurrentMarkSweepGeneration; // XXX should remove friendship
- friend class CMSCollector;
-
- private:
- static ConcurrentMarkSweepThread* _cmst;
- static CMSCollector* _collector;
-
- enum CMS_flag_type {
- CMS_nil = NoBits,
- CMS_cms_wants_token = nth_bit(0),
- CMS_cms_has_token = nth_bit(1),
- CMS_vm_wants_token = nth_bit(2),
- CMS_vm_has_token = nth_bit(3)
- };
-
- static int _CMS_flag;
-
- static bool CMS_flag_is_set(int b) { return (_CMS_flag & b) != 0; }
- static bool set_CMS_flag(int b) { return (_CMS_flag |= b) != 0; }
- static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; }
- void sleepBeforeNextCycle();
-
- // CMS thread should yield for a young gen collection and direct allocations
- static char _pad_1[64 - sizeof(jint)]; // prevent cache-line sharing
- static volatile jint _pending_yields;
- static char _pad_2[64 - sizeof(jint)]; // prevent cache-line sharing
-
- // debugging
- void verify_ok_to_terminate() const PRODUCT_RETURN;
-
- void run_service();
- void stop_service();
-
- public:
- // Constructor
- ConcurrentMarkSweepThread(CMSCollector* collector);
-
- static void threads_do(ThreadClosure* tc);
-
- // Printing
- static void print_all_on(outputStream* st);
- static void print_all() { print_all_on(tty); }
-
- // Returns the CMS Thread
- static ConcurrentMarkSweepThread* cmst() { return _cmst; }
- static CMSCollector* collector() { return _collector; }
-
- // Create and start the CMS Thread, or stop it on shutdown
- static ConcurrentMarkSweepThread* start(CMSCollector* collector);
-
- // Synchronization using CMS token
- static void synchronize(bool is_cms_thread);
- static void desynchronize(bool is_cms_thread);
- static bool vm_thread_has_cms_token() {
- return CMS_flag_is_set(CMS_vm_has_token);
- }
- static bool cms_thread_has_cms_token() {
- return CMS_flag_is_set(CMS_cms_has_token);
- }
- static bool vm_thread_wants_cms_token() {
- return CMS_flag_is_set(CMS_vm_wants_token);
- }
- static bool cms_thread_wants_cms_token() {
- return CMS_flag_is_set(CMS_cms_wants_token);
- }
-
- // Wait on CMS lock until the next synchronous GC
- // or given timeout, whichever is earlier. A timeout value
- // of 0 indicates that there is no upper bound on the wait time.
- // A concurrent full gc request terminates the wait.
- void wait_on_cms_lock(long t_millis);
-
- // Wait on CMS lock until the next synchronous GC
- // or given timeout, whichever is earlier. A timeout value
- // of 0 indicates that there is no upper bound on the wait time.
- // A concurrent full gc request terminates the wait.
- void wait_on_cms_lock_for_scavenge(long t_millis);
-
- // The CMS thread will yield during the work portion of its cycle
- // only when requested to.
- // A synchronous request is used for young gen collections and
- // for direct allocations. The requesting thread increments
- // _pending_yields at the beginning of an operation, and decrements
- // _pending_yields when that operation is completed.
- // In turn, the CMS thread yields when _pending_yields is positive,
- // and continues to yield until the value reverts to 0.
-
- static void increment_pending_yields() {
- Atomic::inc(&_pending_yields);
- assert(_pending_yields >= 0, "can't be negative");
- }
- static void decrement_pending_yields() {
- Atomic::dec(&_pending_yields);
- assert(_pending_yields >= 0, "can't be negative");
- }
- static bool should_yield() { return _pending_yields > 0; }
-};
-
-// For scoped increment/decrement of (synchronous) yield requests
-class CMSSynchronousYieldRequest: public StackObj {
- public:
- CMSSynchronousYieldRequest() {
- ConcurrentMarkSweepThread::increment_pending_yields();
- }
- ~CMSSynchronousYieldRequest() {
- ConcurrentMarkSweepThread::decrement_pending_yields();
- }
-};
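// Illustrative sketch (standalone): the pending-yield counter and its scoped
// RAII request, using std::atomic. The 64-byte cache-line padding from the
// code above is omitted here; the names are illustrative only.
#include <atomic>
#include <cassert>
#include <iostream>

class YieldRequests {
  static std::atomic<int> _pending;
 public:
  static void increment()    { _pending.fetch_add(1, std::memory_order_relaxed); }
  static void decrement()    { _pending.fetch_sub(1, std::memory_order_relaxed); }
  static bool should_yield() { return _pending.load(std::memory_order_relaxed) > 0; }
};
std::atomic<int> YieldRequests::_pending{0};

// Scoped request: the concurrent worker yields for as long as one of these is live.
class ScopedYieldRequest {
 public:
  ScopedYieldRequest()  { YieldRequests::increment(); }
  ~ScopedYieldRequest() { YieldRequests::decrement(); }
};

int main() {
  assert(!YieldRequests::should_yield());
  {
    ScopedYieldRequest request;          // e.g. around a young-gen collection
    assert(YieldRequests::should_yield());
  }
  assert(!YieldRequests::should_yield());
  std::cout << "scoped yield request balanced correctly\n";
  return 0;
}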
-
-// Used to emit a warning in case of unexpectedly excessive
-// looping (in "apparently endless loops") in CMS code.
-class CMSLoopCountWarn: public StackObj {
- private:
- const char* _src;
- const char* _msg;
- const intx _threshold;
- intx _ticks;
-
- public:
- inline CMSLoopCountWarn(const char* src, const char* msg,
- const intx threshold) :
- _src(src), _msg(msg), _threshold(threshold), _ticks(0) { }
-
- inline void tick() {
- _ticks++;
- if (CMSLoopWarn && _ticks % _threshold == 0) {
- log_warning(gc)("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
- }
- }
-};
-
-#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPTHREAD_HPP
--- a/src/hotspot/share/gc/cms/freeChunk.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/freeChunk.hpp"
-#include "utilities/copy.hpp"
-
-#ifndef PRODUCT
-
-#define baadbabeHeapWord badHeapWordVal
-#define deadbeefHeapWord 0xdeadbeef
-
-size_t const FreeChunk::header_size() {
- return sizeof(FreeChunk)/HeapWordSize;
-}
-
-void FreeChunk::mangleAllocated(size_t size) {
- // mangle all but the header of a just-allocated block
- // of storage
- assert(size >= MinChunkSize, "smallest size of object");
- // we can't assert that _size == size because this may be an
- // allocation out of a linear allocation block
- assert(sizeof(FreeChunk) % HeapWordSize == 0,
- "shouldn't write beyond chunk");
- HeapWord* addr = (HeapWord*)this;
- size_t hdr = header_size();
- Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord);
-}
-
-void FreeChunk::mangleFreed(size_t sz) {
- assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns");
- // mangle all but the header of a just-freed block of storage
- // just prior to passing it to the storage dictionary
- assert(sz >= MinChunkSize, "smallest size of object");
- assert(sz == size(), "just checking");
- HeapWord* addr = (HeapWord*)this;
- size_t hdr = header_size();
- Copy::fill_to_words(addr + hdr, sz - hdr, deadbeefHeapWord);
-}
-
-void FreeChunk::verifyList() const {
- FreeChunk* nextFC = next();
- if (nextFC != NULL) {
- assert(this == nextFC->prev(), "broken chain");
- assert(size() == nextFC->size(), "wrong size");
- nextFC->verifyList();
- }
-}
-#endif
-
-void FreeChunk::print_on(outputStream* st) {
- st->print_cr("Next: " PTR_FORMAT " Prev: " PTR_FORMAT " %s",
- p2i(next()), p2i(prev()), cantCoalesce() ? "[can't coalesce]" : "");
-}
--- a/src/hotspot/share/gc/cms/freeChunk.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_FREECHUNK_HPP
-#define SHARE_GC_CMS_FREECHUNK_HPP
-
-#include "memory/memRegion.hpp"
-#include "oops/markWord.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/ostream.hpp"
-
-//
-// Free block maintenance for Concurrent Mark Sweep Generation
-//
-// The main data structure for free blocks are
-// . an indexed array of small free blocks, and
-// . a dictionary of large free blocks
-//
-
-// No virtuals in FreeChunk (don't want any vtables).
-
-// A FreeChunk is merely a chunk that can be in a doubly linked list
-// and has a size field. NOTE: FreeChunks are distinguished from allocated
-// objects in two ways (by the sweeper), depending on whether the VM is 32 or
-// 64 bits.
-// In 32 bits or 64 bits without CompressedOops, the second word (prev) has the
-// LSB set to indicate a free chunk; allocated objects' klass() pointers
-// don't have their LSB set. The corresponding bit in the CMSBitMap is
-// set when the chunk is allocated. There are also blocks that "look free"
-// but are not part of the free list and should not be coalesced into larger
-// free blocks. These free blocks have their two LSB's set.
-
-class FreeChunk {
- friend class VMStructs;
- // For 64 bit compressed oops, the markWord encodes both the size and the
- // indication that this is a FreeChunk and not an object.
- volatile size_t _size;
- FreeChunk* _prev;
- FreeChunk* _next;
-
- markWord mark() const volatile { return markWord((uintptr_t)_size); }
- void set_mark(markWord m) { _size = (size_t)m.value(); }
-
- public:
- NOT_PRODUCT(static const size_t header_size();)
-
- // Returns "true" if the address indicates that the block represents
- // a free chunk.
- static bool indicatesFreeChunk(const HeapWord* addr) {
- // Force volatile read from addr because value might change between
- // calls. We really want the read of _mark and _prev from this pointer
- // to be volatile but making the fields volatile causes all sorts of
- // compilation errors.
- return ((volatile FreeChunk*)addr)->is_free();
- }
-
- bool is_free() const volatile {
- LP64_ONLY(if (UseCompressedOops) return mark().is_cms_free_chunk(); else)
- return (((intptr_t)_prev) & 0x1) == 0x1;
- }
- bool cantCoalesce() const {
- assert(is_free(), "can't get coalesce bit on not free");
- return (((intptr_t)_prev) & 0x2) == 0x2;
- }
- void dontCoalesce() {
- // the block should be free
- assert(is_free(), "Should look like a free block");
- _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
- }
- FreeChunk* prev() const {
- return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
- }
-
- debug_only(void* prev_addr() const { return (void*)&_prev; })
- debug_only(void* next_addr() const { return (void*)&_next; })
- debug_only(void* size_addr() const { return (void*)&_size; })
-
- size_t size() const volatile {
- LP64_ONLY(if (UseCompressedOops) return mark().get_size(); else )
- return _size;
- }
- void set_size(size_t sz) {
- LP64_ONLY(if (UseCompressedOops) set_mark(markWord::set_size_and_free(sz)); else )
- _size = sz;
- }
-
- FreeChunk* next() const { return _next; }
-
- void link_after(FreeChunk* ptr) {
- link_next(ptr);
- if (ptr != NULL) ptr->link_prev(this);
- }
- void link_next(FreeChunk* ptr) { _next = ptr; }
- void link_prev(FreeChunk* ptr) {
- LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
- _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
- }
- void clear_next() { _next = NULL; }
- void markNotFree() {
- // Set _prev (klass) to null before (if) clearing the mark word below
- _prev = NULL;
-#ifdef _LP64
- if (UseCompressedOops) {
- OrderAccess::storestore();
- set_mark(markWord::prototype());
- }
-#endif
- assert(!is_free(), "Error");
- }
-
- // Return the address past the end of this chunk
- uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
-
- // debugging
- void verify() const PRODUCT_RETURN;
- void verifyList() const PRODUCT_RETURN;
- void mangleAllocated(size_t size) PRODUCT_RETURN;
- void mangleFreed(size_t size) PRODUCT_RETURN;
-
- void print_on(outputStream* st);
-};
-
-extern size_t MinChunkSize;
-
-
-#endif // SHARE_GC_CMS_FREECHUNK_HPP
--- a/src/hotspot/share/gc/cms/gSpaceCounters.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/gSpaceCounters.hpp"
-#include "gc/shared/generation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "utilities/macros.hpp"
-
-GSpaceCounters::GSpaceCounters(const char* name, int ordinal, size_t max_size,
- Generation* g, GenerationCounters* gc,
- bool sampled) :
- _gen(g) {
-
- if (UsePerfData) {
- EXCEPTION_MARK;
- ResourceMark rm;
-
- const char* cns = PerfDataManager::name_space(gc->name_space(), "space",
- ordinal);
-
- _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
- strcpy(_name_space, cns);
-
- const char* cname = PerfDataManager::counter_name(_name_space, "name");
- PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
-
- cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
- (jlong)max_size, CHECK);
-
- cname = PerfDataManager::counter_name(_name_space, "capacity");
- _capacity = PerfDataManager::create_variable(SUN_GC, cname,
- PerfData::U_Bytes,
- _gen->capacity(), CHECK);
-
- cname = PerfDataManager::counter_name(_name_space, "used");
- if (sampled) {
- _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
- new GenerationUsedHelper(_gen),
- CHECK);
- }
- else {
- _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
- (jlong)0, CHECK);
- }
-
- cname = PerfDataManager::counter_name(_name_space, "initCapacity");
- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
- _gen->capacity(), CHECK);
- }
-}
-
-GSpaceCounters::~GSpaceCounters() {
- FREE_C_HEAP_ARRAY(char, _name_space);
-}
--- a/src/hotspot/share/gc/cms/gSpaceCounters.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_GSPACECOUNTERS_HPP
-#define SHARE_GC_CMS_GSPACECOUNTERS_HPP
-
-#include "gc/shared/generation.hpp"
-#include "gc/shared/generationCounters.hpp"
-#include "runtime/perfData.hpp"
-#include "utilities/macros.hpp"
-
-// A GSpaceCounter is a holder class for performance counters
-// that track a space;
-
-class GSpaceCounters: public CHeapObj<mtGC> {
- friend class VMStructs;
-
- private:
- PerfVariable* _capacity;
- PerfVariable* _used;
-
- // Constant PerfData types don't need to retain a reference.
- // However, it's a good idea to document them here.
- // PerfConstant* _size;
-
- Generation* _gen;
- char* _name_space;
-
- public:
-
- GSpaceCounters(const char* name, int ordinal, size_t max_size, Generation* g,
- GenerationCounters* gc, bool sampled=true);
-
- ~GSpaceCounters();
-
- inline void update_capacity() {
- _capacity->set_value(_gen->capacity());
- }
-
- inline void update_used() {
- _used->set_value(_gen->used_stable());
- }
-
- // special version of update_used() to allow the used value to be
- // passed as a parameter. This method can be used in cases where
- // the utilization is already known and/or when the _gen->used()
- // method is known to be expensive and we want to avoid unnecessary
- // calls to it.
- //
- inline void update_used(size_t used) {
- _used->set_value(used);
- }
-
- inline void inc_used(size_t size) {
- _used->inc(size);
- }
-
- debug_only(
- // for security reasons, we do not allow arbitrary reads from
- // the counters as they may live in shared memory.
- jlong used() {
- return _used->get_value();
- }
- jlong capacity() {
- return _used->get_value();
- }
- )
-
- inline void update_all() {
- update_used();
- update_capacity();
- }
-
- const char* name_space() const { return _name_space; }
-};
-
-class GenerationUsedHelper : public PerfLongSampleHelper {
- private:
- Generation* _gen;
-
- public:
- GenerationUsedHelper(Generation* g) : _gen(g) { }
-
- inline jlong take_sample() {
- return _gen->used_stable();
- }
-};
-
-#endif // SHARE_GC_CMS_GSPACECOUNTERS_HPP
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,238 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/jvmFlagConstraintsCMS.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/jvmFlagConstraintsGC.hpp"
-#include "memory/universe.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-static JVMFlag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
- // CMSWorkQueueDrainThreshold is verified to be less than max_juint
- if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
- JVMFlag::printError(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
- UINTX_FORMAT ") is too large\n",
- threads, threshold);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
- // To avoid overflow at ParScanClosure::do_oop_work.
- if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
- JVMFlag::printError(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to " UINT32_FORMAT " for CMS GC\n",
- value, (max_jint / 10));
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
-}
-JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
- JVMFlag::printError(verbose,
- "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
- value, ((uintx)max_jint / (uintx)ParallelGCThreads));
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
- if (UseConcMarkSweepGC) {
- // ParGCCardsPerStrideChunk should be compared with card table size.
- size_t heap_size = CMSHeap::heap()->reserved_region().word_size();
- CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
- size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
-
- if ((size_t)value > card_table_size) {
- JVMFlag::printError(verbose,
- "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
- "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
- value, card_table_size);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
-
- // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread)
- // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
- // not to make an overflow with ParallelGCThreads from its constraint function.
- uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
- uintx ergo_max = max_uintx / n_strides;
- if ((uintx)value > ergo_max) {
- JVMFlag::printError(verbose,
- "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
- value, ergo_max);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
- JVMFlag::Error status = JVMFlag::SUCCESS;
-
- if (UseConcMarkSweepGC) {
- if (value > CMSOldPLABMax) {
- JVMFlag::printError(verbose,
- "CMSOldPLABMin (" SIZE_FORMAT ") must be "
- "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
- value, CMSOldPLABMax);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
- }
- return status;
-}
-
-JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose) {
- JVMFlag::Error status = JVMFlag::SUCCESS;
-
- if (UseConcMarkSweepGC) {
- status = MaxPLABSizeBounds("CMSOldPLABMax", value, verbose);
- }
- return status;
-}
-
-static JVMFlag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
- if (UseConcMarkSweepGC) {
- ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
- const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
- if (value > ergo_max) {
- JVMFlag::printError(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
- "which is based on the maximum size of the old generation of the Java heap\n",
- name, value, ergo_max);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
- JVMFlag::Error status = CMSReservedAreaConstraintFunc("CMSRescanMultiple", value, verbose);
-
- if (status == JVMFlag::SUCCESS && UseConcMarkSweepGC) {
- // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
- // to be aligned to CardTable::card_size * BitsPerWord.
- // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
- // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
- if (value % HeapWordSize != 0) {
- JVMFlag::printError(verbose,
- "CMSRescanMultiple (" SIZE_FORMAT ") must be "
- "a multiple of %d\n",
- value, HeapWordSize);
- status = JVMFlag::VIOLATES_CONSTRAINT;
- }
- }
- return status;
-}
-
-JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
- return CMSReservedAreaConstraintFunc("CMSConcMarkMultiple", value, verbose);
-}
-
-JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
- JVMFlag::printError(verbose,
- "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
- "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
- value, CMSPrecleanNumerator);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
- JVMFlag::printError(verbose,
- "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
- "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
- value, CMSPrecleanDenominator);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC) {
- size_t max_capacity = CMSHeap::heap()->young_gen()->max_capacity();
- if (value > max_uintx - max_capacity) {
- JVMFlag::printError(verbose,
- "CMSSamplingGrain (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
- value, max_uintx - max_capacity);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC) {
- return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(ParallelGCThreads, value, verbose);
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
- // Skip for current default value.
- if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
- // CMSBitMapYieldQuantum should be compared with mark bitmap size.
- ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
- size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
-
- if (value > bitmap_size) {
- JVMFlag::printError(verbose,
- "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
- "be less than or equal to bitmap size (" SIZE_FORMAT ") "
- "whose size corresponds to the size of old generation of the Java heap\n",
- value, bitmap_size);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- }
- return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
- if (value == 0) {
- JVMFlag::printError(verbose,
- "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
- value);
- return JVMFlag::VIOLATES_CONSTRAINT;
- }
- // For CMS, OldPLABSize is the number of free blocks of a given size that are used when
- // replenishing the local per-worker free list caches.
- // For more details, please refer to Arguments::set_cms_and_parnew_gc_flags().
- return MaxPLABSizeBounds("OldPLABSize", value, verbose);
-}
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_JVMFLAGCONSTRAINTSCMS_HPP
-#define SHARE_GC_CMS_JVMFLAGCONSTRAINTSCMS_HPP
-
-#include "runtime/flags/jvmFlag.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// CMS Flag Constraints
-JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose);
-JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose);
-
-// CMS Subconstraints
-JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose);
-JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose);
-
-#endif // SHARE_GC_CMS_JVMFLAGCONSTRAINTSCMS_HPP
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1446 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/stringTable.hpp"
-#include "gc/cms/cmsHeap.inline.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.inline.hpp"
-#include "gc/cms/parOopClosures.inline.hpp"
-#include "gc/serial/defNewGeneration.inline.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/ageTable.inline.hpp"
-#include "gc/shared/copyFailedInfo.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/generation.hpp"
-#include "gc/shared/plab.inline.hpp"
-#include "gc/shared/preservedMarks.inline.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/objArrayOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/thread.inline.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/stack.inline.hpp"
-
-ParScanThreadState::ParScanThreadState(Space* to_space_,
- ParNewGeneration* young_gen_,
- Generation* old_gen_,
- int thread_num_,
- ObjToScanQueueSet* work_queue_set_,
- Stack<oop, mtGC>* overflow_stacks_,
- PreservedMarks* preserved_marks_,
- size_t desired_plab_sz_,
- TaskTerminator& term_) :
- _work_queue(work_queue_set_->queue(thread_num_)),
- _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
- _preserved_marks(preserved_marks_),
- _to_space_alloc_buffer(desired_plab_sz_),
- _to_space_closure(young_gen_, this),
- _old_gen_closure(young_gen_, this),
- _to_space_root_closure(young_gen_, this),
- _older_gen_closure(young_gen_, this),
- _old_gen_root_closure(young_gen_, this),
- _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
- &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
- work_queue_set_, term_.terminator()),
- _is_alive_closure(young_gen_),
- _scan_weak_ref_closure(young_gen_, this),
- _keep_alive_closure(&_scan_weak_ref_closure),
- _to_space(to_space_),
- _young_gen(young_gen_),
- _old_gen(old_gen_),
- _young_old_boundary(NULL),
- _thread_num(thread_num_),
- _ageTable(false), // false ==> not the global age table, no perf data.
- _to_space_full(false),
- _strong_roots_time(0.0),
- _term_time(0.0)
-{
- #if TASKQUEUE_STATS
- _term_attempts = 0;
- _overflow_refills = 0;
- _overflow_refill_objs = 0;
- #endif // TASKQUEUE_STATS
-
- _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
- _start = os::elapsedTime();
- _old_gen_closure.set_generation(old_gen_);
- _old_gen_root_closure.set_generation(old_gen_);
-}
-
-void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
- size_t plab_word_size) {
- ChunkArray* sca = survivor_chunk_array();
- if (sca != NULL) {
- // A non-null SCA implies that we want the PLAB data recorded.
- sca->record_sample(plab_start, plab_word_size);
- }
-}
-
-bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
- return new_obj->is_objArray() &&
- arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
- new_obj != old_obj;
-}
-
-void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
- assert(old->is_objArray(), "must be obj array");
- assert(old->is_forwarded(), "must be forwarded");
- assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
- assert(!old_gen()->is_in(old), "must be in young generation.");
-
- objArrayOop obj = objArrayOop(old->forwardee());
- // Process ParGCArrayScanChunk elements now
- // and push the remainder back onto queue
- int start = arrayOop(old)->length();
- int end = obj->length();
- int remainder = end - start;
- assert(start <= end, "just checking");
- if (remainder > 2 * ParGCArrayScanChunk) {
- // Test above combines last partial chunk with a full chunk
- end = start + ParGCArrayScanChunk;
- arrayOop(old)->set_length(end);
- // Push remainder.
- bool ok = work_queue()->push(old);
- assert(ok, "just popped, push must be okay");
- } else {
- // Restore length so that it can be used if there
- // is a promotion failure and forwarding pointers
- // must be removed.
- arrayOop(old)->set_length(end);
- }
-
- // process our set of indices (include header in first chunk)
- // should make sure end is even (aligned to HeapWord in case of compressed oops)
- if ((HeapWord *)obj < young_old_boundary()) {
- // object is in to_space
- obj->oop_iterate_range(&_to_space_closure, start, end);
- } else {
- // object is in old generation
- obj->oop_iterate_range(&_old_gen_closure, start, end);
- }
-}
-
-void ParScanThreadState::trim_queues(int max_size) {
- ObjToScanQueue* queue = work_queue();
- do {
- while (queue->size() > (juint)max_size) {
- oop obj_to_scan;
- if (queue->pop_local(obj_to_scan)) {
- if ((HeapWord *)obj_to_scan < young_old_boundary()) {
- if (obj_to_scan->is_objArray() &&
- obj_to_scan->is_forwarded() &&
- obj_to_scan->forwardee() != obj_to_scan) {
- scan_partial_array_and_push_remainder(obj_to_scan);
- } else {
- // object is in to_space
- obj_to_scan->oop_iterate(&_to_space_closure);
- }
- } else {
- // object is in old generation
- obj_to_scan->oop_iterate(&_old_gen_closure);
- }
- }
- }
- // For the case of compressed oops, we have a private, non-shared
- // overflow stack, so we eagerly drain it so as to more evenly
- // distribute load early. Note: this may be good to do in
- // general rather than delay for the final stealing phase.
- // If applicable, we'll transfer a set of objects over to our
- // work queue, allowing them to be stolen and draining our
- // private overflow stack.
- } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
-}
-
-bool ParScanThreadState::take_from_overflow_stack() {
- assert(ParGCUseLocalOverflow, "Else should not call");
- assert(young_gen()->overflow_list() == NULL, "Error");
- ObjToScanQueue* queue = work_queue();
- Stack<oop, mtGC>* const of_stack = overflow_stack();
- const size_t num_overflow_elems = of_stack->size();
- const size_t space_available = queue->max_elems() - queue->size();
- const size_t num_take_elems = MIN3(space_available / 4,
- (size_t)ParGCDesiredObjsFromOverflowList,
- num_overflow_elems);
- // Transfer the most recent num_take_elems from the overflow
- // stack to our work queue.
- for (size_t i = 0; i != num_take_elems; i++) {
- oop cur = of_stack->pop();
- oop obj_to_push = cur->forwardee();
- assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
- assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
- assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
- if (should_be_partially_scanned(obj_to_push, cur)) {
- assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
- obj_to_push = cur;
- }
- bool ok = queue->push(obj_to_push);
- assert(ok, "Should have succeeded");
- }
- assert(young_gen()->overflow_list() == NULL, "Error");
- return num_take_elems > 0; // was something transferred?
-}
-
-void ParScanThreadState::push_on_overflow_stack(oop p) {
- assert(ParGCUseLocalOverflow, "Else should not call");
- overflow_stack()->push(p);
- assert(young_gen()->overflow_list() == NULL, "Error");
-}
-
-HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
- // If the object is small enough, try to reallocate the buffer.
- HeapWord* obj = NULL;
- if (!_to_space_full) {
- PLAB* const plab = to_space_alloc_buffer();
- Space* const sp = to_space();
- if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
- // Is small enough; abandon this buffer and start a new one.
- plab->retire();
- // The minimum size has to be twice SurvivorAlignmentInBytes to
- // allow for padding used in the alignment of 1 word. A padding
- // of 1 is too small for a filler word so the padding size will
- // be increased by SurvivorAlignmentInBytes.
- size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize);
- size_t buf_size = MAX2(plab->word_sz(), min_usable_size);
- HeapWord* buf_space = sp->par_allocate(buf_size);
- if (buf_space == NULL) {
- const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize;
- size_t free_bytes = sp->free();
- while(buf_space == NULL && free_bytes >= min_bytes) {
- buf_size = free_bytes >> LogHeapWordSize;
- assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
- buf_space = sp->par_allocate(buf_size);
- free_bytes = sp->free();
- }
- }
- if (buf_space != NULL) {
- plab->set_buf(buf_space, buf_size);
- record_survivor_plab(buf_space, buf_size);
- obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
- // Note that we cannot compare buf_size < word_sz below
- // because of AlignmentReserve (see PLAB::allocate()).
- assert(obj != NULL || plab->words_remaining() < word_sz,
- "Else should have been able to allocate requested object size "
- SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes "
- SIZE_FORMAT ", words_remaining " SIZE_FORMAT,
- word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining());
- // It's conceivable that we may be able to use the
- // buffer we just grabbed for subsequent small requests
- // even if not for this one.
- } else {
- // We're used up.
- _to_space_full = true;
- }
- } else {
- // Too large; allocate the object individually.
- obj = sp->par_allocate(word_sz);
- }
- }
- return obj;
-}
-
-void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
- to_space_alloc_buffer()->undo_allocation(obj, word_sz);
-}
-
-void ParScanThreadState::print_promotion_failure_size() {
- if (_promotion_failed_info.has_failed()) {
- log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
- _thread_num, _promotion_failed_info.first_size());
- }
-}
-
-class ParScanThreadStateSet: StackObj {
-public:
- // Initializes states for the specified number of threads;
- ParScanThreadStateSet(int num_threads,
- Space& to_space,
- ParNewGeneration& young_gen,
- Generation& old_gen,
- ObjToScanQueueSet& queue_set,
- Stack<oop, mtGC>* overflow_stacks_,
- PreservedMarksSet& preserved_marks_set,
- size_t desired_plab_sz,
- TaskTerminator& term);
-
- ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
-
- inline ParScanThreadState& thread_state(int i);
-
- void trace_promotion_failed(const YoungGCTracer* gc_tracer);
- void reset(uint active_workers, bool promotion_failed);
- void flush();
-
- #if TASKQUEUE_STATS
- static void
- print_termination_stats_hdr(outputStream* const st);
- void print_termination_stats();
- static void
- print_taskqueue_stats_hdr(outputStream* const st);
- void print_taskqueue_stats();
- void reset_stats();
- #endif // TASKQUEUE_STATS
-
-private:
- TaskTerminator& _term;
- ParNewGeneration& _young_gen;
- Generation& _old_gen;
- ParScanThreadState* _per_thread_states;
- const int _num_threads;
- public:
- bool is_valid(int id) const { return id < _num_threads; }
- ParallelTaskTerminator* terminator() { return _term.terminator(); }
-};
-
-ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
- Space& to_space,
- ParNewGeneration& young_gen,
- Generation& old_gen,
- ObjToScanQueueSet& queue_set,
- Stack<oop, mtGC>* overflow_stacks,
- PreservedMarksSet& preserved_marks_set,
- size_t desired_plab_sz,
- TaskTerminator& term)
- : _term(term),
- _young_gen(young_gen),
- _old_gen(old_gen),
- _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
- _num_threads(num_threads)
-{
- assert(num_threads > 0, "sanity check!");
- assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
- "overflow_stack allocation mismatch");
- // Initialize states.
- for (int i = 0; i < num_threads; ++i) {
- new(_per_thread_states + i)
- ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
- overflow_stacks, preserved_marks_set.get(i),
- desired_plab_sz, term);
- }
-}
-
-inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
- assert(i >= 0 && i < _num_threads, "sanity check!");
- return _per_thread_states[i];
-}
-
-void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
- for (int i = 0; i < _num_threads; ++i) {
- if (thread_state(i).promotion_failed()) {
- gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
- thread_state(i).promotion_failed_info().reset();
- }
- }
-}
-
-void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
- _term.terminator()->reset_for_reuse(active_threads);
- if (promotion_failed) {
- for (int i = 0; i < _num_threads; ++i) {
- thread_state(i).print_promotion_failure_size();
- }
- }
-}
-
-#if TASKQUEUE_STATS
-void ParScanThreadState::reset_stats() {
- taskqueue_stats().reset();
- _term_attempts = 0;
- _overflow_refills = 0;
- _overflow_refill_objs = 0;
-}
-
-void ParScanThreadStateSet::reset_stats() {
- for (int i = 0; i < _num_threads; ++i) {
- thread_state(i).reset_stats();
- }
-}
-
-void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
- st->print_raw_cr("GC Termination Stats");
- st->print_raw_cr(" elapsed --strong roots-- -------termination-------");
- st->print_raw_cr("thr ms ms % ms % attempts");
- st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
-}
-
-void ParScanThreadStateSet::print_termination_stats() {
- Log(gc, task, stats) log;
- if (!log.is_debug()) {
- return;
- }
-
- ResourceMark rm;
- LogStream ls(log.debug());
- outputStream* st = &ls;
-
- print_termination_stats_hdr(st);
-
- for (int i = 0; i < _num_threads; ++i) {
- const ParScanThreadState & pss = thread_state(i);
- const double elapsed_ms = pss.elapsed_time() * 1000.0;
- const double s_roots_ms = pss.strong_roots_time() * 1000.0;
- const double term_ms = pss.term_time() * 1000.0;
- st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
- i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
- term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
- }
-}
-
-// Print stats related to work queue activity.
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
- st->print_raw_cr("GC Task Stats");
- st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
- st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
-}
-
-void ParScanThreadStateSet::print_taskqueue_stats() {
- if (!log_is_enabled(Trace, gc, task, stats)) {
- return;
- }
- Log(gc, task, stats) log;
- ResourceMark rm;
- LogStream ls(log.trace());
- outputStream* st = &ls;
- print_taskqueue_stats_hdr(st);
-
- TaskQueueStats totals;
- for (int i = 0; i < _num_threads; ++i) {
- const ParScanThreadState & pss = thread_state(i);
- const TaskQueueStats & stats = pss.taskqueue_stats();
- st->print("%3d ", i); stats.print(st); st->cr();
- totals += stats;
-
- if (pss.overflow_refills() > 0) {
- st->print_cr(" " SIZE_FORMAT_W(10) " overflow refills "
- SIZE_FORMAT_W(10) " overflow objects",
- pss.overflow_refills(), pss.overflow_refill_objs());
- }
- }
- st->print("tot "); totals.print(st); st->cr();
-
- DEBUG_ONLY(totals.verify());
-}
-#endif // TASKQUEUE_STATS
-
-void ParScanThreadStateSet::flush() {
- // Work in this loop should be kept as lightweight as
- // possible since this might otherwise become a bottleneck
- // to scaling. Should we add heavy-weight work into this
- // loop, consider parallelizing the loop into the worker threads.
- for (int i = 0; i < _num_threads; ++i) {
- ParScanThreadState& par_scan_state = thread_state(i);
-
- // Flush stats related to To-space PLAB activity and
- // retire the last buffer.
- par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
-
- // Every thread has its own age table. We need to merge
- // them all into one.
- AgeTable *local_table = par_scan_state.age_table();
- _young_gen.age_table()->merge(local_table);
-
- // Inform old gen that we're done.
- _old_gen.par_promote_alloc_done(i);
- }
-
- if (UseConcMarkSweepGC) {
- // We need to call this even when ResizeOldPLAB is disabled
- // so as to avoid breaking some asserts. While we may be able
- // to avoid this by reorganizing the code a bit, I am loathe
- // to do that unless we find cases where ergo leads to bad
- // performance.
- CompactibleFreeListSpaceLAB::compute_desired_plab_size();
- }
-}
-
-ParScanClosure::ParScanClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state) :
- OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
- _boundary = _g->reserved().end();
-}
-
-void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, true); }
-void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
-
-void ParRootScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, true); }
-void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
-
-ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state)
- : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
-{}
-
-#ifdef WIN32
-#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
-#endif
-
-ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
- ParScanThreadState* par_scan_state_,
- ParScanWithoutBarrierClosure* to_space_closure_,
- ParScanWithBarrierClosure* old_gen_closure_,
- ParRootScanWithoutBarrierClosure* to_space_root_closure_,
- ParNewGeneration* par_gen_,
- ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
- ObjToScanQueueSet* task_queues_,
- ParallelTaskTerminator* terminator_) :
-
- _par_scan_state(par_scan_state_),
- _to_space_closure(to_space_closure_),
- _to_space_root_closure(to_space_root_closure_),
- _old_gen_closure(old_gen_closure_),
- _old_gen_root_closure(old_gen_root_closure_),
- _par_gen(par_gen_),
- _task_queues(task_queues_),
- _terminator(terminator_)
-{}
-
-void ParEvacuateFollowersClosure::do_void() {
- ObjToScanQueue* work_q = par_scan_state()->work_queue();
-
- while (true) {
- // Scan to-space and old-gen objs until we run out of both.
- oop obj_to_scan;
- par_scan_state()->trim_queues(0);
-
- // We have no local work, attempt to steal from other threads.
-
- // Attempt to steal work from promoted.
- if (task_queues()->steal(par_scan_state()->thread_num(),
- obj_to_scan)) {
- bool res = work_q->push(obj_to_scan);
- assert(res, "Empty queue should have room for a push.");
-
- // If successful, goto Start.
- continue;
-
- // Try global overflow list.
- } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
- continue;
- }
-
- // Otherwise, offer termination.
- par_scan_state()->start_term_time();
- if (terminator()->offer_termination()) break;
- par_scan_state()->end_term_time();
- }
- assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
- "Broken overflow list?");
- // Finish the last termination pause.
- par_scan_state()->end_term_time();
-}
-
-ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
- Generation* old_gen,
- HeapWord* young_old_boundary,
- ParScanThreadStateSet* state_set,
- StrongRootsScope* strong_roots_scope) :
- AbstractGangTask("ParNewGeneration collection"),
- _young_gen(young_gen), _old_gen(old_gen),
- _young_old_boundary(young_old_boundary),
- _state_set(state_set),
- _strong_roots_scope(strong_roots_scope)
-{}
-
-void ParNewGenTask::work(uint worker_id) {
- CMSHeap* heap = CMSHeap::heap();
- // Since this is being done in a separate thread, need new resource
- // and handle marks.
- ResourceMark rm;
- HandleMark hm;
-
- ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
- assert(_state_set->is_valid(worker_id), "Should not have been called");
-
- par_scan_state.set_young_old_boundary(_young_old_boundary);
-
- CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
- heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
-
- par_scan_state.start_strong_roots();
- heap->young_process_roots(_strong_roots_scope,
- &par_scan_state.to_space_root_closure(),
- &par_scan_state.older_gen_closure(),
- &cld_scan_closure);
-
- par_scan_state.end_strong_roots();
-
- // "evacuate followers".
- par_scan_state.evacuate_followers_closure().do_void();
-
- // This will collapse this worker's promoted object list that's
- // created during the main ParNew parallel phase of ParNew. This has
- // to be called after all workers have finished promoting objects
- // and scanning promoted objects. It should be safe calling it from
- // here, given that we can only reach here after all threads have
- // offered termination, i.e., after there is no more work to be
- // done. It will also disable promotion tracking for the rest of
- // this GC as it's not necessary to be on during reference processing.
- _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
-}
-
-ParNewGeneration::ParNewGeneration(ReservedSpace rs,
- size_t initial_byte_size,
- size_t min_byte_size,
- size_t max_byte_size)
- : DefNewGeneration(rs, initial_byte_size, min_byte_size, max_byte_size, "CMS young collection pauses"),
- _plab_stats("Young", YoungPLABSize, PLABWeight),
- _overflow_list(NULL),
- _is_alive_closure(this)
-{
- NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
- NOT_PRODUCT(_num_par_pushes = 0;)
- _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
- guarantee(_task_queues != NULL, "task_queues allocation failure.");
-
- for (uint i = 0; i < ParallelGCThreads; i++) {
- ObjToScanQueue *q = new ObjToScanQueue();
- guarantee(q != NULL, "work_queue Allocation failure.");
- _task_queues->register_queue(i, q);
- }
-
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _task_queues->queue(i)->initialize();
- }
-
- _overflow_stacks = NULL;
- if (ParGCUseLocalOverflow) {
- // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
- typedef Stack<oop, mtGC> GCOopStack;
-
- _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
- for (size_t i = 0; i < ParallelGCThreads; ++i) {
- new (_overflow_stacks + i) Stack<oop, mtGC>();
- }
- }
-
- if (UsePerfData) {
- EXCEPTION_MARK;
- ResourceMark rm;
-
- const char* cname =
- PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
- PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
- ParallelGCThreads, CHECK);
- }
-}
-
-// ParNewGeneration::
-ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
- DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
-
-template <class T>
-void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
-#ifdef ASSERT
- {
- oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
- }
-#endif // ASSERT
-
- Devirtualizer::do_oop_no_verify(_par_cl, p);
-
- if (CMSHeap::heap()->is_in_reserved(p)) {
- oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);;
- _rs->write_ref_field_gc_par(p, obj);
- }
-}
-
-void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p) { ParKeepAliveClosure::do_oop_work(p); }
-void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
-
-// ParNewGeneration::
-KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
- DefNewGeneration::KeepAliveClosure(cl) {}
-
-template <class T>
-void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
-#ifdef ASSERT
- {
- oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
- // We never expect to see a null reference being processed
- // as a weak reference.
- assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
- }
-#endif // ASSERT
-
- Devirtualizer::do_oop_no_verify(_cl, p);
-
- if (CMSHeap::heap()->is_in_reserved(p)) {
- oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
- _rs->write_ref_field_gc_par(p, obj);
- }
-}
-
-void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClosure::do_oop_work(p); }
-void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
-
-template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
- T heap_oop = RawAccess<>::oop_load(p);
- if (!CompressedOops::is_null(heap_oop)) {
- oop obj = CompressedOops::decode_not_null(heap_oop);
- if ((HeapWord*)obj < _boundary) {
- assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
- oop new_obj = obj->is_forwarded()
- ? obj->forwardee()
- : _g->DefNewGeneration::copy_to_survivor_space(obj);
- RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
- }
- if (_gc_barrier) {
- // If p points to a younger generation, mark the card.
- if ((HeapWord*)obj < _gen_boundary) {
- _rs->write_ref_field_gc_par(p, obj);
- }
- }
- }
-}
-
-void ScanClosureWithParBarrier::do_oop(oop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
-void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
-
-class ParNewRefProcTaskProxy: public AbstractGangTask {
- typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-public:
- ParNewRefProcTaskProxy(ProcessTask& task,
- ParNewGeneration& young_gen,
- Generation& old_gen,
- HeapWord* young_old_boundary,
- ParScanThreadStateSet& state_set);
-
-private:
- virtual void work(uint worker_id);
-private:
- ParNewGeneration& _young_gen;
- ProcessTask& _task;
- Generation& _old_gen;
- HeapWord* _young_old_boundary;
- ParScanThreadStateSet& _state_set;
-};
-
-ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
- ParNewGeneration& young_gen,
- Generation& old_gen,
- HeapWord* young_old_boundary,
- ParScanThreadStateSet& state_set)
- : AbstractGangTask("ParNewGeneration parallel reference processing"),
- _young_gen(young_gen),
- _task(task),
- _old_gen(old_gen),
- _young_old_boundary(young_old_boundary),
- _state_set(state_set)
-{ }
-
-void ParNewRefProcTaskProxy::work(uint worker_id) {
- ResourceMark rm;
- HandleMark hm;
- ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
- par_scan_state.set_young_old_boundary(_young_old_boundary);
- _task.work(worker_id, par_scan_state.is_alive_closure(),
- par_scan_state.keep_alive_closure(),
- par_scan_state.evacuate_followers_closure());
-}
-
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
- CMSHeap* gch = CMSHeap::heap();
- WorkGang* workers = gch->workers();
- assert(workers != NULL, "Need parallel worker threads.");
- assert(workers->active_workers() == ergo_workers,
- "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
- ergo_workers, workers->active_workers());
- _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
- ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
- _young_gen.reserved().end(), _state_set);
- workers->run_task(&rp_task, workers->active_workers());
- _state_set.reset(0 /* bad value in debug if not reset */,
- _young_gen.promotion_failed());
-}
-
-void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
- _state_set.flush();
- CMSHeap* heap = CMSHeap::heap();
- heap->save_marks();
-}
-
-ScanClosureWithParBarrier::
-ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
- OopsInClassLoaderDataOrGenClosure(g), _g(g), _boundary(g->reserved().end()), _gc_barrier(gc_barrier)
-{ }
-
-template <typename OopClosureType1, typename OopClosureType2>
-EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::
-EvacuateFollowersClosureGeneral(CMSHeap* heap,
- OopClosureType1* cur,
- OopClosureType2* older) :
- _heap(heap),
- _scan_cur_or_nonheap(cur), _scan_older(older)
-{ }
-
-template <typename OopClosureType1, typename OopClosureType2>
-void EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::do_void() {
- do {
- _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap,
- _scan_older);
- } while (!_heap->no_allocs_since_save_marks());
-}
-
-// A Generation that does parallel young-gen collection.
-
-void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
- assert(_promo_failure_scan_stack.is_empty(), "post condition");
- _promo_failure_scan_stack.clear(true); // Clear cached segments.
-
- remove_forwarding_pointers();
- log_info(gc, promotion)("Promotion failed");
- // All the spaces are in play for mark-sweep.
- swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
- from()->set_next_compaction_space(to());
- gch->set_incremental_collection_failed();
- // Inform the next generation that a promotion failure occurred.
- _old_gen->promotion_failure_occurred();
-
- // Trace promotion failure in the parallel GC threads
- thread_state_set.trace_promotion_failed(gc_tracer());
- // Single threaded code may have reported promotion failure to the global state
- if (_promotion_failed_info.has_failed()) {
- _gc_tracer.report_promotion_failed(_promotion_failed_info);
- }
- // Reset the PromotionFailureALot counters.
- NOT_PRODUCT(gch->reset_promotion_should_fail();)
-}
-
-void ParNewGeneration::collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool is_tlab) {
- assert(full || size > 0, "otherwise we don't want to collect");
-
- CMSHeap* gch = CMSHeap::heap();
-
- _gc_timer->register_gc_start();
-
- AdaptiveSizePolicy* size_policy = gch->size_policy();
- WorkGang* workers = gch->workers();
- assert(workers != NULL, "Need workgang for parallel work");
- uint active_workers =
- WorkerPolicy::calc_active_workers(workers->total_workers(),
- workers->active_workers(),
- Threads::number_of_non_daemon_threads());
- active_workers = workers->update_active_workers(active_workers);
- log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
-
- _old_gen = gch->old_gen();
-
- // If the next generation is too full to accommodate worst-case promotion
- // from this generation, pass on collection; let the next generation
- // do it.
- if (!collection_attempt_is_safe()) {
- gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
- return;
- }
- assert(to()->is_empty(), "Else not collection_attempt_is_safe");
-
- _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
- gch->trace_heap_before_gc(gc_tracer());
-
- init_assuming_no_promotion_failure();
-
- GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
-
- age_table()->clear();
- to()->clear(SpaceDecorator::Mangle);
-
- gch->save_marks();
-
- // Set the correct parallelism (number of queues) in the reference processor
- ref_processor()->set_active_mt_degree(active_workers);
-
- // Need to initialize the preserved marks before the ThreadStateSet c'tor.
- _preserved_marks_set.init(active_workers);
-
- // Always set the terminator for the active number of workers
- // because only those workers go through the termination protocol.
- TaskTerminator _term(active_workers, task_queues());
- ParScanThreadStateSet thread_state_set(active_workers,
- *to(), *this, *_old_gen, *task_queues(),
- _overflow_stacks, _preserved_marks_set,
- desired_plab_sz(), _term);
-
- thread_state_set.reset(active_workers, promotion_failed());
-
- {
- StrongRootsScope srs(active_workers);
-
- ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
- gch->rem_set()->prepare_for_younger_refs_iterate(true);
- // It turns out that even when we're using 1 thread, doing the work in a
- // separate thread causes wide variance in run times. We can't help this
- // in the multi-threaded case, but we special-case n=1 here to get
- // repeatable measurements of the 1-thread overhead of the parallel code.
- // Might multiple workers ever be used? If yes, initialization
- // has been done such that the single threaded path should not be used.
- if (workers->total_workers() > 1) {
- workers->run_task(&tsk);
- } else {
- tsk.work(0);
- }
- }
-
- thread_state_set.reset(0 /* Bad value in debug if not reset */,
- promotion_failed());
-
- // Trace and reset failed promotion info.
- if (promotion_failed()) {
- thread_state_set.trace_promotion_failed(gc_tracer());
- }
-
- // Process (weak) reference objects found during scavenge.
- ReferenceProcessor* rp = ref_processor();
- IsAliveClosure is_alive(this);
- ScanWeakRefClosure scan_weak_ref(this);
- KeepAliveClosure keep_alive(&scan_weak_ref);
- ScanClosure scan_without_gc_barrier(this, false);
- ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
- set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
- EvacuateFollowersClosureGeneral<ScanClosure, ScanClosureWithParBarrier> evacuate_followers(
- gch, &scan_without_gc_barrier, &scan_with_gc_barrier);
- rp->setup_policy(clear_all_soft_refs);
- // Can the mt_degree be set later (at run_task() time would be best)?
- rp->set_active_mt_degree(active_workers);
- ReferenceProcessorStats stats;
- ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
- if (rp->processing_is_mt()) {
- ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
- stats = rp->process_discovered_references(&is_alive, &keep_alive,
- &evacuate_followers, &task_executor,
- &pt);
- } else {
- thread_state_set.flush();
- gch->save_marks();
- stats = rp->process_discovered_references(&is_alive, &keep_alive,
- &evacuate_followers, NULL,
- &pt);
- }
- _gc_tracer.report_gc_reference_stats(stats);
- _gc_tracer.report_tenuring_threshold(tenuring_threshold());
- pt.print_all_references();
-
- assert(gch->no_allocs_since_save_marks(), "evacuation should be done at this point");
-
- WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
-
- // Verify that the usage of keep_alive only forwarded
- // the oops and did not find anything new to copy.
- assert(gch->no_allocs_since_save_marks(), "unexpectedly copied objects");
-
- if (!promotion_failed()) {
- // Swap the survivor spaces.
- eden()->clear(SpaceDecorator::Mangle);
- from()->clear(SpaceDecorator::Mangle);
- if (ZapUnusedHeapArea) {
- // This is now done here because of the piece-meal mangling which
- // can check for valid mangling at intermediate points in the
- // collection(s). When a young collection fails to collect
- // sufficient space resizing of the young generation can occur
- // and redistribute the spaces in the young generation. Mangle
- // here so that unzapped regions don't get distributed to
- // other spaces.
- to()->mangle_unused_area();
- }
- swap_spaces();
-
- // A successful scavenge should restart the GC time limit count which is
- // for full GC's.
- size_policy->reset_gc_overhead_limit_count();
-
- assert(to()->is_empty(), "to space should be empty now");
-
- adjust_desired_tenuring_threshold();
- } else {
- handle_promotion_failed(gch, thread_state_set);
- }
- _preserved_marks_set.reclaim();
- // set new iteration safe limit for the survivor spaces
- from()->set_concurrent_iteration_safe_limit(from()->top());
- to()->set_concurrent_iteration_safe_limit(to()->top());
-
- plab_stats()->adjust_desired_plab_sz();
-
- TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
- TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
-
-  // We need to use a monotonically non-decreasing time in ms
-  // or we will see time-warp warnings; os::javaTimeMillis()
-  // does not guarantee monotonicity.
- jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
- update_time_of_last_gc(now);
-
- rp->set_enqueuing_is_done(true);
- rp->verify_no_references_recorded();
-
- gch->trace_heap_after_gc(gc_tracer());
-
- _gc_timer->register_gc_end();
-
- _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
-}
-
-size_t ParNewGeneration::desired_plab_sz() {
- return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
-}
-
-static int sum;
-void ParNewGeneration::waste_some_time() {
- for (int i = 0; i < 100; i++) {
- sum += i;
- }
-}
-
-static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
-
-// Because of concurrency, there are times where an object for which
-// "is_forwarded()" is true contains an "interim" forwarding pointer
-// value. Such a value will soon be overwritten with a real value.
-// This method requires "obj" to have a forwarding pointer; it waits, if
-// necessary, for a real one to be inserted, and returns it.
-
-oop ParNewGeneration::real_forwardee(oop obj) {
- oop forward_ptr = obj->forwardee();
- if (forward_ptr != ClaimedForwardPtr) {
- return forward_ptr;
- } else {
- return real_forwardee_slow(obj);
- }
-}
-
-oop ParNewGeneration::real_forwardee_slow(oop obj) {
- // Spin-read if it is claimed but not yet written by another thread.
- oop forward_ptr = obj->forwardee();
- while (forward_ptr == ClaimedForwardPtr) {
- waste_some_time();
- assert(obj->is_forwarded(), "precondition");
- forward_ptr = obj->forwardee();
- }
- return forward_ptr;
-}
-
-// Multiple GC threads may try to promote an object. If the object
-// is successfully promoted, a forwarding pointer will be installed in
-// the object in the young generation. This method claims the right
-// to install the forwarding pointer before it copies the object,
-// thus avoiding the need to undo the copy as in
-// copy_to_survivor_space_avoiding_with_undo.
-
-oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
- oop old,
- size_t sz,
- markWord m) {
- // In the sequential version, this assert also says that the object is
- // not forwarded. That might not be the case here. It is the case that
- // the caller observed it to be not forwarded at some time in the past.
- assert(is_in_reserved(old), "shouldn't be scavenging this oop");
-
- // The sequential code read "old->age()" below. That doesn't work here,
- // since the age is in the mark word, and that might be overwritten with
- // a forwarding pointer by a parallel thread. So we must save the mark
- // word in a local and then analyze it.
- oopDesc dummyOld;
- dummyOld.set_mark_raw(m);
- assert(!dummyOld.is_forwarded(),
- "should not be called with forwarding pointer mark word.");
-
- oop new_obj = NULL;
- oop forward_ptr;
-
- // Try allocating obj in to-space (unless too old)
- if (dummyOld.age() < tenuring_threshold()) {
- new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
- }
-
- if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote; try allocating obj tenured.
-
- // Attempt to install a null forwarding pointer (atomically),
- // to claim the right to install the real forwarding pointer.
- forward_ptr = old->forward_to_atomic(ClaimedForwardPtr, m);
- if (forward_ptr != NULL) {
- // someone else beat us to it.
- return real_forwardee(old);
- }
-
- if (!_promotion_failed) {
- new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
- old, m, sz);
- }
-
- if (new_obj == NULL) {
- // promotion failed, forward to self
- _promotion_failed = true;
- new_obj = old;
-
- par_scan_state->preserved_marks()->push_if_necessary(old, m);
- par_scan_state->register_promotion_failure(sz);
- }
-
- old->forward_to(new_obj);
- forward_ptr = NULL;
- } else {
- // Is in to-space; do copying ourselves.
- Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
- assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
- forward_ptr = old->forward_to_atomic(new_obj, m);
- // Restore the mark word copied above.
- new_obj->set_mark_raw(m);
- // Increment age if obj still in new generation
- new_obj->incr_age();
- par_scan_state->age_table()->add(new_obj, sz);
- }
- assert(new_obj != NULL, "just checking");
-
- // This code must come after the CAS test, or it will print incorrect
- // information.
- log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
- is_in_reserved(new_obj) ? "copying" : "tenuring",
- new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
-
- if (forward_ptr == NULL) {
- oop obj_to_push = new_obj;
- if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
- // Length field used as index of next element to be scanned.
- // Real length can be obtained from real_forwardee()
- arrayOop(old)->set_length(0);
- obj_to_push = old;
- assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
- "push forwarded object");
- }
- // Push it on one of the queues of to-be-scanned objects.
- bool simulate_overflow = false;
- NOT_PRODUCT(
- if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
- // simulate a stack overflow
- simulate_overflow = true;
- }
- )
- if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
- // Add stats for overflow pushes.
- log_develop_trace(gc)("Queue Overflow");
- push_on_overflow_list(old, par_scan_state);
- TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
- }
-
- return new_obj;
- }
-
- // Oops. Someone beat us to it. Undo the allocation. Where did we
- // allocate it?
- if (is_in_reserved(new_obj)) {
- // Must be in to_space.
- assert(to()->is_in_reserved(new_obj), "Checking");
- if (forward_ptr == ClaimedForwardPtr) {
- // Wait to get the real forwarding pointer value.
- forward_ptr = real_forwardee(old);
- }
- par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
- }
-
- return forward_ptr;
-}
-
-#ifndef PRODUCT
-// It's OK to call this multi-threaded; the worst thing
-// that can happen is that we'll get a bunch of closely
-// spaced simulated overflows, but that's OK, in fact
-// probably good as it would exercise the overflow code
-// under contention.
-bool ParNewGeneration::should_simulate_overflow() {
- if (_overflow_counter-- <= 0) { // just being defensive
- _overflow_counter = ParGCWorkQueueOverflowInterval;
- return true;
- } else {
- return false;
- }
-}
-#endif
-
-// In case we are using compressed oops, we need to be careful.
-// If the object being pushed is an object array, then its length
-// field keeps track of the "grey boundary" at which the next
-// incremental scan will be done (see ParGCArrayScanChunk).
-// When using compressed oops, this length field is kept in the
-// lower 32 bits of the erstwhile klass word and cannot be used
-// for the overflow chaining pointer (OCP below). As such the OCP
-// would itself need to be compressed into the top 32-bits in this
-// case. Unfortunately, see below, in the event that we have a
-// promotion failure, the node to be pushed on the list can be
-// outside of the Java heap, so the heap-based pointer compression
-// would not work (we would have potential aliasing between C-heap
-// and Java-heap pointers). For this reason, when using compressed
-// oops, we simply use a worker-thread-local, non-shared overflow
-// list in the form of a growable array, with a slightly different
-// overflow stack draining strategy. If/when we start using fat
-// stacks here, we can go back to using (fat) pointer chains
-// (although some performance comparisons would be useful since
-// single global lists have their own performance disadvantages
-// as we were made painfully aware not long ago, see 6786503).
-#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
-void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
- assert(is_in_reserved(from_space_obj), "Should be from this generation");
- if (ParGCUseLocalOverflow) {
- // In the case of compressed oops, we use a private, not-shared
- // overflow stack.
- par_scan_state->push_on_overflow_stack(from_space_obj);
- } else {
- assert(!UseCompressedOops, "Error");
- // if the object has been forwarded to itself, then we cannot
- // use the klass pointer for the linked list. Instead we have
- // to allocate an oopDesc in the C-Heap and use that for the linked list.
- // XXX This is horribly inefficient when a promotion failure occurs
- // and should be fixed. XXX FIX ME !!!
-#ifndef PRODUCT
- Atomic::inc(&_num_par_pushes);
- assert(_num_par_pushes > 0, "Tautology");
-#endif
- if (from_space_obj->forwardee() == from_space_obj) {
- oopDesc* listhead = NEW_C_HEAP_OBJ(oopDesc, mtGC);
- listhead->forward_to(from_space_obj);
- from_space_obj = listhead;
- }
- oop observed_overflow_list = _overflow_list;
- oop cur_overflow_list;
- do {
- cur_overflow_list = observed_overflow_list;
- if (cur_overflow_list != BUSY) {
- from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
- } else {
- from_space_obj->set_klass_to_list_ptr(NULL);
- }
- observed_overflow_list =
- Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
- } while (cur_overflow_list != observed_overflow_list);
- }
-}
-
-bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
- bool res;
-
- if (ParGCUseLocalOverflow) {
- res = par_scan_state->take_from_overflow_stack();
- } else {
- assert(!UseCompressedOops, "Error");
- res = take_from_overflow_list_work(par_scan_state);
- }
- return res;
-}
-
-
-// *NOTE*: The overflow list manipulation code here and
-// in CMSCollector:: are very similar in shape,
-// except that in the CMS case we thread the objects
-// directly into the list via their mark word, and do
-// not need to deal with special cases below related
-// to chunking of object arrays and promotion failure
-// handling.
-// CR 6797058 has been filed to attempt consolidation of
-// the common code.
-// Because of the common code, if you make any changes in
-// the code below, please check the CMS version to see if
-// similar changes might be needed.
-// See CMSCollector::par_take_from_overflow_list() for
-// more extensive documentation comments.
-bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
- ObjToScanQueue* work_q = par_scan_state->work_queue();
- // How many to take?
- size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
- (size_t)ParGCDesiredObjsFromOverflowList);
-
- assert(!UseCompressedOops, "Error");
- assert(par_scan_state->overflow_stack() == NULL, "Error");
- if (_overflow_list == NULL) return false;
-
- // Otherwise, there was something there; try claiming the list.
- oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
- // Trim off a prefix of at most objsFromOverflow items
- size_t spin_count = ParallelGCThreads;
- size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
- for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
- // someone grabbed it before we did ...
- // ... we spin/block for a short while...
- os::naked_sleep(sleep_time_millis);
- if (_overflow_list == NULL) {
- // nothing left to take
- return false;
- } else if (_overflow_list != BUSY) {
- // try and grab the prefix
- prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
- }
- }
- if (prefix == NULL || prefix == BUSY) {
- // Nothing to take or waited long enough
- if (prefix == NULL) {
- // Write back the NULL in case we overwrote it with BUSY above
- // and it is still the same value.
- (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
- }
- return false;
- }
- assert(prefix != NULL && prefix != BUSY, "Error");
- oop cur = prefix;
- for (size_t i = 1; i < objsFromOverflow; ++i) {
- oop next = cur->list_ptr_from_klass();
- if (next == NULL) break;
- cur = next;
- }
- assert(cur != NULL, "Loop postcondition");
-
- // Reattach remaining (suffix) to overflow list
- oop suffix = cur->list_ptr_from_klass();
- if (suffix == NULL) {
-    // Write back the NULL in lieu of the BUSY we wrote
-    // above, provided it is still the same value.
- if (_overflow_list == BUSY) {
- (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
- }
- } else {
- assert(suffix != BUSY, "Error");
- // suffix will be put back on global list
- cur->set_klass_to_list_ptr(NULL); // break off suffix
- // It's possible that the list is still in the empty(busy) state
- // we left it in a short while ago; in that case we may be
- // able to place back the suffix.
- oop observed_overflow_list = _overflow_list;
- oop cur_overflow_list = observed_overflow_list;
- bool attached = false;
- while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
- observed_overflow_list =
- Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
- if (cur_overflow_list == observed_overflow_list) {
- attached = true;
- break;
- } else cur_overflow_list = observed_overflow_list;
- }
- if (!attached) {
-      // Too bad, someone else got in between; we'll need to do a splice.
- // Find the last item of suffix list
- oop last = suffix;
- while (true) {
- oop next = last->list_ptr_from_klass();
- if (next == NULL) break;
- last = next;
- }
- // Atomically prepend suffix to current overflow list
- observed_overflow_list = _overflow_list;
- do {
- cur_overflow_list = observed_overflow_list;
- if (cur_overflow_list != BUSY) {
- // Do the splice ...
- last->set_klass_to_list_ptr(cur_overflow_list);
- } else { // cur_overflow_list == BUSY
- last->set_klass_to_list_ptr(NULL);
- }
- observed_overflow_list =
- Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
- } while (cur_overflow_list != observed_overflow_list);
- }
- }
-
- // Push objects on prefix list onto this thread's work queue
- assert(prefix != NULL && prefix != BUSY, "program logic");
- cur = prefix;
- ssize_t n = 0;
- while (cur != NULL) {
- oop obj_to_push = cur->forwardee();
- oop next = cur->list_ptr_from_klass();
- cur->set_klass(obj_to_push->klass());
- // This may be an array object that is self-forwarded. In that case, the list pointer
- // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
- if (!is_in_reserved(cur)) {
- // This can become a scaling bottleneck when there is work queue overflow coincident
- // with promotion failure.
- oopDesc* f = cur;
- FREE_C_HEAP_OBJ(f);
- } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
- assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
- obj_to_push = cur;
- }
- bool ok = work_q->push(obj_to_push);
- assert(ok, "Should have succeeded");
- cur = next;
- n++;
- }
- TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
-#ifndef PRODUCT
- assert(_num_par_pushes >= n, "Too many pops?");
- Atomic::sub(n, &_num_par_pushes);
-#endif
- return true;
-}
-#undef BUSY
-
-void ParNewGeneration::ref_processor_init() {
- if (_ref_processor == NULL) {
- // Allocate and initialize a reference processor
- _span_based_discoverer.set_span(_reserved);
- _ref_processor =
- new ReferenceProcessor(&_span_based_discoverer, // span
- ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
- ParallelGCThreads, // mt processing degree
- refs_discovery_is_mt(), // mt discovery
- ParallelGCThreads, // mt discovery degree
- refs_discovery_is_atomic(), // atomic_discovery
- NULL, // is_alive_non_header
- false); // disable adjusting number of processing threads
- }
-}
-
-const char* ParNewGeneration::name() const {
- return "par new generation";
-}
-
-void ParNewGeneration::restore_preserved_marks() {
- SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
- _preserved_marks_set.restore(&task_executor);
-}
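
The removed parNewGeneration.cpp above implements ParNew's claim-then-copy evacuation: copy_to_survivor_space() first installs the ClaimedForwardPtr sentinel via CAS to claim the copy, and real_forwardee() spins until the winning thread publishes the real forwarding pointer. The following is a minimal, stand-alone sketch of that protocol; all types and names are illustrative, and std::atomic stands in for HotSpot's mark-word CAS helpers.

// Sketch only: claim-then-copy forwarding as in the removed
// copy_to_survivor_space()/real_forwardee(). Illustrative names throughout.
#include <atomic>
#include <cstring>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr};
  char payload[64] = {};
};

static Obj* const CLAIMED = reinterpret_cast<Obj*>(0x4);  // like ClaimedForwardPtr

Obj* evacuate(Obj* old_obj, Obj* (*allocate)()) {
  Obj* observed = nullptr;
  // Try to claim the right to copy by installing the sentinel.
  if (!old_obj->forwardee.compare_exchange_strong(observed, CLAIMED)) {
    // Someone else claimed it first; wait for the real forwarding pointer.
    while (observed == CLAIMED) {
      observed = old_obj->forwardee.load();
    }
    return observed;
  }
  Obj* copy = allocate();                                   // to-space or tenured
  std::memcpy(copy->payload, old_obj->payload, sizeof(old_obj->payload));
  old_obj->forwardee.store(copy);                           // publish the real forwardee
  return copy;
}
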
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,420 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PARNEWGENERATION_HPP
-#define SHARE_GC_CMS_PARNEWGENERATION_HPP
-
-#include "gc/cms/parOopClosures.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/shared/copyFailedInfo.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/oopStorageParState.hpp"
-#include "gc/shared/plab.hpp"
-#include "gc/shared/preservedMarks.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/padded.hpp"
-
-class ChunkArray;
-class CMSHeap;
-class ParScanWithoutBarrierClosure;
-class ParScanWithBarrierClosure;
-class ParRootScanWithoutBarrierClosure;
-class ParRootScanWithBarrierTwoGensClosure;
-class ParEvacuateFollowersClosure;
-class StrongRootsScope;
-
-// It would be better if these types could be kept local to the .cpp file,
-// but they must be here to allow ParScanClosure::do_oop_work to be defined
-// in genOopClosures.inline.hpp.
-
-typedef Padded<OopTaskQueue> ObjToScanQueue;
-typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
-
-class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
- private:
- ParScanWeakRefClosure* _par_cl;
- protected:
- template <class T> void do_oop_work(T* p);
- public:
- ParKeepAliveClosure(ParScanWeakRefClosure* cl);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// The state needed by thread performing parallel young-gen collection.
-class ParScanThreadState {
- friend class ParScanThreadStateSet;
- private:
- ObjToScanQueue *_work_queue;
- Stack<oop, mtGC>* const _overflow_stack;
- PreservedMarks* const _preserved_marks;
-
- PLAB _to_space_alloc_buffer;
-
- ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
- ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
- ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
- // Will be passed to process_roots to set its generation.
- ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
- // This closure will always be bound to the old gen; it will be used
- // in evacuate_followers.
- ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
- ParEvacuateFollowersClosure _evacuate_followers;
- DefNewGeneration::IsAliveClosure _is_alive_closure;
- ParScanWeakRefClosure _scan_weak_ref_closure;
- ParKeepAliveClosure _keep_alive_closure;
-
- Space* _to_space;
- Space* to_space() { return _to_space; }
-
- ParNewGeneration* _young_gen;
- ParNewGeneration* young_gen() const { return _young_gen; }
-
- Generation* _old_gen;
- Generation* old_gen() { return _old_gen; }
-
- HeapWord *_young_old_boundary;
-
- int _thread_num;
- AgeTable _ageTable;
-
- bool _to_space_full;
-
-#if TASKQUEUE_STATS
- size_t _term_attempts;
- size_t _overflow_refills;
- size_t _overflow_refill_objs;
-#endif // TASKQUEUE_STATS
-
- // Stats for promotion failure
- PromotionFailedInfo _promotion_failed_info;
-
- // Timing numbers.
- double _start;
- double _start_strong_roots;
- double _strong_roots_time;
- double _start_term;
- double _term_time;
-
- // Helper for trim_queues. Scans subset of an array and makes
- // remainder available for work stealing.
- void scan_partial_array_and_push_remainder(oop obj);
-
- // In support of CMS' parallel rescan of survivor space.
- ChunkArray* _survivor_chunk_array;
- ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
-
- void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
-
- ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
- Generation* old_gen_, int thread_num_,
- ObjToScanQueueSet* work_queue_set_,
- Stack<oop, mtGC>* overflow_stacks_,
- PreservedMarks* preserved_marks_,
- size_t desired_plab_sz_,
- TaskTerminator& term_);
-
- public:
- AgeTable* age_table() {return &_ageTable;}
-
- ObjToScanQueue* work_queue() { return _work_queue; }
-
- PreservedMarks* preserved_marks() const { return _preserved_marks; }
-
- PLAB* to_space_alloc_buffer() {
- return &_to_space_alloc_buffer;
- }
-
- ParEvacuateFollowersClosure& evacuate_followers_closure() { return _evacuate_followers; }
- DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
- ParScanWeakRefClosure& scan_weak_ref_closure() { return _scan_weak_ref_closure; }
- ParKeepAliveClosure& keep_alive_closure() { return _keep_alive_closure; }
- ParScanClosure& older_gen_closure() { return _older_gen_closure; }
- ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; };
-
- // Decrease queue size below "max_size".
- void trim_queues(int max_size);
-
- // Private overflow stack usage
- Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
- bool take_from_overflow_stack();
- void push_on_overflow_stack(oop p);
-
-  // Is new_obj a candidate for the scan_partial_array_and_push_remainder method?
- inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
-
- int thread_num() { return _thread_num; }
-
- // Allocate a to-space block of size "sz", or else return NULL.
- HeapWord* alloc_in_to_space_slow(size_t word_sz);
-
- inline HeapWord* alloc_in_to_space(size_t word_sz);
-
- HeapWord* young_old_boundary() { return _young_old_boundary; }
-
- void set_young_old_boundary(HeapWord *boundary) {
- _young_old_boundary = boundary;
- }
-
- // Undo the most recent allocation ("obj", of "word_sz").
- void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
-
- // Promotion failure stats
- void register_promotion_failure(size_t sz) {
- _promotion_failed_info.register_copy_failure(sz);
- }
- PromotionFailedInfo& promotion_failed_info() {
- return _promotion_failed_info;
- }
- bool promotion_failed() {
- return _promotion_failed_info.has_failed();
- }
- void print_promotion_failure_size();
-
-#if TASKQUEUE_STATS
- TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
-
- size_t term_attempts() const { return _term_attempts; }
- size_t overflow_refills() const { return _overflow_refills; }
- size_t overflow_refill_objs() const { return _overflow_refill_objs; }
-
- void note_term_attempt() { ++_term_attempts; }
- void note_overflow_refill(size_t objs) {
- ++_overflow_refills; _overflow_refill_objs += objs;
- }
-
- void reset_stats();
-#endif // TASKQUEUE_STATS
-
- void start_strong_roots() {
- _start_strong_roots = os::elapsedTime();
- }
- void end_strong_roots() {
- _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
- }
- double strong_roots_time() const { return _strong_roots_time; }
- void start_term_time() {
- TASKQUEUE_STATS_ONLY(note_term_attempt());
- _start_term = os::elapsedTime();
- }
- void end_term_time() {
- _term_time += (os::elapsedTime() - _start_term);
- }
- double term_time() const { return _term_time; }
-
- double elapsed_time() const {
- return os::elapsedTime() - _start;
- }
-};
-
-class ParNewGenTask: public AbstractGangTask {
- private:
- ParNewGeneration* _young_gen;
- Generation* _old_gen;
- HeapWord* _young_old_boundary;
- class ParScanThreadStateSet* _state_set;
- StrongRootsScope* _strong_roots_scope;
-
-public:
- ParNewGenTask(ParNewGeneration* young_gen,
- Generation* old_gen,
- HeapWord* young_old_boundary,
- ParScanThreadStateSet* state_set,
- StrongRootsScope* strong_roots_scope);
-
- HeapWord* young_old_boundary() { return _young_old_boundary; }
-
- void work(uint worker_id);
-};
-
-class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
- protected:
- template <class T> void do_oop_work(T* p);
- public:
- KeepAliveClosure(ScanWeakRefClosure* cl);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-template <typename OopClosureType1, typename OopClosureType2>
-class EvacuateFollowersClosureGeneral: public VoidClosure {
- private:
- CMSHeap* _heap;
- OopClosureType1* _scan_cur_or_nonheap;
- OopClosureType2* _scan_older;
- public:
- EvacuateFollowersClosureGeneral(CMSHeap* heap,
- OopClosureType1* cur,
- OopClosureType2* older);
- virtual void do_void();
-};
-
-// Closure for scanning ParNewGeneration.
-// Same as ScanClosure, except does parallel GC barrier.
-class ScanClosureWithParBarrier: public OopsInClassLoaderDataOrGenClosure {
- private:
- ParNewGeneration* _g;
- HeapWord* _boundary;
- bool _gc_barrier;
-
- template <class T> void do_oop_work(T* p);
-
- public:
- ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-// Implements AbstractRefProcTaskExecutor for ParNew.
-class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
- private:
- ParNewGeneration& _young_gen;
- Generation& _old_gen;
- ParScanThreadStateSet& _state_set;
- public:
- ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
- Generation& old_gen,
- ParScanThreadStateSet& state_set)
- : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
- { }
-
- // Executes a task using worker threads.
- virtual void execute(ProcessTask& task, uint ergo_workers);
- // Switch to single threaded mode.
- virtual void set_single_threaded_mode();
-};
-
-
-// A Generation that does parallel young-gen collection.
-
-class ParNewGeneration: public DefNewGeneration {
- friend class ParNewGenTask;
- friend class ParNewRefProcTask;
- friend class ParNewRefProcTaskExecutor;
- friend class ParScanThreadStateSet;
- friend class ParEvacuateFollowersClosure;
-
- private:
- // The per-worker-thread work queues
- ObjToScanQueueSet* _task_queues;
-
- // Per-worker-thread local overflow stacks
- Stack<oop, mtGC>* _overflow_stacks;
-
- // Desired size of survivor space plab's
- PLABStats _plab_stats;
-
- // A list of from-space images of to-be-scanned objects, threaded through
- // klass-pointers (klass information already copied to the forwarded
- // image.) Manipulated with CAS.
- oopDesc* volatile _overflow_list;
- NOT_PRODUCT(ssize_t _num_par_pushes;)
-
- // This closure is used by the reference processor to filter out
- // references to live referent.
- DefNewGeneration::IsAliveClosure _is_alive_closure;
-
- // GC tracer that should be used during collection.
- ParNewTracer _gc_tracer;
-
- static oop real_forwardee_slow(oop obj);
- static void waste_some_time();
-
- void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);
-
- protected:
-
- void restore_preserved_marks();
-
- public:
- ParNewGeneration(ReservedSpace rs,
- size_t initial_byte_size,
- size_t min_byte_size,
- size_t max_byte_size);
-
- ~ParNewGeneration() {
- for (uint i = 0; i < ParallelGCThreads; i++)
- delete _task_queues->queue(i);
-
- delete _task_queues;
- }
-
- virtual void ref_processor_init();
- virtual Generation::Name kind() { return Generation::ParNew; }
- virtual const char* name() const;
- virtual const char* short_name() const { return "ParNew"; }
-
- // override
- virtual bool refs_discovery_is_mt() const {
- return ParallelGCThreads > 1;
- }
-
- // Make the collection virtual.
- virtual void collect(bool full,
- bool clear_all_soft_refs,
- size_t size,
- bool is_tlab);
-
- // This needs to be visible to the closure function.
- // "obj" is the object to be copied, "m" is a recent value of its mark
- // that must not contain a forwarding pointer (though one might be
- // inserted in "obj"s mark word by a parallel thread).
- oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
- oop obj, size_t obj_sz, markWord m);
-
- // in support of testing overflow code
- NOT_PRODUCT(int _overflow_counter;)
- NOT_PRODUCT(bool should_simulate_overflow();)
-
- // Accessor for overflow list
- oop overflow_list() { return _overflow_list; }
-
- // Push the given (from-space) object on the global overflow list.
- void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
-
- // If the global overflow list is non-empty, move some tasks from it
- // onto "work_q" (which need not be empty). No more than 1/4 of the
- // available space on "work_q" is used.
- bool take_from_overflow_list(ParScanThreadState* par_scan_state);
- bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
-
- // The task queues to be used by parallel GC threads.
- ObjToScanQueueSet* task_queues() {
- return _task_queues;
- }
-
- PLABStats* plab_stats() {
- return &_plab_stats;
- }
-
- size_t desired_plab_sz();
-
- const ParNewTracer* gc_tracer() const {
- return &_gc_tracer;
- }
-
- static oop real_forwardee(oop obj);
-};
-
-#endif // SHARE_GC_CMS_PARNEWGENERATION_HPP
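
The header above declares the global overflow list (_overflow_list) that push_on_overflow_list() and take_from_overflow_list_work() manipulate with CAS, using a BUSY sentinel so a claimant can detach the list prefix atomically. A minimal sketch of that pattern follows, with illustrative names and std::atomic in place of HotSpot's Atomic wrappers; the real code also threads the list through klass pointers and re-attaches an untaken suffix.

// Sketch only: lock-free overflow list with a BUSY claim sentinel.
#include <atomic>

struct Node { Node* next = nullptr; };

static Node* const BUSY = reinterpret_cast<Node*>(0x1aff1aff);
static std::atomic<Node*> overflow_list{nullptr};

void push(Node* n) {
  Node* observed = overflow_list.load();
  do {
    n->next = (observed == BUSY) ? nullptr : observed;
  } while (!overflow_list.compare_exchange_weak(observed, n));
}

// Detach the whole list; returns nullptr if empty or currently claimed.
Node* take_all() {
  Node* prefix = overflow_list.exchange(BUSY);
  if (prefix == BUSY) {
    return nullptr;                 // another thread holds the claim; retry later
  }
  // Clear our BUSY claim (fails harmlessly if someone pushed in the meantime).
  Node* expected = BUSY;
  overflow_list.compare_exchange_strong(expected, nullptr);
  return prefix;                    // nullptr if the list was empty
}
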
--- a/src/hotspot/share/gc/cms/parNewGeneration.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PARNEWGENERATION_INLINE_HPP
-#define SHARE_GC_CMS_PARNEWGENERATION_INLINE_HPP
-
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/plab.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-inline HeapWord* ParScanThreadState::alloc_in_to_space(size_t word_sz) {
- HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
- if (obj != NULL) return obj;
- else return alloc_in_to_space_slow(word_sz);
-}
-#endif // SHARE_GC_CMS_PARNEWGENERATION_INLINE_HPP
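
The inline function removed above is the PLAB fast path: each GC worker bump-allocates in a thread-local to-space buffer and only falls back to alloc_in_to_space_slow() when the buffer is exhausted. A minimal sketch of that fast-path/slow-path split, with illustrative names (the real PLAB also handles alignment and buffer retirement):

// Sketch only: thread-local bump-pointer buffer with a slow-path fallback.
#include <cstddef>

struct PlabSketch {
  char* top = nullptr;
  char* end = nullptr;

  // Fast path: pure bump-pointer allocation, no synchronization needed.
  void* allocate(size_t bytes) {
    if (top != nullptr && bytes <= static_cast<size_t>(end - top)) {
      void* obj = top;
      top += bytes;
      return obj;
    }
    return nullptr;   // buffer exhausted; caller takes the slow path
  }
};

// Mirrors the removed alloc_in_to_space(): try the PLAB, then fall back.
void* alloc_in_to_space_sketch(PlabSketch& plab, size_t bytes,
                               void* (*slow_path)(size_t)) {
  void* obj = plab.allocate(bytes);
  return obj != nullptr ? obj : slow_path(bytes);
}
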
--- a/src/hotspot/share/gc/cms/parOopClosures.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PAROOPCLOSURES_HPP
-#define SHARE_GC_CMS_PAROOPCLOSURES_HPP
-
-#include "gc/shared/genOopClosures.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/padded.hpp"
-
-// Closures for ParNewGeneration
-
-class ParScanThreadState;
-class ParNewGeneration;
-typedef Padded<OopTaskQueue> ObjToScanQueue;
-typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
-class ParallelTaskTerminator;
-
-class ParScanClosure: public OopsInClassLoaderDataOrGenClosure {
- protected:
- ParScanThreadState* _par_scan_state;
- ParNewGeneration* _g;
- HeapWord* _boundary;
- template <class T> void inline par_do_barrier(T* p);
- template <class T> void inline do_oop_work(T* p,
- bool gc_barrier,
- bool root_scan);
- public:
- ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state);
-};
-
-class ParScanWithBarrierClosure: public ParScanClosure {
- public:
- ParScanWithBarrierClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state) :
- ParScanClosure(g, par_scan_state) {}
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class ParScanWithoutBarrierClosure: public ParScanClosure {
- public:
- ParScanWithoutBarrierClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state) :
- ParScanClosure(g, par_scan_state) {}
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
- public:
- ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state) :
- ParScanClosure(g, par_scan_state) {}
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class ParRootScanWithoutBarrierClosure: public ParScanClosure {
- public:
- ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state) :
- ParScanClosure(g, par_scan_state) {}
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class ParScanWeakRefClosure: public ScanWeakRefClosure {
- protected:
- ParScanThreadState* _par_scan_state;
- template <class T> inline void do_oop_work(T* p);
- public:
- ParScanWeakRefClosure(ParNewGeneration* g,
- ParScanThreadState* par_scan_state);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-};
-
-class ParEvacuateFollowersClosure: public VoidClosure {
- private:
- ParScanThreadState* _par_scan_state;
- ParScanThreadState* par_scan_state() { return _par_scan_state; }
-
- // We want to preserve the specific types here (rather than "OopClosure")
- // for later de-virtualization of do_oop calls.
- ParScanWithoutBarrierClosure* _to_space_closure;
- ParScanWithoutBarrierClosure* to_space_closure() {
- return _to_space_closure;
- }
- ParRootScanWithoutBarrierClosure* _to_space_root_closure;
- ParRootScanWithoutBarrierClosure* to_space_root_closure() {
- return _to_space_root_closure;
- }
-
- ParScanWithBarrierClosure* _old_gen_closure;
- ParScanWithBarrierClosure* old_gen_closure () {
- return _old_gen_closure;
- }
- ParRootScanWithBarrierTwoGensClosure* _old_gen_root_closure;
- ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure () {
- return _old_gen_root_closure;
- }
-
- ParNewGeneration* _par_gen;
- ParNewGeneration* par_gen() { return _par_gen; }
-
- ObjToScanQueueSet* _task_queues;
- ObjToScanQueueSet* task_queues() { return _task_queues; }
-
- ParallelTaskTerminator* _terminator;
- ParallelTaskTerminator* terminator() { return _terminator; }
- public:
- ParEvacuateFollowersClosure(
- ParScanThreadState* par_scan_state_,
- ParScanWithoutBarrierClosure* to_space_closure_,
- ParScanWithBarrierClosure* old_gen_closure_,
- ParRootScanWithoutBarrierClosure* to_space_root_closure_,
- ParNewGeneration* par_gen_,
- ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
- ObjToScanQueueSet* task_queues_,
- ParallelTaskTerminator* terminator_);
- virtual void do_void();
-};
-
-#endif // SHARE_GC_CMS_PAROOPCLOSURES_HPP
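
As the comment in ParEvacuateFollowersClosure above notes, the concrete closure types are kept (rather than plain OopClosure pointers) so the do_oop calls can later be de-virtualized. A minimal sketch of the idea with illustrative names: iteration templated on the closure type binds do_oop statically, so the compiler can inline it instead of issuing a virtual dispatch per oop.

// Sketch only: closure with a non-virtual do_oop, driven by a templated iterator.
class ScanWithoutBarrierSketch {
 public:
  void do_oop(void** p) {
    (void)p;  // placeholder: evacuate *p; no card mark needed for to-space refs
  }
};

template <typename ClosureType>
void oop_iterate_sketch(void** from, void** to, ClosureType* cl) {
  for (void** p = from; p != to; ++p) {
    cl->do_oop(p);  // statically bound; a plain base-class pointer here would
                    // cost one virtual dispatch per oop instead
  }
}
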
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PAROOPCLOSURES_INLINE_HPP
-#define SHARE_GC_CMS_PAROOPCLOSURES_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/cms/parOopClosures.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
- oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
- // weak references are sometimes scanned twice; must check
- // that to-space doesn't already contain this object
- if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
- // we need to ensure that it is copied (see comment in
- // ParScanClosure::do_oop_work).
- Klass* objK = obj->klass();
- markWord m = obj->mark_raw();
- oop new_obj;
- if (m.is_marked()) { // Contains forwarding pointer.
- new_obj = ParNewGeneration::real_forwardee(obj);
- } else {
- size_t obj_sz = obj->size_given_klass(objK);
- new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
- obj, obj_sz, m);
- }
- RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
- }
-}
-
-inline void ParScanWeakRefClosure::do_oop(oop* p) { ParScanWeakRefClosure::do_oop_work(p); }
-inline void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
-
-template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
- assert(generation()->is_in_reserved(p), "expected ref in generation");
- oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
- // If p points to a younger generation, mark the card.
- if ((HeapWord*)obj < gen_boundary()) {
- rs()->write_ref_field_gc_par(p, obj);
- }
-}
-
-template <class T>
-inline void ParScanClosure::do_oop_work(T* p,
- bool gc_barrier,
- bool root_scan) {
- assert((!CMSHeap::heap()->is_in_reserved(p) ||
- generation()->is_in_reserved(p))
- && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
- "The gen must be right, and we must be doing the barrier "
- "in older generations.");
- T heap_oop = RawAccess<>::oop_load(p);
- if (!CompressedOops::is_null(heap_oop)) {
- oop obj = CompressedOops::decode_not_null(heap_oop);
- if ((HeapWord*)obj < _boundary) {
-#ifndef PRODUCT
- if (_g->to()->is_in_reserved(obj)) {
- Log(gc) log;
- log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
- CMSHeap* heap = CMSHeap::heap();
- Space* sp = heap->space_containing(p);
- oop obj = oop(sp->block_start(p));
- assert((HeapWord*)obj < (HeapWord*)p, "Error");
- log.error("Object: " PTR_FORMAT, p2i((void *)obj));
- log.error("-------");
- LogStream ls(log.error());
- obj->print_on(&ls);
- log.error("-----");
- log.error("Heap:");
- log.error("-----");
- heap->print_on(&ls);
- ShouldNotReachHere();
- }
-#endif
- // OK, we need to ensure that it is copied.
- // We read the klass and mark in this order, so that we can reliably
- // get the size of the object: if the mark we read is not a
- // forwarding pointer, then the klass is valid: the klass is only
- // overwritten with an overflow next pointer after the object is
- // forwarded.
- Klass* objK = obj->klass();
- markWord m = obj->mark_raw();
- oop new_obj;
- if (m.is_marked()) { // Contains forwarding pointer.
- new_obj = ParNewGeneration::real_forwardee(obj);
- RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
- log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
- "forwarded ",
- new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
- } else {
- size_t obj_sz = obj->size_given_klass(objK);
- new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
- RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
- if (root_scan) {
- // This may have pushed an object. If we have a root
- // category with a lot of roots, can't let the queue get too
- // full:
- (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
- }
- }
- if (is_scanning_a_cld()) {
- do_cld_barrier();
- } else if (gc_barrier) {
- // Now call parent closure
- par_do_barrier(p);
- }
- }
- }
-}
-
-inline void ParScanWithBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, true, false); }
-inline void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
-
-inline void ParScanWithoutBarrierClosure::do_oop(oop* p) { ParScanClosure::do_oop_work(p, false, false); }
-inline void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
-
-#endif // SHARE_GC_CMS_PAROOPCLOSURES_INLINE_HPP
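
par_do_barrier() above applies the generational write barrier while scanning with a GC barrier: if a field ends up referring to a young-gen object, the covering card is dirtied via write_ref_field_gc_par() so the next young collection rescans it. A minimal sketch of that barrier, under the assumed layout that the young generation lies below a boundary address and the card table is a byte map with one entry per 512-byte card; the index computation is simplified (the real table is biased by the heap base).

// Sketch only: dirty the card covering an old-gen field that now points young.
#include <cstdint>

const int  kCardShift         = 9;        // 512-byte cards, as in HotSpot
uint8_t*   card_table         = nullptr;  // assumed to be allocated at heap init
uintptr_t  young_gen_boundary = 0;        // assumed to be set at heap init

inline void write_ref_field_gc_par_sketch(void** field, void* new_value) {
  if (reinterpret_cast<uintptr_t>(new_value) < young_gen_boundary) {
    // The field now refers to a young object: mark its card as dirty so the
    // next young collection rescans it (0 == dirty in this sketch).
    card_table[reinterpret_cast<uintptr_t>(field) >> kCardShift] = 0;
  }
}
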
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,308 +0,0 @@
-/*
- * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/promotionInfo.hpp"
-#include "gc/shared/genOopClosures.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/markWord.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-/////////////////////////////////////////////////////////////////////////
-//// PromotionInfo
-/////////////////////////////////////////////////////////////////////////
-
-
-PromotedObject* PromotedObject::next() const {
- assert(!((FreeChunk*)this)->is_free(), "Error");
- PromotedObject* res;
- if (UseCompressedOops) {
- // The next pointer is a compressed oop stored in the top 32 bits
- res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
- } else {
- res = (PromotedObject*)(_next & next_mask);
- }
- assert(oopDesc::is_oop_or_null(oop(res), true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
- return res;
-}
-
-inline void PromotedObject::setNext(PromotedObject* x) {
- assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
- "or insufficient alignment of objects");
- if (UseCompressedOops) {
- assert(_data._narrow_next == 0, "Overwrite?");
- _data._narrow_next = CompressedOops::encode(oop(x));
- } else {
- _next |= (intptr_t)x;
- }
- assert(!((FreeChunk*)this)->is_free(), "Error");
-}
-
-// Return the next displaced header, incrementing the pointer and
-// recycling spool area as necessary.
-markWord PromotionInfo::nextDisplacedHeader() {
- assert(_spoolHead != NULL, "promotionInfo inconsistency");
- assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
- "Empty spool space: no displaced header can be fetched");
- assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
- markWord hdr = _spoolHead->displacedHdr[_firstIndex];
- // Spool forward
- if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
- // forward to next block, recycling this block into spare spool buffer
- SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
- assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
- _spoolHead->nextSpoolBlock = _spareSpool;
- _spareSpool = _spoolHead;
- _spoolHead = tmp;
- _firstIndex = 1;
- NOT_PRODUCT(
- if (_spoolHead == NULL) { // all buffers fully consumed
- assert(_spoolTail == NULL && _nextIndex == 1,
- "spool buffers processing inconsistency");
- }
- )
- }
- return hdr;
-}
-
-void PromotionInfo::track(PromotedObject* trackOop) {
- track(trackOop, oop(trackOop)->klass());
-}
-
-void PromotionInfo::track(PromotedObject* trackOop, Klass* klassOfOop) {
- // make a copy of header as it may need to be spooled
- markWord mark = oop(trackOop)->mark_raw();
- trackOop->clear_next();
- if (mark.must_be_preserved_for_cms_scavenge(klassOfOop)) {
- // save non-prototypical header, and mark oop
- saveDisplacedHeader(mark);
- trackOop->setDisplacedMark();
- } else {
- // we'd like to assert something like the following:
- // assert(mark == markWord::prototype(), "consistency check");
- // ... but the above won't work because the age bits have not (yet) been
- // cleared. The remainder of the check would be identical to the
- // condition checked in must_be_preserved() above, so we don't really
- // have anything useful to check here!
- }
- if (_promoTail != NULL) {
- assert(_promoHead != NULL, "List consistency");
- _promoTail->setNext(trackOop);
- _promoTail = trackOop;
- } else {
- assert(_promoHead == NULL, "List consistency");
- _promoHead = _promoTail = trackOop;
- }
-  // Mark as newly promoted, so we can skip over such objects
-  // when scanning dirty cards.
- assert(!trackOop->hasPromotedMark(), "Should not have been marked");
- trackOop->setPromotedMark();
-}
-
-// Save the given displaced header, incrementing the pointer and
-// obtaining more spool area as necessary.
-void PromotionInfo::saveDisplacedHeader(markWord hdr) {
- assert(_spoolHead != NULL && _spoolTail != NULL,
- "promotionInfo inconsistency");
- assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
- _spoolTail->displacedHdr[_nextIndex] = hdr;
- // Spool forward
- if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
- // get a new spooling block
- assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
- _splice_point = _spoolTail; // save for splicing
- _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
- _spoolTail = _spoolTail->nextSpoolBlock; // might become NULL ...
- // ... but will attempt filling before next promotion attempt
- _nextIndex = 1;
- }
-}
-
-// Ensure that spooling space exists. Return false if spooling space
-// could not be obtained.
-bool PromotionInfo::ensure_spooling_space_work() {
- assert(!has_spooling_space(), "Only call when there is no spooling space");
- // Try and obtain more spooling space
- SpoolBlock* newSpool = getSpoolBlock();
- assert(newSpool == NULL ||
- (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
- "getSpoolBlock() sanity check");
- if (newSpool == NULL) {
- return false;
- }
- _nextIndex = 1;
- if (_spoolTail == NULL) {
- _spoolTail = newSpool;
- if (_spoolHead == NULL) {
- _spoolHead = newSpool;
- _firstIndex = 1;
- } else {
- assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
- "Splice point invariant");
- // Extra check that _splice_point is connected to list
- #ifdef ASSERT
- {
- SpoolBlock* blk = _spoolHead;
- for (; blk->nextSpoolBlock != NULL;
- blk = blk->nextSpoolBlock);
- assert(blk != NULL && blk == _splice_point,
- "Splice point incorrect");
- }
- #endif // ASSERT
- _splice_point->nextSpoolBlock = newSpool;
- }
- } else {
- assert(_spoolHead != NULL, "spool list consistency");
- _spoolTail->nextSpoolBlock = newSpool;
- _spoolTail = newSpool;
- }
- return true;
-}
-
-// Get a free spool buffer from the free pool, getting a new block
-// from the heap if necessary.
-SpoolBlock* PromotionInfo::getSpoolBlock() {
- SpoolBlock* res;
- if ((res = _spareSpool) != NULL) {
- _spareSpool = _spareSpool->nextSpoolBlock;
- res->nextSpoolBlock = NULL;
- } else { // spare spool exhausted, get some from heap
- res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
- if (res != NULL) {
- res->init();
- }
- }
- assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
- return res;
-}
-
-void PromotionInfo::startTrackingPromotions() {
- assert(noPromotions(), "sanity");
- assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
- "spooling inconsistency?");
- _firstIndex = _nextIndex = 1;
- _tracking = true;
-}
-
-void PromotionInfo::stopTrackingPromotions() {
- assert(noPromotions(), "we should have torn down the lists by now");
- assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
- "spooling inconsistency?");
- _firstIndex = _nextIndex = 1;
- _tracking = false;
-}
-
-// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
-// points to the next slot available for filling.
-// The set of slots holding displaced headers are then all those in the
-// right-open interval denoted by:
-//
-// [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
-//
-// When _spoolTail is NULL, then the set of slots with displaced headers
-// is all those starting at the slot <_spoolHead, _firstIndex> and
-// going up to the last slot of last block in the linked list.
-// In this latter case, _splice_point points to the tail block of
-// this linked list of blocks holding displaced headers.
-void PromotionInfo::verify() const {
- // Verify the following:
- // 1. the number of displaced headers matches the number of promoted
- // objects that have displaced headers
- // 2. each promoted object lies in this space
- debug_only(
- PromotedObject* junk = NULL;
- assert(junk->next_addr() == (void*)(oop(junk)->mark_addr_raw()),
- "Offset of PromotedObject::_next is expected to align with "
- " the OopDesc::_mark within OopDesc");
- )
- // FIXME: guarantee????
- guarantee(_spoolHead == NULL || _spoolTail != NULL ||
- _splice_point != NULL, "list consistency");
- guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
- // count the number of objects with displaced headers
- size_t numObjsWithDisplacedHdrs = 0;
- for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
- guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
- // the last promoted object may fail the mark() != NULL test of is_oop().
- guarantee(curObj->next() == NULL || oopDesc::is_oop(oop(curObj)), "must be an oop");
- if (curObj->hasDisplacedMark()) {
- numObjsWithDisplacedHdrs++;
- }
- }
- // Count the number of displaced headers
- size_t numDisplacedHdrs = 0;
- for (SpoolBlock* curSpool = _spoolHead;
- curSpool != _spoolTail && curSpool != NULL;
- curSpool = curSpool->nextSpoolBlock) {
- // the first entry is just a self-pointer; indices 1 through
- // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
- guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
- "first entry of displacedHdr should be self-referential");
- numDisplacedHdrs += curSpool->bufferSize - 1;
- }
- guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
- "internal consistency");
- guarantee(_spoolTail != NULL || _nextIndex == 1,
- "Inconsistency between _spoolTail and _nextIndex");
- // We overcounted (_firstIndex-1) worth of slots in block
- // _spoolHead and we undercounted (_nextIndex-1) worth of
- // slots in block _spoolTail. We make an appropriate
- // adjustment by subtracting the first and adding the
- // second: - (_firstIndex - 1) + (_nextIndex - 1)
- numDisplacedHdrs += (_nextIndex - _firstIndex);
- guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
-}
-
-void PromotionInfo::print_on(outputStream* st) const {
- SpoolBlock* curSpool = NULL;
- size_t i = 0;
- st->print_cr(" start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
- _firstIndex, _nextIndex);
- for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
- curSpool = curSpool->nextSpoolBlock) {
- curSpool->print_on(st);
- st->print_cr(" active ");
- i++;
- }
- for (curSpool = _spoolTail; curSpool != NULL;
- curSpool = curSpool->nextSpoolBlock) {
- curSpool->print_on(st);
- st->print_cr(" inactive ");
- i++;
- }
- for (curSpool = _spareSpool; curSpool != NULL;
- curSpool = curSpool->nextSpoolBlock) {
- curSpool->print_on(st);
- st->print_cr(" free ");
- i++;
- }
- st->print_cr(" " SIZE_FORMAT " header spooling blocks", i);
-}
-
-void SpoolBlock::print_on(outputStream* st) const {
- st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
- p2i(this), p2i((HeapWord*)displacedHdr + bufferSize),
- bufferSize, p2i(nextSpoolBlock));
-}
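
For reference, the promotion spooling removed above is essentially a FIFO of fixed-size buffer blocks: displaced mark words are appended at the tail slot <_spoolTail, _nextIndex> and consumed from the head slot <_spoolHead, _firstIndex>. The following is only an illustrative sketch of that idea, not the CMS code: the names, the block size, and the use of plain new/delete (instead of the space's scratch allocator) are all hypothetical.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct SpoolBlock {
  static const size_t kSlots = 8;   // slot 0 is reserved; usable slots are 1..kSlots-1
  SpoolBlock* next = nullptr;
  uintptr_t   slots[kSlots];
};

class HeaderSpool {
  SpoolBlock* _head = nullptr;      // oldest block (consume side)
  SpoolBlock* _tail = nullptr;      // newest block (fill side)
  size_t _firstIndex = 1;           // next slot to consume in _head
  size_t _nextIndex  = 1;           // next slot to fill in _tail
 public:
  ~HeaderSpool() { while (!empty()) consume(); }

  bool empty() const {
    return _head == nullptr || (_head == _tail && _firstIndex == _nextIndex);
  }

  // Append a displaced header at the tail, growing the block list if needed.
  void save(uintptr_t hdr) {
    if (_tail == nullptr || _nextIndex == SpoolBlock::kSlots) {
      SpoolBlock* blk = new SpoolBlock();
      if (_tail != nullptr) { _tail->next = blk; } else { _head = blk; }
      _tail = blk;
      _nextIndex = 1;
    }
    _tail->slots[_nextIndex++] = hdr;
  }

  // Consume the oldest displaced header, freeing drained blocks as we go.
  uintptr_t consume() {
    assert(!empty() && "no spooled headers");
    uintptr_t hdr = _head->slots[_firstIndex++];
    if (_firstIndex == SpoolBlock::kSlots ||
        (_head == _tail && _firstIndex == _nextIndex)) {
      SpoolBlock* drained = _head;
      _head = _head->next;
      if (_head == nullptr) { _tail = nullptr; _nextIndex = 1; }
      _firstIndex = 1;
      delete drained;
    }
    return hdr;
  }
};

The live headers in this sketch form exactly the right-open interval described in the verify() comment above: everything from <_head, _firstIndex> up to, but not including, <_tail, _nextIndex>.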
--- a/src/hotspot/share/gc/cms/promotionInfo.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PROMOTIONINFO_HPP
-#define SHARE_GC_CMS_PROMOTIONINFO_HPP
-
-#include "gc/cms/freeChunk.hpp"
-
-// Forward declarations
-class CompactibleFreeListSpace;
-
-class PromotedObject {
- private:
- enum {
- promoted_mask = right_n_bits(2), // i.e. 0x3
- displaced_mark = nth_bit(2), // i.e. 0x4
- next_mask = ~(right_n_bits(3)) // i.e. ~(0x7)
- };
-
- // Below, we want _narrow_next in the "higher" 32 bit slot,
- // whose position will depend on endian-ness of the platform.
- // This is so that there is no interference with the
- // cms_free_bit occupying bit position 7 (lsb == 0)
- // when we are using compressed oops; see FreeChunk::is_free().
- // We cannot move the cms_free_bit down because currently
- // biased locking code assumes that age bits are contiguous
- // with the lock bits. Even if that assumption were relaxed,
- // the least position we could move this bit to would be
- // to bit position 3, which would require 16 byte alignment.
- typedef struct {
-#ifdef VM_LITTLE_ENDIAN
- LP64_ONLY(narrowOop _pad;)
- narrowOop _narrow_next;
-#else
- narrowOop _narrow_next;
- LP64_ONLY(narrowOop _pad;)
-#endif
- } Data;
-
- union {
- intptr_t _next;
- Data _data;
- };
- public:
- PromotedObject* next() const;
- void setNext(PromotedObject* x);
- inline void setPromotedMark() {
- _next |= promoted_mask;
- assert(!((FreeChunk*)this)->is_free(), "Error");
- }
- inline bool hasPromotedMark() const {
- assert(!((FreeChunk*)this)->is_free(), "Error");
- return (_next & promoted_mask) == promoted_mask;
- }
- inline void setDisplacedMark() {
- _next |= displaced_mark;
- assert(!((FreeChunk*)this)->is_free(), "Error");
- }
- inline bool hasDisplacedMark() const {
- assert(!((FreeChunk*)this)->is_free(), "Error");
- return (_next & displaced_mark) != 0;
- }
- inline void clear_next() {
- _next = 0;
- assert(!((FreeChunk*)this)->is_free(), "Error");
- }
- debug_only(void *next_addr() { return (void *) &_next; })
-};
-
-class SpoolBlock: public FreeChunk {
- friend class PromotionInfo;
- protected:
- SpoolBlock* nextSpoolBlock;
- size_t bufferSize; // number of usable words in this block
- markWord* displacedHdr; // the displaced headers start here
-
- // Note about bufferSize: it denotes the number of entries available plus 1;
- // legal indices range from 1 through BufferSize - 1. See the verification
- // code verify() that counts the number of displaced headers spooled.
- size_t computeBufferSize() {
- return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markWord);
- }
-
- public:
- void init() {
- bufferSize = computeBufferSize();
- displacedHdr = (markWord*)&displacedHdr;
- nextSpoolBlock = NULL;
- }
-
- void print_on(outputStream* st) const;
- void print() const { print_on(tty); }
-};
-
-class PromotionInfo {
- bool _tracking; // set if tracking
- CompactibleFreeListSpace* _space; // the space to which this belongs
- PromotedObject* _promoHead; // head of list of promoted objects
- PromotedObject* _promoTail; // tail of list of promoted objects
- SpoolBlock* _spoolHead; // first spooling block
- SpoolBlock* _spoolTail; // last non-full spooling block or null
- SpoolBlock* _splice_point; // when _spoolTail is null, holds list tail
- SpoolBlock* _spareSpool; // free spool buffer
- size_t _firstIndex; // first active index in
- // first spooling block (_spoolHead)
- size_t _nextIndex; // last active index + 1 in last
- // spooling block (_spoolTail)
- private:
- // ensure that spooling space exists; return true if there is spooling space
- bool ensure_spooling_space_work();
-
- public:
- PromotionInfo() :
- _tracking(0), _space(NULL),
- _promoHead(NULL), _promoTail(NULL),
- _spoolHead(NULL), _spoolTail(NULL),
- _spareSpool(NULL), _firstIndex(1),
- _nextIndex(1) {}
-
- bool noPromotions() const {
- assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
- return _promoHead == NULL;
- }
- void startTrackingPromotions();
- void stopTrackingPromotions();
- bool tracking() const { return _tracking; }
- void track(PromotedObject* trackOop); // keep track of a promoted oop
- // The following variant must be used when trackOop is not fully
- // initialized and has a NULL klass:
- void track(PromotedObject* trackOop, Klass* klassOfOop); // keep track of a promoted oop
- void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
- CompactibleFreeListSpace* space() const { return _space; }
- markWord nextDisplacedHeader(); // get next header & forward spool pointer
- void saveDisplacedHeader(markWord hdr);
- // save header and forward spool
-
- inline size_t refillSize() const;
-
- SpoolBlock* getSpoolBlock(); // return a free spooling block
- inline bool has_spooling_space() {
- return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
- }
- // ensure that spooling space exists
- bool ensure_spooling_space() {
- return has_spooling_space() || ensure_spooling_space_work();
- }
-
- template <typename OopClosureType>
- void promoted_oops_iterate(OopClosureType* cl);
-
- void verify() const;
- void reset() {
- _promoHead = NULL;
- _promoTail = NULL;
- _spoolHead = NULL;
- _spoolTail = NULL;
- _spareSpool = NULL;
- _firstIndex = 0;
- _nextIndex = 0;
-
- }
-
- void print_on(outputStream* st) const;
-};
-
-
-#endif // SHARE_GC_CMS_PROMOTIONINFO_HPP
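
The PromotedObject layout above is a low-bit tagging scheme: because promoted objects are at least 8-byte aligned, the bottom three bits of the next pointer are free to carry the promoted and displaced-mark flags, and next_mask strips them off again when following the link. A minimal standalone sketch of that technique (hypothetical names, no compressed-oops or endianness handling):

#include <cassert>
#include <cstdint>

class TaggedNext {
  static const uintptr_t kPromotedMask = 0x3;            // low two bits
  static const uintptr_t kDisplacedBit = 0x4;            // third bit
  static const uintptr_t kPointerMask  = ~uintptr_t(0x7);
  uintptr_t _bits = 0;
 public:
  void set_next(void* p) {
    assert((reinterpret_cast<uintptr_t>(p) & ~kPointerMask) == 0 &&
           "pointer must be 8-byte aligned");
    _bits = reinterpret_cast<uintptr_t>(p) | (_bits & ~kPointerMask);  // keep the flag bits
  }
  void* next() const         { return reinterpret_cast<void*>(_bits & kPointerMask); }
  void set_promoted()        { _bits |= kPromotedMask; }
  bool has_promoted() const  { return (_bits & kPromotedMask) == kPromotedMask; }
  void set_displaced()       { _bits |= kDisplacedBit; }
  bool has_displaced() const { return (_bits & kDisplacedBit) != 0; }
  void clear()               { _bits = 0; }
};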
--- a/src/hotspot/share/gc/cms/promotionInfo.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PROMOTIONINFO_INLINE_HPP
-#define SHARE_GC_CMS_PROMOTIONINFO_INLINE_HPP
-
-#include "gc/cms/promotionInfo.hpp"
-#include "oops/oop.inline.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-//////////////////////////////////////////////////////////////////////////////
-// We go over the list of promoted objects, removing each from the list,
-// and applying the closure (this may, in turn, add more elements to
-// the tail of the promoted list, and these newly added objects will
-// also be processed) until the list is empty.
-// To aid verification and debugging, in the non-product builds
-// we actually forward _promoHead each time we process a promoted oop.
-// Note that this is not necessary in general (i.e. when we don't need to
-// call PromotionInfo::verify()) because oop_iterate can only add to the
-// end of _promoTail, and never needs to look at _promoHead.
-
-template <typename OopClosureType>
-void PromotionInfo::promoted_oops_iterate(OopClosureType* cl) {
- NOT_PRODUCT(verify());
- PromotedObject *curObj, *nextObj;
- for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {
- if ((nextObj = curObj->next()) == NULL) {
- /* protect ourselves against additions due to closure application
- below by resetting the list. */
- assert(_promoTail == curObj, "Should have been the tail");
- _promoHead = _promoTail = NULL;
- }
- if (curObj->hasDisplacedMark()) {
- /* restore displaced header */
- oop(curObj)->set_mark_raw(nextDisplacedHeader());
- } else {
- /* restore prototypical header */
- oop(curObj)->init_mark_raw();
- }
- /* The "promoted_mark" should now not be set */
- assert(!curObj->hasPromotedMark(),
- "Should have been cleared by restoring displaced mark-word");
- NOT_PRODUCT(_promoHead = nextObj);
- if (cl != NULL) oop(curObj)->oop_iterate(cl);
- if (nextObj == NULL) { /* start at head of list reset above */
- nextObj = _promoHead;
- }
- }
- assert(noPromotions(), "post-condition violation");
- assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");
- assert(_spoolHead == _spoolTail, "emptied spooling buffers");
- assert(_firstIndex == _nextIndex, "empty buffer");
-}
-
-#endif // SHARE_GC_CMS_PROMOTIONINFO_INLINE_HPP
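
The loop removed above is a drain pattern: it walks an intrusive list whose tail may grow while each element is processed, and it resets the list just before handling the current tail so that anything appended during that call forms a fresh segment, which is then picked up. A standalone sketch of the same pattern, with hypothetical names and std::function standing in for a HotSpot closure:

#include <functional>

struct Node {
  Node* next = nullptr;
  int payload = 0;
};

struct DrainList {
  Node* head = nullptr;
  Node* tail = nullptr;

  void append(Node* n) {
    n->next = nullptr;
    if (tail == nullptr) { head = tail = n; } else { tail->next = n; tail = n; }
  }

  // Visit every node, including nodes appended by 'visit' while draining.
  void drain(const std::function<void(Node*)>& visit) {
    Node* cur = head;
    while (cur != nullptr) {
      Node* next = cur->next;
      if (next == nullptr) {
        // cur is the current tail: reset the list so that nodes appended by
        // visit(cur) start a fresh segment, which we continue with below.
        head = tail = nullptr;
      }
      visit(cur);
      if (next == nullptr) {
        next = head;   // whatever visit(cur) appended, or nullptr if nothing
      }
      cur = next;
    }
  }
};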
--- a/src/hotspot/share/gc/cms/vmStructs_cms.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_VMSTRUCTS_CMS_HPP
-#define SHARE_GC_CMS_VMSTRUCTS_CMS_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-
-#define VM_STRUCTS_CMSGC(nonstatic_field, \
- volatile_nonstatic_field, \
- static_field) \
- nonstatic_field(CompactibleFreeListSpace, _collector, CMSCollector*) \
- nonstatic_field(CompactibleFreeListSpace, _bt, BlockOffsetArrayNonContigSpace) \
- static_field(CompactibleFreeListSpace, _min_chunk_size_in_bytes, size_t) \
- nonstatic_field(CMSBitMap, _bmStartWord, HeapWord*) \
- nonstatic_field(CMSBitMap, _bmWordSize, size_t) \
- nonstatic_field(CMSBitMap, _shifter, const int) \
- nonstatic_field(CMSBitMap, _bm, BitMapView) \
- nonstatic_field(CMSBitMap, _virtual_space, VirtualSpace) \
- nonstatic_field(CMSCollector, _markBitMap, CMSBitMap) \
- nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace, CompactibleFreeListSpace*) \
- static_field(ConcurrentMarkSweepThread, _collector, CMSCollector*) \
- nonstatic_field(LinearAllocBlock, _word_size, size_t) \
- nonstatic_field(AFLBinaryTreeDictionary, _total_size, size_t) \
- nonstatic_field(CompactibleFreeListSpace, _dictionary, AFLBinaryTreeDictionary*) \
- nonstatic_field(CompactibleFreeListSpace, _indexedFreeList[0], AdaptiveFreeList<FreeChunk>) \
- nonstatic_field(CompactibleFreeListSpace, _smallLinearAllocBlock, LinearAllocBlock) \
- volatile_nonstatic_field(FreeChunk, _size, size_t) \
- nonstatic_field(FreeChunk, _next, FreeChunk*) \
- nonstatic_field(FreeChunk, _prev, FreeChunk*) \
- nonstatic_field(AdaptiveFreeList<FreeChunk>, _size, size_t) \
- nonstatic_field(AdaptiveFreeList<FreeChunk>, _count, ssize_t)
-
-
-
-#define VM_TYPES_CMSGC(declare_type, \
- declare_toplevel_type, \
- declare_integer_type) \
- \
- declare_type(CMSHeap, GenCollectedHeap) \
- declare_type(ConcurrentMarkSweepGeneration,CardGeneration) \
- declare_type(ParNewGeneration, DefNewGeneration) \
- declare_type(CompactibleFreeListSpace, CompactibleSpace) \
- declare_type(ConcurrentMarkSweepThread, NamedThread) \
- declare_toplevel_type(CMSCollector) \
- declare_toplevel_type(CMSBitMap) \
- declare_toplevel_type(FreeChunk) \
- declare_toplevel_type(metaspace::Metablock) \
- declare_toplevel_type(ConcurrentMarkSweepThread*) \
- declare_toplevel_type(ConcurrentMarkSweepGeneration*) \
- declare_toplevel_type(CompactibleFreeListSpace*) \
- declare_toplevel_type(CMSCollector*) \
- declare_toplevel_type(AFLBinaryTreeDictionary) \
- declare_toplevel_type(LinearAllocBlock) \
- declare_toplevel_type(FreeChunk*) \
- declare_toplevel_type(AdaptiveFreeList<FreeChunk>*) \
- declare_toplevel_type(AdaptiveFreeList<FreeChunk>)
-
-
-#define VM_INT_CONSTANTS_CMSGC(declare_constant, \
- declare_constant_with_value) \
- declare_constant(CompactibleFreeListSpace::IndexSetSize) \
- declare_constant(Generation::ConcurrentMarkSweep) \
- declare_constant(Generation::ParNew)
-
-#endif // SHARE_GC_CMS_VMSTRUCTS_CMS_HPP
--- a/src/hotspot/share/gc/cms/yieldingWorkgroup.cpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,399 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/yieldingWorkgroup.hpp"
-#include "gc/shared/gcId.hpp"
-#include "utilities/macros.hpp"
-
-YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
- : AbstractGangWorker(gang, id) {}
-
-YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
- const char* name, uint workers, bool are_GC_task_threads) :
- AbstractWorkGang(name, workers, are_GC_task_threads, false),
- _yielded_workers(0),
- _started_workers(0),
- _finished_workers(0),
- _sequence_number(0),
- _task(NULL) {
-
- // Other initialization.
- _monitor = new Monitor(/* priority */ Mutex::leaf,
- /* name */ "WorkGroup monitor",
- /* allow_vm_block */ are_GC_task_threads,
- Monitor::_safepoint_check_never);
-
- assert(monitor() != NULL, "Failed to allocate monitor");
-}
-
-AbstractGangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
- return new YieldingFlexibleGangWorker(this, which);
-}
-
-void YieldingFlexibleWorkGang::internal_worker_poll(YieldingWorkData* data) const {
- assert(data != NULL, "worker data is null");
- data->set_task(task());
- data->set_sequence_number(sequence_number());
-}
-
-void YieldingFlexibleWorkGang::internal_note_start() {
-  assert(monitor()->owned_by_self(), "note_start is an internal method");
- _started_workers += 1;
-}
-
-void YieldingFlexibleWorkGang::internal_note_finish() {
- assert(monitor()->owned_by_self(), "note_finish is an internal method");
- _finished_workers += 1;
-}
-
-// Run a task; returns when the task is done, or the workers yield,
-// or the task is aborted.
-// A task that has been yielded can be continued via this interface
-// by using the same task repeatedly as the argument to the call.
-// It is expected that the YieldingFlexibleGangTask carries the appropriate
-// continuation information used by workers to continue the task
-// from its last yield point. Thus, a completed task will return
-// immediately with no actual work having been done by the workers.
-/////////////////////
-// Implementation notes: remove before checking in XXX
-/*
-Each gang is working on a task at a certain time.
-Some subset of workers may have yielded and some may
-have finished their quota of work. Until this task has
-been completed, the workers are bound to that task.
-Once the task has been completed, the gang unbinds
-itself from the task.
-
-The yielding work gang thus exports two invocation
-interfaces: run_task() and continue_task(). The
-first is used to initiate a new task and bind it
-to the workers; the second is used to continue an
-already bound task that has yielded. Upon completion
-the binding is released and a new binding may be
-created.
-
-The shape of a yielding work gang is as follows:
-
-Overseer invokes run_task(*task).
- Lock gang monitor
- Check that there is no existing binding for the gang
- If so, abort with an error
- Else, create a new binding of this gang to the given task
- Set number of active workers (as asked)
- Notify workers that work is ready to be done
- [the requisite # workers would then start up
- and do the task]
- Wait on the monitor until either
- all work is completed or the task has yielded
- -- this is normally done through
- yielded + completed == active
-   [completed workers are reset to idle state by overseer?]
- return appropriate status to caller
-
-Overseer invokes continue_task(*task),
- Lock gang monitor
- Check that task is the same as current binding
- If not, abort with an error
- Else, set the number of active workers as requested?
- Notify workers that they can continue from yield points
- New workers can also start up as required
- while satisfying the constraint that
- active + yielded does not exceed required number
- Wait (as above).
-
-NOTE: In the above, for simplicity in a first iteration
- our gangs will be of fixed population and will not
- therefore be flexible work gangs, just yielding work
- gangs. Once this works well, we will in a second
-  iteration/refinement introduce flexibility into
- the work gang.
-
-NOTE: we can always create a new gang for each iteration
-  in order to get the flexibility, but for now we will
-  refrain from that simpler route.
-
- */
-/////////////////////
-void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- assert(task() == NULL, "Gang currently tied to a task");
- assert(new_task != NULL, "Null task");
- // Bind task to gang
- _task = new_task;
- new_task->set_gang(this); // Establish 2-way binding to support yielding
- _sequence_number++;
-
- uint requested_size = new_task->requested_size();
- if (requested_size != 0) {
- _active_workers = MIN2(requested_size, total_workers());
- } else {
- _active_workers = active_workers();
- }
- new_task->set_actual_size(_active_workers);
- new_task->set_for_termination(_active_workers);
-
- assert(_started_workers == 0, "Tabula rasa non");
- assert(_finished_workers == 0, "Tabula rasa non");
- assert(_yielded_workers == 0, "Tabula rasa non");
- yielding_task()->set_status(ACTIVE);
-
- // Wake up all the workers, the first few will get to work,
- // and the rest will go back to sleep
- monitor()->notify_all();
- wait_for_gang();
-}
-
-void YieldingFlexibleWorkGang::wait_for_gang() {
-
- assert(monitor()->owned_by_self(), "Data race");
- // Wait for task to complete or yield
- for (Status status = yielding_task()->status();
- status != COMPLETED && status != YIELDED && status != ABORTED;
- status = yielding_task()->status()) {
- assert(started_workers() <= active_workers(), "invariant");
- assert(finished_workers() <= active_workers(), "invariant");
- assert(yielded_workers() <= active_workers(), "invariant");
- monitor()->wait_without_safepoint_check();
- }
- switch (yielding_task()->status()) {
- case COMPLETED:
- case ABORTED: {
- assert(finished_workers() == active_workers(), "Inconsistent status");
- assert(yielded_workers() == 0, "Invariant");
- reset(); // for next task; gang<->task binding released
- break;
- }
- case YIELDED: {
- assert(yielded_workers() > 0, "Invariant");
- assert(yielded_workers() + finished_workers() == active_workers(),
- "Inconsistent counts");
- break;
- }
- case ACTIVE:
- case INACTIVE:
- case COMPLETING:
- case YIELDING:
- case ABORTING:
- default:
- ShouldNotReachHere();
- }
-}
-
-void YieldingFlexibleWorkGang::continue_task(
- YieldingFlexibleGangTask* gang_task) {
-
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- assert(task() != NULL && task() == gang_task, "Incorrect usage");
- assert(_started_workers == _active_workers, "Precondition");
- assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
- "Else why are we calling continue_task()");
- // Restart the yielded gang workers
- yielding_task()->set_status(ACTIVE);
- monitor()->notify_all();
- wait_for_gang();
-}
-
-void YieldingFlexibleWorkGang::reset() {
- _started_workers = 0;
- _finished_workers = 0;
- yielding_task()->set_gang(NULL);
- _task = NULL; // unbind gang from task
-}
-
-void YieldingFlexibleWorkGang::yield() {
- assert(task() != NULL, "Inconsistency; should have task binding");
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- assert(yielded_workers() < active_workers(), "Consistency check");
- if (yielding_task()->status() == ABORTING) {
- // Do not yield; we need to abort as soon as possible
- // XXX NOTE: This can cause a performance pathology in the
- // current implementation in Mustang, as of today, and
- // pre-Mustang in that as soon as an overflow occurs,
- // yields will not be honoured. The right way to proceed
-  // of course is to fix bug # TBF, so that aborts cause
- // us to return at each potential yield point.
- return;
- }
- if (++_yielded_workers + finished_workers() == active_workers()) {
- yielding_task()->set_status(YIELDED);
- monitor()->notify_all();
- } else {
- yielding_task()->set_status(YIELDING);
- }
-
- while (true) {
- switch (yielding_task()->status()) {
- case YIELDING:
- case YIELDED: {
- monitor()->wait_without_safepoint_check();
- break; // from switch
- }
- case ACTIVE:
- case ABORTING:
- case COMPLETING: {
-        assert(_yielded_workers > 0, "Else why am I here?");
- _yielded_workers--;
- return;
- }
- case INACTIVE:
- case ABORTED:
- case COMPLETED:
- default: {
- ShouldNotReachHere();
- }
- }
- }
- // Only return is from inside switch statement above
- ShouldNotReachHere();
-}
-
-void YieldingFlexibleWorkGang::abort() {
- assert(task() != NULL, "Inconsistency; should have task binding");
- MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
- assert(yielded_workers() < active_workers(), "Consistency check");
- #ifndef PRODUCT
- switch (yielding_task()->status()) {
- // allowed states
- case ACTIVE:
- case ABORTING:
- case COMPLETING:
- case YIELDING:
- break;
- // not allowed states
- case INACTIVE:
- case ABORTED:
- case COMPLETED:
- case YIELDED:
- default:
- ShouldNotReachHere();
- }
- #endif // !PRODUCT
- Status prev_status = yielding_task()->status();
- yielding_task()->set_status(ABORTING);
- if (prev_status == YIELDING) {
- assert(yielded_workers() > 0, "Inconsistency");
- // At least one thread has yielded, wake it up
- // so it can go back to waiting stations ASAP.
- monitor()->notify_all();
- }
-}
-
-///////////////////////////////
-// YieldingFlexibleGangTask
-///////////////////////////////
-void YieldingFlexibleGangTask::yield() {
- assert(gang() != NULL, "No gang to signal");
- gang()->yield();
-}
-
-void YieldingFlexibleGangTask::abort() {
- assert(gang() != NULL, "No gang to signal");
- gang()->abort();
-}
-
-///////////////////////////////
-// YieldingFlexibleGangWorker
-///////////////////////////////
-void YieldingFlexibleGangWorker::loop() {
- int previous_sequence_number = 0;
- Monitor* gang_monitor = yf_gang()->monitor();
- MutexLocker ml(gang_monitor, Mutex::_no_safepoint_check_flag);
- YieldingWorkData data;
- int id;
- while (true) {
- // Check if there is work to do.
- yf_gang()->internal_worker_poll(&data);
- if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
- // There is work to be done.
- // First check if we need to become active or if there
- // are already the requisite number of workers
- if (yf_gang()->started_workers() == yf_gang()->active_workers()) {
-        // There are already enough workers, we do not need
-        // to run; fall through and wait on monitor.
- } else {
- // We need to pitch in and do the work.
- assert(yf_gang()->started_workers() < yf_gang()->active_workers(),
- "Unexpected state");
- id = yf_gang()->started_workers();
- yf_gang()->internal_note_start();
- // Now, release the gang mutex and do the work.
- {
- MutexUnlocker mul(gang_monitor, Mutex::_no_safepoint_check_flag);
- GCIdMark gc_id_mark(data.task()->gc_id());
- data.task()->work(id); // This might include yielding
- }
- // Reacquire monitor and note completion of this worker
- yf_gang()->internal_note_finish();
- // Update status of task based on whether all workers have
- // finished or some have yielded
- assert(data.task() == yf_gang()->task(), "Confused task binding");
- if (yf_gang()->finished_workers() == yf_gang()->active_workers()) {
- switch (data.yf_task()->status()) {
- case ABORTING: {
- data.yf_task()->set_status(ABORTED);
- break;
- }
- case ACTIVE:
- case COMPLETING: {
- data.yf_task()->set_status(COMPLETED);
- break;
- }
- default:
- ShouldNotReachHere();
- }
- gang_monitor->notify_all(); // Notify overseer
- } else { // at least one worker is still working or yielded
- assert(yf_gang()->finished_workers() < yf_gang()->active_workers(),
- "Counts inconsistent");
- switch (data.yf_task()->status()) {
- case ACTIVE: {
- // first, but not only thread to complete
- data.yf_task()->set_status(COMPLETING);
- break;
- }
- case YIELDING: {
- if (yf_gang()->finished_workers() + yf_gang()->yielded_workers()
- == yf_gang()->active_workers()) {
- data.yf_task()->set_status(YIELDED);
- gang_monitor->notify_all(); // notify overseer
- }
- break;
- }
- case ABORTING:
- case COMPLETING: {
- break; // nothing to do
- }
- default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
- ShouldNotReachHere();
- }
- }
- }
- }
- // Remember the sequence number
- previous_sequence_number = data.sequence_number();
- // Wait for more work
- gang_monitor->wait_without_safepoint_check();
- }
-}
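
The gang removed above coordinates an overseer and its workers through a single monitor and the counting invariant finished + yielded == active: when it holds, the task is COMPLETED if nobody yielded and YIELDED otherwise. Below is a hedged sketch of just that counting protocol using std::mutex/std::condition_variable instead of a HotSpot Monitor; it is not the removed code, it omits the INACTIVE/ABORTING/ABORTED/COMPLETING states and the actual worker threads, and all names are hypothetical.

#include <cassert>
#include <condition_variable>
#include <mutex>

enum class TaskStatus { ACTIVE, YIELDING, YIELDED, COMPLETED };

class YieldingGangSketch {
  std::mutex _mu;
  std::condition_variable _cv;
  unsigned _active = 0;     // workers bound to the current task
  unsigned _finished = 0;   // workers that completed their share
  unsigned _yielded = 0;    // workers currently parked at a yield point
  TaskStatus _status = TaskStatus::COMPLETED;

  // Caller holds _mu. Publish the outcome once every active worker has
  // either finished or yielded, and wake the waiting overseer.
  void maybe_publish_outcome() {
    if (_finished + _yielded == _active) {
      _status = (_yielded == 0) ? TaskStatus::COMPLETED : TaskStatus::YIELDED;
      _cv.notify_all();
    }
  }

 public:
  // Overseer: bind a task to 'workers' workers and wait for COMPLETED or YIELDED.
  // Assumes a fresh gang (no workers still parked from a previous task).
  TaskStatus run(unsigned workers) {
    std::unique_lock<std::mutex> lk(_mu);
    _active = workers;
    _finished = 0;
    _yielded = 0;
    _status = TaskStatus::ACTIVE;
    _cv.notify_all();  // worker threads (not shown) would pick the task up here
    _cv.wait(lk, [&] { return _status == TaskStatus::COMPLETED ||
                              _status == TaskStatus::YIELDED; });
    return _status;
  }

  // Overseer: resume a yielded task; parked workers re-enter at their yield points.
  TaskStatus resume() {
    std::unique_lock<std::mutex> lk(_mu);
    assert(_status == TaskStatus::YIELDED && "resume() is only valid for a yielded task");
    _status = TaskStatus::ACTIVE;
    _cv.notify_all();
    _cv.wait(lk, [&] { return _status == TaskStatus::COMPLETED ||
                              _status == TaskStatus::YIELDED; });
    return _status;
  }

  // Worker: report that this worker's share of the task is done.
  void worker_finished() {
    std::lock_guard<std::mutex> lk(_mu);
    _finished++;
    maybe_publish_outcome();
  }

  // Worker: park at a yield point until the overseer calls resume().
  void worker_yield() {
    std::unique_lock<std::mutex> lk(_mu);
    _yielded++;
    if (_finished + _yielded == _active) {
      _status = TaskStatus::YIELDED;   // last one in: publish and wake the overseer
      _cv.notify_all();
    } else {
      _status = TaskStatus::YIELDING;  // others are still running
    }
    _cv.wait(lk, [&] { return _status == TaskStatus::ACTIVE; });
    _yielded--;                        // resumed: back to work
  }
};

run() and resume() play the roles of start_task() and continue_task() above; worker_finished() and worker_yield() would be called from the workers' loop() and from the task's yield points.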
--- a/src/hotspot/share/gc/cms/yieldingWorkgroup.hpp Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_YIELDINGWORKGROUP_HPP
-#define SHARE_GC_CMS_YIELDINGWORKGROUP_HPP
-
-#include "gc/shared/workgroup.hpp"
-#include "utilities/macros.hpp"
-
-// Forward declarations
-class YieldingFlexibleGangTask;
-class YieldingFlexibleWorkGang;
-
-// Status of tasks
-enum Status {
- INACTIVE,
- ACTIVE,
- YIELDING,
- YIELDED,
- ABORTING,
- ABORTED,
- COMPLETING,
- COMPLETED
-};
-
-class YieldingWorkData: public StackObj {
- // This would be a struct, but I want accessor methods.
-private:
- AbstractGangTask* _task;
- int _sequence_number;
-public:
- // Constructor and destructor
- YieldingWorkData() : _task(NULL), _sequence_number(0) {}
- ~YieldingWorkData() {}
-
- // Accessors and modifiers
- AbstractGangTask* task() const { return _task; }
- void set_task(AbstractGangTask* value) { _task = value; }
- int sequence_number() const { return _sequence_number; }
- void set_sequence_number(int value) { _sequence_number = value; }
-
- YieldingFlexibleGangTask* yf_task() const {
- return (YieldingFlexibleGangTask*)_task;
- }
-};
-
-// Class YieldingFlexibleGangWorker:
-// Several instances of this class run in parallel as workers for a gang.
-class YieldingFlexibleGangWorker: public AbstractGangWorker {
-public:
- YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id);
-
-public:
- YieldingFlexibleWorkGang* yf_gang() const
- { return (YieldingFlexibleWorkGang*)gang(); }
-
-protected: // Override from parent class
- virtual void loop();
-};
-
-class FlexibleGangTask: public AbstractGangTask {
- int _actual_size; // size of gang obtained
-protected:
- int _requested_size; // size of gang requested
-public:
- FlexibleGangTask(const char* name): AbstractGangTask(name),
- _requested_size(0) {}
-
- // The abstract work method.
- // The argument tells you which member of the gang you are.
- virtual void work(uint worker_id) = 0;
-
- int requested_size() const { return _requested_size; }
- int actual_size() const { return _actual_size; }
-
- void set_requested_size(int sz) { _requested_size = sz; }
- void set_actual_size(int sz) { _actual_size = sz; }
-};
-
-// An abstract task to be worked on by a flexible work gang,
-// and where the workers will periodically yield, usually
-// in response to some condition that is signalled by means
-// that are specific to the task at hand.
-// You subclass this to supply your own work() method.
-// A second feature of this kind of work gang is that
-// it allows for the signalling of certain exceptional
-// conditions that may be encountered during the performance
-// of the task and that may require the task at hand to be
-// `aborted' forthwith. Finally, these gangs are `flexible'
-// in that they can operate at partial capacity with some
-// gang workers waiting on the bench; in other words, the
-// size of the active worker pool can flex (up to an a priori
-// maximum) in response to task requests at certain points.
-// The last part (the flexible part) has not yet been fully
-// fleshed out and is a work in progress.
-class YieldingFlexibleGangTask: public FlexibleGangTask {
- Status _status;
- YieldingFlexibleWorkGang* _gang;
-
-protected:
-  // Constructor and destructor: only construct subclasses.
- YieldingFlexibleGangTask(const char* name): FlexibleGangTask(name),
- _status(INACTIVE),
- _gang(NULL) { }
-
- ~YieldingFlexibleGangTask() { }
-
- friend class YieldingFlexibleWorkGang;
- friend class YieldingFlexibleGangWorker;
-
- void set_status(Status s) {
- _status = s;
- }
- YieldingFlexibleWorkGang* gang() {
- return _gang;
- }
- void set_gang(YieldingFlexibleWorkGang* gang) {
- assert(_gang == NULL || gang == NULL, "Clobber without intermediate reset?");
- _gang = gang;
- }
-
-public:
- // The abstract work method.
- // The argument tells you which member of the gang you are.
- virtual void work(uint worker_id) = 0;
-
- // Subclasses should call the parent's yield() method
- // after having done any work specific to the subclass.
- virtual void yield();
-
- // An abstract method supplied by
- // a concrete sub-class which is used by the coordinator
- // to do any "central yielding" work.
- virtual void coordinator_yield() = 0;
-
- // Subclasses should call the parent's abort() method
-  // after having done any work specific to the subclass.
- virtual void abort();
-
- Status status() const { return _status; }
- bool yielding() const { return _status == YIELDING; }
- bool yielded() const { return _status == YIELDED; }
- bool completed() const { return _status == COMPLETED; }
- bool aborted() const { return _status == ABORTED; }
- bool active() const { return _status == ACTIVE; }
-
- // This method configures the task for proper termination.
- // Some tasks do not have any requirements on termination
- // and may inherit this method that does nothing. Some
- // tasks do some coordination on termination and override
- // this method to implement that coordination.
- virtual void set_for_termination(uint active_workers) {}
-};
-// Class YieldingFlexibleWorkGang: a subclass of AbstractWorkGang.
-// In particular, a YieldingFlexibleWorkGang is made up of
-// YieldingFlexibleGangWorkers, and provides infrastructure
-// supporting yielding to the "GangOverseer", that is, the thread
-// that orchestrates the gang via start_task()/continue_task().
-class YieldingFlexibleWorkGang: public AbstractWorkGang {
- // Here's the public interface to this class.
-public:
- // Constructor and destructor.
- YieldingFlexibleWorkGang(const char* name, uint workers,
- bool are_GC_task_threads);
-
- YieldingFlexibleGangTask* yielding_task() const {
- return task();
- }
- // Allocate a worker and return a pointer to it.
- AbstractGangWorker* allocate_worker(uint which);
-
- // Run a task; returns when the task is done, or the workers yield,
- // or the task is aborted.
- // A task that has been yielded can be continued via this same interface
- // by using the same task repeatedly as the argument to the call.
- // It is expected that the YieldingFlexibleGangTask carries the appropriate
- // continuation information used by workers to continue the task
- // from its last yield point. Thus, a completed task will return
- // immediately with no actual work having been done by the workers.
- void run_task(AbstractGangTask* task) {
- guarantee(false, "Use start_task instead");
- }
- void start_task(YieldingFlexibleGangTask* new_task);
- void continue_task(YieldingFlexibleGangTask* gang_task);
-
- // Abort a currently running task, if any; returns when all the workers
- // have stopped working on the current task and have returned to their
- // waiting stations.
- void abort_task();
-
- // Yield: workers wait at their current working stations
- // until signalled to proceed by the overseer.
- void yield();
-
- // Abort: workers are expected to return to their waiting
- // stations, whence they are ready for the next task dispatched
- // by the overseer.
- void abort();
-
-private:
- uint _yielded_workers;
- void wait_for_gang();
-
-public:
- // Accessors for fields
- uint yielded_workers() const {
- return _yielded_workers;
- }
-
-private:
- friend class YieldingFlexibleGangWorker;
-  void reset(); // resets counters and unbinds the gang from the task
-
-
- // The monitor which protects these data,
- // and notifies of changes in it.
- Monitor* _monitor;
- // Accessors for fields
- Monitor* monitor() const {
- return _monitor;
- }
-
- // The number of started workers.
- uint _started_workers;
- // The number of finished workers.
- uint _finished_workers;
-
- uint started_workers() const {
- return _started_workers;
- }
- uint finished_workers() const {
- return _finished_workers;
- }
-
- // A sequence number for the current task.
- int _sequence_number;
- int sequence_number() const {
- return _sequence_number;
- }
-
- YieldingFlexibleGangTask* _task;
- YieldingFlexibleGangTask* task() const {
- return _task;
- }
-
- void internal_worker_poll(YieldingWorkData* data) const;
- void internal_note_start();
- void internal_note_finish();
-};
-
-#endif // SHARE_GC_CMS_YIELDINGWORKGROUP_HPP
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -298,11 +298,7 @@
*
* In the case of slow allocation the allocation code must handle the barrier
* as part of the allocation in the case the allocated object is not located
- * in the nursery, this would happen for humongous objects. This is similar to
- * how CMS is required to handle this case, see the comments for the method
- * CollectedHeap::new_deferred_store_barrier and OptoRuntime::new_deferred_store_barrier.
- * A deferred card mark is required for these objects and handled in the above
- * mentioned methods.
+ * in the nursery; this would happen for humongous objects.
*
* Returns true if the post barrier can be removed
*/
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -102,8 +102,7 @@
// partially-scanned arrays (in the latter case, we push an oop to
// the from-space image of the array and the length on the
// from-space image indicates how many entries on the array we still
- // need to scan; this is basically how ParNew does partial array
- // scanning too). To be able to distinguish between reference
+ // need to scan. To be able to distinguish between reference
// locations and partially-scanned array oops we simply mask the
// latter oops with 0x01. The next three methods do the masking,
// unmasking, and checking whether the oop is masked or not. Notice
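
The comment above describes tagging partially-scanned array entries with 0x01 so they can be told apart from plain reference locations on the work queue. A minimal sketch of that masking, with hypothetical names (the real queue entries are oop*/oop values rather than void*):

#include <cassert>
#include <cstdint>

const uintptr_t kPartialArrayTag = 0x1;

inline uintptr_t mask_partial_array(void* array_oop) {
  uintptr_t p = reinterpret_cast<uintptr_t>(array_oop);
  assert((p & kPartialArrayTag) == 0 && "oop must be at least 2-byte aligned");
  return p | kPartialArrayTag;                 // mark as "partially scanned array"
}

inline bool is_partial_array(uintptr_t task) {
  return (task & kPartialArrayTag) != 0;
}

inline void* unmask_partial_array(uintptr_t task) {
  assert(is_partial_array(task) && "not a masked entry");
  return reinterpret_cast<void*>(task & ~kPartialArrayTag);
}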
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,8 +66,7 @@
// Methods of protected closure types.
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
- assert(_young_gen->kind() == Generation::ParNew ||
- _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
+ assert(_young_gen->kind() == Generation::DefNew, "Expected the young generation here");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
@@ -884,7 +883,6 @@
log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
GCCause::to_string(gch->gc_cause()));
assert(gch->gc_cause() == GCCause::_scavenge_alot ||
- (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
!gch->incremental_collection_failed(),
"Twice in a row");
seen_incremental_collection_failed = false;
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -39,9 +39,6 @@
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/parOopClosures.hpp"
-#endif
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
size_t initial_byte_size,
--- a/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -39,13 +39,11 @@
class AdaptiveSizePolicy : public CHeapObj<mtGC> {
friend class GCAdaptivePolicyCounters;
friend class PSGCAdaptivePolicyCounters;
- friend class CMSGCAdaptivePolicyCounters;
protected:
enum GCPolicyKind {
_gc_adaptive_size_policy,
- _gc_ps_adaptive_size_policy,
- _gc_cms_adaptive_size_policy
+ _gc_ps_adaptive_size_policy
};
virtual GCPolicyKind kind() const { return _gc_adaptive_size_policy; }
@@ -77,7 +75,7 @@
// Last calculated sizes, in bytes, and aligned
size_t _eden_size; // calculated eden free space in bytes
- size_t _promo_size; // calculated cms gen free space in bytes
+ size_t _promo_size; // calculated promoted free space in bytes
size_t _survivor_size; // calculated survivor size in bytes
@@ -122,7 +120,7 @@
// Variables for estimating the major and minor collection costs
// minor collection time vs. young gen size
LinearLeastSquareFit* _minor_collection_estimator;
- // major collection time vs. cms gen size
+ // major collection time vs. old gen size
LinearLeastSquareFit* _major_collection_estimator;
// These record the most recent collection times. They
@@ -326,9 +324,6 @@
double gc_pause_goal_sec,
uint gc_cost_ratio);
- bool is_gc_cms_adaptive_size_policy() {
- return kind() == _gc_cms_adaptive_size_policy;
- }
bool is_gc_ps_adaptive_size_policy() {
return kind() == _gc_ps_adaptive_size_policy;
}
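
The _minor_collection_estimator and _major_collection_estimator fields above fit collection time against generation size so the policy can predict the cost of a proposed size. HotSpot's LinearLeastSquareFit weights samples with exponentially decaying averages; the sketch below is only the plain unweighted fit, with hypothetical names, shown to make the estimator's role concrete.

#include <cstddef>

class LeastSquaresFit {
  size_t _n = 0;
  double _sx = 0.0, _sy = 0.0, _sxx = 0.0, _sxy = 0.0;
 public:
  // Record one (generation size, pause time) sample.
  void sample(double x, double y) {
    _n++; _sx += x; _sy += y; _sxx += x * x; _sxy += x * y;
  }
  double slope() const {
    double d = _n * _sxx - _sx * _sx;
    return (d != 0.0) ? (_n * _sxy - _sx * _sy) / d : 0.0;
  }
  double intercept() const {
    return (_n != 0) ? (_sy - slope() * _sx) / _n : 0.0;
  }
  // Predicted collection cost for a proposed generation size.
  double estimate(double x) const { return intercept() + slope() * x; }
};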
--- a/src/hotspot/share/gc/shared/blockOffsetTable.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -352,306 +352,6 @@
}
//////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace
-//////////////////////////////////////////////////////////////////////
-
-// The block [blk_start, blk_end) has been allocated;
-// adjust the block offset table to represent this information;
-// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
-// the somewhat more lightweight split_block() or
-// (when init_to_zero()) mark_block() wherever possible.
-// right-open interval: [blk_start, blk_end)
-void
-BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
- HeapWord* blk_end) {
- assert(blk_start != NULL && blk_end > blk_start,
- "phantom block");
- single_block(blk_start, blk_end);
- allocated(blk_start, blk_end);
-}
-
-// Adjust BOT to show that a previously whole block has been split
-// into two. We verify the BOT for the first part (prefix) and
-// update the BOT for the second part (suffix).
-// blk is the start of the block
-// blk_size is the size of the original block
-// left_blk_size is the size of the first part of the split
-void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
- size_t blk_size,
- size_t left_blk_size) {
- // Verify that the BOT shows [blk, blk + blk_size) to be one block.
- verify_single_block(blk, blk_size);
- // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
- // is one single block.
- assert(blk_size > 0, "Should be positive");
- assert(left_blk_size > 0, "Should be positive");
- assert(left_blk_size < blk_size, "Not a split");
-
- // Start addresses of prefix block and suffix block.
- HeapWord* pref_addr = blk;
- HeapWord* suff_addr = blk + left_blk_size;
- HeapWord* end_addr = blk + blk_size;
-
- // Indices for starts of prefix block and suffix block.
- size_t pref_index = _array->index_for(pref_addr);
- if (_array->address_for_index(pref_index) != pref_addr) {
- // pref_addr does not begin pref_index
- pref_index++;
- }
-
- size_t suff_index = _array->index_for(suff_addr);
- if (_array->address_for_index(suff_index) != suff_addr) {
- // suff_addr does not begin suff_index
- suff_index++;
- }
-
- // Definition: A block B, denoted [B_start, B_end) __starts__
- // a card C, denoted [C_start, C_end), where C_start and C_end
- // are the heap addresses that card C covers, iff
- // B_start <= C_start < B_end.
- //
- // We say that a card C "is started by" a block B, iff
- // B "starts" C.
- //
- // Note that the cardinality of the set of cards {C}
- // started by a block B can be 0, 1, or more.
- //
- // Below, pref_index and suff_index are, respectively, the
- // first (least) card indices that the prefix and suffix of
- // the split start; end_index is one more than the index of
- // the last (greatest) card that blk starts.
- size_t end_index = _array->index_for(end_addr - 1) + 1;
-
- // Calculate the # cards that the prefix and suffix affect.
- size_t num_pref_cards = suff_index - pref_index;
-
- size_t num_suff_cards = end_index - suff_index;
- // Change the cards that need changing
- if (num_suff_cards > 0) {
- HeapWord* boundary = _array->address_for_index(suff_index);
- // Set the offset card for suffix block
- _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
- // Change any further cards that need changing in the suffix
- if (num_pref_cards > 0) {
- if (num_pref_cards >= num_suff_cards) {
- // Unilaterally fix all of the suffix cards: closed card
- // index interval in args below.
- set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
- } else {
- // Unilaterally fix the first (num_pref_cards - 1) following
- // the "offset card" in the suffix block.
- const size_t right_most_fixed_index = suff_index + num_pref_cards - 1;
- set_remainder_to_point_to_start_incl(suff_index + 1,
- right_most_fixed_index, true /* reducing */);
- // Fix the appropriate cards in the remainder of the
- // suffix block -- these are the last num_pref_cards
- // cards in each power block of the "new" range plumbed
- // from suff_addr.
- bool more = true;
- uint i = 1;
- // Fix the first power block with back_by > num_pref_cards.
- while (more && (i < BOTConstants::N_powers)) {
- size_t back_by = BOTConstants::power_to_cards_back(i);
- size_t right_index = suff_index + back_by - 1;
- size_t left_index = right_index - num_pref_cards + 1;
- if (right_index >= end_index - 1) { // last iteration
- right_index = end_index - 1;
- more = false;
- }
- if (left_index <= right_most_fixed_index) {
- left_index = right_most_fixed_index + 1;
- }
- if (back_by > num_pref_cards) {
- // Fill in the remainder of this "power block", if it
- // is non-null.
- if (left_index <= right_index) {
- _array->set_offset_array(left_index, right_index,
- BOTConstants::N_words + i - 1, true /* reducing */);
- } else {
- more = false; // we are done
- assert((end_index - 1) == right_index, "Must be at the end.");
- }
- i++;
- break;
- }
- i++;
- }
- // Fix the rest of the power blocks.
- while (more && (i < BOTConstants::N_powers)) {
- size_t back_by = BOTConstants::power_to_cards_back(i);
- size_t right_index = suff_index + back_by - 1;
- size_t left_index = right_index - num_pref_cards + 1;
- if (right_index >= end_index - 1) { // last iteration
- right_index = end_index - 1;
- if (left_index > right_index) {
- break;
- }
- more = false;
- }
- assert(left_index <= right_index, "Error");
- _array->set_offset_array(left_index, right_index, BOTConstants::N_words + i - 1, true /* reducing */);
- i++;
- }
- }
- } // else no more cards to fix in suffix
- } // else nothing needs to be done
- // Verify that we did the right thing
- verify_single_block(pref_addr, left_blk_size);
- verify_single_block(suff_addr, blk_size - left_blk_size);
-}
-
-
-// Mark the BOT such that if [blk_start, blk_end) straddles a card
-// boundary, the card following the first such boundary is marked
-// with the appropriate offset.
-// NOTE: this method does _not_ adjust _unallocated_block or
-// any cards subsequent to the first one.
-void
-BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
- HeapWord* blk_end, bool reducing) {
- do_block_internal(blk_start, blk_end, Action_mark, reducing);
-}
-
-HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
- const void* addr) const {
- assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
- assert(_bottom <= addr && addr < _end,
- "addr must be covered by this Array");
- // Must read this exactly once because it can be modified by parallel
- // allocation.
- HeapWord* ub = _unallocated_block;
- if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
- assert(ub < _end, "tautology (see above)");
- return ub;
- }
-
- // Otherwise, find the block start using the table.
- size_t index = _array->index_for(addr);
- HeapWord* q = _array->address_for_index(index);
-
- uint offset = _array->offset_array(index); // Extend u_char to uint.
- while (offset >= BOTConstants::N_words) {
- // The excess of the offset from N_words indicates a power of Base
- // to go back by.
- size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
- q -= (BOTConstants::N_words * n_cards_back);
- assert(q >= _sp->bottom(),
- "q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
- p2i(q), p2i(_sp->bottom()));
- assert(q < _sp->end(),
- "q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
- p2i(q), p2i(_sp->end()));
- index -= n_cards_back;
- offset = _array->offset_array(index);
- }
- assert(offset < BOTConstants::N_words, "offset too large");
- index--;
- q -= offset;
- assert(q >= _sp->bottom(),
- "q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
- p2i(q), p2i(_sp->bottom()));
- assert(q < _sp->end(),
- "q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
- p2i(q), p2i(_sp->end()));
- HeapWord* n = q;
-
- while (n <= addr) {
- debug_only(HeapWord* last = q); // for debugging
- q = n;
- n += _sp->block_size(n);
- assert(n > q,
- "Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT ","
- " while querying blk_start(" PTR_FORMAT ")"
- " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(n), p2i(last), p2i(addr), p2i(_sp->bottom()), p2i(_sp->end()));
- }
- assert(q <= addr,
- "wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
- p2i(q), p2i(addr));
- assert(addr <= n,
- "wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
- p2i(addr), p2i(n));
- return q;
-}
-
-HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
- const void* addr) const {
- assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-
- assert(_bottom <= addr && addr < _end,
- "addr must be covered by this Array");
- // Must read this exactly once because it can be modified by parallel
- // allocation.
- HeapWord* ub = _unallocated_block;
- if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
- assert(ub < _end, "tautology (see above)");
- return ub;
- }
-
- // Otherwise, find the block start using the table, but taking
- // care (cf block_start_unsafe() above) not to parse any objects/blocks
- // on the cards themselves.
- size_t index = _array->index_for(addr);
- assert(_array->address_for_index(index) == addr,
- "arg should be start of card");
-
- HeapWord* q = (HeapWord*)addr;
- uint offset;
- do {
- offset = _array->offset_array(index);
- if (offset < BOTConstants::N_words) {
- q -= offset;
- } else {
- size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
- q -= (n_cards_back * BOTConstants::N_words);
- index -= n_cards_back;
- }
- } while (offset >= BOTConstants::N_words);
- assert(q <= addr, "block start should be to left of arg");
- return q;
-}
-
-#ifndef PRODUCT
-// Verification & debugging - ensure that the offset table reflects the fact
-// that the block [blk_start, blk_end) or [blk, blk + size) is a
-// single block of storage. NOTE: can't const this because of
-// call to non-const do_block_internal() below.
-void BlockOffsetArrayNonContigSpace::verify_single_block(
- HeapWord* blk_start, HeapWord* blk_end) {
- if (VerifyBlockOffsetArray) {
- do_block_internal(blk_start, blk_end, Action_check);
- }
-}
-
-void BlockOffsetArrayNonContigSpace::verify_single_block(
- HeapWord* blk, size_t size) {
- verify_single_block(blk, blk + size);
-}
-
-// Verify that the given block is before _unallocated_block
-void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
- HeapWord* blk_start, HeapWord* blk_end) const {
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(blk_start < blk_end, "Block inconsistency?");
- assert(blk_end <= _unallocated_block, "_unallocated_block problem");
- }
-}
-
-void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
- HeapWord* blk, size_t size) const {
- verify_not_unallocated(blk, blk + size);
-}
-#endif // PRODUCT
-
-size_t BlockOffsetArrayNonContigSpace::last_active_index() const {
- if (_unallocated_block == _bottom) {
- return 0;
- } else {
- return _array->index_for(_unallocated_block - 1);
- }
-}
-
-//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////
--- a/src/hotspot/share/gc/shared/blockOffsetTable.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -41,7 +41,6 @@
//
// BlockOffsetTable (abstract)
// - BlockOffsetArray (abstract)
-// - BlockOffsetArrayNonContigSpace
// - BlockOffsetArrayContigSpace
//
@@ -155,12 +154,6 @@
void* start_ptr = &_offset_array[start];
// If collector is concurrent, special handling may be needed.
G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
-#if INCLUDE_CMSGC
- if (UseConcMarkSweepGC) {
- memset_with_concurrent_readers(start_ptr, offset, num_cards);
- return;
- }
-#endif // INCLUDE_CMSGC
memset(start_ptr, offset, num_cards);
}
@@ -389,111 +382,6 @@
////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
-// that its underlying space is a NonContiguousSpace, so that some
-// specialized interfaces can be made available for spaces that
-// manipulate the table.
-////////////////////////////////////////////////////////////////////////////
-class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
- friend class VMStructs;
- private:
- // The portion [_unallocated_block, _sp.end()) of the space that
- // is a single block known not to contain any objects.
- // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
- HeapWord* _unallocated_block;
-
- public:
- BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
- BlockOffsetArray(array, mr, false),
- _unallocated_block(_bottom) { }
-
- // Accessor
- HeapWord* unallocated_block() const {
- assert(BlockOffsetArrayUseUnallocatedBlock,
- "_unallocated_block is not being maintained");
- return _unallocated_block;
- }
-
- void set_unallocated_block(HeapWord* block) {
- assert(BlockOffsetArrayUseUnallocatedBlock,
- "_unallocated_block is not being maintained");
- assert(block >= _bottom && block <= _end, "out of range");
- _unallocated_block = block;
- }
-
- // These methods expect to be called with [blk_start, blk_end)
- // representing a block of memory in the heap.
- void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
- void alloc_block(HeapWord* blk, size_t size) {
- alloc_block(blk, blk + size);
- }
-
- // The following methods are useful and optimized for a
- // non-contiguous space.
-
- // Given a block [blk_start, blk_start + full_blk_size), and
- // a left_blk_size < full_blk_size, adjust the BOT to show two
- // blocks [blk_start, blk_start + left_blk_size) and
- // [blk_start + left_blk_size, blk_start + full_blk_size).
- // It is assumed (and verified in the non-product VM) that the
- // BOT was correct for the original block.
- void split_block(HeapWord* blk_start, size_t full_blk_size,
- size_t left_blk_size);
-
- // Adjust BOT to show that it has a block in the range
- // [blk_start, blk_start + size). Only the first card
- // of BOT is touched. It is assumed (and verified in the
- // non-product VM) that the remaining cards of the block
- // are correct.
- void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
- void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
- mark_block(blk, blk + size, reducing);
- }
-
- // Adjust _unallocated_block to indicate that a particular
- // block has been newly allocated or freed. It is assumed (and
- // verified in the non-product VM) that the BOT is correct for
- // the given block.
- void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
- // Verify that the BOT shows [blk, blk + blk_size) to be one block.
- verify_single_block(blk_start, blk_end);
- if (BlockOffsetArrayUseUnallocatedBlock) {
- _unallocated_block = MAX2(_unallocated_block, blk_end);
- }
- }
-
- void allocated(HeapWord* blk, size_t size, bool reducing = false) {
- allocated(blk, blk + size, reducing);
- }
-
- void freed(HeapWord* blk_start, HeapWord* blk_end);
- void freed(HeapWord* blk, size_t size);
-
- HeapWord* block_start_unsafe(const void* addr) const;
-
- // Requires "addr" to be the start of a card and returns the
- // start of the block that contains the given address.
- HeapWord* block_start_careful(const void* addr) const;
-
- // Verification & debugging: ensure that the offset table reflects
- // the fact that the block [blk_start, blk_end) or [blk, blk + size)
- // is a single block of storage. NOTE: can't const this because of
- // call to non-const do_block_internal() below.
- void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
- PRODUCT_RETURN;
- void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;
-
- // Verify that the given block is before _unallocated_block
- void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
- const PRODUCT_RETURN;
- void verify_not_unallocated(HeapWord* blk, size_t size)
- const PRODUCT_RETURN;
-
- // Debugging support
- virtual size_t last_active_index() const;
-};
-
-////////////////////////////////////////////////////////////////////////////
-// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
--- a/src/hotspot/share/gc/shared/blockOffsetTable.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.inline.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -70,30 +70,4 @@
ParGCRareEvent_lock->owned_by_self()), "Crack");
}
-//////////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace inlines
-//////////////////////////////////////////////////////////////////////////
-inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk,
- size_t size) {
- freed(blk, blk + size);
-}
-
-inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start,
- HeapWord* blk_end) {
- // Verify that the BOT shows [blk_start, blk_end) to be one block.
- verify_single_block(blk_start, blk_end);
- // adjust _unallocated_block upward or downward
- // as appropriate
- if (BlockOffsetArrayUseUnallocatedBlock) {
- assert(_unallocated_block <= _end,
- "Inconsistent value for _unallocated_block");
- if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
- // CMS-specific note: a block abutting _unallocated_block to
- // its left is being freed, a new block is being added or
- // we are resetting following a compaction
- _unallocated_block = blk_start;
- }
- }
-}
-
#endif // SHARE_GC_SHARED_BLOCKOFFSETTABLE_INLINE_HPP
--- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -45,7 +45,7 @@
}
}
-// vanilla/CMS post barrier
+// vanilla post barrier
// Insert a write-barrier store. This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -96,7 +96,7 @@
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
-// GenCollectedHeap(ParNew,DefNew,Tenured) and
+// GenCollectedHeap(DefNew,Tenured) and
// ParallelScavengeHeap(ParallelGC, ParallelOldGC)
// need the card-mark if and only if the region is
// in the old gen, and do not care if the card-mark
@@ -105,17 +105,7 @@
// scavenge. For all these cases, we can do a card mark
// at the point at which we do a slow path allocation
// in the old gen, i.e. in this call.
-// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
-// in addition that the card-mark for an old gen allocated
-// object strictly follow any associated initializing stores.
-// In these cases, the memRegion remembered below is
-// used to card-mark the entire region either just before the next
-// slow-path allocation by this thread or just before the next scavenge or
-// CMS-associated safepoint, whichever of these events happens first.
-// (The implicit assumption is that the object has been fully
-// initialized by this point, a fact that we assert when doing the
-// card-mark.)
-// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
+// (b) G1CollectedHeap(G1) uses two kinds of write barriers. When a
// G1 concurrent marking is in progress an SATB (pre-write-)barrier
// is used to remember the pre-value of any store. Initializing
// stores will not need this barrier, so we need not worry about
@@ -124,11 +114,8 @@
// which simply enqueues a (sequence of) dirty cards which may
// optionally be refined by the concurrent update threads. Note
// that this barrier need only be applied to a non-young write,
-// but, like in CMS, because of the presence of concurrent refinement
-// (much like CMS' precleaning), must strictly follow the oop-store.
-// Thus, using the same protocol for maintaining the intended
-// invariants turns out, serendepitously, to be the same for both
-// G1 and CMS.
+// but, because of the presence of concurrent refinement,
+// must strictly follow the oop-store.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
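
The retained comment above still assumes a card-marking post-write barrier for the remaining generational collectors. As a rough, self-contained illustration (not HotSpot's CardTableBarrierSet; the card size and mark values are assumptions), the sketch below shows why a reference store into the old generation dirties the card covering the updated field:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Minimal model of a card-table post-write barrier. The 512-byte card size
  // and the dirty/clean byte values are illustrative assumptions.
  static const int kCardShift = 9;
  static const uint8_t kDirty = 0, kClean = 0xff;

  struct CardTable {
    uintptr_t heap_base;
    std::vector<uint8_t> cards;                 // one byte per card

    CardTable(uintptr_t base, size_t heap_bytes)
      : heap_base(base), cards((heap_bytes >> kCardShift) + 1, kClean) {}

    // After a reference store into the old generation, dirty the card that
    // covers the updated field so the next young collection rescans it.
    void post_barrier(const void* field) {
      cards[((uintptr_t)field - heap_base) >> kCardShift] = kDirty;
    }
  };

  int main() {
    std::vector<uintptr_t> old_gen(1024, 0);    // stand-in for old-gen storage
    uintptr_t base = (uintptr_t)old_gen.data();
    CardTable ct(base, old_gen.size() * sizeof(uintptr_t));

    old_gen[300] = 0xdeadbeef;                  // pretend oop store
    ct.post_barrier(&old_gen[300]);             // barrier follows the store

    std::printf("card %zu dirtied\n",
                (size_t)(((uintptr_t)&old_gen[300] - base) >> kCardShift));
    return 0;
  }

G1 additionally uses an SATB pre-barrier and enqueues dirty cards for concurrent refinement, which is why, as the updated comment notes, its card mark must strictly follow the oop store.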
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -335,9 +335,9 @@
#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
if (CheckMemoryInitialization && ZapUnusedHeapArea) {
- for (size_t slot = 0; slot < size; slot += 1) {
- assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
- "Found non badHeapWordValue in pre-allocation check");
+      // We are asked to check a size in HeapWords, but the memory is mangled and checked in juint (32-bit) words.
+ for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
+ assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
}
}
}
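
The new loop checks the zap pattern one 32-bit word at a time even though size is given in HeapWords, mirroring how the memory was mangled. A minimal standalone sketch of that scan (the zap value and the HeapWord typedef are illustrative assumptions, not the HotSpot definitions):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  typedef uintptr_t HeapWord;                          // 8 bytes on a 64-bit VM
  static const uint32_t kBadHeapWordVal = 0xBAADBABE;  // assumed zap pattern

  // Verify that 'size_in_heap_words' HeapWords starting at 'addr' still hold
  // the zap pattern, stepping in 32-bit (juint-sized) words as the patched
  // loop above does.
  static void check_zapped(HeapWord* addr, size_t size_in_heap_words) {
    uint32_t* p   = reinterpret_cast<uint32_t*>(addr);
    uint32_t* end = reinterpret_cast<uint32_t*>(addr + size_in_heap_words);
    for (; p < end; ++p) {
      assert(*p == kBadHeapWordVal && "found non-zapped word");
    }
  }

  int main() {
    HeapWord buf[16];
    uint32_t* p = reinterpret_cast<uint32_t*>(buf);
    for (size_t i = 0; i < sizeof(buf) / sizeof(uint32_t); ++i) {
      p[i] = kBadHeapWordVal;                          // mangle in 32-bit words
    }
    check_zapped(buf, 16);                             // every 32-bit slot matches
    return 0;
  }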
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -88,7 +88,6 @@
// CollectedHeap
// GenCollectedHeap
// SerialHeap
-// CMSHeap
// G1CollectedHeap
// ParallelScavengeHeap
// ShenandoahHeap
@@ -172,7 +171,6 @@
None,
Serial,
Parallel,
- CMS,
G1,
Epsilon,
Z,
--- a/src/hotspot/share/gc/shared/gcArguments.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -101,7 +101,7 @@
"AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
status = false;
}
- if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
+ if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseEpsilonGC || UseZGC)) {
jio_fprintf(defaultStream::error_stream(),
"AllocateOldGenAt is not supported for selected GC.\n");
status = false;
--- a/src/hotspot/share/gc/shared/gcCause.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcCause.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,18 +78,6 @@
case _metadata_GC_clear_soft_refs:
return "Metadata GC Clear Soft References";
- case _cms_generation_full:
- return "CMS Generation Full";
-
- case _cms_initial_mark:
- return "CMS Initial Mark";
-
- case _cms_final_remark:
- return "CMS Final Remark";
-
- case _cms_concurrent_mark:
- return "CMS Concurrent Mark";
-
case _old_generation_expanded_on_last_scavenge:
return "Old Generation Expanded On Last Scavenge";
--- a/src/hotspot/share/gc/shared/gcCause.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcCause.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -65,11 +65,6 @@
_metadata_GC_threshold,
_metadata_GC_clear_soft_refs,
- _cms_generation_full,
- _cms_initial_mark,
- _cms_final_remark,
- _cms_concurrent_mark,
-
_old_generation_expanded_on_last_scavenge,
_old_generation_too_full_to_scavenge,
_adaptive_size_policy,
@@ -114,13 +109,12 @@
cause != GCCause::_old_generation_expanded_on_last_scavenge,
"This GCCause may be correct but is not expected yet: %s",
to_string(cause));
- // _tenured_generation_full or _cms_generation_full for full tenured generations
+ // _tenured_generation_full for full tenured generations
// _adaptive_size_policy for a full collection after a young GC
// _allocation_failure is the generic cause a collection which could result
// in the collection of the tenured generation if there is not enough space
// in the tenured generation to support a young GC.
return (cause == GCCause::_tenured_generation_full ||
- cause == GCCause::_cms_generation_full ||
cause == GCCause::_adaptive_size_policy ||
cause == GCCause::_allocation_failure);
}
--- a/src/hotspot/share/gc/shared/gcConfig.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,6 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/cmsArguments.hpp"
-#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilonArguments.hpp"
#endif
@@ -60,7 +57,6 @@
_flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
};
- CMSGC_ONLY(static CMSArguments cmsArguments;)
EPSILONGC_ONLY(static EpsilonArguments epsilonArguments;)
G1GC_ONLY(static G1Arguments g1Arguments;)
PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
@@ -71,7 +67,6 @@
// Table of supported GCs, for translating between command
// line flag, CollectedHeap::Name and GCArguments instance.
static const SupportedGC SupportedGCs[] = {
- CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS, cmsArguments, "concurrent mark sweep gc"))
EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC, CollectedHeap::Epsilon, epsilonArguments, "epsilon gc"))
G1GC_ONLY_ARG(SupportedGC(UseG1GC, CollectedHeap::G1, g1Arguments, "g1 gc"))
PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC, CollectedHeap::Parallel, parallelArguments, "parallel gc"))
@@ -95,7 +90,6 @@
bool GCConfig::_gc_selected_ergonomically = false;
void GCConfig::fail_if_unsupported_gc_is_selected() {
- NOT_CMSGC( FAIL_IF_SELECTED(UseConcMarkSweepGC, true));
NOT_EPSILONGC( FAIL_IF_SELECTED(UseEpsilonGC, true));
NOT_G1GC( FAIL_IF_SELECTED(UseG1GC, true));
NOT_PARALLELGC( FAIL_IF_SELECTED(UseParallelGC, true));
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,10 +41,6 @@
return ParallelScavenge;
}
- if (UseConcMarkSweepGC) {
- return ParNew;
- }
-
if (UseZGC || UseShenandoahGC) {
return NA;
}
@@ -57,10 +53,6 @@
return G1Old;
}
- if (UseConcMarkSweepGC) {
- return ConcurrentMarkSweep;
- }
-
if (UseParallelOldGC) {
return ParallelOld;
}
--- a/src/hotspot/share/gc/shared/gcName.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcName.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -33,9 +33,7 @@
PSMarkSweep,
ParallelScavenge,
DefNew,
- ParNew,
G1New,
- ConcurrentMarkSweep,
G1Old,
G1Full,
Z,
@@ -53,9 +51,7 @@
case PSMarkSweep: return "PSMarkSweep";
case ParallelScavenge: return "ParallelScavenge";
case DefNew: return "DefNew";
- case ParNew: return "ParNew";
case G1New: return "G1New";
- case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
case G1Old: return "G1Old";
case G1Full: return "G1Full";
case Z: return "Z";
--- a/src/hotspot/share/gc/shared/gcStats.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcStats.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -36,26 +36,7 @@
public:
GCStats();
- enum Name {
- GCStatsKind,
- CMSGCStatsKind
- };
-
- virtual Name kind() {
- return GCStatsKind;
- }
-
AdaptivePaddedNoZeroDevAverage* avg_promoted() const { return _avg_promoted; }
-
- // Average in bytes
- size_t average_promoted_in_bytes() const {
- return (size_t)_avg_promoted->average();
- }
-
- // Padded average in bytes
- size_t padded_average_promoted_in_bytes() const {
- return (size_t)_avg_promoted->padded_average();
- }
};
#endif // SHARE_GC_SHARED_GCSTATS_HPP
--- a/src/hotspot/share/gc/shared/gcTrace.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -207,14 +207,4 @@
DefNewTracer() : YoungGCTracer(DefNew) {}
};
-class ParNewTracer : public YoungGCTracer {
- public:
- ParNewTracer() : YoungGCTracer(ParNew) {}
-};
-
-class CMSTracer : public OldGCTracer {
- public:
- CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {}
-};
-
#endif // SHARE_GC_SHARED_GCTRACE_HPP
--- a/src/hotspot/share/gc/shared/gcTraceSend.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcTraceSend.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -173,7 +173,7 @@
}
}
-// Common to CMS and G1
+// G1
void OldGCTracer::send_concurrent_mode_failure_event() {
EventConcurrentModeFailure e;
if (e.should_commit()) {
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -192,13 +192,6 @@
// Returns true iff concurrent GCs unloads metadata.
bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_CMSGC
- if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
- MetaspaceGC::set_should_concurrent_collect(true);
- return true;
- }
-#endif
-
#if INCLUDE_G1GC
if (UseG1GC && ClassUnloadingWithConcurrentMark) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -238,13 +231,13 @@
}
if (initiate_concurrent_GC()) {
- // For CMS and G1 expand since the collection is going to be concurrent.
+ // For G1 expand since the collection is going to be concurrent.
_result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
if (_result != NULL) {
return;
}
- log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
+ log_debug(gc)("G1 full GC for Metaspace");
}
// Don't clear the soft refs yet.
--- a/src/hotspot/share/gc/shared/gc_globals.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -27,9 +27,6 @@
#include "runtime/globals_shared.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/cms_globals.hpp"
-#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/epsilon_globals.hpp"
#endif
@@ -64,22 +61,6 @@
constraint, \
writeable) \
\
- CMSGC_ONLY(GC_CMS_FLAGS( \
- develop, \
- develop_pd, \
- product, \
- product_pd, \
- diagnostic, \
- diagnostic_pd, \
- experimental, \
- notproduct, \
- manageable, \
- product_rw, \
- lp64_product, \
- range, \
- constraint, \
- writeable)) \
- \
EPSILONGC_ONLY(GC_EPSILON_FLAGS( \
develop, \
develop_pd, \
@@ -178,9 +159,6 @@
\
/* gc */ \
\
- product(bool, UseConcMarkSweepGC, false, \
- "Use Concurrent Mark-Sweep GC in the old generation") \
- \
product(bool, UseSerialGC, false, \
"Use the Serial garbage collector") \
\
@@ -286,14 +264,6 @@
"bigger than this") \
range(1, max_jint/3) \
\
- product(uintx, OldPLABWeight, 50, \
- "Percentage (0-100) used to weight the current sample when " \
- "computing exponentially decaying average for resizing " \
- "OldPLABSize") \
- range(0, 100) \
- \
- product(bool, ResizeOldPLAB, true, \
- "Dynamically resize (old gen) promotion LAB's") \
\
product(bool, AlwaysPreTouch, false, \
"Force all freshly committed pages to be pre-touched") \
@@ -311,13 +281,6 @@
"Size of marking stack") \
constraint(MarkStackSizeConstraintFunc,AfterErgo) \
\
- develop(bool, VerifyBlockOffsetArray, false, \
- "Do (expensive) block offset array verification") \
- \
- diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false, \
- "Maintain _unallocated_block in BlockOffsetArray " \
- "(currently applicable only to CMS collector)") \
- \
product(intx, RefDiscoveryPolicy, 0, \
"Select type of reference discovery policy: " \
"reference-based(0) or referent-based(1)") \
@@ -364,9 +327,8 @@
"collection") \
\
develop(uintx, PromotionFailureALotCount, 1000, \
- "Number of promotion failures occurring at PLAB " \
- "refill attempts (ParNew) or promotion attempts " \
- "(other young collectors)") \
+ "Number of promotion failures occurring at PLAB promotion " \
+ "attempts at young collectors") \
\
develop(uintx, PromotionFailureALotInterval, 5, \
"Total collections between promotion failures a lot") \
@@ -759,8 +721,7 @@
constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit) \
\
product(size_t, OldPLABSize, 1024, \
- "Size of old gen promotion LAB's (in HeapWords), or Number " \
- "of blocks to attempt to claim when refilling CMS LAB's") \
+ "Size of old gen promotion LAB's (in HeapWords)") \
constraint(OldPLABSizeConstraintFunc,AfterMemoryInit) \
\
product(uintx, TLABAllocationWeight, 35, \
@@ -827,7 +788,6 @@
"Percentage (0-100) of the old gen allowed as dead wood. " \
"Serial mark sweep treats this as both the minimum and maximum " \
"value. " \
- "CMS uses this value only if it falls back to mark sweep. " \
"Par compact uses a variable scale based on the density of the " \
"generation and treats this as the maximum value when the heap " \
"is either completely full or completely empty. Par compact " \
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -418,35 +418,6 @@
}
}
-#ifndef PRODUCT
-// Override of memory state checking method in CollectedHeap:
-// Some collectors (CMS for example) can't have badHeapWordVal written
-// in the first two words of an object. (For instance , in the case of
-// CMS these words hold state used to synchronize between certain
-// (concurrent) GC steps and direct allocating mutators.)
-// The skip_header_HeapWords() method below, allows us to skip
-// over the requisite number of HeapWord's. Note that (for
-// generational collectors) this means that those many words are
-// skipped in each object, irrespective of the generation in which
-// that object lives. The resultant loss of precision seems to be
-// harmless and the pain of avoiding that imprecision appears somewhat
-// higher than we are prepared to pay for such rudimentary debugging
-// support.
-void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
- size_t size) {
- if (CheckMemoryInitialization && ZapUnusedHeapArea) {
- // We are asked to check a size in HeapWords,
- // but the memory is mangled in juint words.
- juint* start = (juint*) (addr + skip_header_HeapWords());
- juint* end = (juint*) (addr + size);
- for (juint* slot = start; slot < end; slot += 1) {
- assert(*slot == badHeapWordVal,
- "Found non badHeapWordValue in pre-allocation check");
- }
- }
-}
-#endif
-
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
bool is_tlab,
bool first_only) {
@@ -1229,8 +1200,7 @@
GenCollectedHeap* GenCollectedHeap::heap() {
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
- assert(heap->kind() == CollectedHeap::Serial ||
- heap->kind() == CollectedHeap::CMS, "Invalid name");
+ assert(heap->kind() == CollectedHeap::Serial, "Invalid name");
return (GenCollectedHeap*) heap;
}
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -45,8 +45,6 @@
friend class Generation;
friend class DefNewGeneration;
friend class TenuredGeneration;
- friend class ConcurrentMarkSweepGeneration;
- friend class CMSCollector;
friend class GenMarkSweep;
friend class VM_GenCollectForAllocation;
friend class VM_GenCollectFull;
@@ -386,11 +384,6 @@
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots);
- // Accessor for memory state verification support
- NOT_PRODUCT(
- virtual size_t skip_header_HeapWords() { return 0; }
- )
-
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
@@ -465,10 +458,6 @@
bool is_tlab,
bool* gc_overhead_limit_was_exceeded);
- // Override
- void check_for_non_bad_heap_word_value(HeapWord* addr,
- size_t size) PRODUCT_RETURN;
-
#if INCLUDE_SERIALGC
// For use by mark-sweep. As implemented, mark-sweep-compact is global
// in an essential way: compaction is performed across generations, by
--- a/src/hotspot/share/gc/shared/generation.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/generation.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,6 @@
return gch->old_gen_spec()->init_size();
}
-// This is for CMS. It returns stable monotonic used space size.
-// Remove this when CMS is removed.
-size_t Generation::used_stable() const {
- return used();
-}
-
size_t Generation::max_capacity() const {
return reserved().byte_size();
}
--- a/src/hotspot/share/gc/shared/generation.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/generation.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -41,20 +41,13 @@
//
// Generation - abstract base class
// - DefNewGeneration - allocation area (copy collected)
-// - ParNewGeneration - a DefNewGeneration that is collected by
-// several threads
// - CardGeneration - abstract class adding offset array behavior
// - TenuredGeneration - tenured (old object) space (markSweepCompact)
-// - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
-// (Detlefs-Printezis refinement of
-// Boehm-Demers-Schenker)
//
-// The system configurations currently allowed are:
+// The system configuration currently allowed is:
//
// DefNewGeneration + TenuredGeneration
//
-// ParNewGeneration + ConcurrentMarkSweepGeneration
-//
class DefNewGeneration;
class GCMemoryManager;
@@ -122,9 +115,7 @@
// The set of possible generation kinds.
enum Name {
DefNew,
- ParNew,
MarkSweepCompact,
- ConcurrentMarkSweep,
Other
};
@@ -156,7 +147,6 @@
virtual size_t capacity() const = 0; // The maximum number of object bytes the
// generation can currently hold.
virtual size_t used() const = 0; // The number of used bytes in the gen.
- virtual size_t used_stable() const; // The number of used bytes for memory monitoring tools.
virtual size_t free() const = 0; // The number of free bytes in the gen.
// Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
--- a/src/hotspot/share/gc/shared/generationSpec.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/generationSpec.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,6 @@
#include "memory/filemap.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#endif
#if INCLUDE_SERIALGC
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/tenuredGeneration.hpp"
@@ -48,15 +44,6 @@
return new TenuredGeneration(rs, _init_size, _min_size, _max_size, remset);
#endif
-#if INCLUDE_CMSGC
- case Generation::ParNew:
- return new ParNewGeneration(rs, _init_size, _min_size, _max_size);
-
- case Generation::ConcurrentMarkSweep: {
- return new ConcurrentMarkSweepGeneration(rs, _init_size, _min_size, _max_size, remset);
- }
-#endif // INCLUDE_CMSGC
-
default:
guarantee(false, "unrecognized GenerationName");
return NULL;
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -36,9 +36,6 @@
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/jvmFlagConstraintsCMS.hpp"
-#endif
#if INCLUDE_G1GC
#include "gc/g1/jvmFlagConstraintsG1.hpp"
#endif
@@ -65,22 +62,14 @@
}
#endif
-#if INCLUDE_CMSGC
- status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
- if (status != JVMFlag::SUCCESS) {
- return status;
- }
-#endif
-
return status;
}
// As ConcGCThreads should be smaller than ParallelGCThreads,
// we need constraint function.
JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
- // CMS and G1 GCs use ConcGCThreads.
- if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
- GCConfig::is_gc_selected(CollectedHeap::G1)) && (value > ParallelGCThreads)) {
+  // G1 GC uses ConcGCThreads.
+ if (GCConfig::is_gc_selected(CollectedHeap::G1) && (value > ParallelGCThreads)) {
JVMFlag::printError(verbose,
"ConcGCThreads (" UINT32_FORMAT ") must be "
"less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
@@ -92,9 +81,8 @@
}
static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
- if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
- GCConfig::is_gc_selected(CollectedHeap::G1) ||
- GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value < PLAB::min_size())) {
+ if ((GCConfig::is_gc_selected(CollectedHeap::G1) || GCConfig::is_gc_selected(CollectedHeap::Parallel)) &&
+ (value < PLAB::min_size())) {
JVMFlag::printError(verbose,
"%s (" SIZE_FORMAT ") must be "
"greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
@@ -106,8 +94,7 @@
}
JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
- if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
- GCConfig::is_gc_selected(CollectedHeap::G1) ||
+ if ((GCConfig::is_gc_selected(CollectedHeap::G1) ||
GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value > PLAB::max_size())) {
JVMFlag::printError(verbose,
"%s (" SIZE_FORMAT ") must be "
@@ -135,11 +122,6 @@
JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
JVMFlag::Error status = JVMFlag::SUCCESS;
-#if INCLUDE_CMSGC
- if (UseConcMarkSweepGC) {
- return OldPLABSizeConstraintFuncCMS(value, verbose);
- } else
-#endif
{
status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
}
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -28,9 +28,6 @@
#include "runtime/flags/jvmFlag.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/jvmFlagConstraintsCMS.hpp"
-#endif
#if INCLUDE_G1GC
#include "gc/g1/jvmFlagConstraintsG1.hpp"
#endif
--- a/src/hotspot/share/gc/shared/preservedMarks.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/preservedMarks.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -128,7 +128,7 @@
// Iterate over all stacks, restore all preserved marks, and reclaim
// the memory taken up by the stack segments.
- // Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, CMS, G1),
+ // Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, G1),
// PSRestorePreservedMarksTaskExecutor (PS).
inline void restore(RestorePreservedMarksTaskExecutor* executor);
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -333,7 +333,7 @@
log_develop_trace(gc, ref)("Enqueue %s reference (" INTPTR_FORMAT ": %s)",
reason, p2i(iter.obj()), iter.obj()->klass()->internal_name());
}
- assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference");
+ assert(oopDesc::is_oop(iter.obj()), "Adding a bad reference");
}
size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList& refs_list,
@@ -1154,7 +1154,7 @@
// Check assumption that an object is not potentially
// discovered twice except by concurrent collectors that potentially
// trace the same Reference object twice.
- assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC,
+ assert(UseG1GC || UseShenandoahGC,
"Only possible with a concurrent marking collector");
return true;
}
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -217,8 +217,7 @@
// For collectors that do not keep GC liveness information
// in the object header, this field holds a closure that
// helps the reference processor determine the reachability
- // of an oop. It is currently initialized to NULL for all
- // collectors except for CMS and G1.
+ // of an oop.
BoolObjectClosure* _is_alive_non_header;
// Soft ref clearing policies
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -35,9 +35,6 @@
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/space.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/vmStructs_cms.hpp"
-#endif
#if INCLUDE_EPSILONGC
#include "gc/epsilon/vmStructs_epsilon.hpp"
#endif
@@ -62,9 +59,6 @@
volatile_nonstatic_field, \
static_field, \
unchecked_nonstatic_field) \
- CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field, \
- volatile_nonstatic_field, \
- static_field)) \
EPSILONGC_ONLY(VM_STRUCTS_EPSILONGC(nonstatic_field, \
volatile_nonstatic_field, \
static_field)) \
@@ -107,8 +101,6 @@
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold, HeapWord*) \
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index, size_t) \
\
- nonstatic_field(BlockOffsetArrayNonContigSpace, _unallocated_block, HeapWord*) \
- \
nonstatic_field(CardGeneration, _rs, CardTableRS*) \
nonstatic_field(CardGeneration, _bts, BlockOffsetSharedArray*) \
nonstatic_field(CardGeneration, _shrink_factor, size_t) \
@@ -168,9 +160,6 @@
#define VM_TYPES_GC(declare_type, \
declare_toplevel_type, \
declare_integer_type) \
- CMSGC_ONLY(VM_TYPES_CMSGC(declare_type, \
- declare_toplevel_type, \
- declare_integer_type)) \
EPSILONGC_ONLY(VM_TYPES_EPSILONGC(declare_type, \
declare_toplevel_type, \
declare_integer_type)) \
@@ -213,7 +202,6 @@
declare_toplevel_type(BlockOffsetTable) \
declare_type(BlockOffsetArray, BlockOffsetTable) \
declare_type(BlockOffsetArrayContigSpace, BlockOffsetArray) \
- declare_type(BlockOffsetArrayNonContigSpace, BlockOffsetArray) \
\
/* Miscellaneous other GC types */ \
\
@@ -252,8 +240,6 @@
#define VM_INT_CONSTANTS_GC(declare_constant, \
declare_constant_with_value) \
- CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant, \
- declare_constant_with_value)) \
EPSILONGC_ONLY(VM_INT_CONSTANTS_EPSILONGC(declare_constant, \
declare_constant_with_value)) \
G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant, \
@@ -297,7 +283,6 @@
\
declare_constant(CollectedHeap::Serial) \
declare_constant(CollectedHeap::Parallel) \
- declare_constant(CollectedHeap::CMS) \
declare_constant(CollectedHeap::G1) \
\
/* constants from Generation::Name enum */ \
--- a/src/hotspot/share/gc/shared/workerPolicy.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/gc/shared/workerPolicy.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -59,14 +59,11 @@
// Return number of GC threads to use in the next GC.
// This is called sparingly so as not to change the
// number of GC workers gratuitously.
- // For ParNew collections
// For PS scavenge and ParOld collections
// For G1 evacuation pauses (subject to update)
// For G1 Full GCs (subject to update)
// Other collection phases inherit the number of
- // GC workers from the calls above. For example,
- // a CMS parallel remark uses the same number of GC
- // workers as the most recent ParNew collection.
+ // GC workers from the calls above.
static uint calc_active_workers(uintx total_workers,
uintx active_workers,
uintx application_workers);
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -228,7 +228,6 @@
do_bool_flag(UseCompressedOops) \
X86_ONLY(do_bool_flag(UseCountLeadingZerosInstruction)) \
X86_ONLY(do_bool_flag(UseCountTrailingZerosInstruction)) \
- do_bool_flag(UseConcMarkSweepGC) \
do_bool_flag(UseG1GC) \
do_bool_flag(UseParallelGC) \
do_bool_flag(UseParallelOldGC) \
--- a/src/hotspot/share/memory/metaspace.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -73,7 +73,6 @@
volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
-bool MetaspaceGC::_should_concurrent_collect = false;
// BlockFreelist methods
--- a/src/hotspot/share/memory/metaspace.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/memory/metaspace.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -439,11 +439,6 @@
// When committed memory of all metaspaces reaches this value,
// a GC is induced and the value is increased. Size is in bytes.
static volatile size_t _capacity_until_GC;
-
- // For a CMS collection, signal that a concurrent collection should
- // be started.
- static bool _should_concurrent_collect;
-
static uint _shrink_factor;
static size_t shrink_factor() { return _shrink_factor; }
@@ -461,11 +456,6 @@
bool* can_retry = NULL);
static size_t dec_capacity_until_GC(size_t v);
- static bool should_concurrent_collect() { return _should_concurrent_collect; }
- static void set_should_concurrent_collect(bool v) {
- _should_concurrent_collect = v;
- }
-
// The amount to increase the high-water-mark (_capacity_until_GC)
static size_t delta_capacity_until_GC(size_t bytes);
--- a/src/hotspot/share/memory/universe.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/memory/universe.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -395,13 +395,8 @@
// so we allocate wherever, and hope that the first collection
// moves these objects to the bottom of the old generation.
// We can allocate directly in the permanent generation, so we do.
- int size;
- if (UseConcMarkSweepGC) {
- log_warning(gc)("Using +FullGCALot with concurrent mark sweep gc will not force all objects to relocate");
- size = FullGCALotDummies;
- } else {
- size = FullGCALotDummies * 2;
- }
+ int size = FullGCALotDummies * 2;
+
objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
objArrayHandle dummy_array(THREAD, naked_array);
int i = 0;
@@ -1224,10 +1219,10 @@
_fullgc_alot_dummy_array = NULL;
return false;
}
- if (!UseConcMarkSweepGC) {
- // Release dummy at bottom of old generation
- _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
- }
+
+ // Release dummy at bottom of old generation
+ _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
+
// Release dummy at bottom of permanent generation
_fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
}
--- a/src/hotspot/share/oops/markWord.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/oops/markWord.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -38,20 +38,11 @@
// --------
// hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
-// size:32 ------------------------------------------>| (CMS free block)
-// PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
// 64 bits:
// --------
-// unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object)
-// JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object)
-// PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
-// size:64 ----------------------------------------------------->| (CMS free block)
-//
-// unused:25 hash:31 -->| cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && normal object)
-// JavaThread*:54 epoch:2 cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && biased object)
-// narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
-// unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
+// unused:25 hash:31 -->| unused_gap:1 age:4 biased_lock:1 lock:2 (normal object)
+// JavaThread*:54 epoch:2 unused_gap:1 age:4 biased_lock:1 lock:2 (biased object)
//
// - hash contains the identity hash value: largest value is
// 31 bits, see os::random(). Also, 64-bit vm's require
@@ -82,7 +73,7 @@
// performed. The runtime system aligns all JavaThread* pointers to
// a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
// to make room for the age bits & the epoch bits (used in support of
-// biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
+// biased locking).
//
// [JavaThread* | epoch | age | 1 | 01] lock is biased toward given thread
// [0 | epoch | age | 1 | 01] lock is anonymously biased
@@ -136,7 +127,7 @@
static const int biased_lock_bits = 1;
static const int max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits;
static const int hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits;
- static const int cms_bits = LP64_ONLY(1) NOT_LP64(0);
+ static const int unused_gap_bits = LP64_ONLY(1) NOT_LP64(0);
static const int epoch_bits = 2;
// The biased locking code currently requires that the age bits be
@@ -144,8 +135,8 @@
static const int lock_shift = 0;
static const int biased_lock_shift = lock_bits;
static const int age_shift = lock_bits + biased_lock_bits;
- static const int cms_shift = age_shift + age_bits;
- static const int hash_shift = cms_shift + cms_bits;
+ static const int unused_gap_shift = age_shift + age_bits;
+ static const int hash_shift = unused_gap_shift + unused_gap_bits;
static const int epoch_shift = hash_shift;
static const uintptr_t lock_mask = right_n_bits(lock_bits);
@@ -157,8 +148,6 @@
static const uintptr_t age_mask_in_place = age_mask << age_shift;
static const uintptr_t epoch_mask = right_n_bits(epoch_bits);
static const uintptr_t epoch_mask_in_place = epoch_mask << epoch_shift;
- static const uintptr_t cms_mask = right_n_bits(cms_bits);
- static const uintptr_t cms_mask_in_place = cms_mask << cms_shift;
static const uintptr_t hash_mask = right_n_bits(hash_bits);
static const uintptr_t hash_mask_in_place = hash_mask << hash_shift;
@@ -269,12 +258,6 @@
template <typename KlassProxy>
inline bool must_be_preserved_for_promotion_failure(KlassProxy klass) const;
- // Should this header be preserved during a scavenge where CMS is
- // the old generation?
- // (This is basically the same body as must_be_preserved_for_promotion_failure(),
- // but takes the Klass* as argument instead)
- inline bool must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;
-
// WARNING: The following routines are used EXCLUSIVELY by
// synchronization functions. They are not really gc safe.
// They must get updated if markWord layout get changed.
@@ -375,42 +358,6 @@
// Recover address of oop from encoded form used in mark
inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
-
- // These markWords indicate cms free chunk blocks and not objects.
- // In 64 bit, the markWord is set to distinguish them from oops.
- // These are defined in 32 bit mode for vmStructs.
- const static uintptr_t cms_free_chunk_pattern = 0x1;
-
- // Constants for the size field.
- enum { size_shift = cms_shift + cms_bits,
- size_bits = 35 // need for compressed oops 32G
- };
- // These values are too big for Win64
- const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
- NOT_LP64(0);
- const static uintptr_t size_mask_in_place =
- (address_word)size_mask << size_shift;
-
-#ifdef _LP64
- static markWord cms_free_prototype() {
- return markWord((prototype().value() & ~cms_mask_in_place) |
- ((cms_free_chunk_pattern & cms_mask) << cms_shift));
- }
- uintptr_t cms_encoding() const {
- return mask_bits(value() >> cms_shift, cms_mask);
- }
- bool is_cms_free_chunk() const {
- return is_neutral() &&
- (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
- }
-
- size_t get_size() const { return (size_t)(value() >> size_shift); }
- static markWord set_size_and_free(size_t size) {
- assert((size & ~size_mask) == 0, "shouldn't overflow size field");
- return markWord((cms_free_prototype().value() & ~size_mask_in_place) |
- ((size & size_mask) << size_shift));
- }
-#endif // _LP64
};
// Support atomic operations.
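
With the CMS free-block and promoted-object encodings gone, the 64-bit layout keeps a single unused_gap bit where the cms_free bit used to sit. The following standalone sketch (not the HotSpot class) recomputes the shifts from the constants shown above, confirming the fields still pack as the header comment describes:

  #include <cstdio>

  int main() {
    // Field widths from the updated markWord.hpp comment (LP64 case).
    const int lock_bits        = 2;
    const int biased_lock_bits = 1;
    const int age_bits         = 4;
    const int unused_gap_bits  = 1;    // was cms_bits; still one bit on LP64
    const int hash_bits        = 31;

    const int age_shift        = lock_bits + biased_lock_bits;
    const int unused_gap_shift = age_shift + age_bits;
    const int hash_shift       = unused_gap_shift + unused_gap_bits;

    std::printf("hash occupies bits [%d, %d); the top %d bits stay unused\n",
                hash_shift, hash_shift + hash_bits,
                64 - (hash_shift + hash_bits));   // prints [8, 39); 25 unused
    return 0;
  }

The printed split (25 unused, 31 hash, 1 gap, 4 age, 1 biased_lock, 2 lock) matches the "unused:25 hash:31 -->| unused_gap:1 age:4 biased_lock:1 lock:2" line in the new layout comment.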
--- a/src/hotspot/share/oops/markWord.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/oops/markWord.inline.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -68,11 +68,6 @@
return (!is_unlocked() || !has_no_hash());
}
-// Same as must_be_preserved_for_promotion_failure().
-inline bool markWord::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
- return must_be_preserved_for_promotion_failure(klass_of_obj_containing_mark);
-}
-
inline markWord markWord::prototype_for_klass(const Klass* klass) {
markWord prototype_header = klass->prototype_header();
assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");
--- a/src/hotspot/share/oops/oop.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/oops/oop.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -45,7 +45,6 @@
class ScanClosure;
class FastScanClosure;
class FilteringClosure;
-class CMSIsAliveClosure;
class PSPromotionManager;
class ParCompactionManager;
@@ -93,9 +92,6 @@
inline int klass_gap() const;
inline void set_klass_gap(int z);
static inline void set_klass_gap(HeapWord* mem, int z);
- // For when the klass pointer is being used as a linked list "next" field.
- inline void set_klass_to_list_ptr(oop k);
- inline oop list_ptr_from_klass();
// size of object header, aligned to platform wordSize
static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
--- a/src/hotspot/share/oops/oop.inline.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/oops/oop.inline.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -179,26 +179,6 @@
set_klass_gap((HeapWord*)this, v);
}
-void oopDesc::set_klass_to_list_ptr(oop k) {
- // This is only to be used during GC, for from-space objects, so no
- // barrier is needed.
- if (UseCompressedClassPointers) {
- _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k); // may be null (parnew overflow handling)
- } else {
- _metadata._klass = (Klass*)(address)k;
- }
-}
-
-oop oopDesc::list_ptr_from_klass() {
- // This is only to be used during GC, for from-space objects.
- if (UseCompressedClassPointers) {
- return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
- } else {
- // Special case for GC
- return (oop)(address)_metadata._klass;
- }
-}
-
bool oopDesc::is_a(Klass* k) const {
return klass()->is_subtype_of(k);
}
@@ -244,25 +224,13 @@
// skipping the intermediate round to HeapWordSize.
s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);
- // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
+ // UseParallelGC and UseG1GC can change the length field
// of an "old copy" of an object array in the young gen so it indicates
// the grey portion of an already copied array. This will cause the first
// disjunct below to fail if the two comparands are computed across such
// a concurrent change.
- // ParNew also runs with promotion labs (which look like int
- // filler arrays) which are subject to changing their declared size
- // when finally retiring a PLAB; this also can cause the first disjunct
- // to fail for another worker thread that is concurrently walking the block
- // offset table. Both these invariant failures are benign for their
- // current uses; we relax the assertion checking to cover these two cases below:
- // is_objArray() && is_forwarded() // covers first scenario above
- // || is_typeArray() // covers second scenario above
- // If and when UseParallelGC uses the same obj array oop stealing/chunking
- // technique, we will need to suitably modify the assertion.
assert((s == klass->oop_size(this)) ||
- (Universe::heap()->is_gc_active() &&
- ((is_typeArray() && UseConcMarkSweepGC) ||
- (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
+ (Universe::heap()->is_gc_active() && is_objArray() && is_forwarded() && (UseParallelGC || UseG1GC)),
"wrong array object size");
} else {
// Must be zero, so bite the bullet and take the virtual call.
--- a/src/hotspot/share/opto/lcm.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/opto/lcm.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -958,7 +958,7 @@
ready_cnt.at_put(n->_idx, local); // Count em up
#ifdef ASSERT
- if( UseConcMarkSweepGC || UseG1GC ) {
+ if (UseG1GC) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
// Check the precedence edges
for (uint prec = n->req(); prec < n->len(); prec++) {
--- a/src/hotspot/share/opto/macro.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/opto/macro.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1399,7 +1399,7 @@
// other threads.
// Other threads include java threads and JVM internal threads
// (for example concurrent GC threads). Current concurrent GC
- // implementation: CMS and G1 will not scan newly created object,
+ // implementation: G1 will not scan newly created object,
// so it's safe to skip storestore barrier when allocation does
// not escape.
if (!alloc->does_not_escape_thread() &&
--- a/src/hotspot/share/prims/whitebox.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/prims/whitebox.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1726,10 +1726,6 @@
return (jlong) MetaspaceGC::capacity_until_GC();
WB_END
-WB_ENTRY(jboolean, WB_MetaspaceShouldConcurrentCollect(JNIEnv* env, jobject wb))
- return MetaspaceGC::should_concurrent_collect();
-WB_END
-
WB_ENTRY(jlong, WB_MetaspaceReserveAlignment(JNIEnv* env, jobject wb))
return (jlong)Metaspace::reserve_alignment();
WB_END
@@ -2309,7 +2305,6 @@
CC"(Ljava/lang/ClassLoader;JJ)V", (void*)&WB_FreeMetaspace },
{CC"incMetaspaceCapacityUntilGC", CC"(J)J", (void*)&WB_IncMetaspaceCapacityUntilGC },
{CC"metaspaceCapacityUntilGC", CC"()J", (void*)&WB_MetaspaceCapacityUntilGC },
- {CC"metaspaceShouldConcurrentCollect", CC"()Z", (void*)&WB_MetaspaceShouldConcurrentCollect },
{CC"metaspaceReserveAlignment", CC"()J", (void*)&WB_MetaspaceReserveAlignment },
{CC"getCPUFeatures", CC"()Ljava/lang/String;", (void*)&WB_GetCPUFeatures },
{CC"getNMethod0", CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
--- a/src/hotspot/share/runtime/arguments.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -515,7 +515,6 @@
// -------------- Deprecated Flags --------------
// --- Non-alias flags - sorted by obsolete_in then expired_in:
{ "MaxGCMinorPauseMillis", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
- { "UseConcMarkSweepGC", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
{ "MaxRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -544,6 +543,81 @@
{ "CompilationPolicyChoice", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "TraceNMethodInstalls", JDK_Version::jdk(13), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "FailOverToOldVerifier", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "UseConcMarkSweepGC", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSAbortSemantics", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSAbortablePrecleanMinWorkPerIteration", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSBitMapYieldQuantum", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSBootstrapOccupancy", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSClassUnloadingEnabled", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSClassUnloadingMaxInterval", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSCleanOnEnter", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSConcMarkMultiple", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSConcurrentMTEnabled", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSCoordinatorYieldSleepCount", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSEdenChunksRecordAlways", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSExpAvgFactor", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSExtrapolateSweep", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSIncrementalSafetyFactor", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSIndexedFreeListReplenish", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSInitiatingOccupancyFraction", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSIsTooFullPercentage", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSLargeCoalSurplusPercent", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSLargeSplitSurplusPercent", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSLoopWarn", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSMaxAbortablePrecleanLoops", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSMaxAbortablePrecleanTime", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSOldPLABMax", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSOldPLABMin", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSOldPLABNumRefills", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSOldPLABReactivityFactor", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSOldPLABResizeQuicker", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSOldPLABToleranceFactor", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPLABRecordAlways", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSParallelInitialMarkEnabled", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSParallelRemarkEnabled", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSParallelSurvivorRemarkEnabled", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanDenominator", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanIter", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanNumerator", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanRefLists1", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanRefLists2", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanSurvivors1", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanSurvivors2", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleanThreshold", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrecleaningEnabled", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrintChunksInDump", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSPrintObjectsInDump", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSRemarkVerifyVariant", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSReplenishIntermediate", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSRescanMultiple", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSSamplingGrain", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSScavengeBeforeRemark", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSScheduleRemarkEdenPenetration", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSScheduleRemarkEdenSizeThreshold", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSScheduleRemarkSamplingRatio", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSSmallCoalSurplusPercent", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSSmallSplitSurplusPercent", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSSplitIndexedFreeListBlocks", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSTriggerRatio", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSWorkQueueDrainThreshold", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSYield", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSYieldSleepCount", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMSYoungGenPerWorker", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMS_FLSPadding", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMS_FLSWeight", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMS_SweepPadding", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMS_SweepTimerThresholdMillis", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "CMS_SweepWeight", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "FLSAlwaysCoalesceLarge", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "FLSCoalescePolicy", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "FLSLargestBlockCoalesceProximity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "OldPLABWeight", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "ParGCDesiredObjsFromOverflowList", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "ParGCTrimOverflow", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "ParGCUseLocalOverflow", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "ResizeOldPLAB", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "UseCMSBestFit", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+ { "UseCMSInitiatingOccupancyOnly", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "BindGCTaskThreadsToCPUs", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
{ "UseGCTaskAffinity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
@@ -2547,16 +2621,10 @@
}
// -Xconcgc
} else if (match_option(option, "-Xconcgc")) {
- if (FLAG_SET_CMDLINE(UseConcMarkSweepGC, true) != JVMFlag::SUCCESS) {
- return JNI_EINVAL;
- }
- handle_extra_cms_flags("-Xconcgc uses UseConcMarkSweepGC");
+ warning("-Xconcgc uses UseConcMarkSweepGC; support was removed for both options in 14.0");
// -Xnoconcgc
} else if (match_option(option, "-Xnoconcgc")) {
- if (FLAG_SET_CMDLINE(UseConcMarkSweepGC, false) != JVMFlag::SUCCESS) {
- return JNI_EINVAL;
- }
- handle_extra_cms_flags("-Xnoconcgc uses UseConcMarkSweepGC");
+ warning("-Xnoconcgc uses UseConcMarkSweepGC; support was removed for both options in 14.0");
// -Xbatch
} else if (match_option(option, "-Xbatch")) {
if (FLAG_SET_CMDLINE(BackgroundCompilation, false) != JVMFlag::SUCCESS) {
@@ -3818,15 +3886,6 @@
return true;
}
-void Arguments::handle_extra_cms_flags(const char* msg) {
- SpecialFlag flag;
- const char *flag_name = "UseConcMarkSweepGC";
- if (lookup_special_flag(flag_name, flag)) {
- handle_aliases_and_deprecation(flag_name, /* print warning */ true);
- warning("%s", msg);
- }
-}
-
// Parse entry point called from JNI_CreateJavaVM
jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) {
@@ -4165,7 +4224,7 @@
// such as the parallel collector for Linux and Solaris will
// interleave old gen and survivor spaces on top of NUMA
// allocation policy for the eden space.
- // Non NUMA-aware collectors such as CMS, G1 and Serial-GC on
+ // Non NUMA-aware collectors such as G1 and Serial-GC on
// all platforms and ParallelGC on Windows will interleave all
// of the heap spaces across NUMA nodes.
if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
--- a/src/hotspot/share/runtime/arguments.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/arguments.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -425,8 +425,6 @@
static bool handle_deprecated_print_gc_flags();
- static void handle_extra_cms_flags(const char* msg);
-
static jint parse_vm_init_args(const JavaVMInitArgs *vm_options_args,
const JavaVMInitArgs *java_tool_options_args,
const JavaVMInitArgs *java_options_args,
--- a/src/hotspot/share/runtime/flags/jvmFlag.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/flags/jvmFlag.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -447,11 +447,11 @@
// an eye-pleasing tabular output is created.
//
// Sample output:
- // bool CMSScavengeBeforeRemark = false {product} {default}
- // uintx CMSScheduleRemarkEdenPenetration = 50 {product} {default}
- // size_t CMSScheduleRemarkEdenSizeThreshold = 2097152 {product} {default}
- // uintx CMSScheduleRemarkSamplingRatio = 5 {product} {default}
- // double CMSSmallCoalSurplusPercent = 1.050000 {product} {default}
+ // bool ThreadPriorityVerbose = false {product} {default}
+ // uintx ThresholdTolerance = 10 {product} {default}
+ // size_t TLABSize = 0 {product} {default}
+ // uintx SurvivorRatio = 8 {product} {default}
+ // double InitialRAMPercentage = 1.562500 {product} {default}
// ccstr CompileCommandFile = MyFile.cmd {product} {command line}
// ccstrlist CompileOnly = Method1
// CompileOnly += Method2 {product} {command line}
--- a/src/hotspot/share/runtime/globals.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -1947,9 +1947,6 @@
experimental(bool, UseCriticalCompilerThreadPriority, false, \
"Compiler thread(s) run at critical scheduling priority") \
\
- experimental(bool, UseCriticalCMSThreadPriority, false, \
- "ConcurrentMarkSweep thread runs at critical scheduling priority")\
- \
develop(intx, NewCodeParameter, 0, \
"Testing Only: Create a dedicated integer parameter before " \
"putback") \
--- a/src/hotspot/share/runtime/mutexLocker.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/mutexLocker.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -263,10 +263,6 @@
def(PerfDataMemAlloc_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for allocating PerfData memory for performance data
def(PerfDataManager_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for synchronized access to PerfDataManager resources
- // CMS_modUnionTable_lock leaf
- // CMS_bitMap_lock leaf 1
- // CMS_freeList_lock leaf 2
-
def(Threads_lock , PaddedMonitor, barrier, true, _safepoint_check_always); // Used for safepoint protocol.
def(NonJavaThreadsList_lock , PaddedMutex, leaf, true, _safepoint_check_never);
def(NonJavaThreadsListSync_lock , PaddedMutex, leaf, true, _safepoint_check_never);
--- a/src/hotspot/share/runtime/thread.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/thread.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -4367,7 +4367,7 @@
// + Call before_exit(), prepare for VM exit
// > run VM level shutdown hooks (they are registered through JVM_OnExit(),
// currently the only user of this mechanism is File.deleteOnExit())
-// > stop StatSampler, watcher thread, CMS threads,
+// > stop StatSampler, watcher thread,
// post thread end and vm death events to JVMTI,
// stop signal thread
// + Call JavaThread::exit(), it will:
--- a/src/hotspot/share/runtime/vmOperations.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/vmOperations.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -63,8 +63,6 @@
template(GenCollectForAllocation) \
template(ParallelGCFailedAllocation) \
template(ParallelGCSystemGC) \
- template(CMS_Initial_Mark) \
- template(CMS_Final_Remark) \
template(G1CollectForAllocation) \
template(G1CollectFull) \
template(G1Concurrent) \
--- a/src/hotspot/share/runtime/vmStructs.cpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp Wed Nov 13 11:37:29 2019 +0100
@@ -2674,11 +2674,6 @@
declare_constant(markWord::no_lock_in_place) \
declare_constant(markWord::max_age) \
\
- /* Constants in markWord used by CMS. */ \
- declare_constant(markWord::cms_shift) \
- declare_constant(markWord::cms_mask) \
- declare_constant(markWord::size_shift) \
- \
/* InvocationCounter constants */ \
declare_constant(InvocationCounter::count_increment) \
declare_constant(InvocationCounter::count_shift)
--- a/src/hotspot/share/utilities/dtrace_disabled.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/utilities/dtrace_disabled.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -121,14 +121,6 @@
#define HOTSPOT_VMOPS_END_ENABLED() 0
/* hs_private provider probes */
-#define HS_PRIVATE_CMS_INITMARK_BEGIN()
-#define HS_PRIVATE_CMS_INITMARK_BEGIN_ENABLED() 0
-#define HS_PRIVATE_CMS_INITMARK_END()
-#define HS_PRIVATE_CMS_INITMARK_END_ENABLED() 0
-#define HS_PRIVATE_CMS_REMARK_BEGIN()
-#define HS_PRIVATE_CMS_REMARK_BEGIN_ENABLED() 0
-#define HS_PRIVATE_CMS_REMARK_END()
-#define HS_PRIVATE_CMS_REMARK_END_ENABLED() 0
#define HS_PRIVATE_HASHTABLE_NEW_ENTRY(arg0, arg1, arg2, arg3)
#define HS_PRIVATE_HASHTABLE_NEW_ENTRY_ENABLED() 0
#define HS_PRIVATE_SAFEPOINT_BEGIN()
--- a/src/hotspot/share/utilities/macros.hpp Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/share/utilities/macros.hpp Wed Nov 13 11:37:29 2019 +0100
@@ -131,24 +131,6 @@
#define NOT_MANAGEMENT_RETURN_(code) { return code; }
#endif // INCLUDE_MANAGEMENT
-#ifndef INCLUDE_CMSGC
-#define INCLUDE_CMSGC 1
-#endif // INCLUDE_CMSGC
-
-#if INCLUDE_CMSGC
-#define CMSGC_ONLY(x) x
-#define CMSGC_ONLY_ARG(arg) arg,
-#define NOT_CMSGC(x)
-#define NOT_CMSGC_RETURN /* next token must be ; */
-#define NOT_CMSGC_RETURN_(code) /* next token must be ; */
-#else
-#define CMSGC_ONLY(x)
-#define CMSGC_ONLY_ARG(x)
-#define NOT_CMSGC(x) x
-#define NOT_CMSGC_RETURN {}
-#define NOT_CMSGC_RETURN_(code) { return code; }
-#endif // INCLUDE_CMSGC
-
#ifndef INCLUDE_EPSILONGC
#define INCLUDE_EPSILONGC 1
#endif // INCLUDE_EPSILONGC
--- a/src/java.base/share/man/java.1 Wed Nov 13 11:21:15 2019 +0100
+++ b/src/java.base/share/man/java.1 Wed Nov 13 11:37:29 2019 +0100
@@ -2975,70 +2975,6 @@
application uses the heap space.
.RS
.RE
-.TP
-.B \f[CB]\-XX:+CMSClassUnloadingEnabled\f[R]
-Enables class unloading when using the concurrent mark\-sweep (CMS)
-garbage collector.
-This option is enabled by default.
-To disable class unloading for the CMS garbage collector, specify
-\f[CB]\-XX:\-CMSClassUnloadingEnabled\f[R].
-.RS
-.RE
-.TP
-.B \f[CB]\-XX:CMSExpAvgFactor=\f[R]\f[I]percent\f[R]
-Sets the percentage of time (0 to 100) used to weight the current sample
-when computing exponential averages for the concurrent collection
-statistics.
-By default, the exponential averages factor is set to 25%.
-The following example shows how to set the factor to 15%:
-.RS
-.RS
-.PP
-\f[CB]\-XX:CMSExpAvgFactor=15\f[R]
-.RE
-.RE
-.TP
-.B \f[CB]\-XX:CMSInitiatingOccupancyFraction=\f[R]\f[I]percent\f[R]
-Sets the percentage of the old generation occupancy (0 to 100) at which
-to start a CMS collection cycle.
-The default value is set to \-1.
-Any negative value (including the default) implies that the option
-\f[CB]\-XX:CMSTriggerRatio\f[R] is used to define the value of the
-initiating occupancy fraction.
-.RS
-.PP
-The following example shows how to set the factor to 20%:
-.RS
-.PP
-\f[CB]\-XX:CMSInitiatingOccupancyFraction=20\f[R]
-.RE
-.RE
-.TP
-.B \f[CB]\-XX:CMSIncrementalDutySafetyFactor=\f[R]\f[I]percent\f[R]
-Sets the percentage (0 to 100) used to add conservatism when computing
-the duty cycle.
-The default value is 10.
-.RS
-.RE
-.TP
-.B \f[CB]\-XX:+CMSScavengeBeforeRemark\f[R]
-Enables scavenging attempts before the CMS remark step.
-By default, this option is disabled.
-.RS
-.RE
-.TP
-.B \f[CB]\-XX:CMSTriggerRatio=percent\f[R]
-Sets the percentage (0 to 100) of the value specified by the option
-\f[CB]\-XX:MinHeapFreeRatio\f[R] that\[aq]s allocated before a CMS
-collection cycle commences.
-The default value is set to 80%.
-.RS
-.PP
-The following example shows how to set the occupancy fraction to 75%:
-.RS
-.PP
-\f[CB]\-XX:CMSTriggerRatio=75\f[R]
-.RE
.RE
.TP
.B \f[CB]\-XX:ConcGCThreads=\f[R]\f[I]threads\f[R]
@@ -3070,7 +3006,6 @@
Enables invoking of concurrent GC by using the \f[CB]System.gc()\f[R]
request.
This option is disabled by default and can be enabled only with the
-deprecated \f[CB]\-XX:+UseConcMarkSweepGC\f[R] option and the
\f[CB]\-XX:+UseG1GC\f[R] option.
.RS
.RE
@@ -3460,8 +3395,7 @@
.B \f[CB]\-XX:MaxTenuringThreshold=\f[R]\f[I]threshold\f[R]
Sets the maximum tenuring threshold for use in adaptive GC sizing.
The largest value is 15.
-The default value is 15 for the parallel (throughput) collector, and 6
-for the CMS collector.
+The default value is 15 for the parallel (throughput) collector.
.RS
.PP
The following example shows how to set the maximum tenuring threshold to
@@ -3724,13 +3658,6 @@
.RS
.RE
.TP
-.B \f[CB]\-XX:+UseCMSInitiatingOccupancyOnly\f[R]
-Enables the use of the occupancy value as the only criterion for
-initiating the CMS collector.
-By default, this option is disabled and other criteria may be used.
-.RS
-.RE
-.TP
.B \f[CB]\-XX:+UseG1GC\f[R]
Enables the use of the garbage\-first (G1) garbage collector.
It\[aq]s a server\-style garbage collector, targeted for multiprocessor
@@ -3975,21 +3902,6 @@
See \f[B]Enable Logging with the JVM Unified Logging Framework\f[R].
.RE
.TP
-.B \f[CB]\-XX:+UseConcMarkSweepGC\f[R]
-Enables the use of the CMS garbage collector for the old generation.
-CMS is an alternative to the default garbage collector (G1), which also
-focuses on meeting application latency requirements.
-By default, this option is disabled and the collector is selected
-automatically based on the configuration of the machine and type of the
-JVM.
-The CMS garbage collector is deprecated.
-.RS
-.RE
-.SH OBSOLETE JAVA OPTIONS
-.PP
-These \f[CB]java\f[R] options are still accepted but ignored, and a
-warning is issued when they\[aq]re used.
-.TP
.B \f[CB]\-XX:+UseMembar\f[R]
Enabled issuing membars on thread\-state transitions.
This option was disabled by default on all platforms except ARM servers,
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/AdaptiveFreeList.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * @(#)AdaptiveFreeList.java
- *
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.util.Observable;
-import java.util.Observer;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.VMObject;
-import sun.jvm.hotspot.types.CIntegerField;
-import sun.jvm.hotspot.types.Type;
-import sun.jvm.hotspot.types.TypeDataBase;
-
-public class AdaptiveFreeList extends VMObject {
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
- sizeField = type.getCIntegerField("_size");
- countField = type.getCIntegerField("_count");
- headerSize = type.getSize();
- }
-
- // Fields
- private static CIntegerField sizeField;
- private static CIntegerField countField;
- private static long headerSize;
-
- //Constructor
- public AdaptiveFreeList(Address address) {
- super(address);
- }
-
- // Accessors
- public long size() {
- return sizeField.getValue(addr);
- }
-
- public long count() {
- return countField.getValue(addr);
- }
-
- public static long sizeOf() {
- return headerSize;
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSBitMap.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class CMSBitMap extends VMObject {
- private static AddressField bmStartWordField;
- private static CIntegerField bmWordSizeField;
- private static CIntegerField shifterField;
- //private static AddressField bmField;
- private static long virtualSpaceFieldOffset;
-
- public CMSBitMap(Address addr) {
- super(addr);
- }
-
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("CMSBitMap");
- bmStartWordField = type.getAddressField("_bmStartWord");
- bmWordSizeField = type.getCIntegerField("_bmWordSize");
- shifterField = type.getCIntegerField("_shifter");
- //bmField = type.getAddressField("_bm");
- virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
- }
- public void printAll() {
- System.out.println("bmStartWord(): "+bmStartWord());
- System.out.println("bmWordSize(): "+bmWordSize());
- System.out.println("shifter(): "+shifter());
- }
-
- public Address bmStartWord() {
- return bmStartWordField.getValue(addr);
- }
- public long bmWordSize() {
- return bmWordSizeField.getValue(addr);
- }
- public long shifter() {
- return shifterField.getValue(addr);
- }
- public VirtualSpace virtualSpace() {
- return (VirtualSpace) VMObjectFactory.newObject(VirtualSpace.class, addr.addOffsetTo(virtualSpaceFieldOffset));
- }
-
- public BitMap bm() {
- BitMap bitMap = new BitMap((int) (bmWordSize() >> shifter() ));
- VirtualSpace vs = virtualSpace();
- bitMap.set_map(vs.low());
- return bitMap;
- }
-
- public Address getNextMarkedWordAddress(Address addr) {
- Address endWord = bmStartWord().addOffsetTo(bmWordSize());
- int nextOffset = bm().getNextOneOffset(heapWordToOffset(addr), heapWordToOffset(endWord) );
- Address nextAddr = offsetToHeapWord(nextOffset);
- return nextAddr;
- }
-
- int heapWordToOffset(Address addr) {
- int temp = (int)addr.minus(bmStartWord()) / (int) VM.getVM().getAddressSize();
- int ret_val = temp >> shifter();
- return ret_val;
- }
-
- Address offsetToHeapWord(int offset) {
- int temp = offset << shifter();
- return bmStartWord().addOffsetTo(temp*VM.getVM().getAddressSize());
- }
-
- boolean isMarked(Address addr) {
- BitMap bm = bm();
- return bm.at(heapWordToOffset(addr));
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSCollector.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class CMSCollector extends VMObject {
- private static long markBitMapFieldOffset;
-
- public CMSCollector(Address addr) {
- super(addr);
- }
-
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("CMSCollector");
- markBitMapFieldOffset = type.getField("_markBitMap").getOffset();
- }
-
- //Accessing mark bitmap
- public CMSBitMap markBitMap() {
- return (CMSBitMap) VMObjectFactory.newObject(
- CMSBitMap.class,
- addr.addOffsetTo(markBitMapFieldOffset));
- }
-
- public long blockSizeUsingPrintezisBits(Address addr) {
- CMSBitMap markBitMap = markBitMap();
- long addressSize = VM.getVM().getAddressSize();
- if ( markBitMap.isMarked(addr) && markBitMap.isMarked(addr.addOffsetTo(1*addressSize)) ) {
- Address nextOneAddr = markBitMap.getNextMarkedWordAddress(addr.addOffsetTo(2*addressSize));
- //return size in bytes
- long size = (nextOneAddr.addOffsetTo(1*addressSize)).minus(addr);
- return size;
- } else {
- //missing Printezis marks
- return -1;
- }
-
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.gc.shared.GenCollectedHeap;
-import sun.jvm.hotspot.gc.shared.CollectedHeapName;
-
-public class CMSHeap extends GenCollectedHeap {
-
- public CMSHeap(Address addr) {
- super(addr);
- }
-
- public CollectedHeapName kind() {
- return CollectedHeapName.CMS;
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CompactibleFreeListSpace.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.shared.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class CompactibleFreeListSpace extends CompactibleSpace implements LiveRegionsProvider {
- private static AddressField collectorField;
- private static AddressField indexedFreeListField;
- private static AddressField dictionaryField;
- private static long smallLinearAllocBlockFieldOffset;
-
- private int heapWordSize; // 4 for 32bit, 8 for 64 bits
- private int IndexSetStart; // for small indexed list
- private int IndexSetSize;
- private int IndexSetStride;
- private static long MinChunkSizeInBytes;
-
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- long sizeofFreeChunk = db.lookupType("FreeChunk").getSize();
- VM vm = VM.getVM();
-
- Type type = db.lookupType("CompactibleFreeListSpace");
- collectorField = type.getAddressField("_collector");
- collectorField = type.getAddressField("_collector");
- dictionaryField = type.getAddressField("_dictionary");
- indexedFreeListField = type.getAddressField("_indexedFreeList[0]");
- smallLinearAllocBlockFieldOffset = type.getField("_smallLinearAllocBlock").getOffset();
- MinChunkSizeInBytes = (type.getCIntegerField("_min_chunk_size_in_bytes")).getValue();
- }
-
- public CompactibleFreeListSpace(Address addr) {
- super(addr);
- VM vm = VM.getVM();
- heapWordSize = vm.getHeapWordSize();
- IndexSetStart = vm.getMinObjAlignmentInBytes() / heapWordSize;
- IndexSetStride = IndexSetStart;
- IndexSetSize = vm.getIndexSetSize();
- }
-
- // Accessing block offset table
- public CMSCollector collector() {
- return (CMSCollector) VMObjectFactory.newObject(
- CMSCollector.class,
- collectorField.getValue(addr));
- }
-
- public long free0() {
- return capacity() - used0();
- }
-
- public long used() {
- return capacity() - free();
- }
-
- public long used0() {
- List<MemRegion> regions = getLiveRegions();
- long usedSize = 0L;
- for (Iterator<MemRegion> itr = regions.iterator(); itr.hasNext();) {
- MemRegion mr = itr.next();
- usedSize += mr.byteSize();
- }
- return usedSize;
- }
-
- public long free() {
- // small chunks
- long size = 0;
- Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
- cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
- for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
- AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
- size += i*freeList.count();
- cur= cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
- }
-
- // large block
- AFLBinaryTreeDictionary aflbd = (AFLBinaryTreeDictionary) VMObjectFactory.newObject(AFLBinaryTreeDictionary.class,
- dictionaryField.getValue(addr));
- size += aflbd.size();
-
-
- // linear block in TLAB
- LinearAllocBlock lab = (LinearAllocBlock) VMObjectFactory.newObject(LinearAllocBlock.class,
- addr.addOffsetTo(smallLinearAllocBlockFieldOffset));
- size += lab.word_size();
-
- return size*heapWordSize;
- }
-
- public void printOn(PrintStream tty) {
- tty.print("free-list-space");
- tty.print("[ " + bottom() + " , " + end() + " ) ");
- long cap = capacity();
- long used_size = used();
- long free_size = free();
- int used_perc = (int)((double)used_size/cap*100);
- tty.print("space capacity = " + cap + " used(" + used_perc + "%)= " + used_size + " ");
- tty.print("free= " + free_size );
- tty.print("\n");
-
- }
-
- public Address skipBlockSizeUsingPrintezisBits(Address pos) {
- CMSCollector collector = collector();
- long size = 0;
- Address addr = null;
-
- if (collector != null) {
- size = collector.blockSizeUsingPrintezisBits(pos);
- if (size >= 3) {
- addr = pos.addOffsetTo(adjustObjectSizeInBytes(size));
- }
- }
- return addr;
- }
-
- @Override
- public List<MemRegion> getLiveRegions() {
- List<MemRegion> res = new ArrayList<>();
- VM vm = VM.getVM();
- Debugger dbg = vm.getDebugger();
- ObjectHeap heap = vm.getObjectHeap();
- Address cur = bottom();
- Address regionStart = cur;
- Address limit = end();
- final long addressSize = vm.getAddressSize();
-
- for (; cur.lessThan(limit);) {
- Address k = cur.getAddressAt(addressSize);
- if (FreeChunk.indicatesFreeChunk(cur)) {
- if (! cur.equals(regionStart)) {
- res.add(new MemRegion(regionStart, cur));
- }
- FreeChunk fc = (FreeChunk) VMObjectFactory.newObject(FreeChunk.class, cur);
- long chunkSize = fc.size();
- if (Assert.ASSERTS_ENABLED) {
- Assert.that(chunkSize > 0, "invalid FreeChunk size");
- }
- // note that fc.size() gives chunk size in heap words
- cur = cur.addOffsetTo(chunkSize * addressSize);
- regionStart = cur;
- } else if (k != null) {
- Oop obj = heap.newOop(cur.addOffsetToAsOopHandle(0));
- long objectSize = obj.getObjectSize();
- cur = cur.addOffsetTo(adjustObjectSizeInBytes(objectSize));
- } else {
- // FIXME: need to do a better job here.
- // can I use bitMap here?
- //Find the object size using Printezis bits and skip over
- long size = collector().blockSizeUsingPrintezisBits(cur);
- if (size == -1) {
- break;
- }
- cur = cur.addOffsetTo(adjustObjectSizeInBytes(size));
- }
- }
- return res;
- }
-
- //-- Internals only below this point
-
- // Unlike corresponding VM code, we operate on byte size rather than
- // HeapWord size for convenience.
-
- public static long adjustObjectSizeInBytes(long sizeInBytes) {
- return Oop.alignObjectSize(Math.max(sizeInBytes, MinChunkSizeInBytes));
- }
-
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ConcurrentMarkSweepGeneration.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.shared.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class ConcurrentMarkSweepGeneration extends CardGeneration {
- private static AddressField cmsSpaceField;
-
- public ConcurrentMarkSweepGeneration(Address addr) {
- super(addr);
- }
-
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("ConcurrentMarkSweepGeneration");
- cmsSpaceField = type.getAddressField("_cmsSpace");
- }
-
- // Accessing space
- public CompactibleFreeListSpace cmsSpace() {
- return (CompactibleFreeListSpace) VMObjectFactory.newObject(
- CompactibleFreeListSpace.class,
- cmsSpaceField.getValue(addr));
- }
-
- public long capacity() { return cmsSpace().capacity(); }
- public long used() { return cmsSpace().used(); }
- public long free() { return cmsSpace().free(); }
- public long contiguousAvailable() { throw new RuntimeException("not yet implemented"); }
- public boolean contains(Address p) { return cmsSpace().contains(p); }
- public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
- blk.doSpace(cmsSpace());
- }
- public void liveRegionsIterate(LiveRegionsClosure closure) {
- closure.doLiveRegions(cmsSpace());
- }
-
- public Generation.Name kind() {
- return Generation.Name.CONCURRENT_MARK_SWEEP;
- }
-
- public String name() {
- return "concurrent mark-sweep generation";
- }
-
- public void printOn(PrintStream tty) {
- tty.println(name());
- cmsSpace().printOn(tty);
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/LinearAllocBlock.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * @(#)BinaryTreeDictionary.java
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.runtime.*;
-
-public class LinearAllocBlock extends VMObject {
- static {
- VM.registerVMInitializedObserver(new Observer() {
- public void update(Observable o, Object data) {
- initialize(VM.getVM().getTypeDataBase());
- }
- });
- }
-
- private static synchronized void initialize(TypeDataBase db) {
- Type type = db.lookupType("LinearAllocBlock");
- word_sizeField= type.getCIntegerField("_word_size");
- }
-
- // Fields
- private static CIntegerField word_sizeField;
-
- // Accessors
- public long word_size() {
- return word_sizeField.getValue(addr);
- }
-
- // Constructor
- public LinearAllocBlock(Address addr) {
- super(addr);
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ParNewGeneration.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.serial.*;
-import sun.jvm.hotspot.gc.shared.*;
-
-public class ParNewGeneration extends DefNewGeneration {
- public ParNewGeneration(Address addr) {
- super(addr);
- }
-
- public Generation.Name kind() {
- return Generation.Name.PAR_NEW;
- }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,7 @@
PSMarkSweep ("PSMarkSweep"),
ParallelScavenge ("ParallelScavenge"),
DefNew ("DefNew"),
- ParNew ("ParNew"),
G1New ("G1New"),
- ConcurrentMarkSweep ("ConcurrentMarkSweep"),
G1Old ("G1Old"),
G1Full ("G1Full"),
Z ("Z"),
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/Generation.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/Generation.java Wed Nov 13 11:37:29 2019 +0100
@@ -81,9 +81,7 @@
// constants from Generation::Name
NAME_DEF_NEW = db.lookupIntConstant("Generation::DefNew").intValue();
- NAME_PAR_NEW = db.lookupIntConstant("Generation::ParNew").intValue();
NAME_MARK_SWEEP_COMPACT = db.lookupIntConstant("Generation::MarkSweepCompact").intValue();
- NAME_CONCURRENT_MARK_SWEEP = db.lookupIntConstant("Generation::ConcurrentMarkSweep").intValue();
NAME_OTHER = db.lookupIntConstant("Generation::Other").intValue();
}
@@ -93,9 +91,7 @@
public static class Name {
public static final Name DEF_NEW = new Name("DefNew");
- public static final Name PAR_NEW = new Name("ParNew");
public static final Name MARK_SWEEP_COMPACT = new Name("MarkSweepCompact");
- public static final Name CONCURRENT_MARK_SWEEP = new Name("ConcurrentMarkSweep");
public static final Name OTHER = new Name("Other");
private Name(String value) {
@@ -115,12 +111,8 @@
static Generation.Name nameForEnum(int value) {
if (value == NAME_DEF_NEW) {
return Name.DEF_NEW;
- } else if (value == NAME_PAR_NEW) {
- return Name.PAR_NEW;
} else if (value == NAME_MARK_SWEEP_COMPACT) {
return Name.MARK_SWEEP_COMPACT;
- } else if (value == NAME_CONCURRENT_MARK_SWEEP) {
- return Name.CONCURRENT_MARK_SWEEP;
} else if (value == NAME_OTHER) {
return Name.OTHER;
} else {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenerationFactory.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenerationFactory.java Wed Nov 13 11:37:29 2019 +0100
@@ -27,7 +27,6 @@
import java.util.*;
import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.cms.*;
import sun.jvm.hotspot.gc.serial.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
@@ -50,9 +49,7 @@
ctor = new VirtualConstructor(db);
ctor.addMapping("DefNewGeneration", DefNewGeneration.class);
- ctor.addMapping("ParNewGeneration", ParNewGeneration.class);
ctor.addMapping("TenuredGeneration", TenuredGeneration.class);
- ctor.addMapping("ConcurrentMarkSweepGeneration", ConcurrentMarkSweepGeneration.class);
}
public static Generation newObject(Address addr) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.debugger.OopHandle;
-import sun.jvm.hotspot.gc.cms.CMSHeap;
import sun.jvm.hotspot.gc.epsilon.EpsilonHeap;
import sun.jvm.hotspot.gc.g1.G1CollectedHeap;
import sun.jvm.hotspot.gc.parallel.ParallelScavengeHeap;
@@ -84,7 +83,6 @@
collectedHeapField = type.getAddressField("_collectedHeap");
heapConstructor = new VirtualConstructor(db);
- addHeapTypeIfInDB(db, CMSHeap.class);
addHeapTypeIfInDB(db, SerialHeap.class);
addHeapTypeIfInDB(db, ParallelScavengeHeap.class);
addHeapTypeIfInDB(db, G1CollectedHeap.class);
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,11 +73,6 @@
noHashInPlace = db.lookupLongConstant("markWord::no_hash_in_place").longValue();
noLockInPlace = db.lookupLongConstant("markWord::no_lock_in_place").longValue();
maxAge = db.lookupLongConstant("markWord::max_age").longValue();
-
- /* Constants in markWord used by CMS. */
- cmsShift = db.lookupLongConstant("markWord::cms_shift").longValue();
- cmsMask = db.lookupLongConstant("markWord::cms_mask").longValue();
- sizeShift = db.lookupLongConstant("markWord::size_shift").longValue();
}
// Field accessors
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java Wed Nov 13 11:37:29 2019 +0100
@@ -32,7 +32,6 @@
import java.util.*;
import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.cms.*;
import sun.jvm.hotspot.gc.shared.*;
import sun.jvm.hotspot.gc.epsilon.*;
import sun.jvm.hotspot.gc.g1.*;
@@ -234,16 +233,11 @@
}
visitor.prologue(totalSize);
- CompactibleFreeListSpace cmsSpaceOld = null;
CollectedHeap heap = VM.getVM().getUniverse().heap();
if (heap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) heap;
Generation genOld = genHeap.getGen(1);
- if (genOld instanceof ConcurrentMarkSweepGeneration) {
- ConcurrentMarkSweepGeneration concGen = (ConcurrentMarkSweepGeneration)genOld;
- cmsSpaceOld = concGen.cmsSpace();
- }
}
for (int i = 0; i < liveRegions.size(); i += 2) {
@@ -265,20 +259,7 @@
}
}
if (obj == null) {
- //Find the object size using Printezis bits and skip over
- long size = 0;
-
- if ( (cmsSpaceOld != null) && cmsSpaceOld.contains(handle) ){
- size = cmsSpaceOld.collector().blockSizeUsingPrintezisBits(handle);
- }
-
- if (size <= 0) {
- //Either Printezis bits not set or handle is not in cms space.
- throw new UnknownOopException();
- }
-
- handle = handle.addOffsetToAsOopHandle(CompactibleFreeListSpace.adjustObjectSizeInBytes(size));
- continue;
+ throw new UnknownOopException();
}
if (of == null || of.canInclude(obj)) {
if (visitor.doObj(obj)) {
@@ -286,11 +267,8 @@
break;
}
}
- if ( (cmsSpaceOld != null) && cmsSpaceOld.contains(handle)) {
- handle = handle.addOffsetToAsOopHandle(CompactibleFreeListSpace.adjustObjectSizeInBytes(obj.getObjectSize()) );
- } else {
- handle = handle.addOffsetToAsOopHandle(obj.getObjectSize());
- }
+
+ handle = handle.addOffsetToAsOopHandle(obj.getObjectSize());
}
}
catch (AddressException e) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Thread.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Thread.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -118,7 +118,6 @@
public boolean isHiddenFromExternalView() { return false; }
public boolean isJvmtiAgentThread() { return false; }
public boolean isWatcherThread() { return false; }
- public boolean isConcurrentMarkSweepThread() { return false; }
public boolean isServiceThread() { return false; }
/** Memory operations */
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java Wed Nov 13 11:37:29 2019 +0100
@@ -104,7 +104,6 @@
private int heapOopSize;
private int klassPtrSize;
private int oopSize;
- private final int IndexSetSize;
/** -XX flags (value origin) */
public static int Flags_DEFAULT;
public static int Flags_COMMAND_LINE;
@@ -491,7 +490,6 @@
Flags_VALUE_ORIGIN_MASK = db.lookupIntConstant("JVMFlag::VALUE_ORIGIN_MASK").intValue();
Flags_ORIG_COMMAND_LINE = db.lookupIntConstant("JVMFlag::ORIG_COMMAND_LINE").intValue();
oopSize = db.lookupIntConstant("oopSize").intValue();
- IndexSetSize = db.lookupIntConstant("CompactibleFreeListSpace::IndexSetSize").intValue();
intType = db.lookupType("int");
uintType = db.lookupType("uint");
@@ -711,10 +709,6 @@
return heapOopSize;
}
- public int getIndexSetSize() {
- return IndexSetSize;
- }
-
public int getKlassPtrSize() {
return klassPtrSize;
}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Wed Nov 13 11:37:29 2019 +0100
@@ -160,12 +160,6 @@
System.out.println("using thread-local object allocation.");
}
- l = getFlagValue("UseConcMarkSweepGC", flagMap);
- if (l == 1L) {
- System.out.println("Concurrent Mark-Sweep GC");
- return;
- }
-
l = getFlagValue("UseParallelGC", flagMap);
if (l == 1L) {
System.out.print("Parallel GC ");
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Wed Nov 13 11:37:29 2019 +0100
@@ -164,7 +164,6 @@
}
public final boolean useG1GC = getFlag("UseG1GC", Boolean.class);
- public final boolean useCMSGC = getFlag("UseConcMarkSweepGC", Boolean.class);
public final int allocatePrefetchStyle = getFlag("AllocatePrefetchStyle", Integer.class);
public final int allocatePrefetchInstr = getFlag("AllocatePrefetchInstr", Integer.class);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java Wed Nov 13 11:21:15 2019 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java Wed Nov 13 11:37:29 2019 +0100
@@ -231,8 +231,7 @@
public enum HotSpotGC {
// Supported GCs
Serial(true, "UseSerialGC"),
- Parallel(true, "UseParallelGC", "UseParallelOldGC", "UseParNewGC"),
- CMS(true, "UseConcMarkSweepGC"),
+ Parallel(true, "UseParallelGC", "UseParallelOldGC"),
G1(true, "UseG1GC"),
// Unsupported GCs
--- a/test/hotspot/jtreg/TEST.ROOT Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/TEST.ROOT Wed Nov 13 11:37:29 2019 +0100
@@ -47,7 +47,6 @@
vm.gc.G1 \
vm.gc.Serial \
vm.gc.Parallel \
- vm.gc.ConcMarkSweep \
vm.gc.Shenandoah \
vm.gc.Epsilon \
vm.gc.Z \
--- a/test/hotspot/jtreg/TEST.groups Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/TEST.groups Wed Nov 13 11:37:29 2019 +0100
@@ -37,7 +37,7 @@
hotspot_compiler_all_gcs = \
:hotspot_compiler \
- -:tier1_compiler_not_cms
+ -:tier1_compiler_aot_jvmci
hotspot_gc = \
gc \
@@ -152,7 +152,7 @@
compiler/aot \
compiler/profiling
-tier1_compiler_not_cms = \
+tier1_compiler_aot_jvmci = \
compiler/aot \
compiler/jvmci
@@ -198,7 +198,6 @@
-gc/logging/TestUnifiedLoggingSwitchStress.java \
-gc/stress \
-gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \
- -gc/cms/TestMBeanCMS.java \
-gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
-gc/shenandoah \
-gc/nvdimm
@@ -210,13 +209,11 @@
tier1_gc_gcold = \
gc/stress/gcold/TestGCOldWithG1.java \
- gc/stress/gcold/TestGCOldWithCMS.java \
gc/stress/gcold/TestGCOldWithSerial.java \
gc/stress/gcold/TestGCOldWithParallel.java
tier1_gc_gcbasher = \
gc/stress/gcbasher/TestGCBasherWithG1.java \
- gc/stress/gcbasher/TestGCBasherWithCMS.java \
gc/stress/gcbasher/TestGCBasherWithSerial.java \
gc/stress/gcbasher/TestGCBasherWithParallel.java
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java Wed Nov 13 11:37:29 2019 +0100
@@ -36,8 +36,6 @@
* TestUnsafeVolatileCAE,
* TestUnsafeVolatileGAS}
* and <testtype> in {G1,
- * CMS,
- * CMSCondMark,
* Serial,
* Parallel,
* Shenandoah,
@@ -90,18 +88,6 @@
procArgs = new String[argcount];
procArgs[argcount - 2] = "-XX:+UseSerialGC";
break;
- case "CMS":
- argcount = 10;
- procArgs = new String[argcount];
- procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
- procArgs[argcount - 2] = "-XX:-UseCondCardMark";
- break;
- case "CMSCondMark":
- argcount = 10;
- procArgs = new String[argcount];
- procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
- procArgs[argcount - 2] = "-XX:+UseCondCardMark";
- break;
case "Shenandoah":
argcount = 10;
procArgs = new String[argcount];
@@ -340,36 +326,6 @@
"ret"
};
break;
- case "CMSCondMark":
- // a card mark volatile barrier should be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "stlrw?" : "stlr",
- "membar_volatile",
- "dmb ish",
- "storestore \\(elided\\)",
- "strb",
- "membar_volatile \\(elided\\)",
- "ret"
- };
- break;
- case "CMS":
- // a volatile card mark membar should not be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be
- // generated as "dmb ishst"
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "stlrw?" : "stlr",
- "storestore",
- "dmb ishst",
- "strb",
- "membar_volatile \\(elided\\)",
- "ret"
- };
- break;
case "Shenandoah":
case "ShenandoahTraversal":
// Shenandoah generates normal object graphs for
@@ -531,35 +487,6 @@
"ret"
};
break;
- case "CMSCondMark":
- // a card mark volatile barrier should be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
- "membar_volatile",
- "dmb ish",
- "storestore \\(elided\\)",
- "strb",
- "membar_acquire \\(elided\\)",
- "ret"
- };
- break;
- case "CMS":
- // a volatile card mark membar should not be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
- "storestore",
- "dmb ishst",
- "strb",
- "membar_acquire \\(elided\\)",
- "ret"
- };
- break;
case "Shenandoah":
case "ShenandoahTraversal":
// For volatile CAS, Shenanodoah generates normal
@@ -736,35 +663,6 @@
"ret"
};
break;
- case "CMSCondMark":
- // a card mark volatile barrier should be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
- "membar_volatile",
- "dmb ish",
- "storestore \\(elided\\)",
- "strb",
- "membar_acquire \\(elided\\)",
- "ret"
- };
- break;
- case "CMS":
- // a volatile card mark membar should not be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
- "storestore",
- "dmb ishst",
- "strb",
- "membar_acquire \\(elided\\)",
- "ret"
- };
- break;
case "Shenandoah":
case "ShenandoahTraversal":
// For volatile CAS, Shenandoah generates normal
@@ -921,35 +819,6 @@
"ret"
};
break;
- case "CMSCondMark":
- // a card mark volatile barrier should be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
- "membar_volatile",
- "dmb ish",
- "storestore \\(elided\\)",
- "strb",
- "membar_acquire \\(elided\\)",
- "ret"
- };
- break;
- case "CMS":
- // a volatile card mark membar should not be generated
- // before the card mark strb from the StoreCM and the
- // storestore barrier from the StoreCM should be elided
- matches = new String[] {
- "membar_release \\(elided\\)",
- useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
- "storestore",
- "dmb ishst",
- "strb",
- "membar_acquire \\(elided\\)",
- "ret"
- };
- break;
case "Shenandoah":
case "ShenandoahTraversal":
matches = new String[] {
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @summary C2 should use ldar, stlr and ldaxr+stlxr insns for volatile operations
- * @library /test/lib /
- *
- * @modules java.base/jdk.internal.misc
- *
- * @requires os.arch=="aarch64" & vm.debug == true &
- * vm.flavor == "server" & !vm.graal.enabled &
- * vm.gc.ConcMarkSweep
- *
- * @build compiler.c2.aarch64.TestVolatiles
- * compiler.c2.aarch64.TestVolatileLoad
- * compiler.c2.aarch64.TestUnsafeVolatileLoad
- * compiler.c2.aarch64.TestVolatileStore
- * compiler.c2.aarch64.TestUnsafeVolatileStore
- * compiler.c2.aarch64.TestUnsafeVolatileCAS
- * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
- * compiler.c2.aarch64.TestUnsafeVolatileCAE
- * compiler.c2.aarch64.TestUnsafeVolatileGAS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestVolatileLoad CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestVolatileStore CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestUnsafeVolatileLoad CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestUnsafeVolatileStore CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestUnsafeVolatileCAS CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestUnsafeVolatileWeakCAS CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestUnsafeVolatileCAE CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- * TestUnsafeVolatileGAS CMS
- */
-
-package compiler.c2.aarch64;
-
-public class TestVolatilesCMS {
- public static void main(String args[]) throws Throwable
- {
- // delegate work to shared code
- new TestVolatiles().runtest(args[0], args[1]);
- }
-}
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @summary C2 should use ldar, stlr and ldaxr+stlxr insns for volatile operations
- * @library /test/lib /
- *
- * @modules java.base/jdk.internal.misc
- *
- * @requires os.arch=="aarch64" & vm.debug == true &
- * vm.flavor == "server" & !vm.graal.enabled &
- * vm.gc.ConcMarkSweep
- *
- * @build compiler.c2.aarch64.TestVolatiles
- * compiler.c2.aarch64.TestVolatileLoad
- * compiler.c2.aarch64.TestUnsafeVolatileLoad
- * compiler.c2.aarch64.TestVolatileStore
- * compiler.c2.aarch64.TestUnsafeVolatileStore
- * compiler.c2.aarch64.TestUnsafeVolatileCAS
- * compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
- * compiler.c2.aarch64.TestUnsafeVolatileCAE
- * compiler.c2.aarch64.TestUnsafeVolatileGAS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestVolatileLoad CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestVolatileStore CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestUnsafeVolatileLoad CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestUnsafeVolatileStore CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestUnsafeVolatileCAS CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestUnsafeVolatileWeakCAS CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestUnsafeVolatileCAE CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- * TestUnsafeVolatileGAS CMSCondMark
- */
-
-package compiler.c2.aarch64;
-
-public class TestVolatilesCMSCondMark {
- public static void main(String args[]) throws Throwable
- {
- // delegate work to shared code
- new TestVolatiles().runtest(args[0], args[1]);
- }
-}
--- a/test/hotspot/jtreg/gc/TestAgeOutput.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestAgeOutput.java Wed Nov 13 11:37:29 2019 +0100
@@ -48,19 +48,6 @@
* @run main/othervm -XX:+UseG1GC gc.TestAgeOutput UseG1GC
*/
-/*
- * @test TestAgeOutputCMS
- * @bug 8164936
- * @key gc
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @modules java.base/jdk.internal.misc
- * @library /test/lib
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestAgeOutput UseConcMarkSweepGC
- */
-
import sun.hotspot.WhiteBox;
import java.util.regex.Matcher;
--- a/test/hotspot/jtreg/gc/TestFullGCCount.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestFullGCCount.java Wed Nov 13 11:37:29 2019 +0100
@@ -26,8 +26,7 @@
/**
* @test TestFullGCCount.java
* @bug 7072527
- * @summary CMS: JMM GC counters overcount in some cases
- * @requires !(vm.gc == "ConcMarkSweep" & vm.opt.ExplicitGCInvokesConcurrent == true)
+ * @summary JMM GC counters overcount in some cases
* @comment Shenandoah has "ExplicitGCInvokesConcurrent" on by default
* @requires !(vm.gc == "Shenandoah" & vm.opt.ExplicitGCInvokesConcurrent != false)
* @modules java.management
@@ -41,7 +40,7 @@
import java.util.List;
/*
- * Originally for a specific failure in CMS, this test now monitors all
+ * Originally for a specific failure in CMS, this test now monitors all
* collectors for double-counting of collections.
*/
public class TestFullGCCount {
--- a/test/hotspot/jtreg/gc/TestGenerationPerfCounter.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestGenerationPerfCounter.java Wed Nov 13 11:37:29 2019 +0100
@@ -63,19 +63,6 @@
* @run main/othervm -XX:+UsePerfData -XX:+UseG1GC gc.TestGenerationPerfCounter
*/
-/* @test TestGenerationPerfCounterCMS
- * @bug 8080345
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib /
- * @summary Tests that the sun.gc.policy.generations returns 2 for all GCs.
- * @modules java.base/jdk.internal.misc
- * java.compiler
- * java.management/sun.management
- * jdk.internal.jvmstat/sun.jvmstat.monitor
- * @run main/othervm -XX:+UsePerfData -XX:+UseConcMarkSweepGC gc.TestGenerationPerfCounter
- */
-
public class TestGenerationPerfCounter {
public static void main(String[] args) throws Exception {
long numGenerations =
--- a/test/hotspot/jtreg/gc/TestMemoryInitializationWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc;
-
-/*
- * @test TestMemoryInitializationWithCMS
- * @key gc
- * @bug 4668531
- * @library /
- * @requires vm.debug & vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Simple test for -XX:+CheckMemoryInitialization doesn't crash VM
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CheckMemoryInitialization gc.TestMemoryInitializationWithCMS
- */
-
-public class TestMemoryInitializationWithCMS {
-
- public static void main(String args[]) {
- TestMemoryInitialization.main(args);
- }
-}
--- a/test/hotspot/jtreg/gc/TestMemoryMXBeansAndPoolsPresence.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestMemoryMXBeansAndPoolsPresence.java Wed Nov 13 11:37:29 2019 +0100
@@ -58,16 +58,6 @@
* @run main/othervm -XX:+UseSerialGC gc.TestMemoryMXBeansAndPoolsPresence Serial
*/
-/* @test TestMemoryMXBeansAndPoolsPresenceCMS
- * @bug 8191564
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * java.management
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestMemoryMXBeansAndPoolsPresence CMS
- */
-
class GCBeanDescription {
public String name;
public String[] poolNames;
@@ -108,10 +98,6 @@
test(new GCBeanDescription("G1 Young Generation", new String[] {"G1 Eden Space", "G1 Survivor Space", "G1 Old Gen"}),
new GCBeanDescription("G1 Old Generation", new String[] {"G1 Eden Space", "G1 Survivor Space", "G1 Old Gen"}));
break;
- case "CMS":
- test(new GCBeanDescription("ParNew", new String[] {"Par Eden Space", "Par Survivor Space"}),
- new GCBeanDescription("ConcurrentMarkSweep", new String[] {"Par Eden Space", "Par Survivor Space", "CMS Old Gen"}));
- break;
case "Parallel":
test(new GCBeanDescription("PS Scavenge", new String[] {"PS Eden Space", "PS Survivor Space"}),
new GCBeanDescription("PS MarkSweep", new String[] {"PS Eden Space", "PS Survivor Space", "PS Old Gen"}));
--- a/test/hotspot/jtreg/gc/TestNumWorkerOutput.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestNumWorkerOutput.java Wed Nov 13 11:37:29 2019 +0100
@@ -36,19 +36,6 @@
* @run main/othervm -XX:+UseG1GC gc.TestNumWorkerOutput UseG1GC
*/
-/*
- * @test TestNumWorkerOutputCMS
- * @bug 8165292
- * @key gc
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @modules java.base/jdk.internal.misc
- * @library /test/lib
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestNumWorkerOutput UseConcMarkSweepGC
- */
-
import sun.hotspot.WhiteBox;
import java.util.regex.Matcher;
--- a/test/hotspot/jtreg/gc/TestPolicyNamePerfCounter.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestPolicyNamePerfCounter.java Wed Nov 13 11:37:29 2019 +0100
@@ -63,19 +63,6 @@
* @run main/othervm -XX:+UsePerfData -XX:+UseG1GC gc.TestPolicyNamePerfCounter GarbageFirst
*/
-/* @test TestPolicyNamePerfCounterCMS
- * @bug 8210192
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib /
- * @summary Tests that sun.gc.policy.name returns expected values for different GCs.
- * @modules java.base/jdk.internal.misc
- * java.compiler
- * java.management/sun.management
- * jdk.internal.jvmstat/sun.jvmstat.monitor
- * @run main/othervm -XX:+UsePerfData -XX:+UseConcMarkSweepGC gc.TestPolicyNamePerfCounter ParNew:CMS
- */
-
public class TestPolicyNamePerfCounter {
public static void main(String[] args) throws Exception {
if (args.length != 1) {
--- a/test/hotspot/jtreg/gc/TestSmallHeap.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestSmallHeap.java Wed Nov 13 11:37:29 2019 +0100
@@ -91,12 +91,8 @@
noneGCSupported = false;
verifySmallHeapSize("-XX:+UseG1GC", expectedMaxHeap);
}
- if (GC.ConcMarkSweep.isSupported()) {
- noneGCSupported = false;
- verifySmallHeapSize("-XX:+UseConcMarkSweepGC", expectedMaxHeap);
- }
if (noneGCSupported) {
- throw new SkippedException("Skipping test because none of Parallel/Serial/G1/ConcMarkSweep is supported.");
+ throw new SkippedException("Skipping test because none of Parallel/Serial/G1 is supported.");
}
}
--- a/test/hotspot/jtreg/gc/TestSystemGC.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/TestSystemGC.java Wed Nov 13 11:37:29 2019 +0100
@@ -52,15 +52,6 @@
*/
/*
- * @test TestSystemGCCMS
- * @key gc
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestSystemGC
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent gc.TestSystemGC
- */
-
-/*
* @test TestSystemGCShenandoah
* @key gc
* @requires vm.gc.Shenandoah & !vm.graal.enabled
--- a/test/hotspot/jtreg/gc/arguments/GCTypes.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/GCTypes.java Wed Nov 13 11:37:29 2019 +0100
@@ -70,7 +70,6 @@
public static enum YoungGCType implements GCType {
DefNew("Copy"),
- ParNew("ParNew"),
PSNew("PS Scavenge"),
G1("G1 Young Generation");
@@ -95,7 +94,6 @@
public static enum OldGCType implements GCType {
Serial("MarkSweepCompact"),
- CMS("ConcurrentMarkSweep"),
PSOld("PS MarkSweep"),
G1("G1 Old Generation");
--- a/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java Wed Nov 13 11:37:29 2019 +0100
@@ -54,16 +54,6 @@
*/
/**
- * @test TestAlignmentToUseLargePagesCMS
- * @key gc regression
- * @bug 8024396
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @run main/othervm -Xms71M -Xmx91M -XX:+UseConcMarkSweepGC -XX:+UseLargePages gc.arguments.TestAlignmentToUseLargePages
- * @run main/othervm -Xms71M -Xmx91M -XX:+UseConcMarkSweepGC -XX:-UseLargePages gc.arguments.TestAlignmentToUseLargePages
- */
-
-/**
* @test TestAlignmentToUseLargePagesShenandoah
* @key gc
* @bug 8024396
--- a/test/hotspot/jtreg/gc/arguments/TestCMSHeapSizeFlags.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.arguments;
-
-/*
- * @test TestCMSHeapSizeFlags
- * @key gc
- * @bug 8006088
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Tests argument processing for initial and maximum heap size for the CMS collector
- * @library /test/lib
- * @library /
- * @modules java.base/jdk.internal.misc
- * java.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm gc.arguments.TestCMSHeapSizeFlags
- * @author thomas.schatzl@oracle.com
- */
-
-public class TestCMSHeapSizeFlags {
-
- public static void main(String args[]) throws Exception {
- final String gcName = "-XX:+UseConcMarkSweepGC";
-
- TestMaxHeapSizeTools.checkMinInitialMaxHeapFlags(gcName);
-
- TestMaxHeapSizeTools.checkGenMaxHeapErgo(gcName);
- }
-}
-
--- a/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java Wed Nov 13 11:37:29 2019 +0100
@@ -36,7 +36,6 @@
* @run driver gc.arguments.TestDisableDefaultGC
*/
-import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
public class TestDisableDefaultGC {
@@ -45,7 +44,6 @@
ProcessBuilder pb = GCArguments.createJavaProcessBuilder("-XX:-UseSerialGC",
"-XX:-UseParallelGC",
"-XX:-UseG1GC",
- "-XX:-UseConcMarkSweepGC",
"-XX:+UnlockExperimentalVMOptions",
"-XX:-UseShenandoahGC",
"-XX:-UseZGC",
--- a/test/hotspot/jtreg/gc/arguments/TestMaxNewSize.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestMaxNewSize.java Wed Nov 13 11:37:29 2019 +0100
@@ -68,19 +68,6 @@
* @author thomas.schatzl@oracle.com, jesper.wilhelmsson@oracle.com
*/
-/*
- * @test TestMaxNewSizeCMS
- * @key gc
- * @bug 7057939
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @library /
- * @modules java.base/jdk.internal.misc
- * java.management
- * @run main gc.arguments.TestMaxNewSize -XX:+UseConcMarkSweepGC
- */
-
import java.util.regex.Matcher;
import java.util.regex.Pattern;
--- a/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java Wed Nov 13 11:37:29 2019 +0100
@@ -101,7 +101,6 @@
int expectedRatio = Integer.valueOf(args[0]);
switch (GCTypes.YoungGCType.getYoungGCType()) {
case DefNew:
- case ParNew:
verifyDefNewNewRatio(expectedRatio);
break;
case PSNew:
--- a/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java Wed Nov 13 11:37:29 2019 +0100
@@ -306,7 +306,6 @@
public static long alignGenSize(long value) {
switch (YOUNG_GC_TYPE) {
case DefNew:
- case ParNew:
return HeapRegionUsageTool.alignDown(value, HEAP_SPACE_ALIGNMENT);
case PSNew:
return HeapRegionUsageTool.alignUp(HeapRegionUsageTool.alignDown(value,
--- a/test/hotspot/jtreg/gc/arguments/TestParallelGCThreads.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestParallelGCThreads.java Wed Nov 13 11:37:29 2019 +0100
@@ -80,7 +80,7 @@
}
public static void testFlags() throws Exception {
- // For each parallel collector (G1, Parallel, ParNew/CMS)
+ // For each parallel collector (G1, Parallel)
List<String> supportedGC = new ArrayList<String>();
if (GC.G1.isSupported()) {
@@ -89,12 +89,9 @@
if (GC.Parallel.isSupported()) {
supportedGC.add("Parallel");
}
- if (GC.ConcMarkSweep.isSupported()) {
- supportedGC.add("ConcMarkSweep");
- }
if (supportedGC.isEmpty()) {
- throw new SkippedException("Skipping test because none of G1/Parallel/ConcMarkSweep is supported.");
+ throw new SkippedException("Skipping test because none of G1/Parallel is supported.");
}
for (String gc : supportedGC) {
--- a/test/hotspot/jtreg/gc/arguments/TestParallelRefProc.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestParallelRefProc.java Wed Nov 13 11:37:29 2019 +0100
@@ -51,10 +51,6 @@
noneGCSupported = false;
testFlag(new String[] { "-XX:+UseSerialGC" }, false);
}
- if (GC.ConcMarkSweep.isSupported()) {
- noneGCSupported = false;
- testFlag(new String[] { "-XX:+UseConcMarkSweepGC" }, false);
- }
if (GC.Parallel.isSupported()) {
noneGCSupported = false;
testFlag(new String[] { "-XX:+UseParallelGC" }, false);
@@ -66,7 +62,7 @@
testFlag(new String[] { "-XX:+UseG1GC", "-XX:-ParallelRefProcEnabled", "-XX:ParallelGCThreads=2" }, false);
}
if (noneGCSupported) {
- throw new SkippedException("Skipping test because none of Serial/ConcMarkSweep/Parallel/G1 is supported.");
+ throw new SkippedException("Skipping test because none of Serial/Parallel/G1 is supported.");
}
}
--- a/test/hotspot/jtreg/gc/arguments/TestSelectDefaultGC.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestSelectDefaultGC.java Wed Nov 13 11:37:29 2019 +0100
@@ -64,8 +64,6 @@
assertVMOption(output, "UseG1GC", isServer);
// Serial is default for non-server class machines
assertVMOption(output, "UseSerialGC", !isServer);
- // CMS is never default
- assertVMOption(output, "UseConcMarkSweepGC", false);
}
public static void main(String[] args) throws Exception {
--- a/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java Wed Nov 13 11:37:29 2019 +0100
@@ -126,7 +126,6 @@
GCTypes.YoungGCType type = GCTypes.YoungGCType.getYoungGCType();
switch (type) {
case DefNew:
- case ParNew:
verifyDefNewSurvivorRatio(expectedRatio);
break;
case PSNew:
--- a/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java Wed Nov 13 11:37:29 2019 +0100
@@ -73,22 +73,6 @@
*/
/*
- * @test TestUseCompressedOopsErgoCMS
- * @key gc
- * @bug 8010722
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @library /
- * @modules java.base/jdk.internal.misc
- * java.management/sun.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm gc.arguments.TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
- */
-
-/*
* @test TestUseCompressedOopsErgoShenandoah
* @key gc
* @bug 8010722
--- a/test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.class_unloading;
-
-/*
- * @test
- * @key gc
- * @bug 8049831
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * java.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run driver gc.class_unloading.TestCMSClassUnloadingEnabledHWM
- * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
- */
-
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import sun.hotspot.WhiteBox;
-
-public class TestCMSClassUnloadingEnabledHWM {
- private static long MetaspaceSize = 32 * 1024 * 1024;
- private static long YoungGenSize = 32 * 1024 * 1024;
-
- private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
- "-Xbootclasspath/a:.",
- "-XX:+UnlockDiagnosticVMOptions",
- "-XX:+WhiteBoxAPI",
- "-Xmx128m",
- "-XX:CMSMaxAbortablePrecleanTime=1",
- "-XX:CMSWaitDuration=50",
- "-XX:MetaspaceSize=" + MetaspaceSize,
- "-Xmn" + YoungGenSize,
- "-XX:+UseConcMarkSweepGC",
- "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
- "-Xlog:gc",
- TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
- "" + MetaspaceSize);
- return new OutputAnalyzer(pb.start());
- }
-
- public static OutputAnalyzer runWithCMSClassUnloading() throws Exception {
- return run(true);
- }
-
- public static OutputAnalyzer runWithoutCMSClassUnloading() throws Exception {
- return run(false);
- }
-
- public static void testWithoutCMSClassUnloading() throws Exception {
- // -XX:-CMSClassUnloadingEnabled is used, so we expect a full GC instead of a concurrent cycle.
- OutputAnalyzer out = runWithoutCMSClassUnloading();
-
- out.shouldMatch(".*Pause Full.*");
- out.shouldNotMatch(".*Pause Initial Mark.*");
- }
-
- public static void testWithCMSClassUnloading() throws Exception {
- // -XX:+CMSClassUnloadingEnabled is used, so we expect a concurrent cycle instead of a full GC.
- OutputAnalyzer out = runWithCMSClassUnloading();
-
- out.shouldMatch(".*Pause Initial Mark.*");
- out.shouldNotMatch(".*Pause Full.*");
- }
-
- public static void main(String args[]) throws Exception {
- testWithCMSClassUnloading();
- testWithoutCMSClassUnloading();
- }
-
- public static class AllocateBeyondMetaspaceSize {
- public static void main(String [] args) throws Exception {
- if (args.length != 1) {
- throw new IllegalArgumentException("Usage: <MetaspaceSize>");
- }
-
- WhiteBox wb = WhiteBox.getWhiteBox();
-
- // Allocate past the MetaspaceSize limit.
- long metaspaceSize = Long.parseLong(args[0]);
- long allocationBeyondMetaspaceSize = metaspaceSize * 2;
- long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
- // Wait for at least one GC to occur. The caller will parse the log files produced.
- GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
- while (cmsGCBean.getCollectionCount() == 0) {
- Thread.sleep(100);
- }
-
- wb.freeMetaspace(null, metaspace, metaspace);
- }
-
- private static GarbageCollectorMXBean getCMSGCBean() {
- for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
- if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
- return gcBean;
- }
- }
- return null;
- }
- }
-}
-
--- a/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java Wed Nov 13 11:37:29 2019 +0100
@@ -81,24 +81,6 @@
*/
/*
- * @test TestClassUnloadingDisabledCMS
- * @key gc
- * @bug 8114823
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @requires vm.opt.ClassUnloading != true
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * java.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- * -XX:-ClassUnloading -XX:+UseConcMarkSweepGC gc.class_unloading.TestClassUnloadingDisabled
- */
-
-/*
* @test TestClassUnloadingDisabledShenandoah
* @key gc
* @bug 8114823
--- a/test/hotspot/jtreg/gc/cms/DisableResizePLAB.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-import static java.lang.ref.Reference.reachabilityFence;
-
-/*
- * @test DisableResizePLAB
- * @key gc
- * @bug 8060467
- * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -Xlog:gc=debug gc.cms.DisableResizePLAB
- */
-
-public class DisableResizePLAB {
- public static void main(String args[]) throws Exception {
- Object garbage[] = new Object[1_000];
- for (int i = 0; i < garbage.length; i++) {
- garbage[i] = new byte[0];
- }
- long startTime = System.currentTimeMillis();
- while (System.currentTimeMillis() - startTime < 10_000) {
- reachabilityFence(new byte[1024]);
- }
- }
-}
--- a/test/hotspot/jtreg/gc/cms/GuardShrinkWarning.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/**
- * @test GuardShrinkWarning
- * @key gc regression
- * @summary Remove warning about CMS generation shrinking.
- * @bug 8012111
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * java.management
- * @run main/othervm gc.cms.GuardShrinkWarning
- * @author jon.masamitsu@oracle.com
- */
-
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-
-public class GuardShrinkWarning {
- public static void main(String args[]) throws Exception {
-
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
- "-showversion",
- "-XX:+UseConcMarkSweepGC",
- "-XX:+ExplicitGCInvokesConcurrent",
- SystemGCCaller.class.getName()
- );
-
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
- output.shouldNotContain("Shrinking of CMS not yet implemented");
-
- output.shouldNotContain("error");
-
- output.shouldHaveExitValue(0);
- }
- static class SystemGCCaller {
- public static void main(String [] args) {
- System.gc();
- }
- }
-}
--- a/test/hotspot/jtreg/gc/cms/TestBubbleUpRef.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.util.LinkedList;
-import java.util.ListIterator;
-
-/*
- * @test
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @key cte_test
- * @bug 4950157
- * @summary Stress the behavior of ergonomics when the heap is nearly full and
- * stays nearly full.
- * @run main/othervm
- * -XX:+UseConcMarkSweepGC -XX:-CMSYield -XX:-CMSPrecleanRefLists1
- * -XX:CMSInitiatingOccupancyFraction=0 -Xmx80m gc.cms.TestBubbleUpRef 16000 50 10000
- */
-
-/**
- * Test program to stress the behavior of ergonomics when the
- * heap is nearly full and stays nearly full.
- * This is a test to catch references that have been discovered
- * during concurrent marking and whose referents have been
- * cleared by the mutator.
- * Allocate objects with weak references until the heap is full
- * Free the objects.
- * Do work so that concurrent marking has a chance to work
- * Clear the referents out of the weak references
- * System.gc() in the hopes that it will acquire the collection
- * Free the weak references
- * Do it again.
- *
- * Use the following VM options
- * -Xmx80m -XX:-CMSYield [-XX:+UseConcMarkSweepGC] -XX:-CMSPrecleanRefLists1
- * -XX:CMSInitiatingOccupancyFraction=0
- *
- * Use parameter:
- * args[0] - array size (16000)
- * args[1] - iterations (50)
- * args[2] - work (10000)
- */
-class MyList extends LinkedList {
-
- int[] a;
-
- MyList(int size) {
- a = new int[size];
- }
-}
-
-class MyRefList extends LinkedList {
-
- WeakReference ref;
-
- MyRefList(Object o, ReferenceQueue rq) {
- ref = new WeakReference(o, rq);
- }
-
- void clearReferent() {
- ref.clear();
- }
-}
-
-public class TestBubbleUpRef {
-
- MyList list;
- MyRefList refList;
- ReferenceQueue rq;
- int refListLen;
- int arraySize;
- int iterations;
- int workUnits;
-
- TestBubbleUpRef(int as, int cnt, int wk) {
- arraySize = as;
- iterations = cnt;
- workUnits = wk;
- list = new MyList(arraySize);
- refList = new MyRefList(list, rq);
- }
-
- public void fill() {
- System.out.println("fill() " + iterations + " times");
- int count = 0;
- while (true) {
- try {
- // Allocations
- MyList next = new MyList(arraySize);
- list.add(next);
- MyRefList nextRef = new MyRefList(next, rq);
- refList.add(nextRef);
- } catch (OutOfMemoryError e) {
- // When the heap is full
- try {
- if (count++ > iterations) {
- return;
- }
- System.out.println("Freeing list");
- while (!list.isEmpty()) {
- list.removeFirst();
- }
- System.out.println("Doing work");
- int j = 0;
- for (int i = 1; i < workUnits; i++) {
- j = j + i;
- }
- System.out.println("Clearing refs");
- ListIterator listIt = refList.listIterator();
- while (listIt.hasNext()) {
- MyRefList next = (MyRefList) listIt.next();
- next.clearReferent();
- }
- System.gc();
- System.out.println("Freeing refs");
- while (!refList.isEmpty()) {
- refList.removeFirst();
- }
- } catch (OutOfMemoryError e2) {
- System.err.println("Out of Memory - 2 ");
- continue;
- }
- } catch (Exception e) {
- System.err.println("Unexpected exception: " + e);
- return;
- }
- }
- }
-
- /**
- * Test entry point.
- * args[0] - array size (is the size of the int array in a list item)
- * args[1] - iterations (is the number of out-of-memory exceptions before exit)
- * args[2] - work (is the work done between allocations)
- * @param args
- */
- public static void main(String[] args) {
- // Get the input parameters.
- if (args.length != 3) {
- throw new IllegalArgumentException("Wrong number of input argumets");
- }
-
- int as = Integer.parseInt(args[0]);
- int cnt = Integer.parseInt(args[1]);
- int work = Integer.parseInt(args[2]);
-
- System.out.println("<array size> " + as + "\n"
- + "<OOM's> " + cnt + "\n"
- + "<work units> " + work + "\n");
-
- // Initialization
- TestBubbleUpRef b = new TestBubbleUpRef(as, cnt, work);
-
- // Run the test
- try {
- b.fill();
- } catch (OutOfMemoryError e) {
- b = null; // Free memory before trying to print anything
- System.err.println("Out of Memory - exiting ");
- } catch (Exception e) {
- System.err.println("Exiting ");
- }
- }
-}
-
--- a/test/hotspot/jtreg/gc/cms/TestCMSScavengeBeforeRemark.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/*
- * @test TestCMSScavengeBeforeRemark
- * @key gc
- * @bug 8139868
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Run CMS with CMSScavengeBeforeRemark
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CMSScavengeBeforeRemark -XX:+ExplicitGCInvokesConcurrent -Xmx256m -Xlog:gc=debug gc.cms.TestCMSScavengeBeforeRemark
- */
-
-public class TestCMSScavengeBeforeRemark {
- public static void main(String args[]) throws Exception {
- System.gc();
- }
-}
--- a/test/hotspot/jtreg/gc/cms/TestCriticalPriority.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/*
- * @test TestCriticalPriority
- * @key gc
- * @bug 8217378
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Test critical priority is accepted
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+UnlockExperimentalVMOptions -XX:+UseCriticalCMSThreadPriority gc.cms.TestCriticalPriority
- */
-
-public class TestCriticalPriority {
- public static void main(String args[]) throws Exception {
- // The failure would be detected before entering main().
- }
-}
--- a/test/hotspot/jtreg/gc/cms/TestMBeanCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/*
- * @test TestMBeanCMS.java
- * @bug 6581734
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary CMS Old Gen's collection usage is zero after GC which is incorrect
- * @modules java.management
- * @run main/othervm -Xmx512m -verbose:gc -XX:+UseConcMarkSweepGC gc.cms.TestMBeanCMS
- *
- */
-
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryPoolMXBean;
-import java.util.LinkedList;
-import java.util.List;
-
-// 6581734 states that memory pool usage via the mbean is wrong
-// for CMS (zero, even after a collection).
-//
-// 6580448 states that the collection count similarly is wrong
-// (stays at zero for CMS collections)
-// -- closed as dup of 6581734 as the same fix resolves both.
-
-
-public class TestMBeanCMS {
-
- private String poolName = "CMS";
- private String collectorName = "ConcurrentMarkSweep";
-
- public static void main(String [] args) {
-
- TestMBeanCMS t = null;
- if (args.length==2) {
- t = new TestMBeanCMS(args[0], args[1]);
- } else {
- System.out.println("Defaulting to monitor CMS pool and collector.");
- t = new TestMBeanCMS();
- }
- t.run();
- }
-
- public TestMBeanCMS(String pool, String collector) {
- poolName = pool;
- collectorName = collector;
- }
-
- public TestMBeanCMS() {
- }
-
- public void run() {
- // Use some memory, enough that we expect collections should
- // have happened.
- // Must run with options to ensure no stop the world full GC,
- // but e.g. at least one CMS cycle.
- allocationWork(300*1024*1024);
- System.out.println("Done allocationWork");
-
- // Verify some non-zero results are stored.
- List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
- int poolsFound = 0;
- int poolsWithStats = 0;
- for (int i=0; i<pools.size(); i++) {
- MemoryPoolMXBean pool = pools.get(i);
- String name = pool.getName();
- System.out.println("found pool: " + name);
-
- if (name.contains(poolName)) {
- long usage = pool.getCollectionUsage().getUsed();
- System.out.println(name + ": usage after GC = " + usage);
- poolsFound++;
- if (usage > 0) {
- poolsWithStats++;
- }
- }
- }
- if (poolsFound == 0) {
- throw new RuntimeException("No matching memory pools found: test with -XX:+UseConcMarkSweepGC");
- }
-
- List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
- int collectorsFound = 0;
- int collectorsWithTime= 0;
- for (int i=0; i<collectors.size(); i++) {
- GarbageCollectorMXBean collector = collectors.get(i);
- String name = collector.getName();
- System.out.println("found collector: " + name);
- if (name.contains(collectorName)) {
- collectorsFound++;
- System.out.println(name + ": collection count = "
- + collector.getCollectionCount());
- System.out.println(name + ": collection time = "
- + collector.getCollectionTime());
- if (collector.getCollectionCount() <= 0) {
- throw new RuntimeException("collection count <= 0");
- }
- if (collector.getCollectionTime() > 0) {
- collectorsWithTime++;
- }
- }
- }
- // verify:
- if (poolsWithStats < poolsFound) {
- throw new RuntimeException("pools found with zero stats");
- }
-
- if (collectorsWithTime<collectorsFound) {
- throw new RuntimeException("collectors found with zero time");
- }
- System.out.println("Test passed.");
- }
-
- public void allocationWork(long target) {
-
- long sizeAllocated = 0;
- List<byte[]> list = new LinkedList<>();
- long delay = 50;
- long count = 0;
-
- while (sizeAllocated < target) {
- int size = 1024*1024;
- byte [] alloc = new byte[size];
- if (count % 2 == 0) {
- list.add(alloc);
- sizeAllocated+=size;
- System.out.print(".");
- }
- try { Thread.sleep(delay); } catch (InterruptedException ie) { }
- count++;
- }
- }
-
-}
--- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.concurrent_phase_control;
-
-/*
- * @test TestConcurrentPhaseControlCMS
- * @bug 8169517
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Verify CMS GC doesn't support WhiteBox concurrent phase control.
- * @key gc
- * @modules java.base
- * @library /test/lib /
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -XX:+UseConcMarkSweepGC
- * -Xbootclasspath/a:.
- * -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- * gc.concurrent_phase_control.TestConcurrentPhaseControlCMS
- */
-
-import gc.concurrent_phase_control.CheckUnsupported;
-
-public class TestConcurrentPhaseControlCMS {
-
- public static void main(String[] args) throws Exception {
- CheckUnsupported.check("CMS");
- }
-}
--- a/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java Wed Nov 13 11:37:29 2019 +0100
@@ -44,11 +44,6 @@
public static void main(String[] args) throws Exception {
boolean noneGCSupported = true;
- if (GC.ConcMarkSweep.isSupported()) {
- noneGCSupported = false;
- testDynamicNumberOfGCThreads("UseConcMarkSweepGC");
- }
-
if (GC.G1.isSupported()) {
noneGCSupported = false;
testDynamicNumberOfGCThreads("UseG1GC");
@@ -65,7 +60,7 @@
}
if (noneGCSupported) {
- throw new SkippedException("Skipping test because none of ConcMarkSweep/G1/Parallel/Shenandoah is supported.");
+ throw new SkippedException("Skipping test because none of G1/Parallel/Shenandoah is supported.");
}
}
--- a/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java Wed Nov 13 11:37:29 2019 +0100
@@ -44,11 +44,6 @@
public static void main(String[] args) throws Exception {
boolean noneGCSupported = true;
- if (GC.ConcMarkSweep.isSupported()) {
- noneGCSupported = false;
- testInitialGCThreadLogging("UseConcMarkSweepGC", "GC Thread");
- }
-
if (GC.G1.isSupported()) {
noneGCSupported = false;
testInitialGCThreadLogging("UseG1GC", "GC Thread");
@@ -65,7 +60,7 @@
}
if (noneGCSupported) {
- throw new SkippedException("Skipping test because none of ConcMarkSweep/G1/Parallel/Shenandoah is supported.");
+ throw new SkippedException("Skipping test because none of G1/Parallel/Shenandoah is supported.");
}
}
--- a/test/hotspot/jtreg/gc/logging/TestGCId.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/logging/TestGCId.java Wed Nov 13 11:37:29 2019 +0100
@@ -53,10 +53,6 @@
noneGCSupported = false;
testGCId("UseG1GC");
}
- if (GC.ConcMarkSweep.isSupported()) {
- noneGCSupported = false;
- testGCId("UseConcMarkSweepGC");
- }
if (GC.Serial.isSupported()) {
noneGCSupported = false;
testGCId("UseSerialGC");
@@ -67,7 +63,7 @@
}
if (noneGCSupported) {
- throw new SkippedException("Skipping test because none of Parallel/G1/ConcMarkSweep/Serial/Shenandoah is supported.");
+ throw new SkippedException("Skipping test because none of Parallel/G1/Serial/Shenandoah is supported.");
}
}
--- a/test/hotspot/jtreg/gc/metaspace/TestMetaspaceCMSCancel.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.metaspace;
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.Asserts;
-import sun.hotspot.WhiteBox;
-
-/* @test TestMetaspaceCMSCancel
- * @bug 8026752
- * @summary Tests cancel of CMS concurrent cycle for Metaspace after a full GC
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm gc.metaspace.TestMetaspaceCMSCancel
- */
-
-
-public class TestMetaspaceCMSCancel {
-
- public static void main(String[] args) throws Exception {
- // Set a small MetaspaceSize so that a CMS concurrent collection will be
- // scheduled. Set CMSWaitDuration to 5s so that the concurrent collection
- // start may be delayed. It does not guarantee 5s before the start of the
- // concurrent collection but does increase the probability that it will
- // be started later. System.gc() is used to invoke a full collection. Set
- // ExplicitGCInvokesConcurrent to off so it is a STW collection.
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xbootclasspath/a:.",
- "-XX:+UnlockDiagnosticVMOptions",
- "-XX:+WhiteBoxAPI",
- "-XX:+UseConcMarkSweepGC",
- "-XX:MetaspaceSize=2m",
- "-XX:CMSWaitDuration=5000",
- "-XX:-ExplicitGCInvokesConcurrent",
- "-Xlog:gc*=debug",
- MetaspaceGCTest.class.getName());
-
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldNotContain("Concurrent Reset");
- output.shouldHaveExitValue(0);
- }
-
- static class MetaspaceGCTest {
- public static void main(String [] args) {
- WhiteBox wb = WhiteBox.getWhiteBox();
- System.gc();
- Asserts.assertFalse(wb.metaspaceShouldConcurrentCollect());
- }
- }
-}
--- a/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java Wed Nov 13 11:37:29 2019 +0100
@@ -58,15 +58,6 @@
* @run driver gc.metaspace.TestSizeTransitions true -XX:+UseG1GC
*/
-/* @test TestSizeTransitionsCMS
- * @key gc
- * @requires vm.gc.ConcMarkSweep
- * @summary Tests that the metaspace size transition logging is done correctly.
- * @library /test/lib
- * @run driver gc.metaspace.TestSizeTransitions false -XX:+UseConcMarkSweepGC
- * @run driver gc.metaspace.TestSizeTransitions true -XX:+UseConcMarkSweepGC
- */
-
public class TestSizeTransitions {
public static class Run {
public static void main(String... args) throws Exception {
--- a/test/hotspot/jtreg/gc/startup_warnings/TestCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.startup_warnings;
-
-/*
- * @test TestCMS
- * @key gc
- * @bug 8006398 8155948 8179013
- * @summary Test that CMS prints a warning message
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * java.management
- * @run main gc.startup_warnings.TestCMS
- */
-
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.lib.process.OutputAnalyzer;
-
-public class TestCMS {
-
- public static void runTest(String[] args) throws Exception {
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldContain("deprecated");
- output.shouldNotContain("error");
- output.shouldHaveExitValue(0);
- }
-
- public static void main(String args[]) throws Exception {
- runTest(new String[] {"-XX:+UseConcMarkSweepGC", "-version"});
- runTest(new String[] {"-Xconcgc", "-version"});
- runTest(new String[] {"-Xnoconcgc", "-version"});
- }
-
-}
--- a/test/hotspot/jtreg/gc/stress/TestReclaimStringsLeaksMemory.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/stress/TestReclaimStringsLeaksMemory.java Wed Nov 13 11:37:29 2019 +0100
@@ -35,7 +35,6 @@
* @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseSerialGC
* @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseParallelGC
* @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseParallelGC -XX:-UseParallelOldGC
- * @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseConcMarkSweepGC
* @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseG1GC
*/
--- a/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.gcbasher;
-
-import java.io.IOException;
-
-/*
- * @test TestGCBasherWithCMS
- * @key gc stress
- * @library /
- * @requires vm.gc.ConcMarkSweep
- * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
- * @summary Stress the CMS GC by trying to make old objects more likely to be garbage than young objects.
- * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx256m -server -XX:+UseConcMarkSweepGC gc.stress.gcbasher.TestGCBasherWithCMS 120000
- */
-public class TestGCBasherWithCMS {
- public static void main(String[] args) throws IOException {
- TestGCBasher.main(args);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.gclocker;
-
-/*
- * @test TestGCLockerWithCMS
- * @key gc
- * @library /
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Stress CMS' GC locker by calling GetPrimitiveArrayCritical while concurrently filling up old gen.
- * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UseConcMarkSweepGC gc.stress.gclocker.TestGCLockerWithCMS
- */
-public class TestGCLockerWithCMS {
- public static void main(String[] args) {
- String[] testArgs = {"2", "CMS Old Gen"};
- TestGCLocker.main(testArgs);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.gcold;
-
-/*
- * @test TestGCOldWithCMS
- * @key gc
- * @library /
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Stress the CMS GC by trying to make old objects more likely to be garbage than young objects.
- * @run main/othervm -Xmx384M -XX:+UseConcMarkSweepGC gc.stress.gcold.TestGCOldWithCMS 50 1 20 10 10000
- */
-public class TestGCOldWithCMS {
- public static void main(String[] args) {
- TestGCOld.main(args);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx64m jdk.jfr.event.gc.detailed.TestStressAllocationGCEventsWithCMS
- */
-public class TestStressAllocationGCEventsWithCMS {
-
- public static void main(String[] args) throws Exception {
- new StressAllocationGCEvents().run(args);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithParNew.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires vm.gc == "null" & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx64m jdk.jfr.event.gc.detailed.TestStressAllocationGCEventsWithParNew
- */
-public class TestStressAllocationGCEventsWithParNew {
-
- public static void main(String[] args) throws Exception {
- new StressAllocationGCEvents().run(args);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx256m jdk.jfr.event.gc.detailed.TestStressBigAllocationGCEventsWithCMS 1048576
- */
-public class TestStressBigAllocationGCEventsWithCMS {
-
- public static void main(String[] args) throws Exception {
- new StressAllocationGCEvents().run(args);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithParNew.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires vm.gc == "null" & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx256m jdk.jfr.event.gc.detailed.TestStressBigAllocationGCEventsWithParNew 1048576
- */
-public class TestStressBigAllocationGCEventsWithParNew {
-
- public static void main(String[] args) throws Exception {
- new StressAllocationGCEvents().run(args);
- }
-}
--- a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.systemgc;
-
-/*
- * @test TestSystemGCWithCMS
- * @key gc stress
- * @bug 8190703
- * @library /
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Stress the CMS GC full GC by allocating objects of different lifetimes concurrently with System.gc().
- * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UseConcMarkSweepGC gc.stress.systemgc.TestSystemGCWithCMS 270
- */
-public class TestSystemGCWithCMS {
- public static void main(String[] args) throws Exception {
- TestSystemGC.main(args);
- }
-}
--- a/test/hotspot/jtreg/gc/survivorAlignment/SurvivorAlignmentTestMain.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/gc/survivorAlignment/SurvivorAlignmentTestMain.java Wed Nov 13 11:37:29 2019 +0100
@@ -71,7 +71,6 @@
private static final String G1_EDEN = "G1 Eden Space";
private static final String G1_SURVIVOR = "G1 Survivor Space";
private static final String SERIAL_TENURED = "Tenured Gen";
- private static final String CMS_TENURED = "CMS Old Gen";
private static final String PS_TENURED = "PS Old Gen";
private static final String G1_TENURED = "G1 Old Gen";
@@ -79,14 +78,6 @@
SurvivorAlignmentTestMain.WHITE_BOX.getUintxVMFlag(
"G1HeapRegionSize")).orElse(-1L);
- /**
- * Min size of free chunk in CMS generation.
- * An object allocated in CMS generation will at least occupy this amount
- * of bytes.
- */
- private static final long CMS_MIN_FREE_CHUNK_SIZE
- = 3L * Unsafe.ADDRESS_SIZE;
-
private static final AlignmentHelper EDEN_SPACE_HELPER;
private static final AlignmentHelper SURVIVOR_SPACE_HELPER;
private static final AlignmentHelper TENURED_SPACE_HELPER;
@@ -125,11 +116,6 @@
* alignment in other spaces is expected to be equal to
* {@code ObjectAlignmentInBytes} value.
*
- * In CMS generation we can't allocate less then {@code MinFreeChunk} value,
- * for other CGs we expect that object of size {@code MIN_OBJECT_SIZE}
- * could be allocated as it is (of course, its size could be aligned
- * according to alignment value used in a particular space).
- *
* For G1 GC MXBeans could report memory usage only with region size
* precision (if an object allocated in some G1 heap region, then all region
* will claimed as used), so for G1's spaces precision is equal to
@@ -187,15 +173,6 @@
AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
AlignmentHelper.MIN_OBJECT_SIZE, pool);
break;
- case SurvivorAlignmentTestMain.CMS_TENURED:
- Asserts.assertNull(tenuredHelper,
- "Only one bean for tenured space is expected.");
- tenuredHelper = new AlignmentHelper(
- AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
- AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
- SurvivorAlignmentTestMain.CMS_MIN_FREE_CHUNK_SIZE,
- pool);
- break;
}
}
EDEN_SPACE_HELPER = Objects.requireNonNull(edenHelper,
--- a/test/hotspot/jtreg/runtime/7167069/PrintAsFlag.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/7167069/PrintAsFlag.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
*
* @test PrintAsFlag
* @summary verify that Flag::print_as_flag() works correctly. This is used by "jinfo -flag" and -XX:+PrintCommandLineFlags.
- * @run main/othervm -XX:+PrintCommandLineFlags -XX:-ShowMessageBoxOnError -XX:BiasedLockingStartupDelay=4000 -XX:ParallelGCThreads=4 -XX:MaxRAM=1G -XX:CMSSmallCoalSurplusPercent=1.05 -XX:ErrorFile="file" PrintAsFlag
+ * @run main/othervm -XX:+PrintCommandLineFlags -XX:-ShowMessageBoxOnError -XX:BiasedLockingStartupDelay=4000 -XX:ParallelGCThreads=4 -XX:MaxRAM=1G -XX:ErrorFile="file" PrintAsFlag
*/
public class PrintAsFlag {
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java Wed Nov 13 11:37:29 2019 +0100
@@ -219,11 +219,6 @@
excludeTestMinRange("MallocMaxTestWords");
/*
- * Exclude CMSSamplingGrain as it can cause intermittent failures on Windows
- */
- excludeTestRange("CMSSamplingGrain");
-
- /*
* Exclude below options as their maximum value would consume too much memory
* and would affect other tests that run in parallel.
*/
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java Wed Nov 13 11:37:29 2019 +0100
@@ -400,8 +400,7 @@
}
if (GCType != null &&
- !(prepend.contains("-XX:+UseConcMarkSweepGC") ||
- prepend.contains("-XX:+UseSerialGC") ||
+ !(prepend.contains("-XX:+UseSerialGC") ||
prepend.contains("-XX:+UseParallelGC") ||
prepend.contains("-XX:+UseG1GC"))) {
explicitGC = GCType;
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java Wed Nov 13 11:37:29 2019 +0100
@@ -79,9 +79,6 @@
for (GarbageCollectorMXBean gcMxBean : gcMxBeans) {
switch (gcMxBean.getName()) {
- case "ConcurrentMarkSweep":
- GCType = "-XX:+UseConcMarkSweepGC";
- break;
case "MarkSweepCompact":
GCType = "-XX:+UseSerialGC";
break;
@@ -186,10 +183,6 @@
option.addPrepend("-XX:+UseG1GC");
}
- if (name.startsWith("CMS")) {
- option.addPrepend("-XX:+UseConcMarkSweepGC");
- }
-
if (name.startsWith("NUMA")) {
option.addPrepend("-XX:+UseNUMA");
}
@@ -207,18 +200,6 @@
case "MaxMetaspaceFreeRatio":
option.addPrepend("-XX:MinMetaspaceFreeRatio=0");
break;
- case "CMSOldPLABMin":
- option.addPrepend("-XX:CMSOldPLABMax=" + option.getMax());
- break;
- case "CMSOldPLABMax":
- option.addPrepend("-XX:CMSOldPLABMin=" + option.getMin());
- break;
- case "CMSPrecleanNumerator":
- option.addPrepend("-XX:CMSPrecleanDenominator=" + option.getMax());
- break;
- case "CMSPrecleanDenominator":
- option.addPrepend("-XX:CMSPrecleanNumerator=" + ((new Integer(option.getMin())) - 1));
- break;
case "G1RefProcDrainInterval":
option.addPrepend("-XX:+ExplicitGCInvokesConcurrent");
break;
@@ -228,9 +209,6 @@
case "NUMAInterleaveGranularity":
option.addPrepend("-XX:+UseNUMAInterleaving");
break;
- case "CPUForCMSThread":
- option.addPrepend("-XX:+BindCMSThreadToCPU");
- break;
case "VerifyGCStartAt":
option.addPrepend("-XX:+VerifyBeforeGC");
option.addPrepend("-XX:+VerifyAfterGC");
--- a/test/hotspot/jtreg/runtime/CommandLine/TestNullTerminatedFlags.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/TestNullTerminatedFlags.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,6 @@
public class TestNullTerminatedFlags {
public static String[] options = {
"-Xnoclassgc",
- "-Xconcgc",
- "-Xnoconcgc",
"-Xbatch",
"-green",
"-native",
--- a/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java Wed Nov 13 11:37:29 2019 +0100
@@ -58,9 +58,6 @@
testCompressedOopsModes(args);
// Test GCs.
testCompressedOopsModes(args, "-XX:+UseG1GC");
- if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
- testCompressedOopsModes(args, "-XX:+UseConcMarkSweepGC");
- }
testCompressedOopsModes(args, "-XX:+UseSerialGC");
testCompressedOopsModes(args, "-XX:+UseParallelGC");
testCompressedOopsModes(args, "-XX:+UseParallelOldGC");
--- a/test/hotspot/jtreg/runtime/cds/appcds/CommandLineFlagCombo.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/cds/appcds/CommandLineFlagCombo.java Wed Nov 13 11:37:29 2019 +0100
@@ -50,7 +50,7 @@
// shared base address test table
private static final String[] testTable = {
- "-XX:+UseG1GC", "-XX:+UseSerialGC", "-XX:+UseParallelGC", "-XX:+UseConcMarkSweepGC",
+ "-XX:+UseG1GC", "-XX:+UseSerialGC", "-XX:+UseParallelGC",
"-XX:+FlightRecorder",
"-XX:+UseLargePages", // may only take effect on machines with large-pages
"-XX:+UseCompressedClassPointers",
@@ -123,18 +123,11 @@
}
}
- if (Compiler.isGraalEnabled() && testEntry.equals("-XX:+UseConcMarkSweepGC"))
- {
- System.out.println("Graal does not support CMS");
- return true;
- }
-
if (!WhiteBox.getWhiteBox().isJFRIncludedInVmBuild() && testEntry.equals("-XX:+FlightRecorder"))
{
System.out.println("JFR does not exist");
return true;
}
-
return false;
}
}
--- a/test/hotspot/jtreg/runtime/cds/appcds/sharedStrings/IncompatibleOptions.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/cds/appcds/sharedStrings/IncompatibleOptions.java Wed Nov 13 11:37:29 2019 +0100
@@ -112,9 +112,6 @@
// incompatible GCs
testDump(2, "-XX:+UseParallelGC", "", GC_WARNING, false);
testDump(3, "-XX:+UseSerialGC", "", GC_WARNING, false);
- if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
- testDump(4, "-XX:+UseConcMarkSweepGC", "", GC_WARNING, false);
- }
// ======= archive with compressed oops, run w/o
testDump(5, "-XX:+UseG1GC", "-XX:+UseCompressedOops", null, false);
@@ -125,9 +122,6 @@
// Still run, to ensure no crash or exception
testExec(6, "-XX:+UseParallelGC", "", "", false);
testExec(7, "-XX:+UseSerialGC", "", "", false);
- if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
- testExec(8, "-XX:+UseConcMarkSweepGC", "", "", false);
- }
// Test various oops encodings, by varying ObjectAlignmentInBytes and heap sizes
testDump(9, "-XX:+UseG1GC", "-XX:ObjectAlignmentInBytes=8", null, false);
--- a/test/hotspot/jtreg/runtime/testlibrary/ClassUnloadCommon.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/runtime/testlibrary/ClassUnloadCommon.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
}
public static void triggerUnloading() {
- allocateMemory(16 * 1024); // yg size is 8m with cms, force young collection
+ allocateMemory(16 * 1024); // force young collection
System.gc();
}
--- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorGCCMSTest.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package MyPackage;
-
-/**
- * @test
- * @summary Verifies the JVMTI Heap Monitor Statistics using CMS GC
- * @build Frame HeapMonitor
- * @requires vm.gc == "ConcMarkSweep" | vm.gc == "null"
- * @requires !vm.graal.enabled
- * @compile HeapMonitorGCCMSTest.java
- * @run main/othervm/native -agentlib:HeapMonitorTest -XX:+UseConcMarkSweepGC MyPackage.HeapMonitorGCTest
- */
--- a/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java Wed Nov 13 11:37:29 2019 +0100
@@ -64,11 +64,10 @@
// with names and the values derived from enums and #define preprocessor
// macros in hotspot.
expStrMap.put("intConstant", List.of(
- "CollectedHeap::G1 4",
+ "CollectedHeap::G1 3",
"RUNNABLE 2",
"Deoptimization::Reason_class_check 4",
"InstanceKlass::_misc_is_unsafe_anonymous 32",
- "Generation::ParNew 1",
"_thread_uninitialized 0"));
expStrMap.put("intConstant _temp_constant", List.of(
"intConstant _temp_constant 45"));
--- a/test/hotspot/jtreg/serviceability/sa/TestUniverse.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/serviceability/sa/TestUniverse.java Wed Nov 13 11:37:29 2019 +0100
@@ -63,10 +63,6 @@
expStrings.add("eden");
break;
- case ConcMarkSweep:
- expStrings.add("Gen 1: concurrent mark-sweep generation");
- break;
-
case G1:
expStrings.add("garbage-first heap");
expStrings.add("region size");
@@ -112,7 +108,7 @@
}
if (Compiler.isGraalEnabled()) {
- if (gc == GC.ConcMarkSweep || gc == GC.Epsilon || gc == GC.Z || gc == GC.Shenandoah) {
+ if (gc == GC.Epsilon || gc == GC.Z || gc == GC.Shenandoah) {
// Not supported
System.out.println ("Skipped testing of " + gc + "GC, not supported by Graal");
return false;
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java Wed Nov 13 11:37:29 2019 +0100
@@ -32,7 +32,6 @@
* @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
* @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
* @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
* @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java Wed Nov 13 11:37:29 2019 +0100
@@ -32,7 +32,6 @@
* @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
* @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
* @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
* @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java Wed Nov 13 11:37:29 2019 +0100
@@ -32,7 +32,6 @@
* @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
* @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
* @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
* @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java Wed Nov 13 11:37:29 2019 +0100
@@ -32,7 +32,6 @@
* @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
* @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
* @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
* @requires vm.gc != "Z"
* @library /vmTestbase /test/lib
* @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,9 @@
* First time, with "setVerboseMode=yes" agent mode. Second
* time, with "setVerboseMode=no" agent mode and with
* "-verbose:gc" VM option. In both cases the output is
- * searched for 'Full GC' string, unless ExplicitGCInvokesConcurrent
- * is enabled and G1 or CMS GCs are enbled. If ExplicitGCInvokesConcurrent and
- * either G1 or CMS GCs are enbled the test searches for 'GC' string in output.
+ * searched for the 'Pause Full' string, unless ExplicitGCInvokesConcurrent
+ * and G1 are both enabled. If both ExplicitGCInvokesConcurrent and
+ * G1 are enabled, the test searches for the 'GC' string in the output.
* The test fails if this string is not found in the output.
* COMMENTS
*
@@ -70,18 +70,17 @@
sun.hotspot.WhiteBox wb = sun.hotspot.WhiteBox.getWhiteBox();
Boolean isExplicitGCInvokesConcurrentOn = wb.getBooleanVMFlag("ExplicitGCInvokesConcurrent");
Boolean isUseG1GCon = wb.getBooleanVMFlag("UseG1GC");
- Boolean isUseConcMarkSweepGCon = wb.getBooleanVMFlag("UseConcMarkSweepGC");
Boolean isUseZGCon = wb.getBooleanVMFlag("UseZGC");
Boolean isShenandoahGCon = wb.getBooleanVMFlag("UseShenandoahGC");
Boolean isUseEpsilonGCon = wb.getBooleanVMFlag("UseEpsilonGC");
if (Compiler.isGraalEnabled() &&
- (isUseConcMarkSweepGCon || isUseZGCon || isUseEpsilonGCon || isShenandoahGCon)) {
+ (isUseZGCon || isUseEpsilonGCon || isShenandoahGCon)) {
return; // Graal does not support these GCs
}
String keyPhrase;
- if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon || isShenandoahGCon) {
+ if ((isExplicitGCInvokesConcurrentOn && isUseG1GCon) || isUseZGCon || isShenandoahGCon) {
keyPhrase = "GC";
} else {
keyPhrase = "Pause Full";
--- a/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,6 @@
* @test
* @bug 8028994
* @author Staffan Larsen
- * @comment Graal does not support CMS
- * @requires !vm.graal.enabled
* @library /test/lib
* @modules jdk.attach/sun.tools.attach
* jdk.management
@@ -63,7 +61,7 @@
ProcessBuilder pb = ProcessTools.
createJavaProcessBuilder(
"--add-exports", "jdk.attach/sun.tools.attach=ALL-UNNAMED",
- "-XX:+UseConcMarkSweepGC", // this will cause MaxNewSize to be FLAG_SET_ERGO
+ "-XX:+UseG1GC", // this will cause MaxNewSize to be FLAG_SET_ERGO
"-XX:+UseCodeAging",
"-XX:+UseCerealGC", // Should be ignored.
"-XX:Flags=" + flagsFile.getAbsolutePath(),
@@ -73,8 +71,7 @@
"-runtests");
Map<String, String> env = pb.environment();
- // "UseCMSGC" should be ignored.
- env.put("_JAVA_OPTIONS", "-XX:+CheckJNICalls -XX:+UseCMSGC");
+ env.put("_JAVA_OPTIONS", "-XX:+CheckJNICalls");
// "UseGOneGC" should be ignored.
env.put("JAVA_TOOL_OPTIONS", "-XX:+IgnoreUnrecognizedVMOptions "
+ "-XX:+PrintVMOptions -XX:+UseGOneGC");
@@ -110,7 +107,7 @@
checkOrigin("PrintVMQWaitTime", Origin.CONFIG_FILE);
// Set through j.l.m
checkOrigin("HeapDumpOnOutOfMemoryError", Origin.MANAGEMENT);
- // Should be set by the VM, when we set UseConcMarkSweepGC
+ // Should be set by the VM when we set UseG1GC
checkOrigin("MaxNewSize", Origin.ERGONOMIC);
// Set using attach
checkOrigin("HeapDumpPath", Origin.ATTACH_ON_DEMAND);
--- a/test/jdk/java/lang/management/GarbageCollectorMXBean/GcInfoCompositeType.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/java/lang/management/GarbageCollectorMXBean/GcInfoCompositeType.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
* @run main/othervm -XX:-ExplicitGCInvokesConcurrent GcInfoCompositeType
*/
// Passing "-XX:-ExplicitGCInvokesConcurrent" to force System.gc()
-// run on foreground when CMS is used and prevent situations when "GcInfo"
+// run on foreground when a concurrent collector is used and prevent situations when "GcInfo"
// is missing even though System.gc() was successfuly processed.
import java.util.*;
--- a/test/jdk/java/lang/management/MemoryMXBean/CollectionUsageThreshold.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/java/lang/management/MemoryMXBean/CollectionUsageThreshold.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,9 +76,6 @@
RunUtil.runTestClearGcOpts(main, "-XX:+UseSerialGC");
RunUtil.runTestClearGcOpts(main, "-XX:+UseParallelGC");
RunUtil.runTestClearGcOpts(main, "-XX:+UseG1GC");
- if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
- RunUtil.runTestClearGcOpts(main, "-XX:+UseConcMarkSweepGC");
- }
}
static class PoolRecord {
--- a/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -84,9 +84,6 @@
traceTest(classMain + ", -XX:+UseSerialGC", nmFlag, lpFlag, "-XX:+UseSerialGC");
traceTest(classMain + ", -XX:+UseParallelGC", nmFlag, lpFlag, "-XX:+UseParallelGC");
traceTest(classMain + ", -XX:+UseG1GC", nmFlag, lpFlag, "-XX:+UseG1GC", g1Flag);
- if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
- traceTest(classMain + ", -XX:+UseConcMarkSweepGC", nmFlag, lpFlag, "-XX:+UseConcMarkSweepGC");
- }
}
/*
@@ -169,15 +166,10 @@
}
static class TestListener implements NotificationListener {
- private boolean isRelaxed = false;
private int triggers = 0;
private final long[] count = new long[NUM_TRIGGERS * 2];
private final long[] usedMemory = new long[NUM_TRIGGERS * 2];
- public TestListener() {
- isRelaxed = ManagementFactory.getRuntimeMXBean().getInputArguments().contains("-XX:+UseConcMarkSweepGC");
- }
-
@Override
public void handleNotification(Notification notif, Object handback) {
MemoryNotificationInfo minfo = MemoryNotificationInfo.
@@ -212,11 +204,7 @@
}
private boolean checkValue(long value, int target) {
- if (!isRelaxed) {
- return value == target;
- } else {
- return value >= target;
- }
+ return value == target;
}
}
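Side note (not part of the patch): with the CMS-specific relaxation gone, each crossing of the collection usage threshold is expected to be reported exactly once, so checkValue can compare counts strictly. Below is a minimal, self-contained illustration of the notification plumbing this test relies on; the pool selection, 1-byte threshold, and sleep are illustrative assumptions.

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryNotificationInfo;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryType;
import javax.management.NotificationEmitter;

public class CollectionThresholdSketch {
    public static void main(String[] args) throws InterruptedException {
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            // Only heap pools that support a collection usage threshold matter here,
            // e.g. "G1 Old Gen" or "Tenured Gen" now that the CMS pools are gone.
            if (pool.getType() == MemoryType.HEAP
                    && pool.isCollectionUsageThresholdSupported()) {
                pool.setCollectionUsageThreshold(1); // illustrative 1-byte threshold
            }
        }
        NotificationEmitter emitter =
                (NotificationEmitter) ManagementFactory.getMemoryMXBean();
        emitter.addNotificationListener((notification, handback) -> {
            if (MemoryNotificationInfo.MEMORY_COLLECTION_THRESHOLD_EXCEEDED
                    .equals(notification.getType())) {
                System.out.println("collection threshold crossed: "
                        + notification.getMessage());
            }
        }, null, null);
        System.gc();          // trigger a collection so the listener has work to do
        Thread.sleep(1000);   // give the asynchronous notification time to arrive
    }
}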
--- a/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest2.sh Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest2.sh Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@
go -noclassgc -XX:MaxMetaspaceSize=32m -XX:+UseSerialGC LowMemoryTest2
go -noclassgc -XX:MaxMetaspaceSize=32m -XX:+UseParallelGC LowMemoryTest2
-go -noclassgc -XX:MaxMetaspaceSize=32m -XX:+UseConcMarkSweepGC LowMemoryTest2
# Test class metaspace - might hit MaxMetaspaceSize instead if
# UseCompressedClassPointers is off or if 32 bit.
--- a/test/jdk/java/lang/management/MemoryMXBean/MemoryManagementConcMarkSweepGC.sh Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-#
-# @test
-# @bug 4530538
-# @summary Run MemoryManagement test with concurrent mark sweep GC
-# @author Mandy Chung
-#
-# @requires (vm.gc=="ConcMarkSweep" | vm.gc=="null") & !vm.graal.enabled
-#
-# @run build MemoryManagement
-# @run shell/timeout=600 MemoryManagementConcMarkSweepGC.sh
-#
-
-#Set appropriate jdk
-
-if [ ! -z "${TESTJAVA}" ] ; then
- jdk="$TESTJAVA"
-else
- echo "--Error: TESTJAVA must be defined as the pathname of a jdk to test."
- exit 1
-fi
-
-runOne()
-{
- echo "runOne $@"
- $TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@ || exit 2
-}
-
-# Test MemoryManagement with concurrent collector
-runOne -XX:+UseConcMarkSweepGC MemoryManagement
-
-exit 0
--- a/test/jdk/java/lang/management/MemoryMXBean/PendingAllGC.sh Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/java/lang/management/MemoryMXBean/PendingAllGC.sh Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,4 @@
# Test Pending with parallel scavenger collector
runOne -XX:+UseParallelGC Pending
-# Test Pending with concurrent collector
-runOne -XX:+UseConcMarkSweepGC Pending
-
exit 0
--- a/test/jdk/java/lang/management/MemoryMXBean/ResetPeakMemoryUsage.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/java/lang/management/MemoryMXBean/ResetPeakMemoryUsage.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,9 +63,7 @@
final String main = "ResetPeakMemoryUsage$TestMain";
final String ms = "-Xms256m";
final String mn = "-Xmn8m";
- if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
- RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseConcMarkSweepGC");
- }
+
RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseParallelGC");
RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseG1GC", "-XX:G1HeapRegionSize=1m");
RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseSerialGC",
--- a/test/jdk/jdk/jfr/event/gc/collection/GCEventAll.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/gc/collection/GCEventAll.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,7 @@
// recording.stop and getBeanCollectionCount().
doSystemGc();
// Add an extra System.gc() to make sure we get at least one full garbage_collection batch at
- // the end of the test. This extra System.gc() is only necessary when using "UseConcMarkSweepGC" and "+ExplicitGCInvokesConcurrent".
+ // the end of the test. This extra System.gc() is only necessary when using "+ExplicitGCInvokesConcurrent".
doSystemGc();
recording.stop();
@@ -170,7 +170,6 @@
// For some GC configurations, the JFR recording may have stopped before we received the last gc event.
try {
- events = filterIncompleteGcBatch(events);
gcBatches = GCHelper.GcBatch.createFromEvents(events);
eventCounts = GCHelper.CollectionSummary.createFromEvents(gcBatches);
@@ -191,41 +190,6 @@
}
}
- /**
- * When using collector ConcurrentMarkSweep with -XX:+ExplicitGCInvokesConcurrent, the JFR recording may
- * stop before we have received the last garbage_collection event.
- *
- * This function does 3 things:
- * 1. Check if the last batch is incomplete.
- * 2. If it is incomplete, then asserts that incomplete batches are allowed for this configuration.
- * 3. If incomplete batches are allowed, then the incomplete batch is removed.
- *
- * @param events All events
- * @return All events with any incomplete batch removed.
- * @throws Throwable
- */
- private List<RecordedEvent> filterIncompleteGcBatch(List<RecordedEvent> events) throws Throwable {
- List<RecordedEvent> returnEvents = new ArrayList<RecordedEvent>(events);
- int lastGcId = getLastGcId(events);
- List<RecordedEvent> lastBatchEvents = getEventsWithGcId(events, lastGcId);
- String[] endEvents = {GCHelper.event_garbage_collection, GCHelper.event_old_garbage_collection, GCHelper.event_young_garbage_collection};
- boolean isComplete = containsAnyPath(lastBatchEvents, endEvents);
- if (!isComplete) {
- // The last GC batch does not contain an end event. The batch is incomplete.
- // This is only allowed if we are using old_collector="ConcurrentMarkSweep" and "-XX:+ExplicitGCInvokesConcurrent"
- boolean isExplicitGCInvokesConcurrent = hasInputArgument("-XX:+ExplicitGCInvokesConcurrent");
- boolean isConcurrentMarkSweep = GCHelper.gcConcurrentMarkSweep.equals(oldCollector);
- String msg = String.format(
- "Incomplete batch only allowed for '%s' with -XX:+ExplicitGCInvokesConcurrent",
- GCHelper.gcConcurrentMarkSweep);
- Asserts.assertTrue(isConcurrentMarkSweep && isExplicitGCInvokesConcurrent, msg);
-
- // Incomplete batch is allowed with the current settings. Remove incomplete batch.
- returnEvents.removeAll(lastBatchEvents);
- }
- return returnEvents;
- }
-
private boolean hasInputArgument(String arg) {
return ManagementFactory.getRuntimeMXBean().getInputArguments().contains(arg);
}
@@ -276,8 +240,7 @@
}
private void verifyCollectionCount(String collector, long eventCounts, long beanCounts) {
- if (GCHelper.gcConcurrentMarkSweep.equals(collector) || GCHelper.gcG1Old.equals(oldCollector)) {
- // ConcurrentMarkSweep mixes old and new collections. Not same values as in MXBean.
+ if (GCHelper.gcG1Old.equals(oldCollector)) {
// MXBean does not report old collections for G1Old, so we have nothing to compare with.
return;
}
@@ -338,11 +301,8 @@
}
}
}
- if (!GCHelper.gcConcurrentMarkSweep.equals(batch.getName())) {
- // We do not get heap_summary events for ConcurrentMarkSweep
- Asserts.assertEquals(1, countBeforeGc, "Unexpected number of heap_summary.before_gc");
- Asserts.assertEquals(1, countAfterGc, "Unexpected number of heap_summary.after_gc");
- }
+ Asserts.assertEquals(1, countBeforeGc, "Unexpected number of heap_summary.before_gc");
+ Asserts.assertEquals(1, countAfterGc, "Unexpected number of heap_summary.after_gc");
} catch (Throwable e) {
GCHelper.log("verifySingleGcBatch failed for gcEvent:");
GCHelper.log(batch.getLog());
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSConcurrent.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @requires vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run driver jdk.jfr.event.gc.collection.TestGCCauseWithCMSConcurrent
- */
-public class TestGCCauseWithCMSConcurrent {
- public static void main(String[] args) throws Exception {
- String testID = "CMSConcurrent";
- String[] vmFlags = {"-XX:+UseConcMarkSweepGC", "-XX:+ExplicitGCInvokesConcurrent"};
- String[] gcNames = {GCHelper.gcConcurrentMarkSweep, GCHelper.gcParNew, GCHelper.gcSerialOld};
- String[] gcCauses = {"CMS Concurrent Mark", "Allocation Failure", "System.gc()"};
- GCGarbageCollectionUtil.test(testID, vmFlags, gcNames, gcCauses);
- }
-}
-
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSMarkSweep.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run driver jdk.jfr.event.gc.collection.TestGCCauseWithCMSMarkSweep
- */
-public class TestGCCauseWithCMSMarkSweep {
- public static void main(String[] args) throws Exception {
- String testID = "CMSMarkSweep";
- String[] vmFlags = {"-XX:+UseConcMarkSweepGC"};
- String[] gcNames = {GCHelper.gcConcurrentMarkSweep, GCHelper.gcParNew, GCHelper.gcSerialOld};
- String[] gcCauses = {"CMS Concurrent Mark", "Allocation Failure", "System.gc()"};
- GCGarbageCollectionUtil.test(testID, vmFlags, gcNames, gcCauses);
- }
-}
-
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSConcurrent.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- *
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xmx32m -Xmn8m -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent jdk.jfr.event.gc.collection.TestGCEventMixedWithCMSConcurrent
- * good debug flags: -Xlog:gc+heap=trace,gc*=debug
- */
-public class TestGCEventMixedWithCMSConcurrent {
- public static void main(String[] args) throws Throwable {
- GCEventAll.doTest();
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSMarkSweep.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- *
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xmx32m -Xmn8m -XX:+UseConcMarkSweepGC -XX:-ExplicitGCInvokesConcurrent jdk.jfr.event.gc.collection.TestGCEventMixedWithCMSMarkSweep
- * good debug flags: -Xlog:gc+heap=trace,gc*=debug
- */
-public class TestGCEventMixedWithCMSMarkSweep {
- public static void main(String[] args) throws Throwable {
- GCEventAll.doTest();
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithParNew.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -Xmx32m -Xmn8m -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC jdk.jfr.event.gc.collection.TestGCEventMixedWithParNew
- * good debug flags: -Xlog:gc*=debug
- */
-
-public class TestGCEventMixedWithParNew {
- public static void main(String[] args) throws Throwable {
- GCEventAll.doTest();
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestYoungGarbageCollectionEventWithParNew.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -Xmx50m -Xmn2m -XX:+UseConcMarkSweepGC -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc+heap=trace,gc*=debug jdk.jfr.event.gc.collection.TestYoungGarbageCollectionEventWithParNew
- */
-public class TestYoungGarbageCollectionEventWithParNew {
-
- public static void main(String[] args) throws Exception {
- YoungGarbageCollectionEvent.test();
- }
-
-}
--- a/test/jdk/jdk/jfr/event/gc/detailed/PromotionEvent.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/gc/detailed/PromotionEvent.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,8 +69,7 @@
List<GarbageCollectorMXBean> gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
for (GarbageCollectorMXBean gcBean : gcBeans) {
if ("PS Scavenge".equals(gcBean.getName())
- || "G1 Young Generation".equals(gcBean.getName())
- || ("ParNew".equals(gcBean.getName()))) {
+ || "G1 Young Generation".equals(gcBean.getName())) {
ycBean = gcBean;
}
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestCMSConcurrentModeFailureEvent.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.detailed;
-
-import java.io.IOException;
-import java.io.File;
-import java.nio.charset.Charset;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Optional;
-
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.jfr.consumer.RecordingFile;
-import jdk.test.lib.Asserts;
-import jdk.test.lib.jfr.EventNames;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- *
- * @run main jdk.jfr.event.gc.detailed.TestCMSConcurrentModeFailureEvent
- */
-public class TestCMSConcurrentModeFailureEvent {
-
- private final static String EVENT_NAME = EventNames.ConcurrentModeFailure;
- private final static String EVENT_SETTINGS_FILE = System.getProperty("test.src", ".") + File.separator + "concurrentmodefailure-testsettings.jfc";
- private final static String JFR_FILE = "TestCMSConcurrentModeFailureEvent.jfr";
- private final static int BYTES_TO_ALLOCATE = 1024 * 512;
-
- public static void main(String[] args) throws Exception {
- String[] vmFlags = {"-Xmx128m", "-XX:MaxTenuringThreshold=0", "-Xlog:gc*=debug:testCMSGC.log",
- "-XX:+UseConcMarkSweepGC", "-XX:+UnlockExperimentalVMOptions", "-XX:-UseFastUnorderedTimeStamps"};
-
- if (!ExecuteOOMApp.execute(EVENT_SETTINGS_FILE, JFR_FILE, vmFlags, BYTES_TO_ALLOCATE)) {
- System.out.println("OOM happened in the other thread(not test thread). Skip test.");
- // Skip test, process terminates due to the OOME error in the different thread
- return;
- }
-
- Optional<RecordedEvent> event = RecordingFile.readAllEvents(Paths.get(JFR_FILE)).stream().findFirst();
- if (event.isPresent()) {
- Asserts.assertEquals(EVENT_NAME, event.get().getEventType().getName(), "Wrong event type");
- } else {
- // No event received. Check if test did trigger the event.
- boolean isEventTriggered = fileContainsString("testCMSGC.log", "concurrent mode failure");
- System.out.println("isEventTriggered=" +isEventTriggered);
- Asserts.assertFalse(isEventTriggered, "Event found in log, but not in JFR");
- }
- }
-
- private static boolean fileContainsString(String filename, String text) throws IOException {
- Path p = Paths.get(filename);
- for (String line : Files.readAllLines(p, Charset.defaultCharset())) {
- if (line.contains(text)) {
- return true;
- }
- }
- return false;
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestPromotionFailedEventWithParNew.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm jdk.jfr.event.gc.detailed.TestPromotionFailedEventWithParNew
- */
-public class TestPromotionFailedEventWithParNew {
-
- public static void main(String[] args) throws Throwable {
- PromotionFailedEvent.test("TestPromotionFailedEventWithParNew",
- new String[]{"-Xmx32m", "-Xmn30m", "-XX:-UseDynamicNumberOfGCThreads",
- "-XX:ParallelGCThreads=3", "-XX:MaxTenuringThreshold=0",
- "-Xlog:gc*=debug", "-XX:+UseConcMarkSweepGC",
- "-XX:+UnlockExperimentalVMOptions", "-XX:-UseFastUnorderedTimeStamps"});
- }
-
-}
--- a/test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventConcurrentCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.heapsummary;
-
-import java.time.Duration;
-import java.util.List;
-
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.test.lib.Asserts;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent jdk.jfr.event.gc.heapsummary.TestHeapSummaryEventConcurrentCMS
- */
-public class TestHeapSummaryEventConcurrentCMS {
-
- public static void main(String[] args) throws Exception {
- Recording recording = new Recording();
- recording.enable(EventNames.GarbageCollection).withThreshold(Duration.ofMillis(0));
- recording.enable(EventNames.GCHeapSummary).withThreshold(Duration.ofMillis(0));
-
- recording.start();
- // Need several GCs to ensure at least one heap summary event from concurrent CMS
- GCHelper.callSystemGc(6, true);
- recording.stop();
-
- // Remove first and last GCs which can be incomplete
- List<RecordedEvent> events = GCHelper.removeFirstAndLastGC(Events.fromRecording(recording));
- Asserts.assertFalse(events.isEmpty(), "No events found");
- for (RecordedEvent event : events) {
- System.out.println("Event: " + event);
- if (!isCmsGcEvent(event)) {
- continue;
- }
- int gcId = Events.assertField(event, "gcId").getValue();
- verifyHeapSummary(events, gcId, "Before GC");
- verifyHeapSummary(events, gcId, "After GC");
- }
- }
-
- private static boolean isCmsGcEvent(RecordedEvent event) {
- if (!Events.isEventType(event, EventNames.GarbageCollection)) {
- return false;
- }
- final String gcName = Events.assertField(event, "name").notEmpty().getValue();
- return "ConcurrentMarkSweep".equals(gcName);
- }
-
- private static void verifyHeapSummary(List<RecordedEvent> events, int gcId, String when) {
- for (RecordedEvent event : events) {
- if (!Events.isEventType(event, EventNames.GCHeapSummary)) {
- continue;
- }
- if (gcId == (int)Events.assertField(event, "gcId").getValue() &&
- when.equals(Events.assertField(event, "when").getValue())) {
- System.out.printf("Found " + EventNames.GCHeapSummary + " for id=%d, when=%s%n", gcId, when);
- return;
- }
- }
- Asserts.fail(String.format("No " + EventNames.GCHeapSummary + " for id=%d, when=%s", gcId, when));
- }
-
-}
--- a/test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventParNewCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.heapsummary;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC jdk.jfr.event.gc.heapsummary.TestHeapSummaryEventParNewCMS
- */
-public class TestHeapSummaryEventParNewCMS {
- public static void main(String[] args) throws Exception {
- HeapSummaryEventAllGcs.test(GCHelper.gcParNew, GCHelper.gcConcurrentMarkSweep);
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSConcurrent.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.objectcount;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithCMSConcurrent
- */
-public class TestObjectCountAfterGCEventWithCMSConcurrent {
- public static void main(String[] args) throws Exception {
- ObjectCountAfterGCEvent.test(GCHelper.gcConcurrentMarkSweep);
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSMarkSweep.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.objectcount;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithCMSMarkSweep
- */
-public class TestObjectCountAfterGCEventWithCMSMarkSweep {
- public static void main(String[] args) throws Exception {
- ObjectCountAfterGCEvent.test(GCHelper.gcSerialOld);
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSConcurrent.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.refstat;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc+heap=trace,gc*=debug -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent jdk.jfr.event.gc.refstat.TestRefStatEventWithCMSConcurrent
- */
-public class TestRefStatEventWithCMSConcurrent {
- public static void main(String[] args) throws Exception {
- RefStatEvent.test(GCHelper.gcConcurrentMarkSweep);
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSMarkSweep.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.refstat;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc+heap=trace,gc*=debug -XX:+UseConcMarkSweepGC jdk.jfr.event.gc.refstat.TestRefStatEventWithCMSMarkSweep
- */
-public class TestRefStatEventWithCMSMarkSweep {
- public static void main(String[] args) throws Exception {
- RefStatEvent.test(GCHelper.gcSerialOld);
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/AllocationStackTrace.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/gc/stacktrace/AllocationStackTrace.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -210,52 +210,6 @@
}
/**
- * Tests event stacktrace for young GC if -XX:+UseConcMarkSweepGC is used
- */
- public static void testParNewAllocEvent() throws Exception {
- GarbageCollectorMXBean bean = garbageCollectorMXBean("ParNew");
- MemoryAllocator memory = new EdenMemoryAllocator();
-
- String[] expectedStack = new String[]{
- "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testAllocEvent",
- "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testParNewAllocEvent"
- };
-
- testAllocEvent(bean, memory, expectedStack);
- }
-
- /**
- * Tests event stacktrace for old GC if -XX:+UseConcMarkSweepGC is used
- */
- public static void testConcMarkSweepAllocEvent() throws Exception {
- GarbageCollectorMXBean bean = garbageCollectorMXBean("ConcurrentMarkSweep");
- MemoryAllocator memory = new OldGenMemoryAllocator();
-
- String[] expectedStack = new String[]{
- "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testAllocEvent",
- "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testConcMarkSweepAllocEvent"
- };
-
- testAllocEvent(bean, memory, expectedStack);
- }
-
- /**
- * Tests event stacktrace during metaspace GC threshold if
- * -XX:+UseConcMarkSweepGC is used
- */
- public static void testMetaspaceConcMarkSweepGCAllocEvent() throws Exception {
- GarbageCollectorMXBean bean = garbageCollectorMXBean("ConcurrentMarkSweep");
- MemoryAllocator memory = new MetaspaceMemoryAllocator();
-
- String[] expectedStack = new String[]{
- "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testAllocEvent",
- "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testMetaspaceConcMarkSweepGCAllocEvent"
- };
-
- testAllocEvent(bean, memory, expectedStack);
- }
-
- /**
* Tests event stacktrace for young GC if -XX:+UseParallelGC is used
*/
public static void testParallelScavengeAllocEvent() throws Exception {
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/TestConcMarkSweepAllocationPendingStackTrace.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.stacktrace;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:MaxNewSize=10M -Xmx64M -XX:+UseConcMarkSweepGC -Xlog:gc* jdk.jfr.event.gc.stacktrace.TestConcMarkSweepAllocationPendingStackTrace
- */
-public class TestConcMarkSweepAllocationPendingStackTrace {
-
- public static void main(String[] args) throws Exception {
- AllocationStackTrace.testConcMarkSweepAllocEvent();
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.stacktrace;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @requires !(vm.compMode == "Xcomp" & os.arch == "aarch64")
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:MaxMetaspaceSize=64M -Xlog:gc* jdk.jfr.event.gc.stacktrace.TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace
- */
-public class TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace {
-
- public static void main(String[] args) throws Exception {
- AllocationStackTrace.testMetaspaceConcMarkSweepGCAllocEvent();
- }
-}
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/TestParNewAllocationPendingStackTrace.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.stacktrace;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xlog:gc* -XX:+FlightRecorder jdk.jfr.event.gc.stacktrace.TestParNewAllocationPendingStackTrace
- */
-public class TestParNewAllocationPendingStackTrace {
-
- public static void main(String[] args) throws Exception {
- AllocationStackTrace.testParNewAllocEvent();
- }
-}
--- a/test/jdk/jdk/jfr/event/oldobject/TestCMS.java Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.oldobject;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.jfr.internal.test.WhiteBox;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires vm.gc == "null" & !vm.graal.enabled
- * @summary Test leak profiler with CMS GC
- * @library /test/lib /test/jdk
- * @modules jdk.jfr/jdk.jfr.internal.test
- * @run main/othervm -XX:TLABSize=2k -XX:+UseConcMarkSweepGC jdk.jfr.event.oldobject.TestCMS
- */
-public class TestCMS {
-
- static private class FindMe {
- }
-
- public static List<FindMe[]> list = new ArrayList<>(OldObjects.MIN_SIZE);
-
- public static void main(String[] args) throws Exception {
- WhiteBox.setWriteAllObjectSamples(true);
-
- try (Recording r = new Recording()) {
- r.enable(EventNames.OldObjectSample).withStackTrace().with("cutoff", "infinity");
- r.start();
- allocateFindMe();
- System.gc();
- r.stop();
- List<RecordedEvent> events = Events.fromRecording(r);
- System.out.println(events);
- if (OldObjects.countMatchingEvents(events, FindMe[].class, null, null, -1, "allocateFindMe") == 0) {
- throw new Exception("Could not find leak with " + FindMe[].class);
- }
- }
- }
-
- public static void allocateFindMe() {
- for (int i = 0; i < OldObjects.MIN_SIZE; i++) {
- // Allocate array to trigger sampling code path for interpreter / c1
- list.add(new FindMe[0]);
- }
- }
-
-}
--- a/test/jdk/jdk/jfr/event/oldobject/TestMetadataRetention.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/oldobject/TestMetadataRetention.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,8 @@
allocatorThread = null;
// System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent
- // is NOT set. If this flag is set G1 will never unload classes on System.gc()
- // and CMS will not guarantee that all semantically dead classes will be
- // unloaded. As far as the "jfr" key guarantees no VM flags are set from the
+ // is NOT set. If this flag is set, G1 will never unload classes on System.gc().
+ // As long as the "jfr" key guarantees that no VM flags are set from the
+ // outside, System.gc() should be enough.
System.gc();
--- a/test/jdk/jdk/jfr/event/runtime/TestClassLoadingStatisticsEvent.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/runtime/TestClassLoadingStatisticsEvent.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,9 +47,8 @@
* the loadedClassCount and unloadedClassCount attributes are correct.
*
* System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent
- * is NOT set. If this flag is set G1 will never unload classes on System.gc()
- * and CMS will not guarantee that all semantically dead classes will be
- * unloaded. As far as the "jfr" key guarantees no VM flags are set from the
+ * is NOT set. If this flag is set, G1 will never unload classes on System.gc().
+ * As long as the "jfr" key guarantees that no VM flags are set from the
+ * outside, System.gc() should be enough.
*/
public class TestClassLoadingStatisticsEvent {
--- a/test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,8 +49,7 @@
/**
* System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent is NOT set.
- * If this flag is set G1 will never unload classes on System.gc() and
- * CMS will not guarantee that all semantically dead classes will be unloaded.
+ * If this flag is set G1 will never unload classes on System.gc().
* As far as the "jfr" key guarantees no VM flags are set from the outside
* it should be enough with System.gc().
*/
--- a/test/jdk/jdk/jfr/event/runtime/TestVmFlagChangedEvent.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/jdk/jdk/jfr/event/runtime/TestVmFlagChangedEvent.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,6 @@
public static void main(String[] args) throws Throwable {
EventFlag[] eventFlags = {
- new EventFlag(EventNames.LongFlagChanged, "CMSWaitDuration", "2500"),
new EventFlag(EventNames.StringFlagChanged, "HeapDumpPath", "/a/sample/path"),
new EventFlag(EventNames.BooleanFlagChanged, "HeapDumpOnOutOfMemoryError", "true")
};
--- a/test/lib/jdk/test/lib/Utils.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/lib/jdk/test/lib/Utils.java Wed Nov 13 11:37:29 2019 +0100
@@ -198,8 +198,7 @@
* @return A copy of given opts with all GC options removed.
*/
private static final Pattern useGcPattern = Pattern.compile(
- "(?:\\-XX\\:[\\+\\-]Use.+GC)"
- + "|(?:\\-Xconcgc)");
+ "(?:\\-XX\\:[\\+\\-]Use.+GC)");
public static List<String> removeGcOpts(List<String> opts) {
List<String> optsWithoutGC = new ArrayList<String>();
for (String opt : opts) {
--- a/test/lib/jdk/test/lib/jfr/GCHelper.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/lib/jdk/test/lib/jfr/GCHelper.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,10 @@
public static final String event_phases_level_3 = EventNames.GCPhasePauseLevel3;
public static final String gcG1New = "G1New";
- public static final String gcParNew = "ParNew";
public static final String gcDefNew = "DefNew";
public static final String gcParallelScavenge = "ParallelScavenge";
public static final String gcG1Old = "G1Old";
public static final String gcG1Full = "G1Full";
- public static final String gcConcurrentMarkSweep = "ConcurrentMarkSweep";
public static final String gcSerialOld = "SerialOld";
public static final String gcPSMarkSweep = "PSMarkSweep";
public static final String gcParallelOld = "ParallelOld";
@@ -174,26 +172,21 @@
beanCollectorTypes.put("G1 Young Generation", true);
beanCollectorTypes.put("Copy", true);
beanCollectorTypes.put("PS Scavenge", true);
- beanCollectorTypes.put("ParNew", true);
// old GarbageCollectionMXBeans.
beanCollectorTypes.put("G1 Old Generation", false);
- beanCollectorTypes.put("ConcurrentMarkSweep", false);
beanCollectorTypes.put("PS MarkSweep", false);
beanCollectorTypes.put("MarkSweepCompact", false);
// List of expected collector overrides. "A.B" means that collector A may use collector B.
collectorOverrides.add("G1Old.G1Full");
- collectorOverrides.add("ConcurrentMarkSweep.SerialOld");
collectorOverrides.add("SerialOld.PSMarkSweep");
requiredEvents.put(gcG1New, new String[] {event_heap_summary, event_young_garbage_collection});
- requiredEvents.put(gcParNew, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_young_garbage_collection});
requiredEvents.put(gcDefNew, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_young_garbage_collection});
requiredEvents.put(gcParallelScavenge, new String[] {event_heap_summary, event_heap_ps_summary, event_heap_metaspace_summary, event_reference_statistics, event_phases_pause, event_phases_level_1, event_young_garbage_collection});
requiredEvents.put(gcG1Old, new String[] {event_heap_summary, event_old_garbage_collection});
requiredEvents.put(gcG1Full, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_old_garbage_collection});
- requiredEvents.put(gcConcurrentMarkSweep, new String[] {event_phases_pause, event_phases_level_1, event_old_garbage_collection});
requiredEvents.put(gcSerialOld, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_old_garbage_collection});
requiredEvents.put(gcParallelOld, new String[] {event_heap_summary, event_heap_ps_summary, event_heap_metaspace_summary, event_reference_statistics, event_phases_pause, event_phases_level_1, event_old_garbage_collection, event_parold_garbage_collection});
--- a/test/lib/sun/hotspot/WhiteBox.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/lib/sun/hotspot/WhiteBox.java Wed Nov 13 11:37:29 2019 +0100
@@ -384,7 +384,6 @@
public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
public native long incMetaspaceCapacityUntilGC(long increment);
public native long metaspaceCapacityUntilGC();
- public native boolean metaspaceShouldConcurrentCollect();
public native long metaspaceReserveAlignment();
// Don't use these methods directly
--- a/test/lib/sun/hotspot/gc/GC.java Wed Nov 13 11:21:15 2019 +0100
+++ b/test/lib/sun/hotspot/gc/GC.java Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,11 +35,10 @@
*/
Serial(1),
Parallel(2),
- ConcMarkSweep(3),
- G1(4),
- Epsilon(5),
- Z(6),
- Shenandoah(7);
+ G1(3),
+ Epsilon(4),
+ Z(5),
+ Shenandoah(6);
private static final WhiteBox WB = WhiteBox.getWhiteBox();