# HG changeset patch # User eosterlund # Date 1521441498 -3600 # Node ID 848864ed9b17d9f97a7a43623b8c6ff9ea344c09 # Parent 689ebcfe04fdd372676c2184d56f77a2b18f95d7 8199604: Rename CardTableModRefBS to CardTableBarrierSet Reviewed-by: stefank, pliden diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/aarch64/aarch64.ad --- a/src/hotspot/cpu/aarch64/aarch64.ad Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/aarch64/aarch64.ad Mon Mar 19 07:38:18 2018 +0100 @@ -996,7 +996,7 @@ source_hpp %{ #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "opto/addnode.hpp" class CallStubImpl { @@ -5845,8 +5845,8 @@ operand immByteMapBase() %{ // Get base of card map - predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) && - (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base()); + predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableBarrierSet) && + (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base()); match(ConP); op_cost(0); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -34,7 +34,7 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_aarch64.hpp" #include "oops/objArrayKlass.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -31,7 +31,7 @@ #include "c1/c1_Runtime1.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_aarch64.hpp" #include "oops/compiledICHolder.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "compiler/disassembler.hpp" #include "memory/resourceArea.hpp" @@ -3618,10 +3618,10 @@ // register obj is destroyed afterwards. 
BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); @@ -4129,7 +4129,7 @@ DirtyCardQueue::byte_offset_of_buf())); BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); @@ -4515,7 +4515,7 @@ void MacroAssembler::load_byte_map_base(Register reg) { jbyte *byte_map_base = - ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base(); + ((CardTableBarrierSet*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base(); if (is_valid_AArch64_address((address)byte_map_base)) { // Strictly speaking the byte_map_base isn't an address at all, diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_aarch64.hpp" #include "oops/instanceOop.hpp" @@ -654,7 +654,7 @@ __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); __ pop(saved_regs, sp); break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default: ShouldNotReachHere(); @@ -695,9 +695,9 @@ __ pop(saved_regs, sp); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/aarch64/templateTable_aarch64.cpp --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -184,7 +184,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (val == noreg) { __ store_heap_oop_null(obj); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/assembler_arm.cpp --- a/src/hotspot/cpu/arm/assembler_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/assembler_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "ci/ciEnv.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/assembler_arm_32.cpp --- a/src/hotspot/cpu/arm/assembler_arm_32.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/assembler_arm_32.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -26,7 +26,7 @@
#include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "ci/ciEnv.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/assembler_arm_64.cpp --- a/src/hotspot/cpu/arm/assembler_arm_64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/assembler_arm_64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "ci/ciEnv.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/c1_Defs_arm.hpp --- a/src/hotspot/cpu/arm/c1_Defs_arm.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_Defs_arm.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -79,7 +79,7 @@ #else #define PATCHED_ADDR (204) #endif -#define CARDTABLEMODREF_POST_BARRIER_HELPER +#define CARDTABLEBARRIERSET_POST_BARRIER_HELPER #define GENERATE_ADDRESS_IS_PREFERRED #endif // CPU_ARM_VM_C1_DEFS_ARM_HPP diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp --- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -31,7 +31,7 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_arm.hpp" #include "oops/objArrayKlass.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp --- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -35,7 +35,7 @@ #include "ci/ciTypeArrayKlass.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "vmreg_arm.inline.hpp" @@ -497,7 +497,7 @@ #endif // AARCH64 } -void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) { +void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) { assert(addr->is_register(), "must be a register at this point"); LIR_Opr tmp = FrameMap::LR_ptr_opr; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/c1_Runtime1_arm.cpp --- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_arm.hpp" #include "oops/compiledICHolder.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/interp_masm_arm.cpp --- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ 
-26,7 +26,7 @@ #include "jvm.h" #include "gc/shared/barrierSet.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.inline.hpp" +#include "gc/shared/cardTableBarrierSet.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "interp_masm_arm.hpp" #include "interpreter/interpreter.hpp" @@ -411,10 +411,10 @@ void InterpreterMacroAssembler::store_check_part1(Register card_table_base) { // Check barrier set type (should be card table) and element size BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code"); @@ -473,7 +473,7 @@ #ifdef AARCH64 strb(ZR, card_table_addr); #else - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); CardTable* ct = ctbs->card_table(); if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) { // Card table is aligned so the lowest byte of the table address base is zero. diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/macroAssembler_arm.cpp --- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "code/nativeInst.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -2267,7 +2267,7 @@ DirtyCardQueue::byte_offset_of_buf())); BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); Label done; Label runtime; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/stubGenerator_arm.cpp --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/assembler.hpp" #include "assembler_arm.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_arm.hpp" #include "oops/instanceOop.hpp" @@ -2909,7 +2909,7 @@ __ pop(saved_regs | R9ifScratched); #endif // AARCH64 } - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default: ShouldNotReachHere(); @@ -2962,10 +2962,10 @@ #endif // !AARCH64 } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { BLOCK_COMMENT("CardTablePostBarrier"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/arm/templateTable_arm.cpp --- a/src/hotspot/cpu/arm/templateTable_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -228,7 +228,7 @@ } break; #endif // INCLUDE_ALL_GCS -
case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (is_null) { __ store_heap_oop_null(new_val, obj); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/assembler_ppc.cpp --- a/src/hotspot/cpu/ppc/assembler_ppc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "asm/assembler.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -33,7 +33,7 @@ #include "ci/ciInstance.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "nativeInst_ppc.hpp" #include "oops/objArrayKlass.hpp" #include "runtime/safepointMechanism.inline.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -29,7 +29,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" #include "oops/compiledICHolder.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/macroAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -3036,9 +3036,9 @@ // Write the card table byte if needed. void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) { - CardTableModRefBS* bs = - barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); - assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + CardTableBarrierSet* bs = + barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); CardTable* ct = bs->card_table(); #ifdef ASSERT cmpdi(CCR0, Rnew_val, 0); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/ppc.ad --- a/src/hotspot/cpu/ppc/ppc.ad Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/ppc.ad Mon Mar 19 07:38:18 2018 +0100 @@ -1274,12 +1274,12 @@ return offsets; } const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); - + // Emit the trampoline stub which will be related to the branch-and-link below. CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset); if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
__ relocate(rtype); - + // Note: At this point we do not have the address of the trampoline // stub, and the entry point might be too far away for bl, so __ pc() // serves as dummy and the bl will be patched later. @@ -1526,7 +1526,7 @@ // Save return pc. ___(std) std(return_pc, _abi(lr), callers_sp); } - + C->set_frame_complete(cbuf.insts_size()); } #undef ___ @@ -2695,13 +2695,13 @@ ciEnv::current()->record_out_of_memory_failure(); return; } - + // Get the constant's TOC offset. toc_offset = __ offset_to_method_toc(const_toc_addr); - + // Keep the current instruction offset in mind. ((loadConLNode*)this)->_cbuf_insts_offset = __ offset(); - + __ ld($dst$$Register, toc_offset, $toc$$Register); %} @@ -2819,7 +2819,7 @@ MachNode *_last; } loadConLReplicatedNodesTuple; -loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc, +loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc, vecXOper *dst, immI_0Oper *zero, OptoReg::Name reg_second, OptoReg::Name reg_first, OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) { @@ -3158,7 +3158,7 @@ Label skip_storestore; #if 0 // TODO: PPC port - // Check CMSCollectorCardTableModRefBSExt::_requires_release and do the + // Check CMSCollectorCardTableBarrierSetBSExt::_requires_release and do the // StoreStore barrier conditionally. __ lwz(R0, 0, $releaseFieldAddr$$Register); __ cmpwi($crx$$CondRegister, R0, 0); @@ -6852,7 +6852,7 @@ // Card-mark for CMS garbage collection. // This cardmark does an optimization so that it must not always // do a releasing store. For this, it gets the address of -// CMSCollectorCardTableModRefBSExt::_requires_release as input. +// CMSCollectorCardTableBarrierSetBSExt::_requires_release as input. // (Using releaseFieldAddr in the match rule is a hack.) instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{ match(Set mem (StoreCM mem releaseFieldAddr)); @@ -6871,7 +6871,7 @@ // Card-mark for CMS garbage collection. // This cardmark does an optimization so that it must not always // do a releasing store. For this, it needs the constant address of -// CMSCollectorCardTableModRefBSExt::_requires_release. +// CMSCollectorCardTableBarrierSetBSExt::_requires_release. // This constant address is split off here by expand so we can use // adlc / matcher functionality to load it from the constant section. 
instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{ @@ -6879,7 +6879,7 @@ predicate(UseConcMarkSweepGC); expand %{ - immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableModRefBSExt::requires_release_address() */ %} + immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %} iRegLdst releaseFieldAddress; flagsReg crx; loadConL_Ex(releaseFieldAddress, baseImm); @@ -13665,7 +13665,7 @@ instruct mtvsrwz(vecX temp1, iRegIsrc src) %{ effect(DEF temp1, USE src); - + size(4); ins_encode %{ __ mtvsrwz($temp1$$VectorSRegister, $src$$Register); %} ins_pipe(pipe_class_default); %} @@ -13678,7 +13678,7 @@ size(4); ins_encode %{ - __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant); + __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant); %} ins_pipe(pipe_class_default); %} @@ -13843,7 +13843,7 @@ expand %{ iRegLdst tmpL; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveReg(tmpL, src); repl48(tmpL); repl32(tmpL); @@ -13915,10 +13915,10 @@ predicate(n->as_Vector()->length() == 4); ins_cost(2 * DEFAULT_COST); - expand %{ + expand %{ iRegLdst tmpL; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveReg(tmpL, src); repl32(tmpL); mtvsrd(tmpV, tmpL); @@ -14057,7 +14057,7 @@ iRegIdst tmpI; iRegLdst tmpL; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveF2I_reg_stack(tmpS, src); // Move float to stack. moveF2I_stack_reg(tmpI, tmpS); // Move stack to int reg. @@ -14096,7 +14096,7 @@ iRegLdst tmpL; iRegLdst tmp; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveD2L_reg_stack(tmpS, src); moveD2L_stack_reg(tmpL, tmpS); mtvsrd(tmpV, tmpL); @@ -14132,7 +14132,7 @@ predicate(false); effect(DEF dst, USE src); - format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%} + format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%} size(4); ins_encode %{ __ mtvsrd($dst$$VectorSRegister, $src$$Register); @@ -14147,7 +14147,7 @@ size(4); ins_encode %{ __ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant); - %} + %} ins_pipe(pipe_class_default); %} @@ -14158,7 +14158,7 @@ size(4); ins_encode %{ __ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant); - %} + %} ins_pipe(pipe_class_default); %} @@ -14167,8 +14167,8 @@ predicate(n->as_Vector()->length() == 2); expand %{ vecX tmpV; - immI8 zero %{ (int) 0 %} - mtvsrd(tmpV, src); + immI8 zero %{ (int) 0 %} + mtvsrd(tmpV, src); xxpermdi(dst, tmpV, tmpV, zero); %} %} diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/stubGenerator_ppc.cpp --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -26,7 +26,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" #include "oops/instanceOop.hpp" @@ -669,7 +669,7 @@ __ bind(filtered); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default: ShouldNotReachHere(); @@ -703,7 +703,7 @@ __ restore_LR_CR(R0); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { Label Lskip_loop, Lstore_loop; if (UseConcMarkSweepGC) { @@ -711,7 +711,7 @@ __ release(); } - CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs); +
CardTableBarrierSet* const ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* const ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(addr, count, tmp); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/ppc/templateTable_ppc_64.cpp --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -104,7 +104,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { Label Lnull, Ldone; if (Rval != noreg) { diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/s390/assembler_s390.cpp --- a/src/hotspot/cpu/s390/assembler_s390.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/s390/assembler_s390.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -28,7 +28,7 @@ #include "compiler/disassembler.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -33,7 +33,7 @@ #include "ci/ciInstance.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "nativeInst_s390.hpp" #include "oops/objArrayKlass.hpp" #include "runtime/safepointMechanism.inline.hpp" @@ -631,7 +631,7 @@ }; // Index register is normally not supported, but for - // LIRGenerator::CardTableModRef_post_barrier we make an exception. + // LIRGenerator::CardTableBarrierSet_post_barrier we make an exception. if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) { __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint())); store_offset = __ offset(); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/s390/c1_Runtime1_s390.cpp --- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -29,7 +29,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_s390.hpp" #include "oops/compiledICHolder.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/s390/macroAssembler_s390.cpp --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "gc/shared/cardTable.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/klass.inline.hpp" @@ -3505,9 +3505,9 @@ // Write to card table for modification at store_addr - register is destroyed afterwards.
void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) { BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); - assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); assert_different_registers(store_addr, tmp); z_srlg(store_addr, store_addr, CardTable::card_shift); load_absolute_address(tmp, (address)ct->byte_map_base()); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/s390/stubGenerator_s390.cpp --- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "registerSaver_s390.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "nativeInst_s390.hpp" @@ -724,7 +724,7 @@ __ bind(filtered); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: case BarrierSet::ModRef: break; default: @@ -762,12 +762,12 @@ } } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: // These cases formerly known as // void array_store_check(Register addr, Register count, bool branchToEnd). { NearLabel doXC, done; - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(Z_R0, Z_R1, addr, count); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/s390/templateTable_s390.cpp --- a/src/hotspot/cpu/s390/templateTable_s390.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -261,7 +261,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (val_is_null) { __ store_heap_oop_null(val, offset, base); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -31,7 +31,7 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_sparc.hpp" #include "oops/objArrayKlass.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp --- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -28,7 +28,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_sparc.hpp" #include "oops/compiledICHolder.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/sparc/macroAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Mar 16 14:47:53 2018 +0100 +++
b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -3729,11 +3729,11 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { // If we're writing constant NULL, we can skip the write barrier. if (new_val == G0) return; - CardTableModRefBS* bs = - barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); + CardTableBarrierSet* bs = + barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); CardTable* ct = bs->card_table(); - assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); card_table_write(ct->byte_map_base(), tmp, store_addr); } diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/sparc/stubGenerator_sparc.cpp --- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_sparc.hpp" #include "oops/instanceOop.hpp" @@ -877,7 +877,7 @@ DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default: ShouldNotReachHere(); @@ -908,9 +908,9 @@ __ restore(); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); assert_different_registers(addr, count, tmp); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/sparc/templateTable_sparc.cpp --- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -91,7 +91,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (index == noreg ) { assert(Assembler::is_simm13(offset), "fix this code"); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/assembler_x86.cpp --- a/src/hotspot/cpu/x86/assembler_x86.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/assembler_x86.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -33,7 +33,7 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include
"gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_x86.hpp" #include "oops/objArrayKlass.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/c1_Runtime1_x86.cpp --- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -29,7 +29,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/compiledICHolder.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -28,7 +28,7 @@ #include "asm/assembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -5409,8 +5409,8 @@ Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf())); - CardTableModRefBS* ctbs = - barrier_set_cast(Universe::heap()->barrier_set()); + CardTableBarrierSet* ctbs = + barrier_set_cast(Universe::heap()->barrier_set()); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); @@ -5497,10 +5497,10 @@ // Does a store check for the oop in register obj. The content of // register obj is destroyed afterwards. 
BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/stubGenerator_x86_32.cpp --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/instanceOop.hpp" @@ -707,7 +707,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default : ShouldNotReachHere(); @@ -739,9 +739,9 @@ break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/stubGenerator_x86_64.cpp --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/instanceOop.hpp" @@ -1235,7 +1235,7 @@ __ bind(filtered); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default: ShouldNotReachHere(); @@ -1273,7 +1273,7 @@ __ popa(); } break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { Label L_loop, L_done; const Register end = count; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/x86/templateTable_x86.cpp --- a/src/hotspot/cpu/x86/templateTable_x86.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -199,7 +199,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (val == noreg) { __ store_heap_oop_null(obj); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/cpu/zero/assembler_zero.cpp --- a/src/hotspot/cpu/zero/assembler_zero.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/cpu/zero/assembler_zero.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "assembler_zero.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp --- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp Mon Mar 19
07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "gc/shared/barrierSet.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.inline.hpp" +#include "gc/shared/cardTableBarrierSet.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/metaspaceShared.hpp" #include "runtime/frame.inline.hpp" @@ -42,8 +42,8 @@ _heap_top_addr = NULL; } - if (bs->is_a(BarrierSet::CardTableModRef)) { - _card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->card_table()->byte_map_base()); + if (bs->is_a(BarrierSet::CardTableBarrierSet)) { + _card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base()); } else { _card_table_base = NULL; } diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/aot/aotCodeHeap.cpp --- a/src/hotspot/share/aot/aotCodeHeap.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/aot/aotCodeHeap.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -28,7 +28,7 @@ #include "ci/ciUtilities.inline.hpp" #include "classfile/javaAssertions.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/g1/heapRegion.hpp" #include "gc/shared/gcLocker.hpp" #include "interpreter/abstractInterpreter.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/aot/aotCompiledMethod.cpp --- a/src/hotspot/share/aot/aotCompiledMethod.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "code/compiledIC.hpp" #include "code/nativeInst.hpp" #include "compiler/compilerOracle.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/gcLocker.hpp" #include "jvmci/compilerRuntime.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/c1/c1_LIRGenerator.cpp --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -35,7 +35,7 @@ #include "ci/ciObjArray.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "runtime/arguments.hpp" #include "runtime/sharedRuntime.hpp" @@ -1461,10 +1461,10 @@ switch (_bs->kind()) { #if INCLUDE_ALL_GCS case BarrierSet::G1BarrierSet: - G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info); + G1BarrierSet_pre_barrier(addr_opr, pre_val, do_load, patch, info); break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: // No pre barriers break; default : @@ -1477,11 +1477,11 @@ switch (_bs->kind()) { #if INCLUDE_ALL_GCS case BarrierSet::G1BarrierSet: - G1SATBCardTableModRef_post_barrier(addr, new_val); + G1BarrierSet_post_barrier(addr, new_val); break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: - CardTableModRef_post_barrier(addr, new_val); + case BarrierSet::CardTableBarrierSet: + CardTableBarrierSet_post_barrier(addr, new_val); break; default : ShouldNotReachHere(); @@ -1491,8 +1491,8 @@ //////////////////////////////////////////////////////////////////////// #if INCLUDE_ALL_GCS -void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, - bool do_load, bool patch, CodeEmitInfo* info) { +void LIRGenerator::G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, + bool do_load,
bool patch, CodeEmitInfo* info) { // First we test whether marking is in progress. BasicType flag_type; if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { @@ -1546,7 +1546,7 @@ __ branch_destination(slow->continuation()); } -void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { +void LIRGenerator::G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { // If the "new_val" is a constant NULL, no barrier is necessary. if (new_val->is_constant() && new_val->as_constant_ptr()->as_jobject() == NULL) return; @@ -1610,7 +1610,7 @@ #endif // INCLUDE_ALL_GCS //////////////////////////////////////////////////////////////////////// -void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { +void LIRGenerator::CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) { LIR_Const* card_table_base = new LIR_Const(ci_card_table_address()); if (addr->is_address()) { LIR_Address* address = addr->as_address_ptr(); @@ -1627,8 +1627,8 @@ } assert(addr->is_register(), "must be a register at this point"); -#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER - CardTableModRef_post_barrier_helper(addr, card_table_base); +#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER + CardTableBarrierSet_post_barrier_helper(addr, card_table_base); #else LIR_Opr tmp = new_pointer_register(); if (TwoOperandLIRForm) { diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/c1/c1_LIRGenerator.hpp --- a/src/hotspot/share/c1/c1_LIRGenerator.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -275,15 +275,15 @@ // specific implementations // pre barriers - void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, - bool do_load, bool patch, CodeEmitInfo* info); + void G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, + bool do_load, bool patch, CodeEmitInfo* info); // post barriers - void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); - void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); -#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER - void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base); + void G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); + void CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val); +#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER + void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base); #endif diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/ci/ciUtilities.cpp --- a/src/hotspot/share/ci/ciUtilities.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/ci/ciUtilities.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "ci/ciUtilities.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/cardTable.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/universe.hpp" @@ -52,7 +52,7 @@ // card_table_base jbyte *ci_card_table_address() { BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code"); return ct->byte_map_base(); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/code/relocInfo_ext.cpp --- a/src/hotspot/share/code/relocInfo_ext.cpp Fri
Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/code/relocInfo_ext.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "code/relocInfo.hpp" #include "code/relocInfo_ext.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/universe.hpp" #include "runtime/os.hpp" @@ -60,7 +60,7 @@ } case symbolic_Relocation::card_table_reference: { BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); return (address)ct->byte_map_base(); } diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/compiler/disassembler.cpp --- a/src/hotspot/share/compiler/disassembler.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/compiler/disassembler.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -28,7 +28,7 @@ #include "code/codeCache.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" @@ -319,7 +319,7 @@ } BarrierSet* bs = Universe::heap()->barrier_set(); - if (bs->is_a(BarrierSet::CardTableModRef) && + if (bs->is_a(BarrierSet::CardTableBarrierSet) && adr == ci_card_table_address_as<address>
()) { st->print("word_map_base"); if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr)); diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/cms/cmsCardTable.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/cms/cmsCardTable.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/cms/cmsHeap.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/cardTableRS.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "gc/shared/space.inline.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/virtualspace.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.inline.hpp" +#include "runtime/vmThread.hpp" + +void CardTableRS:: +non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, + OopsInGenClosure* cl, + CardTableRS* ct, + uint n_threads) { + assert(n_threads > 0, "expected n_threads > 0"); + assert(n_threads <= ParallelGCThreads, + "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads); + + // Make sure the LNC array is valid for the space. + jbyte** lowest_non_clean; + uintptr_t lowest_non_clean_base_chunk_index; + size_t lowest_non_clean_chunk_size; + get_LNC_array_for_space(sp, lowest_non_clean, + lowest_non_clean_base_chunk_index, + lowest_non_clean_chunk_size); + + uint n_strides = n_threads * ParGCStridesPerThread; + SequentialSubTasksDone* pst = sp->par_seq_tasks(); + // Sets the condition for completion of the subtask (how many threads + // need to finish in order to be done). + pst->set_n_threads(n_threads); + pst->set_n_tasks(n_strides); + + uint stride = 0; + while (!pst->is_task_claimed(/* reference */ stride)) { + process_stride(sp, mr, stride, n_strides, + cl, ct, + lowest_non_clean, + lowest_non_clean_base_chunk_index, + lowest_non_clean_chunk_size); + } + if (pst->all_tasks_completed()) { + // Clear lowest_non_clean array for next time. 
+ intptr_t first_chunk_index = addr_to_chunk_index(mr.start()); + uintptr_t last_chunk_index = addr_to_chunk_index(mr.last()); + for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) { + intptr_t ind = ch - lowest_non_clean_base_chunk_index; + assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size, + "Bounds error"); + lowest_non_clean[ind] = NULL; + } + } +} + +void +CardTableRS:: +process_stride(Space* sp, + MemRegion used, + jint stride, int n_strides, + OopsInGenClosure* cl, + CardTableRS* ct, + jbyte** lowest_non_clean, + uintptr_t lowest_non_clean_base_chunk_index, + size_t lowest_non_clean_chunk_size) { + // We go from higher to lower addresses here; it wouldn't help that much + // because of the strided parallelism pattern used here. + + // Find the first card address of the first chunk in the stride that is + // at least "bottom" of the used region. + jbyte* start_card = byte_for(used.start()); + jbyte* end_card = byte_after(used.last()); + uintptr_t start_chunk = addr_to_chunk_index(used.start()); + uintptr_t start_chunk_stride_num = start_chunk % n_strides; + jbyte* chunk_card_start; + + if ((uintptr_t)stride >= start_chunk_stride_num) { + chunk_card_start = (jbyte*)(start_card + + (stride - start_chunk_stride_num) * + ParGCCardsPerStrideChunk); + } else { + // Go ahead to the next chunk group boundary, then to the requested stride. + chunk_card_start = (jbyte*)(start_card + + (n_strides - start_chunk_stride_num + stride) * + ParGCCardsPerStrideChunk); + } + + while (chunk_card_start < end_card) { + // Even though we go from lower to higher addresses below, the + // strided parallelism can interleave the actual processing of the + // dirty pages in various ways. For a specific chunk within this + // stride, we take care to avoid double scanning or missing a card + // by suitably initializing the "min_done" field in process_chunk_boundaries() + // below, together with the dirty region extension accomplished in + // DirtyCardToOopClosure::do_MemRegion(). + jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk; + // Invariant: chunk_mr should be fully contained within the "used" region. + MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start), + chunk_card_end >= end_card ? + used.end() : addr_for(chunk_card_end)); + assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)"); + assert(used.contains(chunk_mr), "chunk_mr should be subset of used"); + + // This function is used by the parallel card table iteration. + const bool parallel = true; + + DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), + cl->gen_boundary(), + parallel); + ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); + + + // Process the chunk. + process_chunk_boundaries(sp, + dcto_cl, + chunk_mr, + used, + lowest_non_clean, + lowest_non_clean_base_chunk_index, + lowest_non_clean_chunk_size); + + // We want the LNC array updates above in process_chunk_boundaries + // to be visible before any of the card table value changes as a + // result of the dirty card iteration below. + OrderAccess::storestore(); + + // We want to clear the cards: clear_cl here does the work of finding + // contiguous dirty ranges of cards to process and clear. + clear_cl.do_MemRegion(chunk_mr); + + // Find the next chunk of the stride. 
+ chunk_card_start += ParGCCardsPerStrideChunk * n_strides; + } +} + +void +CardTableRS:: +process_chunk_boundaries(Space* sp, + DirtyCardToOopClosure* dcto_cl, + MemRegion chunk_mr, + MemRegion used, + jbyte** lowest_non_clean, + uintptr_t lowest_non_clean_base_chunk_index, + size_t lowest_non_clean_chunk_size) +{ + // We must worry about non-array objects that cross chunk boundaries, + // because such objects are both precisely and imprecisely marked: + // .. if the head of such an object is dirty, the entire object + // needs to be scanned, under the interpretation that this + // was an imprecise mark + // .. if the head of such an object is not dirty, we can assume + // precise marking and it's efficient to scan just the dirty + // cards. + // In either case, each scanned reference must be scanned precisely + // once so as to avoid cloning of a young referent. For efficiency, + // our closures depend on this property and do not protect against + // double scans. + + uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start()); + assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error."); + uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index; + + // First, set "our" lowest_non_clean entry, which would be + // used by the thread scanning an adjoining left chunk with + // a non-array object straddling the mutual boundary. + // Find the object that spans our boundary, if one exists. + // first_block is the block possibly straddling our left boundary. + HeapWord* first_block = sp->block_start(chunk_mr.start()); + assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()), + "First chunk should always have a co-initial block"); + // Does the block straddle the chunk's left boundary, and is it + // a non-array object? + if (first_block < chunk_mr.start() // first block straddles left bdry + && sp->block_is_obj(first_block) // first block is an object + && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied) + || oop(first_block)->is_typeArray())) { + // Find our least non-clean card, so that a left neighbor + // does not scan an object straddling the mutual boundary + // too far to the right, and attempt to scan a portion of + // that object twice. + jbyte* first_dirty_card = NULL; + jbyte* last_card_of_first_obj = + byte_for(first_block + sp->block_size(first_block) - 1); + jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); + jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last()); + jbyte* last_card_to_check = + (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk, + (intptr_t) last_card_of_first_obj); + // Note that this does not need to go beyond our last card + // if our first object completely straddles this chunk. + for (jbyte* cur = first_card_of_cur_chunk; + cur <= last_card_to_check; cur++) { + jbyte val = *cur; + if (card_will_be_scanned(val)) { + first_dirty_card = cur; break; + } else { + assert(!card_may_have_been_dirty(val), "Error"); + } + } + if (first_dirty_card != NULL) { + assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error."); + assert(lowest_non_clean[cur_chunk_index] == NULL, + "Write exactly once : value should be stable hereafter for this round"); + lowest_non_clean[cur_chunk_index] = first_dirty_card; + } + } else { + // In this case we can help our neighbor by just asking them + // to stop at our first card (even though it may not be dirty). 
+ assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter"); + jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); + lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk; + } + + // Next, set our own max_to_do, which will strictly/exclusively bound + // the highest address that we will scan past the right end of our chunk. + HeapWord* max_to_do = NULL; + if (chunk_mr.end() < used.end()) { + // This is not the last chunk in the used region. + // What is our last block? We check the first block of + // the next (right) chunk rather than strictly check our last block + // because it's potentially more efficient to do so. + HeapWord* const last_block = sp->block_start(chunk_mr.end()); + assert(last_block <= chunk_mr.end(), "In case this property changes."); + if ((last_block == chunk_mr.end()) // our last block does not straddle boundary + || !sp->block_is_obj(last_block) // last_block isn't an object + || oop(last_block)->is_objArray() // last_block is an array (precisely marked) + || oop(last_block)->is_typeArray()) { + max_to_do = chunk_mr.end(); + } else { + assert(last_block < chunk_mr.end(), "Tautology"); + // It is a non-array object that straddles the right boundary of this chunk. + // last_obj_card is the card corresponding to the start of the last object + // in the chunk. Note that the last object may not start in + // the chunk. + jbyte* const last_obj_card = byte_for(last_block); + const jbyte val = *last_obj_card; + if (!card_will_be_scanned(val)) { + assert(!card_may_have_been_dirty(val), "Error"); + // The card containing the head is not dirty. Any marks on + // subsequent cards still in this chunk must have been made + // precisely; we can cap processing at the end of our chunk. + max_to_do = chunk_mr.end(); + } else { + // The last object must be considered dirty, and extends onto the + // following chunk. Look for a dirty card in that chunk that will + // bound our processing. + jbyte* limit_card = NULL; + const size_t last_block_size = sp->block_size(last_block); + jbyte* const last_card_of_last_obj = + byte_for(last_block + last_block_size - 1); + jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end()); + // This search potentially goes a long distance looking + // for the next card that will be scanned, terminating + // at the end of the last_block, if no earlier dirty card + // is found. + assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk, + "last card of next chunk may be wrong"); + for (jbyte* cur = first_card_of_next_chunk; + cur <= last_card_of_last_obj; cur++) { + const jbyte val = *cur; + if (card_will_be_scanned(val)) { + limit_card = cur; break; + } else { + assert(!card_may_have_been_dirty(val), "Error: card can't be skipped"); + } + } + if (limit_card != NULL) { + max_to_do = addr_for(limit_card); + assert(limit_card != NULL && max_to_do != NULL, "Error"); + } else { + // The following is a pessimistic value, because it's possible + // that a dirty card on a subsequent chunk has been cleared by + // the time we get to look at it; we'll correct for that further below, + // using the LNC array which records the least non-clean card + // before cards were cleared in a particular chunk. 
+ limit_card = last_card_of_last_obj;
+ max_to_do = last_block + last_block_size;
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ }
+ assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
+ "Bounds error.");
+ // It is possible that a dirty card for the last object may have been
+ // cleared before we had a chance to examine it. In that case, the value
+ // will have been logged in the LNC for that chunk.
+ // We need to examine as many chunks to the right as this object
+ // covers. However, we need to bound this checking to the largest
+ // entry in the LNC array: this is because the heap may expand
+ // after the LNC array has been created but before we reach this point,
+ // and the last block in our chunk may have been expanded to include
+ // the expansion delta (and possibly subsequently allocated from, so
+ // it wouldn't be sufficient to check whether that last block was
+ // or was not an object at this point).
+ uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
+ - lowest_non_clean_base_chunk_index;
+ const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
+ - lowest_non_clean_base_chunk_index;
+ if (last_chunk_index_to_check > last_chunk_index) {
+ assert(last_block + last_block_size > used.end(),
+ "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
+ " does not exceed used.end() = " PTR_FORMAT ","
+ " yet last_chunk_index_to_check " INTPTR_FORMAT
+ " exceeds last_chunk_index " INTPTR_FORMAT,
+ p2i(last_block), p2i(last_block + last_block_size),
+ p2i(used.end()),
+ last_chunk_index_to_check, last_chunk_index);
+ assert(sp->used_region().end() > used.end(),
+ "Expansion did not happen: "
+ "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
+ p2i(sp->used_region().start()), p2i(sp->used_region().end()),
+ p2i(used.start()), p2i(used.end()));
+ last_chunk_index_to_check = last_chunk_index;
+ }
+ for (uintptr_t lnc_index = cur_chunk_index + 1;
+ lnc_index <= last_chunk_index_to_check;
+ lnc_index++) {
+ jbyte* lnc_card = lowest_non_clean[lnc_index];
+ if (lnc_card != NULL) {
+ // we can stop at the first non-NULL entry we find
+ if (lnc_card <= limit_card) {
+ limit_card = lnc_card;
+ max_to_do = addr_for(limit_card);
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ }
+ // In any case, we break now
+ break;
+ } // else continue to look for a non-NULL entry if any
+ }
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ }
+ assert(max_to_do != NULL, "OOPS 1 !");
+ }
+ assert(max_to_do != NULL, "OOPS 2!");
+ } else {
+ max_to_do = used.end();
+ }
+ assert(max_to_do != NULL, "OOPS 3!");
+ // Now we can set the closure we're using so it doesn't go beyond
+ // max_to_do.
+ dcto_cl->set_min_done(max_to_do);
+#ifndef PRODUCT
+ dcto_cl->set_last_bottom(max_to_do);
+#endif
+}
+
+void
+CardTableRS::
+get_LNC_array_for_space(Space* sp,
+ jbyte**& lowest_non_clean,
+ uintptr_t& lowest_non_clean_base_chunk_index,
+ size_t& lowest_non_clean_chunk_size) {
+
+ int i = find_covering_region_containing(sp->bottom());
+ MemRegion covered = _covered[i];
+ size_t n_chunks = chunks_to_cover(covered);
+
+ // Only the first thread to obtain the lock will resize the
+ // LNC array for the covered region. Any later expansion can't affect
+ // the used_at_save_marks region.
+ // (I observed a bug in which the first thread to execute this would
+ // resize, and then it would cause "expand_and_allocate" that would
+ // increase the number of chunks in the covered region. Then a second
+ // thread would come and execute this, see that the size didn't match,
+ // and free and allocate again. So the first thread would be using a
+ // freed "_lowest_non_clean" array.)
+
+ // Do a dirty read here. If we pass the conditional then take the rare
+ // event lock and do the read again in case some other thread had already
+ // succeeded and done the resize.
+ int cur_collection = CMSHeap::heap()->total_collections();
+ // Updated _last_LNC_resizing_collection[i] must not be visible before
+ // _lowest_non_clean and friends are visible. Therefore use acquire/release
+ // to guarantee this on non TSO architectures.
+ if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
+ MutexLocker x(ParGCRareEvent_lock);
+ // This load_acquire is here for clarity only. The MutexLocker already fences.
+ if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
+ if (_lowest_non_clean[i] == NULL ||
+ n_chunks != _lowest_non_clean_chunk_size[i]) {
+
+ // Should we delete the old?
+ if (_lowest_non_clean[i] != NULL) {
+ assert(n_chunks != _lowest_non_clean_chunk_size[i],
+ "logical consequence");
+ FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
+ _lowest_non_clean[i] = NULL;
+ }
+ // Now allocate a new one if necessary.
+ if (_lowest_non_clean[i] == NULL) {
+ _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
+ _lowest_non_clean_chunk_size[i] = n_chunks;
+ _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
+ for (int j = 0; j < (int)n_chunks; j++)
+ _lowest_non_clean[i][j] = NULL;
+ }
+ }
+ // Make sure this gets visible only after _lowest_non_clean* was initialized
+ OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
+ }
+ }
+ // In any case, now do the initialization.
+ lowest_non_clean = _lowest_non_clean[i];
+ lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
+ lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
+}
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/cms/parCardTableModRefBS.cpp
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp Fri Mar 16 14:47:53 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,432 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
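The resize path in get_LNC_array_for_space() above is double-checked locking: a racy read of _last_LNC_resizing_collection[i], then the rare-event lock, then a re-check, with release/acquire pairing so the epoch update publishes the freshly initialized array. A generic sketch of the same shape using C++11 atomics; std::atomic and std::mutex are stand-ins chosen for the sketch, where HotSpot itself uses OrderAccess and a MutexLocker.

#include <atomic>
#include <cstddef>
#include <mutex>

std::atomic<int> last_resize_epoch(-1);  // stands in for _last_LNC_resizing_collection[i]
std::mutex rare_event_lock;
int* table = nullptr;                    // stands in for _lowest_non_clean[i]

void ensure_table(int cur_epoch, std::size_t n_chunks) {
  // Dirty read first; the acquire pairs with the release below, so any
  // thread that observes cur_epoch also observes the initialized table.
  if (last_resize_epoch.load(std::memory_order_acquire) != cur_epoch) {
    std::lock_guard<std::mutex> guard(rare_event_lock);
    if (last_resize_epoch.load(std::memory_order_acquire) != cur_epoch) {
      delete[] table;
      table = new int[n_chunks]();  // allocate and zero, like the NULL fill above
      // Publish: the table stores must become visible before the epoch.
      last_resize_epoch.store(cur_epoch, std::memory_order_release);
    }
  }
}

The parenthetical bug note above records exactly the failure mode this protocol prevents: without the epoch re-check under the lock, a second thread could free an array the first thread was still using.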
- * - */ - -#include "precompiled.hpp" -#include "gc/cms/cmsHeap.hpp" -#include "gc/shared/cardTableModRefBS.hpp" -#include "gc/shared/cardTableRS.hpp" -#include "gc/shared/collectedHeap.hpp" -#include "gc/shared/space.inline.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/virtualspace.hpp" -#include "oops/oop.inline.hpp" -#include "runtime/java.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" -#include "runtime/vmThread.hpp" - -void CardTableRS:: -non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, - OopsInGenClosure* cl, - CardTableRS* ct, - uint n_threads) { - assert(n_threads > 0, "expected n_threads > 0"); - assert(n_threads <= ParallelGCThreads, - "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads); - - // Make sure the LNC array is valid for the space. - jbyte** lowest_non_clean; - uintptr_t lowest_non_clean_base_chunk_index; - size_t lowest_non_clean_chunk_size; - get_LNC_array_for_space(sp, lowest_non_clean, - lowest_non_clean_base_chunk_index, - lowest_non_clean_chunk_size); - - uint n_strides = n_threads * ParGCStridesPerThread; - SequentialSubTasksDone* pst = sp->par_seq_tasks(); - // Sets the condition for completion of the subtask (how many threads - // need to finish in order to be done). - pst->set_n_threads(n_threads); - pst->set_n_tasks(n_strides); - - uint stride = 0; - while (!pst->is_task_claimed(/* reference */ stride)) { - process_stride(sp, mr, stride, n_strides, - cl, ct, - lowest_non_clean, - lowest_non_clean_base_chunk_index, - lowest_non_clean_chunk_size); - } - if (pst->all_tasks_completed()) { - // Clear lowest_non_clean array for next time. - intptr_t first_chunk_index = addr_to_chunk_index(mr.start()); - uintptr_t last_chunk_index = addr_to_chunk_index(mr.last()); - for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) { - intptr_t ind = ch - lowest_non_clean_base_chunk_index; - assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size, - "Bounds error"); - lowest_non_clean[ind] = NULL; - } - } -} - -void -CardTableRS:: -process_stride(Space* sp, - MemRegion used, - jint stride, int n_strides, - OopsInGenClosure* cl, - CardTableRS* ct, - jbyte** lowest_non_clean, - uintptr_t lowest_non_clean_base_chunk_index, - size_t lowest_non_clean_chunk_size) { - // We go from higher to lower addresses here; it wouldn't help that much - // because of the strided parallelism pattern used here. - - // Find the first card address of the first chunk in the stride that is - // at least "bottom" of the used region. - jbyte* start_card = byte_for(used.start()); - jbyte* end_card = byte_after(used.last()); - uintptr_t start_chunk = addr_to_chunk_index(used.start()); - uintptr_t start_chunk_stride_num = start_chunk % n_strides; - jbyte* chunk_card_start; - - if ((uintptr_t)stride >= start_chunk_stride_num) { - chunk_card_start = (jbyte*)(start_card + - (stride - start_chunk_stride_num) * - ParGCCardsPerStrideChunk); - } else { - // Go ahead to the next chunk group boundary, then to the requested stride. - chunk_card_start = (jbyte*)(start_card + - (n_strides - start_chunk_stride_num + stride) * - ParGCCardsPerStrideChunk); - } - - while (chunk_card_start < end_card) { - // Even though we go from lower to higher addresses below, the - // strided parallelism can interleave the actual processing of the - // dirty pages in various ways. 
For a specific chunk within this - // stride, we take care to avoid double scanning or missing a card - // by suitably initializing the "min_done" field in process_chunk_boundaries() - // below, together with the dirty region extension accomplished in - // DirtyCardToOopClosure::do_MemRegion(). - jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk; - // Invariant: chunk_mr should be fully contained within the "used" region. - MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start), - chunk_card_end >= end_card ? - used.end() : addr_for(chunk_card_end)); - assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)"); - assert(used.contains(chunk_mr), "chunk_mr should be subset of used"); - - // This function is used by the parallel card table iteration. - const bool parallel = true; - - DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), - cl->gen_boundary(), - parallel); - ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); - - - // Process the chunk. - process_chunk_boundaries(sp, - dcto_cl, - chunk_mr, - used, - lowest_non_clean, - lowest_non_clean_base_chunk_index, - lowest_non_clean_chunk_size); - - // We want the LNC array updates above in process_chunk_boundaries - // to be visible before any of the card table value changes as a - // result of the dirty card iteration below. - OrderAccess::storestore(); - - // We want to clear the cards: clear_cl here does the work of finding - // contiguous dirty ranges of cards to process and clear. - clear_cl.do_MemRegion(chunk_mr); - - // Find the next chunk of the stride. - chunk_card_start += ParGCCardsPerStrideChunk * n_strides; - } -} - -void -CardTableRS:: -process_chunk_boundaries(Space* sp, - DirtyCardToOopClosure* dcto_cl, - MemRegion chunk_mr, - MemRegion used, - jbyte** lowest_non_clean, - uintptr_t lowest_non_clean_base_chunk_index, - size_t lowest_non_clean_chunk_size) -{ - // We must worry about non-array objects that cross chunk boundaries, - // because such objects are both precisely and imprecisely marked: - // .. if the head of such an object is dirty, the entire object - // needs to be scanned, under the interpretation that this - // was an imprecise mark - // .. if the head of such an object is not dirty, we can assume - // precise marking and it's efficient to scan just the dirty - // cards. - // In either case, each scanned reference must be scanned precisely - // once so as to avoid cloning of a young referent. For efficiency, - // our closures depend on this property and do not protect against - // double scans. - - uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start()); - assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error."); - uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index; - - // First, set "our" lowest_non_clean entry, which would be - // used by the thread scanning an adjoining left chunk with - // a non-array object straddling the mutual boundary. - // Find the object that spans our boundary, if one exists. - // first_block is the block possibly straddling our left boundary. - HeapWord* first_block = sp->block_start(chunk_mr.start()); - assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()), - "First chunk should always have a co-initial block"); - // Does the block straddle the chunk's left boundary, and is it - // a non-array object? 
- if (first_block < chunk_mr.start() // first block straddles left bdry - && sp->block_is_obj(first_block) // first block is an object - && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied) - || oop(first_block)->is_typeArray())) { - // Find our least non-clean card, so that a left neighbor - // does not scan an object straddling the mutual boundary - // too far to the right, and attempt to scan a portion of - // that object twice. - jbyte* first_dirty_card = NULL; - jbyte* last_card_of_first_obj = - byte_for(first_block + sp->block_size(first_block) - 1); - jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); - jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last()); - jbyte* last_card_to_check = - (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk, - (intptr_t) last_card_of_first_obj); - // Note that this does not need to go beyond our last card - // if our first object completely straddles this chunk. - for (jbyte* cur = first_card_of_cur_chunk; - cur <= last_card_to_check; cur++) { - jbyte val = *cur; - if (card_will_be_scanned(val)) { - first_dirty_card = cur; break; - } else { - assert(!card_may_have_been_dirty(val), "Error"); - } - } - if (first_dirty_card != NULL) { - assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error."); - assert(lowest_non_clean[cur_chunk_index] == NULL, - "Write exactly once : value should be stable hereafter for this round"); - lowest_non_clean[cur_chunk_index] = first_dirty_card; - } - } else { - // In this case we can help our neighbor by just asking them - // to stop at our first card (even though it may not be dirty). - assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter"); - jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start()); - lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk; - } - - // Next, set our own max_to_do, which will strictly/exclusively bound - // the highest address that we will scan past the right end of our chunk. - HeapWord* max_to_do = NULL; - if (chunk_mr.end() < used.end()) { - // This is not the last chunk in the used region. - // What is our last block? We check the first block of - // the next (right) chunk rather than strictly check our last block - // because it's potentially more efficient to do so. - HeapWord* const last_block = sp->block_start(chunk_mr.end()); - assert(last_block <= chunk_mr.end(), "In case this property changes."); - if ((last_block == chunk_mr.end()) // our last block does not straddle boundary - || !sp->block_is_obj(last_block) // last_block isn't an object - || oop(last_block)->is_objArray() // last_block is an array (precisely marked) - || oop(last_block)->is_typeArray()) { - max_to_do = chunk_mr.end(); - } else { - assert(last_block < chunk_mr.end(), "Tautology"); - // It is a non-array object that straddles the right boundary of this chunk. - // last_obj_card is the card corresponding to the start of the last object - // in the chunk. Note that the last object may not start in - // the chunk. - jbyte* const last_obj_card = byte_for(last_block); - const jbyte val = *last_obj_card; - if (!card_will_be_scanned(val)) { - assert(!card_may_have_been_dirty(val), "Error"); - // The card containing the head is not dirty. Any marks on - // subsequent cards still in this chunk must have been made - // precisely; we can cap processing at the end of our chunk. 
- max_to_do = chunk_mr.end(); - } else { - // The last object must be considered dirty, and extends onto the - // following chunk. Look for a dirty card in that chunk that will - // bound our processing. - jbyte* limit_card = NULL; - const size_t last_block_size = sp->block_size(last_block); - jbyte* const last_card_of_last_obj = - byte_for(last_block + last_block_size - 1); - jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end()); - // This search potentially goes a long distance looking - // for the next card that will be scanned, terminating - // at the end of the last_block, if no earlier dirty card - // is found. - assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk, - "last card of next chunk may be wrong"); - for (jbyte* cur = first_card_of_next_chunk; - cur <= last_card_of_last_obj; cur++) { - const jbyte val = *cur; - if (card_will_be_scanned(val)) { - limit_card = cur; break; - } else { - assert(!card_may_have_been_dirty(val), "Error: card can't be skipped"); - } - } - if (limit_card != NULL) { - max_to_do = addr_for(limit_card); - assert(limit_card != NULL && max_to_do != NULL, "Error"); - } else { - // The following is a pessimistic value, because it's possible - // that a dirty card on a subsequent chunk has been cleared by - // the time we get to look at it; we'll correct for that further below, - // using the LNC array which records the least non-clean card - // before cards were cleared in a particular chunk. - limit_card = last_card_of_last_obj; - max_to_do = last_block + last_block_size; - assert(limit_card != NULL && max_to_do != NULL, "Error"); - } - assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size, - "Bounds error."); - // It is possible that a dirty card for the last object may have been - // cleared before we had a chance to examine it. In that case, the value - // will have been logged in the LNC for that chunk. - // We need to examine as many chunks to the right as this object - // covers. However, we need to bound this checking to the largest - // entry in the LNC array: this is because the heap may expand - // after the LNC array has been created but before we reach this point, - // and the last block in our chunk may have been expanded to include - // the expansion delta (and possibly subsequently allocated from, so - // it wouldn't be sufficient to check whether that last block was - // or was not an object at this point). 
- uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1) - - lowest_non_clean_base_chunk_index; - const uintptr_t last_chunk_index = addr_to_chunk_index(used.last()) - - lowest_non_clean_base_chunk_index; - if (last_chunk_index_to_check > last_chunk_index) { - assert(last_block + last_block_size > used.end(), - "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]" - " does not exceed used.end() = " PTR_FORMAT "," - " yet last_chunk_index_to_check " INTPTR_FORMAT - " exceeds last_chunk_index " INTPTR_FORMAT, - p2i(last_block), p2i(last_block + last_block_size), - p2i(used.end()), - last_chunk_index_to_check, last_chunk_index); - assert(sp->used_region().end() > used.end(), - "Expansion did not happen: " - "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")", - p2i(sp->used_region().start()), p2i(sp->used_region().end()), - p2i(used.start()), p2i(used.end())); - last_chunk_index_to_check = last_chunk_index; - } - for (uintptr_t lnc_index = cur_chunk_index + 1; - lnc_index <= last_chunk_index_to_check; - lnc_index++) { - jbyte* lnc_card = lowest_non_clean[lnc_index]; - if (lnc_card != NULL) { - // we can stop at the first non-NULL entry we find - if (lnc_card <= limit_card) { - limit_card = lnc_card; - max_to_do = addr_for(limit_card); - assert(limit_card != NULL && max_to_do != NULL, "Error"); - } - // In any case, we break now - break; - } // else continue to look for a non-NULL entry if any - } - assert(limit_card != NULL && max_to_do != NULL, "Error"); - } - assert(max_to_do != NULL, "OOPS 1 !"); - } - assert(max_to_do != NULL, "OOPS 2!"); - } else { - max_to_do = used.end(); - } - assert(max_to_do != NULL, "OOPS 3!"); - // Now we can set the closure we're using so it doesn't to beyond - // max_to_do. - dcto_cl->set_min_done(max_to_do); -#ifndef PRODUCT - dcto_cl->set_last_bottom(max_to_do); -#endif -} - -void -CardTableRS:: -get_LNC_array_for_space(Space* sp, - jbyte**& lowest_non_clean, - uintptr_t& lowest_non_clean_base_chunk_index, - size_t& lowest_non_clean_chunk_size) { - - int i = find_covering_region_containing(sp->bottom()); - MemRegion covered = _covered[i]; - size_t n_chunks = chunks_to_cover(covered); - - // Only the first thread to obtain the lock will resize the - // LNC array for the covered region. Any later expansion can't affect - // the used_at_save_marks region. - // (I observed a bug in which the first thread to execute this would - // resize, and then it would cause "expand_and_allocate" that would - // increase the number of chunks in the covered region. Then a second - // thread would come and execute this, see that the size didn't match, - // and free and allocate again. So the first thread would be using a - // freed "_lowest_non_clean" array.) - - // Do a dirty read here. If we pass the conditional then take the rare - // event lock and do the read again in case some other thread had already - // succeeded and done the resize. - int cur_collection = CMSHeap::heap()->total_collections(); - // Updated _last_LNC_resizing_collection[i] must not be visible before - // _lowest_non_clean and friends are visible. Therefore use acquire/release - // to guarantee this on non TSO architecures. - if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) { - MutexLocker x(ParGCRareEvent_lock); - // This load_acquire is here for clarity only. The MutexLocker already fences. 
- if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) { - if (_lowest_non_clean[i] == NULL || - n_chunks != _lowest_non_clean_chunk_size[i]) { - - // Should we delete the old? - if (_lowest_non_clean[i] != NULL) { - assert(n_chunks != _lowest_non_clean_chunk_size[i], - "logical consequence"); - FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]); - _lowest_non_clean[i] = NULL; - } - // Now allocate a new one if necessary. - if (_lowest_non_clean[i] == NULL) { - _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC); - _lowest_non_clean_chunk_size[i] = n_chunks; - _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start()); - for (int j = 0; j < (int)n_chunks; j++) - _lowest_non_clean[i][j] = NULL; - } - } - // Make sure this gets visible only after _lowest_non_clean* was initialized - OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection); - } - } - // In any case, now do the initialization. - lowest_non_clean = _lowest_non_clean[i]; - lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i]; - lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i]; -} diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/g1BarrierSet.cpp --- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -34,7 +34,7 @@ #include "runtime/thread.inline.hpp" G1BarrierSet::G1BarrierSet(G1CardTable* card_table) : - CardTableModRefBS(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)), + CardTableBarrierSet(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)), _dcqs(JavaThread::dirty_card_queue_set()) { } @@ -167,7 +167,7 @@ void G1BarrierSet::on_thread_detach(JavaThread* thread) { // Flush any deferred card marks, SATB buffers and dirty card queue buffers - CardTableModRefBS::on_thread_detach(thread); + CardTableBarrierSet::on_thread_detach(thread); thread->satb_mark_queue().flush(); thread->dirty_card_queue().flush(); } diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/g1BarrierSet.hpp --- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #ifndef SHARE_VM_GC_G1_G1BARRIERSET_HPP #define SHARE_VM_GC_G1_G1BARRIERSET_HPP -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" class DirtyCardQueueSet; class CardTable; @@ -34,7 +34,7 @@ // This barrier is specialized to use a logging barrier to support // snapshot-at-the-beginning marking. 
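A note on the constructor change just above: G1BarrierSet forwards a FakeRtti value tagged BarrierSet::G1BarrierSet up to CardTableBarrierSet, and each constructor in the chain adds its own tag. That is what lets BarrierSet::is_a() answer for every level of the hierarchy with a bitmask test instead of C++ RTTI. A toy illustration of the tag-accumulation pattern; every name in it is invented for the sketch.

#include <cassert>
#include <cstdint>

enum Kind { KindCardTable = 0, KindG1 = 1 };

struct BarrierBase {
  uint32_t _tags;
  explicit BarrierBase(uint32_t tags) : _tags(tags) {}
  bool is_a(Kind k) const { return (_tags & (1u << k)) != 0; }
};

struct CardTableLike : BarrierBase {
  // Add our own tag to whatever tags a subclass passed up.
  explicit CardTableLike(uint32_t tags) : BarrierBase(tags | (1u << KindCardTable)) {}
};

struct G1Like : CardTableLike {
  G1Like() : CardTableLike(1u << KindG1) {}
};

int main() {
  G1Like g1;
  assert(g1.is_a(KindG1) && g1.is_a(KindCardTable));  // both tags accumulated
  return 0;
}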
-class G1BarrierSet: public CardTableModRefBS { +class G1BarrierSet: public CardTableBarrierSet { friend class VMStructs; private: DirtyCardQueueSet& _dcqs; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/g1CardCounts.cpp --- a/src/hotspot/share/gc/g1/g1CardCounts.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "gc/g1/g1CardCounts.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "services/memTracker.hpp" #include "utilities/copy.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/g1CardCounts.hpp --- a/src/hotspot/share/gc/g1/g1CardCounts.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/g1CardCounts.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -31,7 +31,7 @@ #include "memory/virtualspace.hpp" #include "utilities/globalDefinitions.hpp" -class CardTableModRefBS; +class CardTableBarrierSet; class G1CardCounts; class G1CollectedHeap; class G1RegionToSpaceMapper; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/g1RemSet.hpp --- a/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -37,7 +37,7 @@ // collection set. class BitMap; -class CardTableModRefBS; +class CardTableBarrierSet; class G1BlockOffsetTable; class CodeBlobClosure; class G1CollectedHeap; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/sparsePRT.cpp --- a/src/hotspot/share/gc/g1/sparsePRT.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/sparsePRT.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "gc/g1/heapRegionBounds.inline.hpp" #include "gc/g1/heapRegionRemSet.hpp" #include "gc/g1/sparsePRT.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/space.inline.hpp" #include "memory/allocation.inline.hpp" #include "runtime/atomic.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/g1/sparsePRT.hpp --- a/src/hotspot/share/gc/g1/sparsePRT.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/g1/sparsePRT.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "gc/g1/g1CollectedHeap.hpp" #include "gc/g1/heapRegion.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "memory/allocation.hpp" #include "runtime/mutex.hpp" #include "utilities/align.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/parallel/asPSOldGen.cpp --- a/src/hotspot/share/gc/parallel/asPSOldGen.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/parallel/asPSOldGen.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -27,7 +27,7 @@ #include "gc/parallel/parallelScavengeHeap.hpp" #include "gc/parallel/psAdaptiveSizePolicy.hpp" #include "gc/parallel/psMarkSweepDecorator.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "utilities/align.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/parallel/objectStartArray.cpp --- a/src/hotspot/share/gc/parallel/objectStartArray.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "gc/parallel/objectStartArray.inline.hpp" -#include 
"gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -71,7 +71,7 @@ PSCardTable* card_table = new PSCardTable(reserved_region()); card_table->initialize(); - CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table); + CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table); barrier_set->initialize(); set_barrier_set(barrier_set); @@ -626,8 +626,8 @@ return (ParallelScavengeHeap*)heap; } -CardTableModRefBS* ParallelScavengeHeap::barrier_set() { - return barrier_set_cast(CollectedHeap::barrier_set()); +CardTableBarrierSet* ParallelScavengeHeap::barrier_set() { + return barrier_set_cast(CollectedHeap::barrier_set()); } PSCardTable* ParallelScavengeHeap::card_table() { diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "gc/parallel/psGCAdaptivePolicyCounters.hpp" #include "gc/parallel/psOldGen.hpp" #include "gc/parallel/psYoungGen.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/gcPolicyCounters.hpp" @@ -127,7 +127,7 @@ static GCTaskManager* const gc_task_manager() { return _gc_task_manager; } - CardTableModRefBS* barrier_set(); + CardTableBarrierSet* barrier_set(); PSCardTable* card_table(); AdjoiningGenerations* gens() { return _gens; } diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/parallel/psCardTable.cpp --- a/src/hotspot/share/gc/parallel/psCardTable.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/parallel/psCardTable.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -392,10 +392,10 @@ // Assumes that only the base or the end changes. This allows indentification // of the region that is being resized. The -// CardTableModRefBS::resize_covered_region() is used for the normal case +// CardTable::resize_covered_region() is used for the normal case // where the covered regions are growing or shrinking at the high end. // The method resize_covered_region_by_end() is analogous to -// CardTableModRefBS::resize_covered_region() but +// CardTable::resize_covered_region() but // for regions that grow or shrink at the low end. 
void PSCardTable::resize_covered_region(MemRegion new_region) { for (int i = 0; i < _cur_covered_regions; i++) { @@ -463,7 +463,7 @@ resize_update_covered_table(changed_region, new_region); int ind = changed_region; - log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: "); + log_trace(gc, barrier)("CardTable::resize_covered_region: "); log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT, ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last())); log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT, diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/parallel/psOldGen.cpp --- a/src/hotspot/share/gc/parallel/psOldGen.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/parallel/psOldGen.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -29,7 +29,7 @@ #include "gc/parallel/psCardTable.hpp" #include "gc/parallel/psMarkSweepDecorator.hpp" #include "gc/parallel/psOldGen.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/spaceDecorator.hpp" #include "logging/log.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/barrierSetConfig.hpp --- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -36,7 +36,7 @@ // Do something for each concrete barrier set part of the build. #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \ - f(CardTableModRef) \ + f(CardTableBarrierSet) \ FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) #define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \ diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp --- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -28,7 +28,7 @@ #include "gc/shared/barrierSetConfig.hpp" #include "gc/shared/modRefBarrierSet.inline.hpp" -#include "gc/shared/cardTableModRefBS.inline.hpp" +#include "gc/shared/cardTableBarrierSet.inline.hpp" #if INCLUDE_ALL_GCS #include "gc/g1/g1BarrierSet.inline.hpp" // G1 support diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTable.hpp --- a/src/hotspot/share/gc/shared/cardTable.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/gc/shared/cardTable.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -87,7 +87,7 @@ MemRegion committed_unique_to_self(int self, MemRegion mr) const; // Some barrier sets create tables whose elements correspond to parts of - // the heap; the CardTableModRefBS is an example. Such barrier sets will + // the heap; the CardTableBarrierSet is an example. Such barrier sets will // normally reserve space for such tables, and commit parts of the table // "covering" parts of the heap that are committed. At most one covered // region per generation is needed. @@ -114,7 +114,7 @@ virtual ~CardTable(); virtual void initialize(); - // The kinds of precision a CardTableModRefBS may offer. + // The kinds of precision a CardTable may offer. 
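The one-word edit to FOR_EACH_CONCRETE_BARRIER_SET_DO in barrierSetConfig.hpp above ripples everywhere because it is an X-macro: client code supplies a macro f, and the list applies f to each concrete barrier set in the build. A self-contained illustration of the pattern, with shapes standing in for barrier sets.

#include <cstdio>

#define FOR_EACH_SHAPE_DO(f) \
  f(Circle)                  \
  f(Square)

// One expansion generates an enum tag per shape...
#define DECLARE_TAG(name) name##_tag,
enum ShapeTag { FOR_EACH_SHAPE_DO(DECLARE_TAG) shape_tag_count };

// ...another generates code over the same list.
#define PRINT_NAME(name) std::printf("%s\n", #name);
int main() {
  FOR_EACH_SHAPE_DO(PRINT_NAME)
  return 0;
}

Renaming an entry in such a list (here, CardTableModRef to CardTableBarrierSet) renames the generated enum value and every generated use in one place.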
enum PrecisionStyle { Precise, ObjHeadPreciseArray diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableBarrierSet.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/shared/cardTableBarrierSet.inline.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "gc/shared/genCollectedHeap.hpp" +#include "gc/shared/space.inline.hpp" +#include "logging/log.hpp" +#include "memory/virtualspace.hpp" +#include "oops/oop.inline.hpp" +#include "runtime/thread.hpp" +#include "services/memTracker.hpp" +#include "utilities/align.hpp" +#include "utilities/macros.hpp" + +// This kind of "BarrierSet" allows a "CollectedHeap" to detect and +// enumerate ref fields that have been modified (since the last +// enumeration.) + +CardTableBarrierSet::CardTableBarrierSet( + CardTable* card_table, + const BarrierSet::FakeRtti& fake_rtti) : + ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableBarrierSet)), + _defer_initial_card_mark(false), + _card_table(card_table) +{} + +CardTableBarrierSet::CardTableBarrierSet(CardTable* card_table) : + ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableBarrierSet)), + _defer_initial_card_mark(false), + _card_table(card_table) +{} + +void CardTableBarrierSet::initialize() { + initialize_deferred_card_mark_barriers(); +} + +CardTableBarrierSet::~CardTableBarrierSet() { + delete _card_table; +} + +void CardTableBarrierSet::write_ref_array_work(MemRegion mr) { + _card_table->dirty_MemRegion(mr); +} + +void CardTableBarrierSet::invalidate(MemRegion mr) { + _card_table->invalidate(mr); +} + +void CardTableBarrierSet::print_on(outputStream* st) const { + _card_table->print_on(st); +} + +// Helper for ReduceInitialCardMarks. For performance, +// compiled code may elide card-marks for initializing stores +// to a newly allocated object along the fast-path. We +// compensate for such elided card-marks as follows: +// (a) Generational, non-concurrent collectors, such as +// GenCollectedHeap(ParNew,DefNew,Tenured) and +// ParallelScavengeHeap(ParallelGC, ParallelOldGC) +// need the card-mark if and only if the region is +// in the old gen, and do not care if the card-mark +// succeeds or precedes the initializing stores themselves, +// so long as the card-mark is completed before the next +// scavenge. 
For all these cases, we can do a card mark
+// at the point at which we do a slow path allocation
+// in the old gen, i.e. in this call.
+// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
+// in addition that the card-mark for an old gen allocated
+// object strictly follow any associated initializing stores.
+// In these cases, the memRegion remembered below is
+// used to card-mark the entire region either just before the next
+// slow-path allocation by this thread or just before the next scavenge or
+// CMS-associated safepoint, whichever of these events happens first.
+// (The implicit assumption is that the object has been fully
+// initialized by this point, a fact that we assert when doing the
+// card-mark.)
+// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
+// G1 concurrent marking is in progress an SATB (pre-write-)barrier
+// is used to remember the pre-value of any store. Initializing
+// stores will not need this barrier, so we need not worry about
+// compensating for the missing pre-barrier here. Turning now
+// to the post-barrier, we note that G1 needs a RS update barrier
+// which simply enqueues a (sequence of) dirty cards which may
+// optionally be refined by the concurrent update threads. Note
+// that this barrier need only be applied to a non-young write,
+// but, like in CMS, because of the presence of concurrent refinement
+// (much like CMS' precleaning), must strictly follow the oop-store.
+// Thus, using the same protocol for maintaining the intended
+// invariants turns out, serendipitously, to be the same for both
+// G1 and CMS.
+//
+// For any future collector, this code should be reexamined with
+// that specific collector in mind, and the documentation above suitably
+// extended and updated.
+void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+ if (!ReduceInitialCardMarks) {
+ return;
+ }
+ // If a previous card-mark was deferred, flush it now.
+ flush_deferred_card_mark_barrier(thread);
+ if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
+ // Arrays of non-references don't need a post-barrier.
+ // The deferred_card_mark region should be empty
+ // following the flush above.
+ assert(thread->deferred_card_mark().is_empty(), "Error");
+ } else {
+ MemRegion mr((HeapWord*)new_obj, new_obj->size());
+ assert(!mr.is_empty(), "Error");
+ if (_defer_initial_card_mark) {
+ // Defer the card mark
+ thread->set_deferred_card_mark(mr);
+ } else {
+ // Do the card mark
+ invalidate(mr);
+ }
+ }
+#endif // COMPILER2 || JVMCI
+}
+
+void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
+ // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
+ // otherwise remains unused.
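The control flow of on_slowpath_allocation_exit() above reduces to a small decision table. The sketch below restates it with invented names purely to make the three outcomes explicit; it is a summary of the code above, not a HotSpot API.

enum MarkAction { NoMark, DeferMark, MarkNow };

MarkAction choose_card_mark(bool reduce_initial_card_marks,
                            bool is_type_array, bool in_young,
                            bool defer_initial_card_mark) {
  if (!reduce_initial_card_marks) return NoMark;  // barrier was never elided
  if (is_type_array || in_young)  return NoMark;  // no post-barrier needed
  return defer_initial_card_mark ? DeferMark      // remember mr on the thread
                                 : MarkNow;       // invalidate(mr) immediately
}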
+#if defined(COMPILER2) || INCLUDE_JVMCI + _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers() + && (DeferInitialCardMark || card_mark_must_follow_store()); +#else + assert(_defer_initial_card_mark == false, "Who would set it?"); +#endif +} + +void CardTableBarrierSet::flush_deferred_card_mark_barrier(JavaThread* thread) { +#if defined(COMPILER2) || INCLUDE_JVMCI + MemRegion deferred = thread->deferred_card_mark(); + if (!deferred.is_empty()) { + assert(_defer_initial_card_mark, "Otherwise should be empty"); + { + // Verify that the storage points to a parsable object in heap + DEBUG_ONLY(oop old_obj = oop(deferred.start());) + assert(!_card_table->is_in_young(old_obj), + "Else should have been filtered in on_slowpath_allocation_exit()"); + assert(oopDesc::is_oop(old_obj, true), "Not an oop"); + assert(deferred.word_size() == (size_t)(old_obj->size()), + "Mismatch: multiple objects?"); + } + write_region(deferred); + // "Clear" the deferred_card_mark field + thread->set_deferred_card_mark(MemRegion()); + } + assert(thread->deferred_card_mark().is_empty(), "invariant"); +#else + assert(!_defer_initial_card_mark, "Should be false"); + assert(thread->deferred_card_mark().is_empty(), "Should be empty"); +#endif +} + +void CardTableBarrierSet::on_thread_detach(JavaThread* thread) { + // The deferred store barriers must all have been flushed to the + // card-table (or other remembered set structure) before GC starts + // processing the card-table (or other remembered set). + flush_deferred_card_mark_barrier(thread); +} + +bool CardTableBarrierSet::card_mark_must_follow_store() const { + return _card_table->scanned_concurrently(); +} diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableBarrierSet.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP +#define SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP + +#include "gc/shared/modRefBarrierSet.hpp" +#include "utilities/align.hpp" + +class CardTable; + +// This kind of "BarrierSet" allows a "CollectedHeap" to detect and +// enumerate ref fields that have been modified (since the last +// enumeration.) 
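card_mark_must_follow_store() above answers true exactly when the card table is scanned concurrently: if the card mark became visible before the initializing stores, a concurrent precleaner could clean the card and the stores would be missed. A miniature C++11-atomics model of the required ordering; this is our own illustration, not HotSpot code.

#include <atomic>

std::atomic<int>  field(0);  // stands in for the initializing store
std::atomic<char> card(0);   // stands in for the card byte (1 == dirty)

void mutator_store() {
  field.store(42, std::memory_order_relaxed);
  // The release makes the field value visible to any thread that
  // observes the dirty card: the card mark strictly follows the store.
  card.store(1, std::memory_order_release);
}

bool precleaner_scan() {
  if (card.load(std::memory_order_acquire) == 1) {
    card.store(0, std::memory_order_relaxed);           // clean the card
    return field.load(std::memory_order_relaxed) == 42; // guaranteed true
  }
  return true;  // nothing published yet; a later pass will see the card
}

This is the same reasoning behind the UseConcMarkSweepGC release_store in write_ref_field_post, shown later in this patch in cardTableBarrierSet.inline.hpp.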
+
+// As it currently stands, this barrier is *imprecise*: when a ref field in
+// an object "o" is modified, the card table entry for the card containing
+// the head of "o" is dirtied, not necessarily the card containing the
+// modified field itself. For object arrays, however, the barrier *is*
+// precise; only the card containing the modified element is dirtied.
+// Closures used to scan dirty cards should take these
+// considerations into account.
+
+class CardTableBarrierSet: public ModRefBarrierSet {
+ // Some classes get to look at some private stuff.
+ friend class VMStructs;
+ protected:
+
+ // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
+ // or INCLUDE_JVMCI is being used
+ bool _defer_initial_card_mark;
+ CardTable* _card_table;
+
+ CardTableBarrierSet(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
+
+ public:
+ CardTableBarrierSet(CardTable* card_table);
+ ~CardTableBarrierSet();
+
+ CardTable* card_table() const { return _card_table; }
+
+ virtual void initialize();
+
+ void write_region(MemRegion mr) {
+ invalidate(mr);
+ }
+
+ protected:
+ void write_ref_array_work(MemRegion mr);
+
+ public:
+ // Record a reference update. Note that these versions are precise!
+ // The scanning code has to handle the fact that the write barrier may be
+ // either precise or imprecise. We make non-virtual inline variants of
+ // these functions here for performance.
+ template <class T>
+ void write_ref_field_post(T* field, oop newVal);
+
+ virtual void invalidate(MemRegion mr);
+
+ // ReduceInitialCardMarks
+ void initialize_deferred_card_mark_barriers();
+
+ // If the CollectedHeap was asked to defer a store barrier above,
+ // this informs it to flush such a deferred store barrier to the
+ // remembered set.
+ void flush_deferred_card_mark_barrier(JavaThread* thread);
+
+ // Can a compiler initialize a new object without store barriers?
+ // This permission only extends from the creation of a new object
+ // via a TLAB up to the first subsequent safepoint. If such permission
+ // is granted for this heap type, the compiler promises to call
+ // defer_store_barrier() below on any slow path allocation of
+ // a new object for which such initializing store barriers will
+ // have been elided. G1, like CMS, allows this, but should be
+ // ready to provide a compensating write barrier as necessary
+ // if that storage came out of a non-young region. The efficiency
+ // of this implementation depends crucially on being able to
+ // answer very efficiently in constant time whether a piece of
+ // storage in the heap comes from a young region or not.
+ // See ReduceInitialCardMarks.
+ virtual bool can_elide_tlab_store_barriers() const {
+ return true;
+ }
+
+ // If a compiler is eliding store barriers for TLAB-allocated objects,
+ // we will be informed of a slow-path allocation by a call
+ // to on_slowpath_allocation_exit() below. Such a call precedes the
+ // initialization of the object itself, and no post-store-barriers will
+ // be issued. Some heap types require that the barrier strictly follows
+ // the initializing stores. (This is currently implemented by deferring the
+ // barrier until the next slow-path allocation or gc-related safepoint.)
+ // This interface answers whether a particular barrier type needs the card
+ // mark to be thus strictly sequenced after the stores.
+ virtual bool card_mark_must_follow_store() const;
+
+ virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
+ virtual void on_thread_detach(JavaThread* thread);
+
+ virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
+
+ virtual void print_on(outputStream* st) const;
+
+ template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
+ class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
+};
+
+template<>
+struct BarrierSet::GetName<CardTableBarrierSet> {
+ static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
+ typedef ::CardTableBarrierSet type;
+};
+
+#endif // SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp Mon Mar 19 07:38:18 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
+
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+template <class T>
+inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
+ volatile jbyte* byte = _card_table->byte_for(field);
+ if (UseConcMarkSweepGC) {
+ // Perform a releasing store if using CMS so that it may
+ // scan and clear the cards concurrently during pre-cleaning.
+ OrderAccess::release_store(byte, CardTable::dirty_card_val());
+ } else {
+ *byte = CardTable::dirty_card_val();
+ }
+}
+
+#endif // SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableModRefBS.cpp
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Fri Mar 16 14:47:53 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableModRefBS.cpp
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp	Fri Mar 16 14:47:53 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "logging/log.hpp"
-#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/thread.hpp"
-#include "services/memTracker.hpp"
-#include "utilities/align.hpp"
-#include "utilities/macros.hpp"
-
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration.)
-
-CardTableModRefBS::CardTableModRefBS(
-  CardTable* card_table,
-  const BarrierSet::FakeRtti& fake_rtti) :
-  ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
-  _defer_initial_card_mark(false),
-  _card_table(card_table)
-{}
-
-CardTableModRefBS::CardTableModRefBS(CardTable* card_table) :
-  ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)),
-  _defer_initial_card_mark(false),
-  _card_table(card_table)
-{}
-
-void CardTableModRefBS::initialize() {
-  initialize_deferred_card_mark_barriers();
-}
-
-CardTableModRefBS::~CardTableModRefBS() {
-  delete _card_table;
-}
-
-void CardTableModRefBS::write_ref_array_work(MemRegion mr) {
-  _card_table->dirty_MemRegion(mr);
-}
-
-void CardTableModRefBS::invalidate(MemRegion mr) {
-  _card_table->invalidate(mr);
-}
-
-void CardTableModRefBS::print_on(outputStream* st) const {
-  _card_table->print_on(st);
-}
-
-// Helper for ReduceInitialCardMarks. For performance,
-// compiled code may elide card-marks for initializing stores
-// to a newly allocated object along the fast-path. We
-// compensate for such elided card-marks as follows:
-// (a) Generational, non-concurrent collectors, such as
-//     GenCollectedHeap(ParNew,DefNew,Tenured) and
-//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
-//     need the card-mark if and only if the region is
-//     in the old gen, and do not care if the card-mark
-//     succeeds or precedes the initializing stores themselves,
-//     so long as the card-mark is completed before the next
-//     scavenge. For all these cases, we can do a card mark
-//     at the point at which we do a slow path allocation
-//     in the old gen, i.e. in this call.
-// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
-//     in addition that the card-mark for an old gen allocated
-//     object strictly follow any associated initializing stores.
-//     In these cases, the memRegion remembered below is
-//     used to card-mark the entire region either just before the next
-//     slow-path allocation by this thread or just before the next scavenge or
-//     CMS-associated safepoint, whichever of these events happens first.
-//     (The implicit assumption is that the object has been fully
-//     initialized by this point, a fact that we assert when doing the
-//     card-mark.)
-// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
-//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
-//     is used to remember the pre-value of any store. Initializing
-//     stores will not need this barrier, so we need not worry about
-//     compensating for the missing pre-barrier here. Turning now
-//     to the post-barrier, we note that G1 needs a RS update barrier
-//     which simply enqueues a (sequence of) dirty cards which may
-//     optionally be refined by the concurrent update threads. Note
-//     that this barrier need only be applied to a non-young write,
-//     but, like in CMS, because of the presence of concurrent refinement
-//     (much like CMS' precleaning), must strictly follow the oop-store.
-//     Thus, using the same protocol for maintaining the intended
-//     invariants turns out, serendepitously, to be the same for both
-//     G1 and CMS.
-//
-// For any future collector, this code should be reexamined with
-// that specific collector in mind, and the documentation above suitably
-// extended and updated.
-void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
-#if defined(COMPILER2) || INCLUDE_JVMCI
-  if (!ReduceInitialCardMarks) {
-    return;
-  }
-  // If a previous card-mark was deferred, flush it now.
-  flush_deferred_card_mark_barrier(thread);
-  if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
-    // Arrays of non-references don't need a post-barrier.
-    // The deferred_card_mark region should be empty
-    // following the flush above.
-    assert(thread->deferred_card_mark().is_empty(), "Error");
-  } else {
-    MemRegion mr((HeapWord*)new_obj, new_obj->size());
-    assert(!mr.is_empty(), "Error");
-    if (_defer_initial_card_mark) {
-      // Defer the card mark
-      thread->set_deferred_card_mark(mr);
-    } else {
-      // Do the card mark
-      invalidate(mr);
-    }
-  }
-#endif // COMPILER2 || JVMCI
-}
-
-void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
-  // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
-  // otherwise remains unused.
-#if defined(COMPILER2) || INCLUDE_JVMCI
-  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
-                             && (DeferInitialCardMark || card_mark_must_follow_store());
-#else
-  assert(_defer_initial_card_mark == false, "Who would set it?");
-#endif
-}
-
-void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
-#if defined(COMPILER2) || INCLUDE_JVMCI
-  MemRegion deferred = thread->deferred_card_mark();
-  if (!deferred.is_empty()) {
-    assert(_defer_initial_card_mark, "Otherwise should be empty");
-    {
-      // Verify that the storage points to a parsable object in heap
-      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
-      assert(!_card_table->is_in_young(old_obj),
-             "Else should have been filtered in on_slowpath_allocation_exit()");
-      assert(oopDesc::is_oop(old_obj, true), "Not an oop");
-      assert(deferred.word_size() == (size_t)(old_obj->size()),
-             "Mismatch: multiple objects?");
-    }
-    write_region(deferred);
-    // "Clear" the deferred_card_mark field
-    thread->set_deferred_card_mark(MemRegion());
-  }
-  assert(thread->deferred_card_mark().is_empty(), "invariant");
-#else
-  assert(!_defer_initial_card_mark, "Should be false");
-  assert(thread->deferred_card_mark().is_empty(), "Should be empty");
-#endif
-}
-
-void CardTableModRefBS::on_thread_detach(JavaThread* thread) {
-  // The deferred store barriers must all have been flushed to the
-  // card-table (or other remembered set structure) before GC starts
-  // processing the card-table (or other remembered set).
-  flush_deferred_card_mark_barrier(thread);
-}
-
-bool CardTableModRefBS::card_mark_must_follow_store() const {
-  return _card_table->scanned_concurrently();
-}
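
The deleted implementation above, carried over under the new name, defers the
initial card mark whenever the collector requires card marks to strictly follow
the initializing stores. A condensed sketch of that decision flow, using
hypothetical stand-in types (the real code operates on JavaThread's
deferred_card_mark MemRegion and the flags named in the comment):

    #include <cstddef>

    // Condensed model of the deferred initial-card-mark protocol.
    // Thread, Region and the booleans are illustrative stand-ins for
    // JavaThread, MemRegion, ReduceInitialCardMarks, etc.
    struct Region {
      void*  start;
      size_t words;
      bool empty() const { return words == 0; }
    };
    struct Thread { Region deferred_card_mark; };

    struct CardMarkPolicy {
      bool defer_initial_card_mark;  // true iff marks must strictly follow stores

      void dirty_region(const Region&) { /* dirty every card covering the region */ }

      // Mirrors on_slowpath_allocation_exit(): called when compiled code takes
      // the slow allocation path after eliding initializing-store card marks.
      void on_slowpath_allocation_exit(Thread* t, Region obj, bool young_or_typearray) {
        flush_deferred(t);               // flush any previously deferred mark first
        if (young_or_typearray) return;  // young objects/typeArrays need no mark
        if (defer_initial_card_mark) {
          t->deferred_card_mark = obj;   // CMS-style: mark later, after init stores
        } else {
          dirty_region(obj);             // non-concurrent case: mark immediately
        }
      }

      // Mirrors flush_deferred_card_mark_barrier().
      void flush_deferred(Thread* t) {
        if (!t->deferred_card_mark.empty()) {
          dirty_region(t->deferred_card_mark);
          t->deferred_card_mark = Region{nullptr, 0};
        }
      }
    };
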
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableModRefBS.hpp
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Fri Mar 16 14:47:53 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
-#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
-
-#include "gc/shared/modRefBarrierSet.hpp"
-#include "utilities/align.hpp"
-
-class CardTable;
-
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration.)
-
-// As it currently stands, this barrier is *imprecise*: when a ref field in
-// an object "o" is modified, the card table entry for the card containing
-// the head of "o" is dirtied, not necessarily the card containing the
-// modified field itself. For object arrays, however, the barrier *is*
-// precise; only the card containing the modified element is dirtied.
-// Closures used to scan dirty cards should take these
-// considerations into account.
-
-class CardTableModRefBS: public ModRefBarrierSet {
-  // Some classes get to look at some private stuff.
-  friend class VMStructs;
- protected:
-
-  // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
-  // or INCLUDE_JVMCI is being used
-  bool _defer_initial_card_mark;
-  CardTable* _card_table;
-
-  CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
-
- public:
-  CardTableModRefBS(CardTable* card_table);
-  ~CardTableModRefBS();
-
-  CardTable* card_table() const { return _card_table; }
-
-  virtual void initialize();
-
-  void write_region(MemRegion mr) {
-    invalidate(mr);
-  }
-
- protected:
-  void write_ref_array_work(MemRegion mr);
-
- public:
-  // Record a reference update. Note that these versions are precise!
-  // The scanning code has to handle the fact that the write barrier may be
-  // either precise or imprecise. We make non-virtual inline variants of
-  // these functions here for performance.
-  template <DecoratorSet decorators, typename T>
-  void write_ref_field_post(T* field, oop newVal);
-
-  virtual void invalidate(MemRegion mr);
-
-  // ReduceInitialCardMarks
-  void initialize_deferred_card_mark_barriers();
-
-  // If the CollectedHeap was asked to defer a store barrier above,
-  // this informs it to flush such a deferred store barrier to the
-  // remembered set.
-  void flush_deferred_card_mark_barrier(JavaThread* thread);
-
-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint. If such permission
-  // is granted for this heap type, the compiler promises to call
-  // defer_store_barrier() below on any slow path allocation of
-  // a new object for which such initializing store barriers will
-  // have been elided. G1, like CMS, allows this, but should be
-  // ready to provide a compensating write barrier as necessary
-  // if that storage came out of a non-young region. The efficiency
-  // of this implementation depends crucially on being able to
-  // answer very efficiently in constant time whether a piece of
-  // storage in the heap comes from a young region or not.
-  // See ReduceInitialCardMarks.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  // If a compiler is eliding store barriers for TLAB-allocated objects,
-  // we will be informed of a slow-path allocation by a call
-  // to on_slowpath_allocation_exit() below. Such a call precedes the
-  // initialization of the object itself, and no post-store-barriers will
-  // be issued. Some heap types require that the barrier strictly follows
-  // the initializing stores. (This is currently implemented by deferring the
-  // barrier until the next slow-path allocation or gc-related safepoint.)
-  // This interface answers whether a particular barrier type needs the card
-  // mark to be thus strictly sequenced after the stores.
-  virtual bool card_mark_must_follow_store() const;
-
-  virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
-  virtual void on_thread_detach(JavaThread* thread);
-
-  virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
-
-  virtual void print_on(outputStream* st) const;
-
-  template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
-  class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
-};
-
-template<>
-struct BarrierSet::GetName<CardTableModRefBS> {
-  static const BarrierSet::Name value = BarrierSet::CardTableModRef;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
-  typedef CardTableModRefBS type;
-};
-
-#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
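
The class comment in the deleted header (retained verbatim in the new one)
distinguishes imprecise marking, which dirties the card of the object head, from
precise marking of the exact slot, used for object arrays. A small illustration of
the difference, with hypothetical helper names and the same card arithmetic as in
the earlier sketch:

    #include <cstdint>

    // Illustration of precise vs. imprecise card marking; names and the
    // stub table are illustrative, not HotSpot code.
    static volatile uint8_t card_table_stub[1 << 16];
    static const uint8_t DIRTY = 0;

    inline volatile uint8_t* byte_for(const void* addr) {
      // Stand-in mapping: HotSpot computes byte_map_base + (addr >> card_shift).
      return &card_table_stub[(reinterpret_cast<uintptr_t>(addr) >> 9) & 0xFFFF];
    }

    struct Obj { void* fields[32]; };

    // Imprecise: dirty the card covering the *object head*; the scan of that
    // card must then examine every reference field of objects starting there.
    inline void mark_imprecise(Obj* o) {
      *byte_for(o) = DIRTY;
    }

    // Precise (object arrays): dirty only the card covering the modified
    // element, so the scan can stay within that card.
    inline void mark_precise(void** slot) {
      *byte_for(slot) = DIRTY;
    }
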
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Fri Mar 16 14:47:53 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
-#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
-
-#include "gc/shared/cardTableModRefBS.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "runtime/orderAccess.inline.hpp"
-
-template <DecoratorSet decorators, typename T>
-inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) {
-  volatile jbyte* byte = _card_table->byte_for(field);
-  if (UseConcMarkSweepGC) {
-    // Perform a releasing store if using CMS so that it may
-    // scan and clear the cards concurrently during pre-cleaning.
-    OrderAccess::release_store(byte, CardTable::dirty_card_val());
-  } else {
-    *byte = CardTable::dirty_card_val();
-  }
-}
-
-#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/cardTableRS.cpp
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Mar 16 14:47:53 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Mon Mar 19 07:38:18 2018 +0100
@@ -571,7 +571,7 @@
   // [End Case 3]
   //
   // (Please refer to the code in the helper class
-  // ClearNonCleanCardWrapper and in CardTableModRefBS for details.)
+  // ClearNonCleanCardWrapper and in CardTable for details.)
   //
   // The informal arguments above can be tightened into a formal
   // correctness proof and it behooves us to write up such a proof,
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/genCollectedHeap.cpp
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Mar 16 14:47:53 2018 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Mar 19 07:38:18 2018 +0100
@@ -31,7 +31,7 @@
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
@@ -112,7 +112,7 @@
   _rem_set = new CardTableRS(reserved_region());
   _rem_set->initialize();
 
-  CardTableModRefBS *bs = new CardTableModRefBS(_rem_set);
+  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
   bs->initialize();
   set_barrier_set(bs);
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/gc/shared/genOopClosures.hpp
--- a/src/hotspot/share/gc/shared/genOopClosures.hpp	Fri Mar 16 14:47:53 2018 +0100
+++ b/src/hotspot/share/gc/shared/genOopClosures.hpp	Mon Mar 19 07:38:18 2018 +0100
@@ -31,7 +31,7 @@
 class Generation;
 class HeapWord;
 class CardTableRS;
-class CardTableModRefBS;
+class CardTableBarrierSet;
 class DefNewGeneration;
 class KlassRemSet;
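
The genCollectedHeap.cpp hunk above shows the renamed type at its construction
site: the heap builds a CardTableRS remembered set and then wraps it in a
CardTableBarrierSet. A hedged sketch of that wiring order, with simplified
stand-in types (the real constructors take a MemRegion and do reservation and
initialization work omitted here):

    // Sketch of the barrier-set wiring performed during heap initialization.
    // In HotSpot, CardTableRS is itself a CardTable subclass, which is why it
    // can back a CardTableBarrierSet; everything else here is simplified.
    class CardTable { /* byte map covering the heap, one byte per card */ };
    class CardTableRS : public CardTable { /* generational remembered set */ };

    class BarrierSet { };
    class CardTableBarrierSet : public BarrierSet {
      CardTable* _card_table;
     public:
      explicit CardTableBarrierSet(CardTable* ct) : _card_table(ct) {}
      void initialize() { /* set up deferred-card-mark support */ }
    };

    struct Heap {
      CardTableRS* _rem_set;
      BarrierSet*  _barrier_set;

      void wire_barriers() {
        _rem_set = new CardTableRS();                  // remembered set first
        CardTableBarrierSet* bs = new CardTableBarrierSet(_rem_set);
        bs->initialize();                              // then the barrier set on top
        _barrier_set = bs;                             // published via set_barrier_set()
      }
    };
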
byte_map_base"); cardtable_start_address = base; @@ -420,4 +420,3 @@ #undef ADD_UINTX_FLAG #undef CHECK_FLAG } - diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/memory/memRegion.hpp --- a/src/hotspot/share/memory/memRegion.hpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/memory/memRegion.hpp Mon Mar 19 07:38:18 2018 +0100 @@ -35,7 +35,7 @@ // Note that MemRegions are passed by value, not by reference. // The intent is that they remain very small and contain no // objects. These should never be allocated in heap but we do -// create MemRegions (in CardTableModRefBS) in heap so operator +// create MemRegions (in CardTableBarrierSet) in heap so operator // new and operator new [] added for this special case. class MetaWord; diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/memory/universe.cpp --- a/src/hotspot/share/memory/universe.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/memory/universe.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -32,7 +32,7 @@ #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" #include "code/dependencies.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/gcLocker.inline.hpp" diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/opto/graphKit.cpp --- a/src/hotspot/share/opto/graphKit.cpp Fri Mar 16 14:47:53 2018 +0100 +++ b/src/hotspot/share/opto/graphKit.cpp Mon Mar 19 07:38:18 2018 +0100 @@ -30,7 +30,7 @@ #include "gc/g1/heapRegion.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "memory/resourceArea.hpp" #include "opto/addnode.hpp" @@ -1565,7 +1565,7 @@ g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt); break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: break; default : @@ -1580,7 +1580,7 @@ case BarrierSet::G1BarrierSet: return true; // Can move it if no safepoint - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: return true; // There is no pre-barrier default : @@ -1604,7 +1604,7 @@ g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise); break; - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: write_barrier_post(store, obj, adr, adr_idx, val, use_precise); break; @@ -3811,8 +3811,8 @@ bool GraphKit::use_ReduceInitialCardMarks() { BarrierSet *bs = Universe::heap()->barrier_set(); - return bs->is_a(BarrierSet::CardTableModRef) - && barrier_set_cast(bs)->can_elide_tlab_store_barriers() + return bs->is_a(BarrierSet::CardTableBarrierSet) + && barrier_set_cast(bs)->can_elide_tlab_store_barriers() && ReduceInitialCardMarks; } @@ -3881,7 +3881,7 @@ Node* cast = __ CastPX(__ ctrl(), adr); // Divide by card size - assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef), + assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableBarrierSet), "Only one we handle so far."); Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) ); @@ -4159,7 +4159,7 @@ * as part of the allocation in the case the allocated object is not located * in the nursery, this would happen for humongous objects. 
@@ -4159,7 +4159,7 @@
  * as part of the allocation in the case the allocated object is not located
  * in the nursery, this would happen for humongous objects. This is similar to
  * how CMS is required to handle this case, see the comments for the method
- * CardTableModRefBS::on_slowpath_allocation_exit and OptoRuntime::new_deferred_store_barrier.
+ * CardTableBarrierSet::on_slowpath_allocation_exit and OptoRuntime::new_deferred_store_barrier.
  * A deferred card mark is required for these objects and handled in the above
  * mentioned methods.
 *
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/precompiled/precompiled.hpp
--- a/src/hotspot/share/precompiled/precompiled.hpp	Fri Mar 16 14:47:53 2018 +0100
+++ b/src/hotspot/share/precompiled/precompiled.hpp	Mon Mar 19 07:38:18 2018 +0100
@@ -95,7 +95,7 @@
 # include "gc/shared/ageTable.hpp"
 # include "gc/shared/barrierSet.hpp"
 # include "gc/shared/blockOffsetTable.hpp"
-# include "gc/shared/cardTableModRefBS.hpp"
+# include "gc/shared/cardTableBarrierSet.hpp"
 # include "gc/shared/collectedHeap.hpp"
 # include "gc/shared/collectorCounters.hpp"
 # include "gc/shared/collectorPolicy.hpp"
diff -r 689ebcfe04fd -r 848864ed9b17 src/hotspot/share/runtime/vmStructs.cpp
--- a/src/hotspot/share/runtime/vmStructs.cpp	Fri Mar 16 14:47:53 2018 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Mon Mar 19 07:38:18 2018 +0100
@@ -475,8 +475,8 @@
   nonstatic_field(CardTable, _committed, MemRegion*) \
   nonstatic_field(CardTable, _guard_region, MemRegion) \
   nonstatic_field(CardTable, _byte_map_base, jbyte*) \
-  nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \
-  nonstatic_field(CardTableModRefBS, _card_table, CardTable*) \
+  nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \
+  nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
   \
   nonstatic_field(CollectedHeap, _reserved, MemRegion) \
   nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
@@ -1473,7 +1473,7 @@
   declare_type(TenuredSpace, OffsetTableContigSpace) \
   declare_toplevel_type(BarrierSet) \
   declare_type(ModRefBarrierSet, BarrierSet) \
-  declare_type(CardTableModRefBS, ModRefBarrierSet) \
+  declare_type(CardTableBarrierSet, ModRefBarrierSet) \
   declare_toplevel_type(CardTable) \
   declare_type(CardTableRS, CardTable) \
   declare_toplevel_type(BarrierSet::Name) \
@@ -1502,8 +1502,8 @@
   declare_toplevel_type(CardTable*) \
   declare_toplevel_type(CardTable*const) \
   declare_toplevel_type(CardTableRS*) \
-  declare_toplevel_type(CardTableModRefBS*) \
-  declare_toplevel_type(CardTableModRefBS**) \
+  declare_toplevel_type(CardTableBarrierSet*) \
+  declare_toplevel_type(CardTableBarrierSet**) \
   declare_toplevel_type(CollectedHeap*) \
   declare_toplevel_type(ContiguousSpace*) \
   declare_toplevel_type(DefNewGeneration*) \
@@ -2237,7 +2237,7 @@
   declare_constant(AgeTable::table_size) \
   \
   declare_constant(BarrierSet::ModRef) \
-  declare_constant(BarrierSet::CardTableModRef) \
+  declare_constant(BarrierSet::CardTableBarrierSet) \
   declare_constant(BarrierSet::G1BarrierSet) \
   \
   declare_constant(BOTConstants::LogN) \