8195142: Refactor out card table from CardTableModRefBS to flatten the BarrierSet hierarchy
author eosterlund
Mon, 26 Feb 2018 09:34:12 +0100
changeset 49164 7e958a8ebcd3
parent 49163 580bb0b85f63
child 49165 75e2a82fed81
8195142: Refactor out card table from CardTableModRefBS to flatten the BarrierSet hierarchy
Reviewed-by: stefank, coleenp, kvn, ehelin
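Summary of the change: the separate card-table barrier-set kinds (CardTableForRS, CardTableExtension, and the ModRef fallthrough arms in the platform stubs) collapse into a single BarrierSet::CardTableModRef kind, and the card-table state itself (byte_map_base, card_shift, dirty_card_val(), and G1's g1_young_card_val()) moves out of CardTableModRefBS and G1SATBCardTableModRefBS into the new CardTable and G1CardTable classes, which the barrier set now merely holds. The hunks below apply the same mechanical rewrite across every platform; the following is a minimal sketch of the before/after access pattern assembled from those hunks. The helper names (byte_map_base_before, byte_map_base_after, dirty_card_for) are hypothetical, for illustration only, and the two shapes never coexist in one build, so this will not compile as a single unit:

// Old shape: card-table state was public data on the barrier set itself.
jbyte* byte_map_base_before() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableForRS ||
         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
  return ct->byte_map_base;                 // public field on the barrier set
}

// New shape: one CardTableModRef kind; the state lives on a CardTable
// object (gc/shared/cardTable.hpp) reached through card_table().
jbyte* byte_map_base_after() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
  CardTable* ct = ctbs->card_table();       // new indirection added here
  return ct->byte_map_base();               // accessor replaces the field
}

// Dirtying a card now names CardTable for the constants as well
// (CardTable::card_shift, CardTable::dirty_card_val()):
void dirty_card_for(void* heap_addr) {
  jbyte* base = byte_map_base_after();
  base[(uintptr_t)heap_addr >> CardTable::card_shift] = CardTable::dirty_card_val();
}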
src/hotspot/cpu/aarch64/aarch64.ad
src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
src/hotspot/cpu/arm/interp_masm_arm.cpp
src/hotspot/cpu/arm/macroAssembler_arm.cpp
src/hotspot/cpu/arm/stubGenerator_arm.cpp
src/hotspot/cpu/arm/templateTable_arm.cpp
src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
src/hotspot/cpu/s390/macroAssembler_s390.cpp
src/hotspot/cpu/s390/stubGenerator_s390.cpp
src/hotspot/cpu/s390/templateTable_s390.cpp
src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp
src/hotspot/cpu/sparc/macroAssembler_sparc.cpp
src/hotspot/cpu/sparc/stubGenerator_sparc.cpp
src/hotspot/cpu/sparc/templateTable_sparc.cpp
src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
src/hotspot/cpu/x86/macroAssembler_x86.cpp
src/hotspot/cpu/x86/stubGenerator_x86_32.cpp
src/hotspot/cpu/x86/stubGenerator_x86_64.cpp
src/hotspot/cpu/x86/templateTable_x86.cpp
src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp
src/hotspot/share/aot/aotCodeHeap.cpp
src/hotspot/share/c1/c1_LIRGenerator.cpp
src/hotspot/share/ci/ciUtilities.cpp
src/hotspot/share/ci/ciUtilities.hpp
src/hotspot/share/code/relocInfo_ext.cpp
src/hotspot/share/compiler/disassembler.cpp
src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp
src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp
src/hotspot/share/gc/cms/parCardTableModRefBS.cpp
src/hotspot/share/gc/g1/g1CardCounts.cpp
src/hotspot/share/gc/g1/g1CardCounts.hpp
src/hotspot/share/gc/g1/g1CardLiveData.cpp
src/hotspot/share/gc/g1/g1CardTable.cpp
src/hotspot/share/gc/g1/g1CardTable.hpp
src/hotspot/share/gc/g1/g1CardTable.inline.hpp
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
src/hotspot/share/gc/g1/g1EvacFailure.cpp
src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
src/hotspot/share/gc/g1/g1HeapVerifier.cpp
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
src/hotspot/share/gc/g1/g1RemSet.cpp
src/hotspot/share/gc/g1/g1RemSet.hpp
src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp
src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp
src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp
src/hotspot/share/gc/g1/heapRegion.cpp
src/hotspot/share/gc/g1/heapRegion.hpp
src/hotspot/share/gc/g1/heapRegionRemSet.cpp
src/hotspot/share/gc/g1/sparsePRT.cpp
src/hotspot/share/gc/parallel/asPSYoungGen.cpp
src/hotspot/share/gc/parallel/cardTableExtension.cpp
src/hotspot/share/gc/parallel/cardTableExtension.hpp
src/hotspot/share/gc/parallel/objectStartArray.cpp
src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
src/hotspot/share/gc/parallel/psCardTable.cpp
src/hotspot/share/gc/parallel/psCardTable.hpp
src/hotspot/share/gc/parallel/psMarkSweep.cpp
src/hotspot/share/gc/parallel/psOldGen.cpp
src/hotspot/share/gc/parallel/psParallelCompact.cpp
src/hotspot/share/gc/parallel/psScavenge.cpp
src/hotspot/share/gc/parallel/psScavenge.hpp
src/hotspot/share/gc/parallel/psScavenge.inline.hpp
src/hotspot/share/gc/parallel/psTasks.cpp
src/hotspot/share/gc/parallel/psTasks.hpp
src/hotspot/share/gc/parallel/psYoungGen.cpp
src/hotspot/share/gc/serial/defNewGeneration.cpp
src/hotspot/share/gc/shared/barrierSet.hpp
src/hotspot/share/gc/shared/barrierSetConfig.hpp
src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp
src/hotspot/share/gc/shared/cardGeneration.cpp
src/hotspot/share/gc/shared/cardTable.cpp
src/hotspot/share/gc/shared/cardTable.hpp
src/hotspot/share/gc/shared/cardTableModRefBS.cpp
src/hotspot/share/gc/shared/cardTableModRefBS.hpp
src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp
src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp
src/hotspot/share/gc/shared/cardTableRS.cpp
src/hotspot/share/gc/shared/cardTableRS.hpp
src/hotspot/share/gc/shared/collectorPolicy.cpp
src/hotspot/share/gc/shared/genCollectedHeap.cpp
src/hotspot/share/gc/shared/modRefBarrierSet.hpp
src/hotspot/share/gc/shared/space.cpp
src/hotspot/share/gc/shared/space.hpp
src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
src/hotspot/share/jvmci/vmStructs_jvmci.cpp
src/hotspot/share/opto/graphKit.cpp
src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp
src/hotspot/share/runtime/globals.hpp
src/hotspot/share/runtime/vmStructs.cpp
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
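One recurring detail in the compiler-facing hunks below: C1 runtime stubs that previously reached through the barrier set for the card table base now call helpers declared in ci/ciUtilities.hpp (one of the files changed above). The calls are copied verbatim from the hunks; the surrounding lines are condensed for illustration:

// c1_Runtime1 on sparc/ppc/s390, before:
//   jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
// after:
jbyte* byte_map_base = ci_card_table_address();

// Callers that want the base as an integer or address use the templated
// variant, e.g. the 256-byte-alignment check in c1_LIRGenerator_arm.cpp
// and c1_Runtime1_arm.cpp:
if ((ci_card_table_address_as<intx>() & 0xff) == 0) {
  // Table base is 256-byte aligned, so a register holding the base already
  // carries dirty_card_val() == 0 in its low byte and can store the card.
}
AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);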
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 // Copyright (c) 2014, Red Hat Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
@@ -995,6 +995,7 @@
 
 source_hpp %{
 
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "opto/addnode.hpp"
 
@@ -4438,8 +4439,8 @@
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
                Assembler::byte, /*acquire*/ false, /*release*/ true,
                /*weak*/ false, noreg);
-  %}  
-    
+  %}
+
 
   // The only difference between aarch64_enc_cmpxchg and
   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
@@ -5845,7 +5846,7 @@
 %{
   // Get base of card map
   predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
-    (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+            (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
   match(ConP);
 
   op_cost(0);
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,6 +30,8 @@
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -42,6 +44,7 @@
 #include "runtime/vframeArray.hpp"
 #include "vmreg_aarch64.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -1162,10 +1165,6 @@
         // arg0: store_address
         Address store_addr(rfp, 2*BytesPerWord);
 
-        BarrierSet* bs = Universe::heap()->barrier_set();
-        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
         Label done;
         Label runtime;
 
@@ -1186,13 +1185,13 @@
         assert_different_registers(card_offset, byte_map_base, rscratch1);
 
         f.load_argument(0, card_offset);
-        __ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);
+        __ lsr(card_offset, card_offset, CardTable::card_shift);
         __ load_byte_map_base(byte_map_base);
         __ ldrb(rscratch1, Address(byte_map_base, card_offset));
-        __ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
         __ br(Assembler::EQ, done);
 
-        assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
+        assert((int)CardTable::dirty_card_val() == 0, "must be 0");
 
         __ membar(Assembler::StoreLoad);
         __ ldrb(rscratch1, Address(byte_map_base, card_offset));
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,8 +29,9 @@
 #include "jvm.h"
 #include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
-
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
@@ -46,6 +47,7 @@
 #include "runtime/thread.hpp"
 
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -3615,16 +3617,16 @@
   // register obj is destroyed afterwards.
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension,
+  assert(bs->kind() == BarrierSet::CardTableModRef,
          "Wrong barrier set kind");
 
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
-  lsr(obj, obj, CardTableModRefBS::card_shift);
-
-  assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+  lsr(obj, obj, CardTable::card_shift);
+
+  assert(CardTable::dirty_card_val() == 0, "must be");
 
   load_byte_map_base(rscratch1);
 
@@ -4126,8 +4128,9 @@
                                        DirtyCardQueue::byte_offset_of_buf()));
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
   Label done;
   Label runtime;
@@ -4144,20 +4147,20 @@
 
   // storing region crossing non-NULL, is card already dirty?
 
-  ExternalAddress cardtable((address) ct->byte_map_base);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  ExternalAddress cardtable((address) ct->byte_map_base());
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   const Register card_addr = tmp;
 
-  lsr(card_addr, store_addr, CardTableModRefBS::card_shift);
+  lsr(card_addr, store_addr, CardTable::card_shift);
 
   // get the address of the card
   load_byte_map_base(tmp2);
   add(card_addr, card_addr, tmp2);
   ldrb(tmp2, Address(card_addr));
-  cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  cmpw(tmp2, (int)G1CardTable::g1_young_card_val());
   br(Assembler::EQ, done);
 
-  assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
+  assert((int)CardTable::dirty_card_val() == 0, "must be 0");
 
   membar(Assembler::StoreLoad);
 
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,6 +26,8 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "oops/instanceOop.hpp"
@@ -652,9 +654,7 @@
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
         __ pop(saved_regs, sp);
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
-      case BarrierSet::ModRef:
+      case BarrierSet::CardTableModRef:
         break;
       default:
         ShouldNotReachHere();
@@ -695,16 +695,16 @@
           __ pop(saved_regs, sp);
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         {
-          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+          CardTable* ct = ctbs->card_table();
+          assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
           Label L_loop;
 
-           __ lsr(start, start, CardTableModRefBS::card_shift);
-           __ lsr(end, end, CardTableModRefBS::card_shift);
+           __ lsr(start, start, CardTable::card_shift);
+           __ lsr(end, end, CardTable::card_shift);
            __ sub(end, end, start); // number of bytes to copy
 
           const Register count = end; // 'end' register contains bytes count now
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -184,8 +184,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         if (val == noreg) {
           __ store_heap_oop_null(obj);
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 #include "ci/ciArray.hpp"
 #include "ci/ciObjArrayKlass.hpp"
 #include "ci/ciTypeArrayKlass.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -475,22 +477,21 @@
 }
 
 void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
-  assert(CardTableModRefBS::dirty_card_val() == 0,
+  assert(CardTable::dirty_card_val() == 0,
     "Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
 #ifdef AARCH64
   // AARCH64 has a register that is constant zero. We can use that one to set the
   // value in the card table to dirty.
   __ move(FrameMap::ZR_opr, card_addr);
 #else // AARCH64
-  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
-  if(((intx)ct->byte_map_base & 0xff) == 0) {
+  if((ci_card_table_address_as<intx>() & 0xff) == 0) {
     // If the card table base address is aligned to 256 bytes, we can use the register
     // that contains the card_table_base_address.
     __ move(value, card_addr);
   } else {
     // Otherwise we need to create a register containing that value.
     LIR_Opr tmp_zero = new_register(T_INT);
-    __ move(LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()), tmp_zero);
+    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
     __ move(tmp_zero, card_addr);
   }
 #endif // AARCH64
@@ -510,14 +511,14 @@
   }
 
 #ifdef AARCH64
-  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
+  LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
   LIR_Opr tmp2 = tmp;
-  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTableModRefBS::card_shift)
+  __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
   LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
 #else
   // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
   // byte instruction does not support the addressing mode we need.
-  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BOOLEAN);
+  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
 #endif
   if (UseCondCardMark) {
     if (UseConcMarkSweepGC) {
@@ -527,7 +528,7 @@
     __ move(card_addr, cur_value);
 
     LabelObj* L_already_dirty = new LabelObj();
-    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()));
+    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
     __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
     set_card(tmp, card_addr);
     __ branch_destination(L_already_dirty->label());
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,9 @@
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
 #include "utilities/align.hpp"
 #include "vmreg_arm.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -608,8 +612,6 @@
 
         __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
 
-        BarrierSet* bs = Universe::heap()->barrier_set();
-        CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
         Label done;
         Label recheck;
         Label runtime;
@@ -619,8 +621,7 @@
         Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                          DirtyCardQueue::byte_offset_of_buf()));
 
-        AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
-        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+        AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
 
         // save at least the registers that need saving if the runtime is called
 #ifdef AARCH64
@@ -649,12 +650,12 @@
         // explicitly specify that 'cardtable' has a relocInfo::none
         // type.
         __ lea(r_card_base_1, cardtable);
-        __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTableModRefBS::card_shift));
+        __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
 
         // first quick check without barrier
         __ ldrb(r_tmp2, Address(r_card_addr_0));
 
-        __ cmp(r_tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
         __ b(recheck, ne);
 
         __ bind(done);
@@ -675,14 +676,14 @@
         // reload card state after the barrier that ensures the stored oop was visible
         __ ldrb(r_tmp2, Address(r_card_addr_0));
 
-        assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
+        assert(CardTable::dirty_card_val() == 0, "adjust this code");
         __ cbz(r_tmp2, done);
 
         // storing region crossing non-NULL, card is clean.
         // dirty card and log.
 
-        assert(0 == (int)CardTableModRefBS::dirty_card_val(), "adjust this code");
-        if (((intptr_t)ct->byte_map_base & 0xff) == 0) {
+        assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
+        if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
           // Card table is aligned so the lowest byte of the table address base is zero.
           __ strb(r_card_base_1, Address(r_card_addr_0));
         } else {
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "jvm.h"
 #include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "interp_masm_arm.hpp"
@@ -410,12 +411,12 @@
 void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
   // Check barrier set type (should be card table) and element size
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension,
+  assert(bs->kind() == BarrierSet::CardTableModRef,
          "Wrong barrier set kind");
 
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "Adjust store check code");
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
 
   // Load card table base address.
 
@@ -433,19 +434,19 @@
      rarely accessed area of thread descriptor).
   */
   // TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
-  mov_address(card_table_base, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
+  mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
 }
 
 // The 2nd part of the store check.
 void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) {
   assert_different_registers(obj, card_table_base, tmp);
 
-  assert(CardTableModRefBS::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
+  assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
 #ifdef AARCH64
-  add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTableModRefBS::card_shift));
+  add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
   Address card_table_addr(card_table_base);
 #else
-  Address card_table_addr(card_table_base, obj, lsr, CardTableModRefBS::card_shift);
+  Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
 #endif
 
   if (UseCondCardMark) {
@@ -472,8 +473,9 @@
 #ifdef AARCH64
   strb(ZR, card_table_addr);
 #else
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  if ((((uintptr_t)ct->byte_map_base & 0xff) == 0)) {
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+  CardTable* ct = ctbs->card_table();
+  if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
     // Card table is aligned so the lowest byte of the table address base is zero.
     // This works only if the code is not saved for later use, possibly
     // in a context where the base would no longer be aligned.
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "ci/ciEnv.hpp"
 #include "code/nativeInst.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -43,6 +44,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -2265,7 +2267,8 @@
                                    DirtyCardQueue::byte_offset_of_buf()));
 
   BarrierSet* bs = Universe::heap()->barrier_set();
-  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
   Label done;
   Label runtime;
 
@@ -2286,18 +2289,18 @@
 
   // storing region crossing non-NULL, is card already dirty?
   const Register card_addr = tmp1;
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
-  mov_address(tmp2, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
-  add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTableModRefBS::card_shift));
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+  mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
+  add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));
 
   ldrb(tmp2, Address(card_addr));
-  cmp(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  cmp(tmp2, (int)G1CardTable::g1_young_card_val());
   b(done, eq);
 
   membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);
 
-  assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
+  assert(CardTable::dirty_card_val() == 0, "adjust this code");
   ldrb(tmp2, Address(card_addr));
   cbz(tmp2, done);
 
@@ -3023,7 +3026,6 @@
 }
 
 #endif // COMPILER2
-
 // Must preserve condition codes, or C2 encodeKlass_not_null rule
 // must be changed.
 void MacroAssembler::encode_klass_not_null(Register r) {
@@ -3261,4 +3263,3 @@
 
 }
 #endif // COMPILER2
-
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
 #include "assembler_arm.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_arm.hpp"
 #include "oops/instanceOop.hpp"
@@ -2907,8 +2909,7 @@
         __ pop(saved_regs | R9ifScratched);
 #endif // AARCH64
       }
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       break;
     default:
       ShouldNotReachHere();
@@ -2961,12 +2962,12 @@
 #endif // !AARCH64
       }
       break;
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         BLOCK_COMMENT("CardTablePostBarrier");
-        CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+        CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+        CardTable* ct = ctbs->card_table();
+        assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
         Label L_cardtable_loop, L_done;
 
@@ -2975,12 +2976,12 @@
         __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
         __ sub(count, count, BytesPerHeapOop);                            // last addr
 
-        __ logical_shift_right(addr, addr, CardTableModRefBS::card_shift);
-        __ logical_shift_right(count, count, CardTableModRefBS::card_shift);
+        __ logical_shift_right(addr, addr, CardTable::card_shift);
+        __ logical_shift_right(count, count, CardTable::card_shift);
         __ sub(count, count, addr); // nb of cards
 
         // warning: Rthread has not been preserved
-        __ mov_address(tmp, (address) ct->byte_map_base, symbolic_Relocation::card_table_reference);
+        __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
         __ add(addr,tmp, addr);
 
         Register zero = __ zero_register(tmp);
@@ -2992,8 +2993,6 @@
         __ BIND(L_done);
       }
       break;
-    case BarrierSet::ModRef:
-      break;
     default:
       ShouldNotReachHere();
     }
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -228,8 +228,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         if (is_null) {
           __ store_heap_oop_null(new_val, obj);
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -27,6 +27,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
 #include "utilities/macros.hpp"
 #include "vmreg_ppc.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -795,7 +799,7 @@
         Register tmp = R0;
         Register addr = R14;
         Register tmp2 = R15;
-        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+        jbyte* byte_map_base = ci_card_table_address();
 
         Label restart, refill, ret;
 
@@ -803,26 +807,26 @@
         __ std(addr, -8, R1_SP);
         __ std(tmp2, -16, R1_SP);
 
-        __ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
+        __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
         __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
         __ add(addr, tmp2, addr);
         __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
 
         // Return if young card.
-        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
+        __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
         __ beq(CCR0, ret);
 
         // Return if sequential consistent value is already dirty.
         __ membar(Assembler::StoreLoad);
         __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
 
-        __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
+        __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
         __ beq(CCR0, ret);
 
         // Not dirty.
 
         // First, dirty it.
-        __ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
+        __ li(tmp, G1CardTable::dirty_card_val());
         __ stb(tmp, 0, addr);
 
         int dirty_card_q_index_byte_offset =
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -43,6 +44,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -3036,20 +3038,20 @@
 void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+  assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+  CardTable* ct = bs->card_table();
 #ifdef ASSERT
   cmpdi(CCR0, Rnew_val, 0);
   asm_assert_ne("null oop not allowed", 0x321);
 #endif
-  card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
+  card_table_write(ct->byte_map_base(), Rtmp, Rstore_addr);
 }
 
 // Write the card table byte.
 void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
   assert_different_registers(Robj, Rtmp, R0);
   load_const_optimized(Rtmp, (address)byte_map_base, R0);
-  srdi(Robj, Robj, CardTableModRefBS::card_shift);
+  srdi(Robj, Robj, CardTable::card_shift);
   li(R0, 0); // dirty
   if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
   stbx(R0, Rtmp, Robj);
@@ -3171,6 +3173,7 @@
 
   G1SATBCardTableLoggingModRefBS* bs =
     barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+  CardTable* ct = bs->card_table();
 
   // Does store cross heap regions?
   if (G1RSBarrierRegionFilter) {
@@ -3187,26 +3190,26 @@
 #endif
 
   // Storing region crossing non-NULL, is card already dirty?
-  assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   const Register Rcard_addr = Rtmp1;
   Register Rbase = Rtmp2;
-  load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
-
-  srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
+  load_const_optimized(Rbase, (address)ct->byte_map_base(), /*temp*/ Rtmp3);
+
+  srdi(Rcard_addr, Rstore_addr, CardTable::card_shift);
 
   // Get the address of the card.
   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
-  cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  cmpwi(CCR0, Rtmp3, (int)G1CardTable::g1_young_card_val());
   beq(CCR0, filtered);
 
   membar(Assembler::StoreLoad);
   lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);  // Reload after membar.
-  cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
+  cmpwi(CCR0, Rtmp3 /* card value */, CardTable::dirty_card_val());
   beq(CCR0, filtered);
 
   // Storing a region crossing, non-NULL oop, card is clean.
   // Dirty card and log.
-  li(Rtmp3, CardTableModRefBS::dirty_card_val());
+  li(Rtmp3, CardTable::dirty_card_val());
   //release(); // G1: oops are allowed to get visible after dirty marking.
   stbx(Rtmp3, Rbase, Rcard_addr);
 
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -25,6 +25,8 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_ppc.hpp"
 #include "oops/instanceOop.hpp"
@@ -667,9 +669,7 @@
           __ bind(filtered);
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
-      case BarrierSet::ModRef:
+      case BarrierSet::CardTableModRef:
         break;
       default:
         ShouldNotReachHere();
@@ -703,8 +703,7 @@
           __ restore_LR_CR(R0);
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         {
           Label Lskip_loop, Lstore_loop;
           if (UseConcMarkSweepGC) {
@@ -712,19 +711,20 @@
             __ release();
           }
 
-          CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+          CardTable* const ct = ctbs->card_table();
+          assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
           assert_different_registers(addr, count, tmp);
 
           __ sldi(count, count, LogBytesPerHeapOop);
           __ addi(count, count, -BytesPerHeapOop);
           __ add(count, addr, count);
           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
-          __ srdi(addr, addr, CardTableModRefBS::card_shift);
-          __ srdi(count, count, CardTableModRefBS::card_shift);
+          __ srdi(addr, addr, CardTable::card_shift);
+          __ srdi(count, count, CardTable::card_shift);
           __ subf(count, addr, count);
           assert_different_registers(R0, addr, count, tmp);
-          __ load_const(tmp, (address)ct->byte_map_base);
+          __ load_const(tmp, (address)ct->byte_map_base());
           __ addic_(count, count, 1);
           __ beq(CCR0, Lskip_loop);
           __ li(R0, 0);
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -103,8 +103,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         Label Lnull, Ldone;
         if (Rval != noreg) {
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -27,6 +27,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_s390.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
 #include "vmreg_s390.inline.hpp"
 #include "registerSaver_s390.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -845,7 +849,7 @@
         Register r1        = Z_R6; // Must be saved/restored.
         Register r2        = Z_R7; // Must be saved/restored.
         Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
-        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+        jbyte* byte_map_base = ci_card_table_address();
 
         // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
         __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
@@ -854,17 +858,17 @@
 
         // Calculate address of card corresponding to the updated oop slot.
         AddressLiteral rs(byte_map_base);
-        __ z_srlg(addr_card, addr_oop, CardTableModRefBS::card_shift);
+        __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
         addr_oop = noreg; // dead now
         __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
         __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
 
-        __ z_cli(0, addr_card, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
         __ z_bre(young_card);
 
         __ z_sync(); // Required to support concurrent cleaning.
 
-        __ z_cli(0, addr_card, (int)CardTableModRefBS::dirty_card_val());
+        __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
         __ z_brne(not_already_dirty);
 
         __ bind(young_card);
@@ -877,7 +881,7 @@
         __ bind(not_already_dirty);
 
         // First, dirty it: [addr_card] := 0
-        __ z_mvi(0, addr_card, CardTableModRefBS::dirty_card_val());
+        __ z_mvi(0, addr_card, CardTable::dirty_card_val());
 
         Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
         Register buf = r2;
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -27,6 +27,7 @@
 #include "asm/codeBuffer.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
@@ -50,6 +51,7 @@
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -3502,12 +3504,13 @@
 
 // Write to card table for modification at store_addr - register is destroyed afterwards.
 void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
-  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
   assert_different_registers(store_addr, tmp);
-  z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
-  load_absolute_address(tmp, (address)bs->byte_map_base);
+  z_srlg(store_addr, store_addr, CardTable::card_shift);
+  load_absolute_address(tmp, (address)ct->byte_map_base());
   z_agr(store_addr, tmp);
   z_mvi(0, store_addr, 0); // Store byte 0.
 }
@@ -3707,6 +3710,7 @@
   assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
 
   G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
+  G1CardTable* ct = bs->card_table();
   assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
 
   BLOCK_COMMENT("g1_write_barrier_post {");
@@ -3733,15 +3737,15 @@
   Rnew_val = noreg; // end of lifetime
 
   // Storing region crossing non-NULL, is card already dirty?
-  assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
   assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
   // Make sure not to use Z_R0 for any of these registers.
   Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
   Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
 
   // calculate address of card
-  load_const_optimized(Rbase, (address)bs->byte_map_base);        // Card table base.
-  z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
+  load_const_optimized(Rbase, (address)ct->byte_map_base());      // Card table base.
+  z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift);         // Index into card table.
   z_algr(Rcard_addr, Rbase);                                      // Explicit calculation needed for cli.
   Rbase = noreg; // end of lifetime
 
@@ -3753,13 +3757,13 @@
   // Check the card value. If dirty, we're done.
   // This also avoids false sharing of the (already dirty) card.
   z_sync(); // Required to support concurrent cleaning.
-  assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
-  z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
+  assert((unsigned int)CardTable::dirty_card_val() <= 255, "otherwise check this code");
+  z_cli(0, Rcard_addr, CardTable::dirty_card_val()); // Reload after membar.
   z_bre(filtered);
 
   // Storing a region crossing, non-NULL oop, card is clean.
   // Dirty card and log.
-  z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
+  z_mvi(0, Rcard_addr, CardTable::dirty_card_val());
 
   Register Rcard_addr_x = Rcard_addr;
   Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,6 +26,8 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "registerSaver_s390.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "nativeInst_s390.hpp"
@@ -722,8 +724,7 @@
           __ bind(filtered);
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
       case BarrierSet::ModRef:
         break;
       default:
@@ -761,14 +762,14 @@
           }
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         // These cases formerly known as
         //   void array_store_check(Register addr, Register count, bool branchToEnd).
         {
           NearLabel doXC, done;
-          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+          CardTable* ct = ctbs->card_table();
+          assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
           assert_different_registers(Z_R0, Z_R1, addr, count);
 
           // Nothing to do if count <= 0.
@@ -787,11 +788,11 @@
           __ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
 
           // Get base address of card table.
-          __ load_const_optimized(Z_R1, (address)ct->byte_map_base);
+          __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
 
           // count = (count>>shift) - (addr>>shift)
-          __ z_srlg(addr,  addr,  CardTableModRefBS::card_shift);
-          __ z_srlg(count, count, CardTableModRefBS::card_shift);
+          __ z_srlg(addr,  addr,  CardTable::card_shift);
+          __ z_srlg(count, count, CardTable::card_shift);
 
           // Prefetch first elements of card table for update.
           if (VM_Version::has_Prefetch()) {
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -260,8 +260,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
     {
       if (val_is_null) {
         __ store_heap_oop_null(val, offset, base);
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -38,6 +41,7 @@
 #include "utilities/align.hpp"
 #include "vmreg_sparc.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -843,22 +847,22 @@
         Register cardtable = G5;
         Register tmp  = G1_scratch;
         Register tmp2 = G3_scratch;
-        jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
+        jbyte* byte_map_base = ci_card_table_address();
 
         Label not_already_dirty, restart, refill, young_card;
 
-        __ srlx(addr, CardTableModRefBS::card_shift, addr);
+        __ srlx(addr, CardTable::card_shift, addr);
 
         AddressLiteral rs(byte_map_base);
         __ set(rs, cardtable);         // cardtable := <card table base>
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
 
-        __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+        __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
 
         __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
 
-        assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+        assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
         __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
         __ bind(young_card);
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "jvm.h"
 #include "asm/macroAssembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -44,6 +45,7 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -658,7 +660,7 @@
 
 void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                       Register tmp, Register obj) {
-  srlx(obj, CardTableModRefBS::card_shift, obj);
+  srlx(obj, CardTable::card_shift, obj);
   assert(tmp != obj, "need separate temp reg");
   set((address) byte_map_base, tmp);
   stb(G0, tmp, obj);
@@ -3574,17 +3576,17 @@
 
   Label not_already_dirty, restart, refill, young_card;
 
-  __ srlx(O0, CardTableModRefBS::card_shift, O0);
+  __ srlx(O0, CardTable::card_shift, O0);
   AddressLiteral addrlit(byte_map_base);
   __ set(addrlit, O1); // O1 := <card table base>
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
 
-  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+  __ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
 
   __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
 
-  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+  assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
   __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
   __ bind(young_card);
@@ -3664,6 +3666,7 @@
 
   G1SATBCardTableLoggingModRefBS* bs =
     barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+  CardTable* ct = bs->card_table();
 
   if (G1RSBarrierRegionFilter) {
     xor3(store_addr, new_val, tmp);
@@ -3704,7 +3707,8 @@
     if (dirty_card_log_enqueue == 0) {
       G1SATBCardTableLoggingModRefBS* bs =
         barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
-      generate_dirty_card_log_enqueue(bs->byte_map_base);
+      CardTable *ct = bs->card_table();
+      generate_dirty_card_log_enqueue(ct->byte_map_base());
       assert(dirty_card_log_enqueue != 0, "postcondition.");
     }
     if (satb_log_enqueue_with_frame == 0) {
@@ -3726,9 +3730,10 @@
   if (new_val == G0) return;
   CardTableModRefBS* bs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
-  card_table_write(bs->byte_map_base, tmp, store_addr);
+  CardTable* ct = bs->card_table();
+
+  assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+  card_table_write(ct->byte_map_base(), tmp, store_addr);
 }
 
 // ((OopHandle)result).resolve();
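
The card_table_write() hunk above shows the basic write barrier in miniature: shift the address right by card_shift and store the dirty value at that offset from the biased byte map base, both of which now come from CardTable instead of CardTableModRefBS. A minimal C++ sketch of the same computation, with assumed constants standing in for the real CardTable values (HotSpot uses 512-byte cards, so card_shift is 9, and dirty_card_val() is 0):

    #include <cstdint>

    // Assumed stand-ins for CardTable's values: 512-byte cards and a
    // dirty value of zero, matching the assert in the G1 stub above.
    static const int     kCardShift = 9;
    static const uint8_t kDirtyCard = 0;

    // byte_map_base is pre-biased by the heap's base card index, so the
    // shifted address indexes it directly (the srlx/set/stb sequence).
    static void card_table_write(uint8_t* byte_map_base, uintptr_t obj) {
      byte_map_base[obj >> kCardShift] = kDirtyCard;
    }
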
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/instanceOop.hpp"
@@ -875,9 +877,7 @@
           DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
-      case BarrierSet::ModRef:
+      case BarrierSet::CardTableModRef:
         break;
       default:
         ShouldNotReachHere();
@@ -908,11 +908,11 @@
           __ restore();
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         {
-          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+          CardTable* ct = ctbs->card_table();
+          assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
           assert_different_registers(addr, count, tmp);
 
           Label L_loop, L_done;
@@ -923,10 +923,10 @@
           __ sub(count, BytesPerHeapOop, count);
           __ add(count, addr, count);
           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
-          __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
-          __ srl_ptr(count, CardTableModRefBS::card_shift, count);
+          __ srl_ptr(addr, CardTable::card_shift, addr);
+          __ srl_ptr(count, CardTable::card_shift, count);
           __ sub(count, addr, count);
-          AddressLiteral rs(ct->byte_map_base);
+          AddressLiteral rs(ct->byte_map_base());
           __ set(rs, tmp);
         __ BIND(L_loop);
           __ stb(G0, tmp, addr);
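
The arraycopy post-barrier above turns the inclusive destination range into a card range with two shifts and a subtraction, then loops storing zero bytes. A plain C++ sketch of the same loop, under the same assumed card geometry:

    #include <cstddef>
    #include <cstdint>

    static const int     kCardShift = 9;  // assumed CardTable::card_shift
    static const uint8_t kDirtyCard = 0;  // assumed dirty_card_val()

    // Dirty every card covering [addr, addr + count_in_bytes), inclusive
    // of the card holding the last heap word, as the stub's loop does.
    static void dirty_card_range(uint8_t* byte_map_base,
                                 uintptr_t addr, size_t count_in_bytes) {
      uintptr_t first = addr >> kCardShift;
      uintptr_t last  = (addr + count_in_bytes - 1) >> kCardShift;
      for (uintptr_t card = first; card <= last; card++) {
        byte_map_base[card] = kDirtyCard;
      }
    }
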
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,8 +90,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         if (index == noreg ) {
           assert(Assembler::is_simm13(offset), "fix this code");
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,9 @@
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_x86.hpp"
 #include "oops/compiledICHolder.hpp"
@@ -39,6 +42,7 @@
 #include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
@@ -1632,10 +1636,6 @@
         // arg0: store_address
         Address store_addr(rbp, 2*BytesPerWord);
 
-        CardTableModRefBS* ct =
-          barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
         Label done;
         Label enqueued;
         Label runtime;
@@ -1657,25 +1657,25 @@
         const Register card_addr = rcx;
 
         f.load_argument(0, card_addr);
-        __ shrptr(card_addr, CardTableModRefBS::card_shift);
+        __ shrptr(card_addr, CardTable::card_shift);
         // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
         // a valid address and therefore is not properly handled by the relocation code.
-        __ movptr(cardtable, (intptr_t)ct->byte_map_base);
+        __ movptr(cardtable, ci_card_table_address_as<intptr_t>());
         __ addptr(card_addr, cardtable);
 
         NOT_LP64(__ get_thread(thread);)
 
-        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
         __ jcc(Assembler::equal, done);
 
         __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
-        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+        __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
         __ jcc(Assembler::equal, done);
 
         // storing region crossing non-NULL, card is clean.
         // dirty card and log.
 
-        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+        __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
 
         const Register tmp = rdx;
         __ push(rdx);
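
The stub above is G1's card filtering before a card is logged for refinement: skip cards in young regions, re-read under a StoreLoad fence, skip cards that are already dirty, and only then dirty the card and enqueue it. A simplified standalone sketch of that control flow; the numeric card values are placeholders for G1CardTable::g1_young_card_val() and CardTable::dirty_card_val():

    #include <atomic>
    #include <cstdint>

    static const int     kCardShift = 9;   // assumed CardTable::card_shift
    static const uint8_t kYoungCard = 32;  // placeholder young-card value
    static const uint8_t kDirtyCard = 0;   // dirty_card_val()

    // Returns true if the caller still needs to enqueue the card pointer.
    static bool g1_post_barrier_filter(std::atomic<uint8_t>* byte_map_base,
                                       uintptr_t store_addr) {
      std::atomic<uint8_t>* card = byte_map_base + (store_addr >> kCardShift);
      if (card->load(std::memory_order_relaxed) == kYoungCard) {
        return false;                                 // young region: no work
      }
      std::atomic_thread_fence(std::memory_order_seq_cst);  // StoreLoad membar
      if (card->load(std::memory_order_relaxed) == kDirtyCard) {
        return false;                                 // already dirtied/logged
      }
      card->store(kDirtyCard, std::memory_order_relaxed);  // dirty the card
      return true;                                    // log it for refinement
    }
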
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -27,6 +27,7 @@
 #include "asm/assembler.hpp"
 #include "asm/assembler.inline.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "interpreter/interpreter.hpp"
@@ -45,6 +46,7 @@
 #include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -5407,9 +5409,10 @@
   Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        DirtyCardQueue::byte_offset_of_buf()));
 
-  CardTableModRefBS* ct =
+  CardTableModRefBS* ctbs =
     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
   Label done;
   Label runtime;
@@ -5432,24 +5435,24 @@
   const Register cardtable = tmp2;
 
   movptr(card_addr, store_addr);
-  shrptr(card_addr, CardTableModRefBS::card_shift);
+  shrptr(card_addr, CardTable::card_shift);
   // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
   // a valid address and therefore is not properly handled by the relocation code.
-  movptr(cardtable, (intptr_t)ct->byte_map_base);
+  movptr(cardtable, (intptr_t)ct->byte_map_base());
   addptr(card_addr, cardtable);
 
-  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+  cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
   jcc(Assembler::equal, done);
 
   membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
-  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+  cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
   jcc(Assembler::equal, done);
 
 
   // storing a region crossing, non-NULL oop, card is clean.
   // dirty card and log.
 
-  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+  movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
 
   cmpl(queue_index, 0);
   jcc(Assembler::equal, runtime);
@@ -5494,14 +5497,14 @@
   // Does a store check for the oop in register obj. The content of
   // register obj is destroyed afterwards.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->kind() == BarrierSet::CardTableForRS ||
-         bs->kind() == BarrierSet::CardTableExtension,
+  assert(bs->kind() == BarrierSet::CardTableModRef,
          "Wrong barrier set kind");
 
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
-  shrptr(obj, CardTableModRefBS::card_shift);
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+  shrptr(obj, CardTable::card_shift);
 
   Address card_addr;
 
@@ -5510,7 +5513,7 @@
   // So this essentially converts an address to a displacement and it will
   // never need to be relocated. On 64bit however the value may be too
   // large for a 32bit displacement.
-  intptr_t disp = (intptr_t) ct->byte_map_base;
+  intptr_t disp = (intptr_t) ct->byte_map_base();
   if (is_simm32(disp)) {
     card_addr = Address(noreg, obj, Address::times_1, disp);
   } else {
@@ -5518,12 +5521,12 @@
     // displacement and done in a single instruction given favorable mapping and a
     // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
     // entry and that entry is not properly handled by the relocation code.
-    AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
+    AddressLiteral cardtable((address)ct->byte_map_base(), relocInfo::none);
     Address index(noreg, obj, Address::times_1);
     card_addr = as_Address(ArrayAddress(cardtable, index));
   }
 
-  int dirty = CardTableModRefBS::dirty_card_val();
+  int dirty = CardTable::dirty_card_val();
   if (UseCondCardMark) {
     Label L_already_dirty;
     if (UseConcMarkSweepGC) {
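
The UseCondCardMark branch that this hunk leads into loads the card first and only stores if it is not already dirty, trading a load and a branch for fewer stores to card-table cache lines shared between threads (the CMS variant additionally issues a StoreLoad fence before the test). A sketch of the conditional mark under the same assumed constants:

    #include <cstdint>

    static const int     kCardShift = 9;  // assumed CardTable::card_shift
    static const uint8_t kDirtyCard = 0;  // CardTable::dirty_card_val()

    // Conditional card mark: skip the store when the card is already dirty
    // to reduce write traffic on hot card-table cache lines.
    static void store_check_conditional(uint8_t* byte_map_base, uintptr_t obj) {
      uint8_t* card = &byte_map_base[obj >> kCardShift];
      if (*card != kDirtyCard) {
        *card = kDirtyCard;
      }
    }
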
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_x86.hpp"
 #include "oops/instanceOop.hpp"
@@ -704,10 +706,8 @@
            __ bind(filtered);
          }
         break;
 #endif // INCLUDE_ALL_GCS
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
-      case BarrierSet::ModRef:
+      case BarrierSet::CardTableModRef:
         break;
       default      :
         ShouldNotReachHere();
@@ -739,22 +738,22 @@
         break;
 #endif // INCLUDE_ALL_GCS
 
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         {
-          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+          CardTable* ct = ctbs->card_table();
+          assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
 
           Label L_loop;
           const Register end = count;  // elements count; end == start+count-1
           assert_different_registers(start, end);
 
           __ lea(end,  Address(start, count, Address::times_ptr, -wordSize));
-          __ shrptr(start, CardTableModRefBS::card_shift);
-          __ shrptr(end,   CardTableModRefBS::card_shift);
+          __ shrptr(start, CardTable::card_shift);
+          __ shrptr(end,   CardTable::card_shift);
           __ subptr(end, start); // end --> count
         __ BIND(L_loop);
-          intptr_t disp = (intptr_t) ct->byte_map_base;
+          intptr_t disp = (intptr_t) ct->byte_map_base();
           Address cardtable(start, count, Address::times_1, disp);
           __ movb(cardtable, 0);
           __ decrement(count);
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -25,6 +25,9 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "interpreter/interpreter.hpp"
 #include "nativeInst_x86.hpp"
 #include "oops/instanceOop.hpp"
@@ -1232,9 +1235,7 @@
            __ bind(filtered);
         }
          break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
-      case BarrierSet::ModRef:
+      case BarrierSet::CardTableModRef:
         break;
       default:
         ShouldNotReachHere();
@@ -1272,12 +1273,8 @@
           __ popa();
         }
         break;
-      case BarrierSet::CardTableForRS:
-      case BarrierSet::CardTableExtension:
+      case BarrierSet::CardTableModRef:
         {
-          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
-          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
           Label L_loop, L_done;
           const Register end = count;
 
@@ -1286,11 +1283,11 @@
 
           __ leaq(end, Address(start, count, TIMES_OOP, 0));  // end == start+count*oop_size
           __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
-          __ shrptr(start, CardTableModRefBS::card_shift);
-          __ shrptr(end,   CardTableModRefBS::card_shift);
+          __ shrptr(start, CardTable::card_shift);
+          __ shrptr(end,   CardTable::card_shift);
           __ subptr(end, start); // end --> cards count
 
-          int64_t disp = (int64_t) ct->byte_map_base;
+          int64_t disp = ci_card_table_address_as<int64_t>();
           __ mov64(scratch, disp);
           __ addptr(start, scratch);
         __ BIND(L_loop);
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -198,8 +198,7 @@
       }
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       {
         if (val == noreg) {
           __ store_heap_oop_null(obj);
--- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/metaspaceShared.hpp"
@@ -42,7 +43,7 @@
   }
 
   if (bs->is_a(BarrierSet::CardTableModRef)) {
-    _card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base);
+    _card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->card_table()->byte_map_base());
   } else {
     _card_table_base = NULL;
   }
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -25,7 +25,10 @@
 
 #include "aot/aotCodeHeap.hpp"
 #include "aot/aotLoader.hpp"
+#include "ci/ciUtilities.hpp"
 #include "classfile/javaAssertions.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "interpreter/abstractInterpreter.hpp"
@@ -539,8 +542,7 @@
     _lib_symbols_initialized = true;
 
     CollectedHeap* heap = Universe::heap();
-    CardTableModRefBS* ct = (CardTableModRefBS*)(heap->barrier_set());
-    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ct->byte_map_base);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ci_card_table_address());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
 #include "ci/ciObjArray.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -1461,11 +1463,7 @@
       G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-      // No pre barriers
-      break;
-    case BarrierSet::ModRef:
+    case BarrierSet::CardTableModRef:
       // No pre barriers
       break;
     default      :
@@ -1481,13 +1479,9 @@
       G1SATBCardTableModRef_post_barrier(addr,  new_val);
       break;
 #endif // INCLUDE_ALL_GCS
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       CardTableModRef_post_barrier(addr,  new_val);
       break;
-    case BarrierSet::ModRef:
-      // No post barriers
-      break;
     default      :
       ShouldNotReachHere();
     }
@@ -1616,9 +1610,7 @@
 ////////////////////////////////////////////////////////////////////////
 
 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
-  assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
-  LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
+  LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
   if (addr->is_address()) {
     LIR_Address* address = addr->as_address_ptr();
     // ptr cannot be an object because we use this barrier for array card marks
@@ -1640,9 +1632,9 @@
   LIR_Opr tmp = new_pointer_register();
   if (TwoOperandLIRForm) {
     __ move(addr, tmp);
-    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
+    __ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
   } else {
-    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
+    __ unsigned_shift_right(addr, CardTable::card_shift, tmp);
   }
 
   LIR_Address* card_addr;
@@ -1652,7 +1644,7 @@
     card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
   }
 
-  LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
+  LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
   if (UseCondCardMark) {
     LIR_Opr cur_value = new_register(T_INT);
     if (UseConcMarkSweepGC) {
--- a/src/hotspot/share/ci/ciUtilities.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/ci/ciUtilities.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -24,6 +24,9 @@
 
 #include "precompiled.hpp"
 #include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "memory/universe.hpp"
 
 // ciUtilities
 //
@@ -43,3 +46,13 @@
   char c = type2char(t);
   return c ? c : 'X';
 }
+
+// ------------------------------------------------------------------
+// ci_card_table_address
+jbyte* ci_card_table_address() {
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
+  return ct->byte_map_base();
+}
--- a/src/hotspot/share/ci/ciUtilities.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/ci/ciUtilities.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -27,6 +27,7 @@
 
 #include "ci/ciEnv.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 // The following routines and definitions are used internally in the
 // compiler interface.
@@ -114,4 +115,9 @@
 const char* basictype_to_str(BasicType t);
 const char  basictype_to_char(BasicType t);
 
+jbyte* ci_card_table_address();
+template <typename T> T ci_card_table_address_as() {
+  return reinterpret_cast<T>(ci_card_table_address());
+}
+
 #endif // SHARE_VM_CI_CIUTILITIES_HPP
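
The template exists because call sites want the same pointer under different scalar types (movptr takes an intptr_t, mov64 takes an int64_t). A self-contained analogue of the pattern, with made-up names, to show the intended usage:

    #include <cstdint>

    // Generic analogue of ci_card_table_address_as<T>(): one canonical
    // accessor plus a reinterpret_cast wrapper so each call site picks
    // the scalar type it needs. Names here are illustrative only.
    static uint8_t  g_table[1];                       // stand-in byte map
    static uint8_t* table_address() { return g_table; }

    template <typename T> T table_address_as() {
      return reinterpret_cast<T>(table_address());
    }

    // Usage, mirroring this patch's call sites:
    //   intptr_t imm  = table_address_as<intptr_t>();  // cf. movptr()
    //   int64_t  disp = table_address_as<int64_t>();   // cf. mov64()
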
--- a/src/hotspot/share/code/relocInfo_ext.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/code/relocInfo_ext.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "code/codeCache.hpp"
 #include "code/relocInfo.hpp"
 #include "code/relocInfo_ext.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/universe.hpp"
@@ -59,8 +60,9 @@
   }
   case symbolic_Relocation::card_table_reference: {
     BarrierSet* bs = Universe::heap()->barrier_set();
-    CardTableModRefBS* ct = (CardTableModRefBS*)bs;
-    return (address)ct->byte_map_base;
+    CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+    CardTable* ct = ctbs->card_table();
+    return (address)ct->byte_map_base();
   }
   case symbolic_Relocation::mark_bits_reference: {
     return (address)Universe::verify_mark_bits();
--- a/src/hotspot/share/compiler/disassembler.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/compiler/disassembler.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,11 @@
  */
 
 #include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
 #include "classfile/javaClasses.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/resourceArea.hpp"
@@ -318,7 +320,7 @@
 
     BarrierSet* bs = Universe::heap()->barrier_set();
     if (bs->is_a(BarrierSet::CardTableModRef) &&
-        adr == (address)(barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)) {
+        adr == ci_card_table_address_as<address>()) {
       st->print("word_map_base");
       if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
       return;
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,9 @@
   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                           "CompactibleFreeListSpace._dict_par_lock", true,
                           Monitor::_safepoint_check_never),
-  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
                     CMSRescanMultiple),
-  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
   _collector(NULL),
   _preconsumptionDirtyCardClosure(NULL)
@@ -609,7 +609,7 @@
   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      ExtendedOopClosure* cl,
-                     CardTableModRefBS::PrecisionStyle precision,
+                     CardTable::PrecisionStyle precision,
                      HeapWord* boundary,
                      bool parallel) :
     FilteringDCTOC(sp, cl, precision, boundary),
@@ -693,7 +693,7 @@
 
 DirtyCardToOopClosure*
 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
-                                      CardTableModRefBS::PrecisionStyle precision,
+                                      CardTable::PrecisionStyle precision,
                                       HeapWord* boundary,
                                       bool parallel) {
   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
@@ -2828,7 +2828,7 @@
 }
 
 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
-  const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
+  const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
   return ergo_max;
 }
 
@@ -2865,15 +2865,15 @@
   // The "size" of each task is fixed according to rescan_task_size.
   assert(n_threads > 0, "Unexpected n_threads argument");
   const size_t task_size = marking_task_size();
-  assert(task_size > CardTableModRefBS::card_size_in_words &&
-         (task_size %  CardTableModRefBS::card_size_in_words == 0),
+  assert(task_size > CardTable::card_size_in_words &&
+         (task_size %  CardTable::card_size_in_words == 0),
          "Otherwise arithmetic below would be incorrect");
   MemRegion span = _old_gen->reserved();
   if (low != NULL) {
     if (span.contains(low)) {
       // Align low down to  a card boundary so that
       // we can use block_offset_careful() on span boundaries.
-      HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
+      HeapWord* aligned_low = align_down(low, CardTable::card_size);
       // Clip span prefix at aligned_low
       span = span.intersection(MemRegion(aligned_low, span.end()));
     } else if (low > span.end()) {
@@ -2881,7 +2881,7 @@
     } // else use entire span
   }
   assert(span.is_empty() ||
-         ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
+         ((uintptr_t)span.start() %  CardTable::card_size == 0),
         "span should start at a card boundary");
   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
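
As a worked example of the task sizing above, assuming 64-bit HeapWords and HotSpot's 512-byte cards: card_size_in_words is 512 / 8 = 64 and BitsPerWord is 64, so one task covers 64 * 64 = 4096 words (32 KB) per unit of CMSRescanMultiple, i.e. the cards described by one mod-union bitmap word:

    #include <cstddef>

    // Assumed geometry: 512-byte cards, 8-byte HeapWords, 64-bit words.
    constexpr size_t kCardSizeInWords = 512 / 8;  // CardTable::card_size_in_words
    constexpr size_t kBitsPerWord     = 64;

    constexpr size_t rescan_task_size(size_t cms_rescan_multiple) {
      return kCardSizeInWords * kBitsPerWord * cms_rescan_multiple;
    }
    static_assert(rescan_task_size(1) == 4096, "4096 words == 32 KB");
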
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc/cms/adaptiveFreeList.hpp"
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/blockOffsetTable.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/space.hpp"
 #include "logging/log.hpp"
 #include "memory/binaryTreeDictionary.hpp"
@@ -432,7 +433,7 @@
 
   // Override: provides a DCTO_CL specific to this kind of space.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
-                                     CardTableModRefBS::PrecisionStyle precision,
+                                     CardTable::PrecisionStyle precision,
                                      HeapWord* boundary,
                                      bool parallel);
 
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -448,7 +448,7 @@
   _start_sampling(false),
   _between_prologue_and_epilogue(false),
   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
-  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
+  _modUnionTable((CardTable::card_shift - LogHeapWordSize),
                  -1 /* lock-free */, "No_lock" /* dummy */),
   _modUnionClosurePar(&_modUnionTable),
   // Adjust my span to cover old (cms) gen
@@ -900,7 +900,7 @@
         // card size.
         MemRegion mr(start,
                      align_up(start + obj_size,
-                        CardTableModRefBS::card_size /* bytes */));
+                              CardTable::card_size /* bytes */));
         if (par) {
           _modUnionTable.par_mark_range(mr);
         } else {
@@ -3223,7 +3223,7 @@
   if (sp->used_region().contains(_restart_addr)) {
     // Align down to a card boundary for the start of 0th task
     // for this space.
-    aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
+    aligned_start = align_down(_restart_addr, CardTable::card_size);
   }
 
   size_t chunk_size = sp->marking_task_size();
@@ -4026,17 +4026,16 @@
       startTimer();
       sample_eden();
       // Get and clear dirty region from card table
-      dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
-                                    MemRegion(nextAddr, endAddr),
-                                    true,
-                                    CardTableModRefBS::precleaned_card_val());
+      dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
+                                                      true,
+                                                      CardTable::precleaned_card_val());
 
       assert(dirtyRegion.start() >= nextAddr,
              "returned region inconsistent?");
     }
     lastAddr = dirtyRegion.end();
     numDirtyCards =
-      dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
+      dirtyRegion.word_size()/CardTable::card_size_in_words;
 
     if (!dirtyRegion.is_empty()) {
       stopTimer();
@@ -4050,7 +4049,7 @@
       if (stop_point != NULL) {
         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
                "Should only be AbortablePreclean.");
-        _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
+        _ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
         if (should_abort_preclean()) {
           break; // out of preclean loop
         } else {
@@ -4577,7 +4576,7 @@
   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
   assert(pst->valid(), "Uninitialized use?");
   uint nth_task = 0;
-  const int alignment = CardTableModRefBS::card_size * BitsPerWord;
+  const int alignment = CardTable::card_size * BitsPerWord;
   MemRegion span = sp->used_region();
   HeapWord* start_addr = span.start();
   HeapWord* end_addr = align_up(span.end(), alignment);
@@ -4603,7 +4602,7 @@
     // precleaned, and setting the corresponding bits in the mod union
     // table. Since we have been careful to partition at Card and MUT-word
     // boundaries no synchronization is needed between parallel threads.
-    _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
+    _collector->_ct->dirty_card_iterate(this_span,
                                                  &modUnionClosure);
 
     // Having transferred these marks into the modUnionTable,
@@ -4914,16 +4913,14 @@
     // mod union table.
     {
       ModUnionClosure modUnionClosure(&_modUnionTable);
-      _ct->ct_bs()->dirty_card_iterate(
-                      _cmsGen->used_region(),
-                      &modUnionClosure);
+      _ct->dirty_card_iterate(_cmsGen->used_region(),
+                              &modUnionClosure);
     }
     // Having transferred these marks into the modUnionTable, we just need
     // to rescan the marked objects on the dirty cards in the modUnionTable.
     // The initial marking may have been done during an asynchronous
     // collection so there may be dirty bits in the mod-union table.
-    const int alignment =
-      CardTableModRefBS::card_size * BitsPerWord;
+    const int alignment = CardTable::card_size * BitsPerWord;
     {
       // ... First handle dirty cards in CMS gen
       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
@@ -5633,9 +5630,9 @@
   }
   assert(sz > 0, "size must be nonzero");
   HeapWord* next_block = addr + sz;
-  HeapWord* next_card  = align_up(next_block, CardTableModRefBS::card_size);
-  assert(align_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
-         align_down((uintptr_t)next_card, CardTableModRefBS::card_size),
+  HeapWord* next_card  = align_up(next_block, CardTable::card_size);
+  assert(align_down((uintptr_t)addr,      CardTable::card_size) <
+         align_down((uintptr_t)next_card, CardTable::card_size),
          "must be different cards");
   return next_card;
 }
@@ -6294,7 +6291,7 @@
   assert(_markStack->isEmpty(), "would cause duplicates on stack");
   assert(_span.contains(addr), "Out of bounds _finger?");
   _finger = addr;
-  _threshold = align_up(_finger, CardTableModRefBS::card_size);
+  _threshold = align_up(_finger, CardTable::card_size);
 }
 
 // Should revisit to see if this should be restructured for
@@ -6321,7 +6318,7 @@
         // during the preclean or remark phase. (CMSCleanOnEnter)
         if (CMSCleanOnEnter) {
           size_t sz = _collector->block_size_using_printezis_bits(addr);
-          HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
+          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
           MemRegion redirty_range = MemRegion(addr, end_card_addr);
           assert(!redirty_range.is_empty(), "Arithmetical tautology");
           // Bump _threshold to end_card_addr; note that
@@ -6408,9 +6405,9 @@
       // _threshold is always kept card-aligned but _finger isn't
       // always card-aligned.
       HeapWord* old_threshold = _threshold;
-      assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
+      assert(is_aligned(old_threshold, CardTable::card_size),
              "_threshold should always be card-aligned");
-      _threshold = align_up(_finger, CardTableModRefBS::card_size);
+      _threshold = align_up(_finger, CardTable::card_size);
       MemRegion mr(old_threshold, _threshold);
       assert(!mr.is_empty(), "Control point invariant");
       assert(_span.contains(mr), "Should clear within span");
@@ -6520,9 +6517,9 @@
     // _threshold is always kept card-aligned but _finger isn't
     // always card-aligned.
     HeapWord* old_threshold = _threshold;
-    assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
+    assert(is_aligned(old_threshold, CardTable::card_size),
            "_threshold should always be card-aligned");
-    _threshold = align_up(_finger, CardTableModRefBS::card_size);
+    _threshold = align_up(_finger, CardTable::card_size);
     MemRegion mr(old_threshold, _threshold);
     assert(!mr.is_empty(), "Control point invariant");
     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
@@ -6890,7 +6887,7 @@
          // are required.
          if (obj->is_objArray()) {
            size_t sz = obj->size();
-           HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
+           HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
            MemRegion redirty_range = MemRegion(addr, end_card_addr);
            assert(!redirty_range.is_empty(), "Arithmetical tautology");
            _mod_union_table->mark_range(redirty_range);
@@ -7003,15 +7000,15 @@
 }
 
 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
-  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
+  assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
          "mr should be aligned to start at a card boundary");
   // We'd like to assert:
-  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
+  // assert(mr.word_size()%CardTable::card_size_in_words == 0,
   //        "mr should be a range of cards");
   // However, that would be too strong in one case -- the last
   // partition ends at _unallocated_block which, in general, can be
   // an arbitrary boundary, not necessarily card aligned.
-  _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
+  _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
   _space->object_iterate_mem(mr, &_scan_cl);
 }
 
@@ -7620,7 +7617,7 @@
         // table.
         if (obj->is_objArray()) {
           size_t sz = obj->size();
-          HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
+          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
           MemRegion redirty_range = MemRegion(addr, end_card_addr);
           assert(!redirty_range.is_empty(), "Arithmetical tautology");
           _collector->_modUnionTable.mark_range(redirty_range);
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -77,7 +77,7 @@
 // methods are used). This is essentially a wrapper around the BitMap class,
 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
 // we have _shifter == 0. and for the mod union table we have
-// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
+// shifter == CardTable::card_shift - LogHeapWordSize.)
 // XXX 64-bit issues in BitMap?
 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
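
Spelling out the shifter arithmetic in the comment above: with card_shift == 9 and LogHeapWordSize == 3 (64-bit HeapWords), the mod union table's shifter is 6, so each bit covers 1 << 6 == 64 HeapWords, which is exactly one 512-byte card:

    // Assumed 64-bit values; card_shift comes from CardTable.
    constexpr int kCardShift       = 9;
    constexpr int kLogHeapWordSize = 3;
    constexpr int kModUnionShifter = kCardShift - kLogHeapWordSize;  // == 6
    static_assert((1 << kModUnionShifter) == 64,
                  "one mod-union bit per 64-word (512-byte) card");
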
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -448,7 +448,7 @@
   // This is superfluous except at the end of the space;
   // we should do better than this XXX
   MemRegion mr2(mr.start(), align_up(mr.end(),
-                 CardTableModRefBS::card_size /* bytes */));
+                CardTable::card_size /* bytes */));
   _t->mark_range(mr2);
 }
 
@@ -457,7 +457,7 @@
   // This is superfluous except at the end of the space;
   // we should do better than this XXX
   MemRegion mr2(mr.start(), align_up(mr.end(),
-                 CardTableModRefBS::card_size /* bytes */));
+                CardTable::card_size /* bytes */));
   _t->par_mark_range(mr2);
 }
 
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 
-void CardTableModRefBSForCTRS::
+void CardTableRS::
 non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
                                      OopsInGenClosure* cl,
                                      CardTableRS* ct,
@@ -82,7 +82,7 @@
 }
 
 void
-CardTableModRefBSForCTRS::
+CardTableRS::
 process_stride(Space* sp,
                MemRegion used,
                jint stride, int n_strides,
@@ -162,7 +162,7 @@
 }
 
 void
-CardTableModRefBSForCTRS::
+CardTableRS::
 process_chunk_boundaries(Space* sp,
                          DirtyCardToOopClosure* dcto_cl,
                          MemRegion chunk_mr,
@@ -371,7 +371,7 @@
 }
 
 void
-CardTableModRefBSForCTRS::
+CardTableRS::
 get_LNC_array_for_space(Space* sp,
                         jbyte**& lowest_non_clean,
                         uintptr_t& lowest_non_clean_base_chunk_index,
--- a/src/hotspot/share/gc/g1/g1CardCounts.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,12 +40,12 @@
 size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) {
   // We keep card counts for every card, so the size of the card counts table must
   // be the same as the card table.
-  return G1SATBCardTableLoggingModRefBS::compute_size(mem_region_size_in_words);
+  return G1CardTable::compute_size(mem_region_size_in_words);
 }
 
 size_t G1CardCounts::heap_map_factor() {
   // See G1CardCounts::compute_size() why we reuse the card table value.
-  return G1SATBCardTableLoggingModRefBS::heap_map_factor();
+  return G1CardTable::heap_map_factor();
 }
 
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
@@ -72,8 +72,8 @@
     // threshold limit is no more than this.
     guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
 
-    _ct_bs = _g1h->g1_barrier_set();
-    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
+    _ct = _g1h->card_table();
+    _ct_bot = _ct->byte_for_const(_g1h->reserved_region().start());
 
     _card_counts = (jubyte*) mapper->reserved().start();
     _reserved_max_card_num = mapper->reserved().byte_size();
@@ -116,17 +116,17 @@
 
 void G1CardCounts::clear_range(MemRegion mr) {
   if (has_count_table()) {
-    const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+    const jbyte* from_card_ptr = _ct->byte_for_const(mr.start());
     // We use the last address in the range as the range could represent the
     // last region in the heap. In which case trying to find the card will be an
     // OOB access to the card table.
-    const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
+    const jbyte* last_card_ptr = _ct->byte_for_const(mr.last());
 
 #ifdef ASSERT
-    HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
+    HeapWord* start_addr = _ct->addr_for(from_card_ptr);
     assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
-    HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
-    assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
+    HeapWord* last_addr = _ct->addr_for(last_card_ptr);
+    assert((last_addr + G1CardTable::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
 #endif // ASSERT
 
     // Clear the counts for the (exclusive) card range.
--- a/src/hotspot/share/gc/g1/g1CardCounts.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardCounts.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
 #define SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
 
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/allocation.hpp"
 #include "memory/virtualspace.hpp"
@@ -56,6 +57,7 @@
   G1CardCountsMappingChangedListener _listener;
 
   G1CollectedHeap* _g1h;
+  G1CardTable*     _ct;
 
   // The table of counts
   jubyte* _card_counts;
@@ -66,9 +68,6 @@
   // CardTable bottom.
   const jbyte* _ct_bot;
 
-  // Barrier set
-  CardTableModRefBS* _ct_bs;
-
   // Returns true if the card counts table has been reserved.
   bool has_reserved_count_table() { return _card_counts != NULL; }
 
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,10 +68,10 @@
   assert(max_capacity % num_max_regions == 0,
          "Given capacity must be evenly divisible by region size.");
   size_t region_size = max_capacity / num_max_regions;
-  assert(region_size % (G1SATBCardTableModRefBS::card_size * BitsPerWord) == 0,
+  assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
          "Region size must be evenly divisible by area covered by a single word.");
   _max_capacity = max_capacity;
-  _cards_per_region = region_size / G1SATBCardTableModRefBS::card_size;
+  _cards_per_region = region_size / G1CardTable::card_size;
 
   _live_regions_size_in_bits = live_region_bitmap_size_in_bits();
   _live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
@@ -85,11 +85,11 @@
 }
 
 size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
-  return _max_capacity / (_cards_per_region << G1SATBCardTableModRefBS::card_shift);
+  return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
 }
 
 size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
-  return _max_capacity >> G1SATBCardTableModRefBS::card_shift;
+  return _max_capacity >> G1CardTable::card_shift;
 }
 
 // Helper class that provides functionality to generate the Live Data Count
@@ -132,7 +132,7 @@
 
   void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
+    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
 
     _card_bm.clear_range(start_idx, end_idx);
   }
@@ -140,7 +140,7 @@
   // Mark the card liveness bitmap for the object spanning from start to end.
   void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
     BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
+    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
 
     assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
 
@@ -168,7 +168,7 @@
     // by the card shift -- address 0 corresponds to card number 0.  One
     // must subtract the card num of the bottom of the heap to obtain a
     // card table index.
-    BitMap::idx_t card_num = uintptr_t(addr) >> CardTableModRefBS::card_shift;
+    BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
     return card_num - _heap_card_bias;
   }
 
@@ -262,7 +262,7 @@
     // Calculate the card number for the bottom of the heap. Used
     // in biasing indexes into the accounting card bitmaps.
     _heap_card_bias =
-      uintptr_t(base_address) >> CardTableModRefBS::card_shift;
+      uintptr_t(base_address) >> G1CardTable::card_shift;
   }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTable.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "logging/log.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+bool G1CardTable::mark_card_deferred(size_t card_index) {
+  jbyte val = _byte_map[card_index];
+  // It's already processed
+  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
+    return false;
+  }
+
+  // The deferred bit can be installed either on a clean card or on a claimed card.
+  jbyte new_val = val;
+  if (val == clean_card_val()) {
+    new_val = (jbyte)deferred_card_val();
+  } else {
+    if (val & claimed_card_val()) {
+      new_val = val | (jbyte)deferred_card_val();
+    }
+  }
+  if (new_val != val) {
+    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
+  }
+  return true;
+}
+
+void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
+  jbyte *const first = byte_for(mr.start());
+  jbyte *const last = byte_after(mr.last());
+
+  memset_with_concurrent_readers(first, g1_young_gen, last - first);
+}
+
+#ifndef PRODUCT
+void G1CardTable::verify_g1_young_region(MemRegion mr) {
+  verify_region(mr, g1_young_gen, true);
+}
+#endif
+
+void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+  // The default value for a clean card on the card table is -1, so we cannot take advantage of the zero_filled parameter.
+  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+  _card_table->clear(mr);
+}
+
+void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
+  mapper->set_mapping_changed_listener(&_listener);
+
+  _byte_map_size = mapper->reserved().byte_size();
+
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  HeapWord* low_bound  = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 1;
+  _covered[0] = _whole_heap;
+
+  _byte_map = (jbyte*) mapper->reserved().start();
+  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  log_trace(gc, barrier)("G1CardTable::G1CardTable: ");
+  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+  log_trace(gc, barrier)("    _byte_map_base: " INTPTR_FORMAT,  p2i(_byte_map_base));
+}
+
+bool G1CardTable::is_in_young(oop obj) const {
+  volatile jbyte* p = byte_for(obj);
+  return *p == G1CardTable::g1_young_card_val();
+}
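
mark_card_deferred() above makes a single compare-and-swap attempt and deliberately does not retry: losing the race at worst produces a duplicate entry in the update buffer. A simplified standalone sketch of the same state machine using std::atomic in place of Atomic::cmpxchg; the card values are placeholders for CardTable's enum, and the clean-mask test is reduced to a direct comparison:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    static const uint8_t kClean    = 0xff;  // stand-in for clean_card_val()
    static const uint8_t kClaimed  = 0x02;  // stand-in for claimed_card_val()
    static const uint8_t kDeferred = 0x04;  // stand-in for deferred_card_val()

    static bool mark_card_deferred(std::atomic<uint8_t>* byte_map, size_t idx) {
      uint8_t val = byte_map[idx].load(std::memory_order_relaxed);
      if (val == kDeferred) {
        return false;               // already processed (simplified test)
      }
      uint8_t new_val = val;
      if (val == kClean) {
        new_val = kDeferred;        // clean -> deferred
      } else if (val & kClaimed) {
        new_val = val | kDeferred;  // claimed -> claimed + deferred
      }
      if (new_val != val) {
        // One CAS attempt, no retry loop: a lost race merely causes a
        // duplicate update-buffer entry, so spinning is not worthwhile.
        byte_map[idx].compare_exchange_strong(val, new_val,
                                              std::memory_order_relaxed);
      }
      return true;
    }
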
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTable.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CARDTABLE_HPP
+#define SHARE_VM_GC_G1_G1CARDTABLE_HPP
+
+#include "gc/g1/g1RegionToSpaceMapper.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/macros.hpp"
+
+class G1CardTable;
+class G1RegionToSpaceMapper;
+
+class G1CardTableChangedListener : public G1MappingChangedListener {
+ private:
+  G1CardTable* _card_table;
+ public:
+  G1CardTableChangedListener() : _card_table(NULL) { }
+
+  void set_card_table(G1CardTable* card_table) { _card_table = card_table; }
+
+  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+};
+
+class G1CardTable: public CardTable {
+  friend class VMStructs;
+  friend class G1CardTableChangedListener;
+
+  G1CardTableChangedListener _listener;
+
+  enum G1CardValues {
+    g1_young_gen = CT_MR_BS_last_reserved << 1
+  };
+
+public:
+  G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
+    _listener.set_card_table(this);
+  }
+  bool is_card_dirty(size_t card_index) {
+    return _byte_map[card_index] == dirty_card_val();
+  }
+
+  static jbyte g1_young_card_val() { return g1_young_gen; }
+
+/*
+   Claimed and deferred bits are used together in G1 during the evacuation
+   pause. These bits can have the following state transitions:
+   1. The claimed bit can be put over any other card state, except that
+      the "dirty -> dirty and claimed" transition is checked for in
+      G1 code and is not used.
+   2. The deferred bit can be set only if the previous state of the card
+      was either clean or claimed. mark_card_deferred() is wait-free.
+      We do not care whether the operation succeeds, because if it does
+      not, the only consequence is a duplicate entry in the update
+      buffer caused by the "cache miss". So it is not worth spinning.
+ */
+
+  bool is_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
+  }
+
+  inline void set_card_claimed(size_t card_index);
+
+  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
+  void g1_mark_as_young(const MemRegion& mr);
+
+  bool mark_card_deferred(size_t card_index);
+
+  bool is_card_deferred(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
+  }
+
+  static size_t compute_size(size_t mem_region_size_in_words) {
+    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
+  // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
+  static size_t heap_map_factor() { return card_size; }
+
+  void initialize() {}
+  void initialize(G1RegionToSpaceMapper* mapper);
+
+  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+
+  virtual bool is_in_young(oop obj) const;
+};
+
+#endif // SHARE_VM_GC_G1_G1CARDTABLE_HPP
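To make the claimed/deferred comment above concrete, here is a minimal standalone sketch of the wait-free mark_card_deferred() transition. It assumes the conventional card encodings (clean is all-ones, as noted elsewhere in this change); the particular claimed/deferred bit values and the mask derivation are illustrative, and the authoritative constants live in gc/shared/cardTable.hpp:

    // Sketch only, not the HotSpot implementation.
    #include <atomic>
    #include <cstdint>

    static const int8_t clean_card      = -1;              // all bits set
    static const int8_t claimed_card    = 2;               // assumed bit value
    static const int8_t deferred_card   = 4;               // assumed bit value
    static const int8_t clean_card_mask = clean_card - 31; // assumed derivation

    // Wait-free: at most one CAS attempt; a lost race merely risks a
    // duplicate card in the update buffer, which refinement tolerates.
    bool mark_card_deferred(std::atomic<int8_t>& card) {
      int8_t val = card.load(std::memory_order_relaxed);
      if ((val & (clean_card_mask | deferred_card)) == deferred_card) {
        return false;                                // already deferred
      }
      int8_t new_val = val;
      if (val == clean_card) {
        new_val = deferred_card;                     // clean -> deferred
      } else if (val & claimed_card) {
        new_val = (int8_t)(val | deferred_card);     // claimed -> claimed+deferred
      }
      if (new_val != val) {
        card.compare_exchange_strong(val, new_val, std::memory_order_relaxed);
      }
      return true;                                   // caller enqueues the card
    }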
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTable.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
+#define SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
+
+#include "gc/g1/g1CardTable.hpp"
+
+void G1CardTable::set_card_claimed(size_t card_index) {
+  jbyte val = _byte_map[card_index];
+  if (val == clean_card_val()) {
+    val = (jbyte)claimed_card_val();
+  } else {
+    val |= (jbyte)claimed_card_val();
+  }
+  _byte_map[card_index] = val;
+}
+
+#endif // SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
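The inline set_card_claimed() above replaces a clean byte outright but ORs the claimed bit into any other state, which is what lets is_card_claimed() in the header test for the claimed bit while masking out the clean pattern. Below is a small self-contained check of that interplay, with assumed encodings (clean = -1, dirty = 0, claimed = 2; see gc/shared/cardTable.hpp for the real values):

    #include <cassert>
    #include <cstdint>

    static const int8_t clean_card      = -1;
    static const int8_t dirty_card      = 0;
    static const int8_t claimed_card    = 2;   // assumed bit value
    static const int8_t clean_card_mask = clean_card - 31;

    // Mirrors the logic of G1CardTable::set_card_claimed() above.
    int8_t set_claimed(int8_t val) {
      return (val == clean_card) ? claimed_card
                                 : (int8_t)(val | claimed_card);
    }

    int main() {
      assert(set_claimed(clean_card) == claimed_card);  // clean -> claimed
      int8_t v = set_claimed(dirty_card);               // dirty -> dirty+claimed
      // is_card_claimed(): claimed bit set, none of the "clean" bits set.
      assert((v & (clean_card_mask | claimed_card)) == claimed_card);
      return 0;
    }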
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -52,6 +52,7 @@
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@@ -103,10 +104,10 @@
  private:
   size_t _num_dirtied;
   G1CollectedHeap* _g1h;
-  G1SATBCardTableLoggingModRefBS* _g1_bs;
+  G1CardTable* _g1_ct;
 
   HeapRegion* region_for_card(jbyte* card_ptr) const {
-    return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
+    return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
   }
 
   bool will_become_free(HeapRegion* hr) const {
@@ -117,14 +118,14 @@
 
  public:
   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
-    _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
+    _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
 
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     HeapRegion* hr = region_for_card(card_ptr);
 
     // Should only dirty cards in regions that won't be freed.
     if (!will_become_free(hr)) {
-      *card_ptr = CardTableModRefBS::dirty_card_val();
+      *card_ptr = G1CardTable::dirty_card_val();
       _num_dirtied++;
     }
 
@@ -1465,6 +1466,7 @@
   _young_gen_sampling_thread(NULL),
   _collector_policy(collector_policy),
   _soft_ref_policy(),
+  _card_table(NULL),
   _memory_manager("G1 Young Generation", "end of minor GC"),
   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
   _eden_pool(NULL),
@@ -1616,11 +1618,13 @@
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // Create the barrier set for the entire reserved region.
-  G1SATBCardTableLoggingModRefBS* bs
-    = new G1SATBCardTableLoggingModRefBS(reserved_region());
+  G1CardTable* ct = new G1CardTable(reserved_region());
+  ct->initialize();
+  G1SATBCardTableLoggingModRefBS* bs = new G1SATBCardTableLoggingModRefBS(ct);
   bs->initialize();
   assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
   set_barrier_set(bs);
+  _card_table = ct;
 
   // Create the hot card cache.
   _hot_card_cache = new G1HotCardCache(this);
@@ -1651,8 +1655,8 @@
 
   G1RegionToSpaceMapper* cardtable_storage =
     create_aux_memory_mapper("Card Table",
-                             G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
-                             G1SATBCardTableLoggingModRefBS::heap_map_factor());
+                             G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
+                             G1CardTable::heap_map_factor());
 
   G1RegionToSpaceMapper* card_counts_storage =
     create_aux_memory_mapper("Card Counts Table",
@@ -1666,7 +1670,7 @@
     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
 
   _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
-  g1_barrier_set()->initialize(cardtable_storage);
+  _card_table->initialize(cardtable_storage);
   // Do later initialization work for concurrent refinement.
   _hot_card_cache->initialize(card_counts_storage);
 
@@ -1676,7 +1680,7 @@
   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
 
   // Also create a G1 rem set.
-  _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
+  _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
   _g1_rem_set->initialize(max_capacity(), max_regions());
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
@@ -2691,17 +2695,17 @@
       if (!r->rem_set()->is_empty()) {
         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
-        G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
+        G1CardTable* ct = g1h->card_table();
         HeapRegionRemSetIterator hrrs(r->rem_set());
         size_t card_index;
         while (hrrs.has_next(card_index)) {
-          jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
+          jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
           // The remembered set might contain references to already freed
           // regions. Filter out such entries to avoid failing card table
           // verification.
-          if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
-            if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-              *card_ptr = CardTableModRefBS::dirty_card_val();
+          if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
+            if (*card_ptr != G1CardTable::dirty_card_val()) {
+              *card_ptr = G1CardTable::dirty_card_val();
               _dcq.enqueue(card_ptr);
             }
           }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -28,6 +28,7 @@
 #include "gc/g1/evacuationInfo.hpp"
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
@@ -150,6 +151,7 @@
 
   WorkGang* _workers;
   G1CollectorPolicy* _collector_policy;
+  G1CardTable* _card_table;
 
   SoftRefPolicy      _soft_ref_policy;
 
@@ -1178,6 +1180,10 @@
 
   G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
 
+  G1CardTable* card_table() const {
+    return _card_table;
+  }
+
   // Iteration functions.
 
   // Iterate over all objects, calling "cl.do_object" on each.
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -123,7 +123,7 @@
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
 
   MemRegion mr(start, end);
-  g1_barrier_set()->g1_mark_as_young(mr);
+  card_table()->g1_mark_as_young(mr);
 }
 
 inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,12 +38,12 @@
 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
   G1CollectedHeap* _g1;
-  DirtyCardQueue *_dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
+  DirtyCardQueue* _dcq;
+  G1CardTable*    _ct;
 
 public:
   UpdateRSetDeferred(DirtyCardQueue* dcq) :
-    _g1(G1CollectedHeap::heap()), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
+    _g1(G1CollectedHeap::heap()), _dcq(dcq), _ct(_g1->card_table()) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -59,9 +59,9 @@
     if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
       return;
     }
-    size_t card_index = _ct_bs->index_for(p);
-    if (_ct_bs->mark_card_deferred(card_index)) {
-      _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+    size_t card_index = _ct->index_for(p);
+    if (_ct->mark_card_deferred(card_index)) {
+      _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
     }
   }
 };
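The same defer-and-enqueue shape appears again in G1ParScanThreadState::update_rs() later in this change: map the field address to a card index, try to set the deferred bit, and enqueue the card only if this thread won the transition. A generic sketch of the pattern (the names stand in for the G1CardTable and DirtyCardQueue operations used in this patch):

    #include <cstddef>

    template <typename CardTableT, typename QueueT, typename T>
    void defer_card_for(CardTableT* ct, QueueT* dcq, T* p) {
      size_t card_index = ct->index_for(p);       // heap address -> card index
      if (ct->mark_card_deferred(card_index)) {   // false: already deferred
        dcq->enqueue(ct->byte_for_index(card_index)); // jbyte* in HotSpot
      }
    }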
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,7 +112,7 @@
   hr->reset_gc_time_stamp();
   hr->rem_set()->clear();
 
-  _g1h->g1_barrier_set()->clear(MemRegion(hr->bottom(), hr->end()));
+  _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
 
   if (_g1h->g1_hot_card_cache()->use_cache()) {
     _g1h->g1_hot_card_cache()->reset_card_counts(hr);
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -604,10 +604,9 @@
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1HeapVerifier* _verifier;
-  G1SATBCardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
-    : _verifier(verifier), _ct_bs(ct_bs) { }
+  G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
+    : _verifier(verifier) { }
   virtual bool do_heap_region(HeapRegion* r) {
     if (r->is_survivor()) {
       _verifier->verify_dirty_region(r);
@@ -620,16 +619,16 @@
 
 void G1HeapVerifier::verify_card_table_cleanup() {
   if (G1VerifyCTCleanup || VerifyAfterGC) {
-    G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
+    G1VerifyCardTableCleanup cleanup_verifier(this);
     _g1h->heap_region_iterate(&cleanup_verifier);
   }
 }
 
 void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
   // All of the region should be clean.
-  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+  G1CardTable* ct = _g1h->card_table();
   MemRegion mr(hr->bottom(), hr->end());
-  ct_bs->verify_not_dirty_region(mr);
+  ct->verify_not_dirty_region(mr);
 }
 
 void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
@@ -640,12 +639,12 @@
   // not dirty that area (one less thing to have to do while holding
   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   // is dirty.
-  G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+  G1CardTable* ct = _g1h->card_table();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
   if (hr->is_young()) {
-    ct_bs->verify_g1_young_region(mr);
+    ct->verify_g1_young_region(mr);
   } else {
-    ct_bs->verify_dirty_region(mr);
+    ct->verify_dirty_region(mr);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
   : _g1h(g1h),
     _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs(g1h->g1_barrier_set()),
+    _ct(g1h->card_table()),
     _closures(NULL),
     _hash_seed(17),
     _worker_id(worker_id),
@@ -390,7 +390,6 @@
     return forward_ptr;
   }
 }
-
 G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
     _g1h(g1h),
     _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
+  G1CardTable*     _ct;
   G1EvacuationRootClosures* _closures;
 
   G1PLABAllocator*  _plab_allocator;
@@ -72,7 +72,7 @@
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
+  G1CardTable* ct()                              { return _ct; }
 
   InCSetState dest(InCSetState original) const {
     assert(original.is_valid(),
@@ -104,10 +104,10 @@
     // If the field originates from the to-space, we don't need to include it
     // in the remembered set updates.
     if (!from->is_young()) {
-      size_t card_index = ctbs()->index_for(p);
+      size_t card_index = ct()->index_for(p);
       // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      if (ct()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
       }
     }
   }
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1FromCardCache.hpp"
@@ -74,7 +75,7 @@
     static size_t chunk_size() { return M; }
 
     void work(uint worker_id) {
-      G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+      G1CardTable* ct = _g1h->card_table();
 
       while (_cur_dirty_regions < _num_dirty_regions) {
         size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
@@ -83,7 +84,7 @@
         for (size_t i = next; i < max; i++) {
           HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
           if (!r->is_survivor()) {
-            ct_bs->clear(MemRegion(r->bottom(), r->end()));
+            ct->clear(MemRegion(r->bottom(), r->end()));
           }
         }
       }
@@ -280,12 +281,12 @@
 };
 
 G1RemSet::G1RemSet(G1CollectedHeap* g1,
-                   CardTableModRefBS* ct_bs,
+                   G1CardTable* ct,
                    G1HotCardCache* hot_card_cache) :
   _g1(g1),
   _scan_state(new G1RemSetScanState()),
   _num_conc_refined_cards(0),
-  _ct_bs(ct_bs),
+  _ct(ct),
   _g1p(_g1->g1_policy()),
   _hot_card_cache(hot_card_cache),
   _prev_period_summary() {
@@ -328,7 +329,7 @@
   _worker_i(worker_i) {
   _g1h = G1CollectedHeap::heap();
   _bot = _g1h->bot();
-  _ct_bs = _g1h->g1_barrier_set();
+  _ct = _g1h->card_table();
 }
 
 void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
@@ -345,7 +346,7 @@
 }
 
 void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
-  _ct_bs->set_card_claimed(card_index);
+  _ct->set_card_claimed(card_index);
   _scan_state->add_dirty_region(region_idx_for_card);
 }
 
@@ -381,7 +382,7 @@
     _cards_claimed++;
 
     // If the card is dirty, then G1 will scan it during Update RS.
-    if (_ct_bs->is_card_claimed(card_index) || _ct_bs->is_card_dirty(card_index)) {
+    if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
       continue;
     }
 
@@ -535,15 +536,15 @@
   _g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
 }
 
-inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
+inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
 #ifdef ASSERT
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  assert(g1->is_in_exact(ct_bs->addr_for(card_ptr)),
+  assert(g1->is_in_exact(ct->addr_for(card_ptr)),
          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
          p2i(card_ptr),
-         ct_bs->index_for(ct_bs->addr_for(card_ptr)),
-         p2i(ct_bs->addr_for(card_ptr)),
-         g1->addr_to_region(ct_bs->addr_for(card_ptr)));
+         ct->index_for(ct->addr_for(card_ptr)),
+         p2i(ct->addr_for(card_ptr)),
+         g1->addr_to_region(ct->addr_for(card_ptr)));
 #endif
 }
 
@@ -551,15 +552,15 @@
                                         uint worker_i) {
   assert(!_g1->is_gc_active(), "Only call concurrently");
 
-  check_card_ptr(card_ptr, _ct_bs);
+  check_card_ptr(card_ptr, _ct);
 
   // If the card is no longer dirty, nothing to do.
-  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+  if (*card_ptr != G1CardTable::dirty_card_val()) {
     return;
   }
 
   // Construct the region representing the card.
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
+  HeapWord* start = _ct->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
 
@@ -605,7 +606,7 @@
       return;
     } else if (card_ptr != orig_card_ptr) {
       // Original card was inserted and an old card was evicted.
-      start = _ct_bs->addr_for(card_ptr);
+      start = _ct->addr_for(card_ptr);
       r = _g1->heap_region_containing(start);
 
       // Check whether the region formerly in the cache should be
@@ -639,7 +640,7 @@
   // Okay to clean and process the card now.  There are still some
   // stale card cases that may be detected by iteration and dealt with
   // as iteration failure.
-  *const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
+  *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
 
   // This fence serves two purposes.  First, the card must be cleaned
   // before processing the contents.  Second, we can't proceed with
@@ -651,7 +652,7 @@
 
   // Don't use addr_for(card_ptr + 1) which can ask for
   // a card beyond the heap.
-  HeapWord* end = start + CardTableModRefBS::card_size_in_words;
+  HeapWord* end = start + G1CardTable::card_size_in_words;
   MemRegion dirty_region(start, MIN2(scan_limit, end));
   assert(!dirty_region.is_empty(), "sanity");
 
@@ -668,8 +669,8 @@
   if (!card_processed) {
     // The card might have gotten re-dirtied and re-enqueued while we
     // worked.  (In fact, it's pretty likely.)
-    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-      *card_ptr = CardTableModRefBS::dirty_card_val();
+    if (*card_ptr != G1CardTable::dirty_card_val()) {
+      *card_ptr = G1CardTable::dirty_card_val();
       MutexLockerEx x(Shared_DirtyCardQ_lock,
                       Mutex::_no_safepoint_check_flag);
       DirtyCardQueue* sdcq =
@@ -685,20 +686,20 @@
                                      G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
   assert(_g1->is_gc_active(), "Only call during GC");
 
-  check_card_ptr(card_ptr, _ct_bs);
+  check_card_ptr(card_ptr, _ct);
 
   // If the card is no longer dirty, nothing to do. This covers cards that were already
   // scanned as parts of the remembered sets.
-  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+  if (*card_ptr != G1CardTable::dirty_card_val()) {
     return false;
   }
 
   // We claim lazily (so races are possible but they're benign), which reduces the
   // number of potential duplicate scans (multiple threads may enqueue the same card twice).
-  *card_ptr = CardTableModRefBS::clean_card_val() | CardTableModRefBS::claimed_card_val();
+  *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();
 
   // Construct the region representing the card.
-  HeapWord* card_start = _ct_bs->addr_for(card_ptr);
+  HeapWord* card_start = _ct->addr_for(card_ptr);
   // And find the region containing it.
   uint const card_region_idx = _g1->addr_to_region(card_start);
 
@@ -711,7 +712,7 @@
 
   // Don't use addr_for(card_ptr + 1) which can ask for
   // a card beyond the heap.
-  HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
+  HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
   MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
   assert(!dirty_region.is_empty(), "sanity");
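The concurrent refinement path above relies on a strict order: the card is set clean, a StoreLoad fence is issued, and only then is the covered region re-scanned; if the scan cannot complete, the card is re-dirtied and re-enqueued. A minimal sketch of that protocol, using std::atomic in place of HotSpot's OrderAccess:

    #include <atomic>

    void refine_card(std::atomic<signed char>& card) {
      const signed char clean_card = -1, dirty_card = 0;
      if (card.load(std::memory_order_relaxed) != dirty_card) {
        return;                                          // no longer dirty
      }
      card.store(clean_card, std::memory_order_relaxed);    // clean first...
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ...StoreLoad fence
      // ...then scan the objects the card covers; on failure, re-dirty the
      // card and enqueue it again, as refine_card_concurrently() does above.
    }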
 
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CardLiveData.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1RemSetSummary.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "memory/allocation.hpp"
@@ -72,7 +73,7 @@
   G1CollectedHeap* _g1;
   size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
 
-  CardTableModRefBS*     _ct_bs;
+  G1CardTable*           _ct;
   G1Policy*              _g1p;
   G1HotCardCache*        _hot_card_cache;
 
@@ -93,7 +94,7 @@
   void cleanupHRRS();
 
   G1RemSet(G1CollectedHeap* g1,
-           CardTableModRefBS* ct_bs,
+           G1CardTable* ct,
            G1HotCardCache* hot_card_cache);
   ~G1RemSet();
 
@@ -162,7 +163,7 @@
   CodeBlobClosure* _code_root_cl;
 
   G1BlockOffsetTable* _bot;
-  G1SATBCardTableModRefBS *_ct_bs;
+  G1CardTable* _ct;
 
   double _strong_code_root_scan_time_sec;
   uint   _worker_i;
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -23,22 +23,20 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
-#include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
 G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
-  MemRegion whole_heap,
+  G1CardTable* card_table,
   const BarrierSet::FakeRtti& fake_rtti) :
-  CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
+  CardTableModRefBS(card_table, fake_rtti.add_tag(BarrierSet::G1SATBCT))
 { }
 
 void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
@@ -80,88 +78,17 @@
   }
 }
 
-bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  // It's already processed
-  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
-    return false;
-  }
-
-  // Cached bit can be installed either on a clean card or on a claimed card.
-  jbyte new_val = val;
-  if (val == clean_card_val()) {
-    new_val = (jbyte)deferred_card_val();
-  } else {
-    if (val & claimed_card_val()) {
-      new_val = val | (jbyte)deferred_card_val();
-    }
-  }
-  if (new_val != val) {
-    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-  }
-  return true;
-}
-
-void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
-  jbyte *const first = byte_for(mr.start());
-  jbyte *const last = byte_after(mr.last());
-
-  memset_with_concurrent_readers(first, g1_young_gen, last - first);
-}
-
-#ifndef PRODUCT
-void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
-  verify_region(mr, g1_young_gen,  true);
-}
-#endif
-
-void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
-  // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
-  MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
-  _card_table->clear(mr);
-}
-
 G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
-  G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
-  _dcqs(JavaThread::dirty_card_queue_set()),
-  _listener()
-{
-  _listener.set_card_table(this);
-}
-
-void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
-  initialize_deferred_card_mark_barriers();
-  mapper->set_mapping_changed_listener(&_listener);
-
-  _byte_map_size = mapper->reserved().byte_size();
-
-  _guard_index = cards_required(_whole_heap.word_size()) - 1;
-  _last_valid_index = _guard_index - 1;
-
-  HeapWord* low_bound  = _whole_heap.start();
-  HeapWord* high_bound = _whole_heap.end();
-
-  _cur_covered_regions = 1;
-  _covered[0] = _whole_heap;
-
-  _byte_map = (jbyte*) mapper->reserved().start();
-  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
-  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
-  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-
-  log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
-  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
-  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT,  p2i(byte_map_base));
-}
+G1SATBCardTableLoggingModRefBS(G1CardTable* card_table) :
+  G1SATBCardTableModRefBS(card_table, BarrierSet::FakeRtti(G1SATBCTLogging)),
+  _dcqs(JavaThread::dirty_card_queue_set()) {}
 
 void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
   // In the slow path, we know a card is not young
-  assert(*byte != g1_young_gen, "slow path invoked without filtering");
+  assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
   OrderAccess::storeload();
-  if (*byte != dirty_card) {
-    *byte = dirty_card;
+  if (*byte != G1CardTable::dirty_card_val()) {
+    *byte = G1CardTable::dirty_card_val();
     Thread* thr = Thread::current();
     if (thr->is_Java_thread()) {
       JavaThread* jt = (JavaThread*)thr;
@@ -174,16 +101,15 @@
   }
 }
 
-void
-G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
+void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
   if (mr.is_empty()) {
     return;
   }
-  volatile jbyte* byte = byte_for(mr.start());
-  jbyte* last_byte = byte_for(mr.last());
+  volatile jbyte* byte = _card_table->byte_for(mr.start());
+  jbyte* last_byte = _card_table->byte_for(mr.last());
   Thread* thr = Thread::current();
     // skip all consecutive young cards
-  for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+  for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
 
   if (byte <= last_byte) {
     OrderAccess::storeload();
@@ -191,11 +117,11 @@
     if (thr->is_Java_thread()) {
       JavaThread* jt = (JavaThread*)thr;
       for (; byte <= last_byte; byte++) {
-        if (*byte == g1_young_gen) {
+        if (*byte == G1CardTable::g1_young_card_val()) {
           continue;
         }
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
+        if (*byte != G1CardTable::dirty_card_val()) {
+          *byte = G1CardTable::dirty_card_val();
           jt->dirty_card_queue().enqueue(byte);
         }
       }
@@ -203,11 +129,11 @@
       MutexLockerEx x(Shared_DirtyCardQ_lock,
                       Mutex::_no_safepoint_check_flag);
       for (; byte <= last_byte; byte++) {
-        if (*byte == g1_young_gen) {
+        if (*byte == G1CardTable::g1_young_card_val()) {
           continue;
         }
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
+        if (*byte != G1CardTable::dirty_card_val()) {
+          *byte = G1CardTable::dirty_card_val();
           _dcqs.shared_dirty_card_queue()->enqueue(byte);
         }
       }
@@ -215,11 +141,6 @@
   }
 }
 
-bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
-  volatile jbyte* p = byte_for((void*)obj);
-  return *p == g1_young_card_val();
-}
-
 void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
   // This method initializes the SATB and dirty card queues before a
   // JavaThread is added to the Java thread list. Right now, we don't
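After this change the logging post-barrier keeps its two-level shape: the inline write_ref_field_post() filters young cards via G1CardTable::g1_young_card_val(), and write_ref_field_post_slow() re-checks under a StoreLoad fence before dirtying and logging. A condensed sketch of that control flow (the young-card encoding and the enqueue hook are stand-ins, not the HotSpot API):

    #include <atomic>
    #include <functional>

    void post_barrier(std::atomic<signed char>& card,
                      const std::function<void()>& enqueue_card) {
      const signed char g1_young_card = 32;   // assumed g1_young_gen value
      const signed char dirty_card    = 0;
      if (card.load(std::memory_order_relaxed) == g1_young_card) {
        return;                               // fast path: young, no logging
      }
      std::atomic_thread_fence(std::memory_order_seq_cst);   // StoreLoad
      if (card.load(std::memory_order_relaxed) != dirty_card) {
        card.store(dirty_card, std::memory_order_relaxed);
        enqueue_card();                       // push onto a dirty card queue
      }
    }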
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -33,6 +33,8 @@
 
 class DirtyCardQueueSet;
 class G1SATBCardTableLoggingModRefBS;
+class CardTable;
+class G1CardTable;
 
 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
@@ -40,16 +42,10 @@
 class G1SATBCardTableModRefBS: public CardTableModRefBS {
   friend class VMStructs;
 protected:
-  enum G1CardValues {
-    g1_young_gen = CT_MR_BS_last_reserved << 1
-  };
-
-  G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
+  G1SATBCardTableModRefBS(G1CardTable* table, const BarrierSet::FakeRtti& fake_rtti);
   ~G1SATBCardTableModRefBS() { }
 
 public:
-  static int g1_young_card_val()   { return g1_young_gen; }
-
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
@@ -62,38 +58,6 @@
 
   template <DecoratorSet decorators, typename T>
   void write_ref_field_pre(T* field);
-
-/*
-   Claimed and deferred bits are used together in G1 during the evacuation
-   pause. These bits can have the following state transitions:
-   1. The claimed bit can be put over any other card state. Except that
-      the "dirty -> dirty and claimed" transition is checked for in
-      G1 code and is not used.
-   2. Deferred bit can be set only if the previous state of the card
-      was either clean or claimed. mark_card_deferred() is wait-free.
-      We do not care if the operation is be successful because if
-      it does not it will only result in duplicate entry in the update
-      buffer because of the "cache-miss". So it's not worth spinning.
- */
-
-  bool is_card_claimed(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
-  }
-
-  inline void set_card_claimed(size_t card_index);
-
-  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
-  void g1_mark_as_young(const MemRegion& mr);
-
-  bool mark_card_deferred(size_t card_index);
-
-  bool is_card_deferred(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
-  }
-
-  virtual bool is_in_young(oop obj) const;
 };
 
 template<>
@@ -106,42 +70,14 @@
   typedef G1SATBCardTableModRefBS type;
 };
 
-class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
- private:
-  G1SATBCardTableLoggingModRefBS* _card_table;
- public:
-  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
-
-  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
-
-  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
-};
-
 // Adds card-table logging to the post-barrier.
 // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
 class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
-  friend class G1SATBCardTableLoggingModRefBSChangedListener;
  private:
-  G1SATBCardTableLoggingModRefBSChangedListener _listener;
   DirtyCardQueueSet& _dcqs;
 
  public:
-  static size_t compute_size(size_t mem_region_size_in_words) {
-    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
-    return ReservedSpace::allocation_align_size_up(number_of_slots);
-  }
-
-  // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
-  static size_t heap_map_factor() {
-    return CardTableModRefBS::card_size;
-  }
-
-  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
-
-  virtual void initialize() { }
-  virtual void initialize(G1RegionToSpaceMapper* mapper);
-
-  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+  G1SATBCardTableLoggingModRefBS(G1CardTable* card_table);
 
   // NB: if you do a whole-heap invalidation, the "usual invariant" defined
   // above no longer applies.
@@ -157,10 +93,6 @@
   virtual void on_thread_attach(JavaThread* thread);
   virtual void on_thread_detach(JavaThread* thread);
 
-  virtual bool card_mark_must_follow_store() const {
-    return true;
-  }
-
   // Callbacks for runtime accesses.
   template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -25,8 +25,9 @@
 #ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
 #define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
 
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
@@ -43,23 +44,13 @@
 
 template <DecoratorSet decorators, typename T>
 inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
-  volatile jbyte* byte = byte_for(field);
-  if (*byte != g1_young_gen) {
+  volatile jbyte* byte = _card_table->byte_for(field);
+  if (*byte != G1CardTable::g1_young_card_val()) {
     // Take a slow path for cards in old
     write_ref_field_post_slow(byte);
   }
 }
 
-void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  if (val == clean_card_val()) {
-    val = (jbyte)claimed_card_val();
-  } else {
-    val |= (jbyte)claimed_card_val();
-  }
-  _byte_map[card_index] = val;
-}
-
 inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
   assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
   // Archive roots need to be enqueued since they add subgraphs to the
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,7 +100,7 @@
   guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
 
   guarantee(CardsPerRegion == 0, "we should only set it once");
-  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
+  CardsPerRegion = GrainBytes >> G1CardTable::card_shift;
 
   if (G1HeapRegionSize != GrainBytes) {
     FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
@@ -139,9 +139,8 @@
   assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
   HeapRegionRemSet* hrrs = rem_set();
   hrrs->clear();
-  CardTableModRefBS* ct_bs =
-    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
-  ct_bs->clear(MemRegion(bottom(), end()));
+  G1CardTable* ct = G1CollectedHeap::heap()->card_table();
+  ct->clear(MemRegion(bottom(), end()));
 }
 
 void HeapRegion::calc_gc_efficiency() {
@@ -463,7 +462,7 @@
 class G1VerificationClosure : public OopClosure {
 protected:
   G1CollectedHeap* _g1h;
-  CardTableModRefBS* _bs;
+  G1CardTable* _ct;
   oop _containing_obj;
   bool _failures;
   int _n_failures;
@@ -473,7 +472,7 @@
   // _vo == UseNextMarking -> use "next" marking information,
   // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
   G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
-    _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
+    _g1h(g1h), _ct(g1h->card_table()),
     _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
   }
 
@@ -576,9 +575,9 @@
       if (from != NULL && to != NULL &&
         from != to &&
         !to->is_pinned()) {
-        jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
-        jbyte cv_field = *_bs->byte_for_const(p);
-        const jbyte dirty = CardTableModRefBS::dirty_card_val();
+        jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
+        jbyte cv_field = *_ct->byte_for_const(p);
+        const jbyte dirty = G1CardTable::dirty_card_val();
 
         bool is_bad = !(from->is_young()
           || to->rem_set()->contains_reference(p)
@@ -834,7 +833,6 @@
   CompactibleSpace::clear(mangle_space);
   reset_bot();
 }
-
 #ifndef PRODUCT
 void G1ContiguousSpace::mangle_unused_area() {
   mangle_unused_area_complete();
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "gc/g1/heapRegionType.hpp"
 #include "gc/g1/survRateGroup.hpp"
 #include "gc/shared/ageTable.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@
     if (loc_hr->is_in_reserved(from)) {
       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
       CardIdx_t from_card = (CardIdx_t)
-          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
+          hw_offset >> (G1CardTable::card_shift - LogHeapWordSize);
 
       assert((size_t)from_card < HeapRegion::CardsPerRegion,
              "Must be in range.");
@@ -170,7 +170,7 @@
   bool contains_reference(OopOrNarrowOopStar from) const {
     assert(hr()->is_in_reserved(from), "Precondition.");
     size_t card_ind = pointer_delta(from, hr()->bottom(),
-                                    CardTableModRefBS::card_size);
+                                    G1CardTable::card_size);
     return _bm.at(card_ind);
   }
 
@@ -354,7 +354,7 @@
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
   uint cur_hrm_ind = _hr->hrm_index();
 
-  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
+  int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift);
 
   if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
     assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
@@ -382,7 +382,7 @@
 
       uintptr_t from_hr_bot_card_index =
         uintptr_t(from_hr->bottom())
-          >> CardTableModRefBS::card_shift;
+          >> G1CardTable::card_shift;
       CardIdx_t card_index = from_card - from_hr_bot_card_index;
       assert((size_t)card_index < HeapRegion::CardsPerRegion,
              "Must be in range.");
@@ -671,9 +671,9 @@
 
   } else {
     uintptr_t from_card =
-      (uintptr_t(from) >> CardTableModRefBS::card_shift);
+      (uintptr_t(from) >> G1CardTable::card_shift);
     uintptr_t hr_bot_card_index =
-      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
+      uintptr_t(hr->bottom()) >> G1CardTable::card_shift;
     assert(from_card >= hr_bot_card_index, "Inv");
     CardIdx_t card_index = from_card - hr_bot_card_index;
     assert((size_t)card_index < HeapRegion::CardsPerRegion,
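The card arithmetic above follows one pattern throughout: shift an address right by G1CardTable::card_shift to get a global card number, then subtract the card number of the region bottom to get a region-local index. A worked example, assuming the usual 512-byte cards (card_shift == 9; the real constants are in gc/shared/cardTable.hpp):

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned card_shift = 9;                  // assumed: 512-byte cards
      uintptr_t region_bottom = 0x100000;             // hypothetical, card-aligned
      uintptr_t from          = region_bottom + 5000; // address in the region
      uintptr_t from_card   = from >> card_shift;
      uintptr_t bottom_card = region_bottom >> card_shift;
      uintptr_t card_index  = from_card - bottom_card;
      assert(card_index == 5000 / 512);               // card 9 of the region
      return 0;
    }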
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/g1/sparsePRT.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   // Check that the card array element type can represent all cards in the region.
   // Choose a large SparsePRTEntry::card_elem_t (e.g. CardIdx_t) if required.
   assert(((size_t)1 << (sizeof(SparsePRTEntry::card_elem_t) * BitsPerByte)) *
-         G1SATBCardTableModRefBS::card_size >= HeapRegionBounds::max_size(), "precondition");
+         G1CardTable::card_size >= HeapRegionBounds::max_size(), "precondition");
   assert(G1RSetSparseRegionEntries > 0, "precondition");
   _region_ind = region_ind;
   _next_index = RSHashTable::NullEntry;
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -509,7 +509,7 @@
   }
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr);
 
   space_invariants();
 }
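The call-site change above shows the overall shape of this refactoring: the barrier set no longer is a card table, it holds one, so card-table operations move behind an accessor. A minimal model of the flattened relationship (a sketch; member and base-class details are illustrative only):

    struct CardTable {
      virtual ~CardTable() {}
      virtual void resize_covered_region(/* MemRegion */) {}
    };

    // The barrier set delegates to the table instead of inheriting from it.
    class CardTableModRefBS /* : public ModRefBarrierSet */ {
      CardTable* _card_table;
     public:
      explicit CardTableModRefBS(CardTable* ct) : _card_table(ct) {}
      CardTable* card_table() const { return _card_table; }
    };

    // Call sites change from
    //   heap->barrier_set()->resize_covered_region(cmr);
    // to
    //   heap->barrier_set()->card_table()->resize_covered_region(cmr);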
--- a/src/hotspot/share/gc/parallel/cardTableExtension.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,683 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/objectStartArray.inline.hpp"
-#include "gc/parallel/parallelScavengeHeap.inline.hpp"
-#include "gc/parallel/psPromotionManager.inline.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/psTasks.hpp"
-#include "gc/parallel/psYoungGen.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
-#include "utilities/align.hpp"
-
-// Checks an individual oop for missing precise marks. Mark
-// may be either dirty or newgen.
-class CheckForUnmarkedOops : public OopClosure {
- private:
-  PSYoungGen*         _young_gen;
-  CardTableExtension* _card_table;
-  HeapWord*           _unmarked_addr;
-
- protected:
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    if (_young_gen->is_in_reserved(obj) &&
-        !_card_table->addr_is_marked_imprecise(p)) {
-      // Don't overwrite the first missing card mark
-      if (_unmarked_addr == NULL) {
-        _unmarked_addr = (HeapWord*)p;
-      }
-    }
-  }
-
- public:
-  CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
-    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
-
-  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
-  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
-
-  bool has_unmarked_oop() {
-    return _unmarked_addr != NULL;
-  }
-};
-
-// Checks all objects for the existence of some type of mark,
-// precise or imprecise, dirty or newgen.
-class CheckForUnmarkedObjects : public ObjectClosure {
- private:
-  PSYoungGen*         _young_gen;
-  CardTableExtension* _card_table;
-
- public:
-  CheckForUnmarkedObjects() {
-    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-    _young_gen = heap->young_gen();
-    _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
-    // No point in asserting barrier set type here. Need to make CardTableExtension
-    // a unique barrier set type.
-  }
-
-  // Card marks are not precise. The current system can leave us with
-  // a mismatch of precise marks and beginning of object marks. This means
-  // we test for missing precise marks first. If any are found, we don't
-  // fail unless the object head is also unmarked.
-  virtual void do_object(oop obj) {
-    CheckForUnmarkedOops object_check(_young_gen, _card_table);
-    obj->oop_iterate_no_header(&object_check);
-    if (object_check.has_unmarked_oop()) {
-      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
-    }
-  }
-};
-
-// Checks for precise marking of oops as newgen.
-class CheckForPreciseMarks : public OopClosure {
- private:
-  PSYoungGen*         _young_gen;
-  CardTableExtension* _card_table;
-
- protected:
-  template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
-    if (_young_gen->is_in_reserved(obj)) {
-      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
-      _card_table->set_card_newgen(p);
-    }
-  }
-
- public:
-  CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
-    _young_gen(young_gen), _card_table(card_table) { }
-
-  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
-  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
-};
-
-// We get passed the space_top value to prevent us from traversing into
-// the old_gen promotion labs, which cannot be safely parsed.
-
-// Do not call this method if the space is empty.
-// It is a waste to start tasks and get here only to
-// do no work.  If this method needs to be called
-// when the space is empty, fix the calculation of
-// end_card to allow sp_top == sp->bottom().
-
-void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
-                                                    MutableSpace* sp,
-                                                    HeapWord* space_top,
-                                                    PSPromotionManager* pm,
-                                                    uint stripe_number,
-                                                    uint stripe_total) {
-  int ssize = 128; // Naked constant!  Work unit = 64k.
-  int dirty_card_count = 0;
-
-  // It is a waste to get here if empty.
-  assert(sp->bottom() < sp->top(), "Should not be called if empty");
-  oop* sp_top = (oop*)space_top;
-  jbyte* start_card = byte_for(sp->bottom());
-  jbyte* end_card   = byte_for(sp_top - 1) + 1;
-  oop* last_scanned = NULL; // Prevent scanning objects more than once
-  // The width of the stripe ssize*stripe_total must be
-  // consistent with the number of stripes so that the complete slice
-  // is covered.
-  size_t slice_width = ssize * stripe_total;
-  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
-    jbyte* worker_start_card = slice + stripe_number * ssize;
-    if (worker_start_card >= end_card)
-      return; // We're done.
-
-    jbyte* worker_end_card = worker_start_card + ssize;
-    if (worker_end_card > end_card)
-      worker_end_card = end_card;
-
-    // We do not want to scan objects more than once. In order to accomplish
-    // this, we assert that any object with an object head inside our 'slice'
-    // belongs to us. We may need to extend the range of scanned cards if the
-    // last object continues into the next 'slice'.
-    //
-    // Note! ending cards are exclusive!
-    HeapWord* slice_start = addr_for(worker_start_card);
-    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));
-
-#ifdef ASSERT
-    if (GCWorkerDelayMillis > 0) {
-      // Delay 1 worker so that it proceeds after all the work
-      // has been completed.
-      if (stripe_number < 2) {
-        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
-      }
-    }
-#endif
-
-    // If there are not objects starting within the chunk, skip it.
-    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
-      continue;
-    }
-    // Update our beginning addr
-    HeapWord* first_object = start_array->object_start(slice_start);
-    debug_only(oop* first_object_within_slice = (oop*) first_object;)
-    if (first_object < slice_start) {
-      last_scanned = (oop*)(first_object + oop(first_object)->size());
-      debug_only(first_object_within_slice = last_scanned;)
-      worker_start_card = byte_for(last_scanned);
-    }
-
-    // Update the ending addr
-    if (slice_end < (HeapWord*)sp_top) {
-      // The subtraction is important! An object may start precisely at slice_end.
-      HeapWord* last_object = start_array->object_start(slice_end - 1);
-      slice_end = last_object + oop(last_object)->size();
-      // worker_end_card is exclusive, so bump it one past the end of last_object's
-      // covered span.
-      worker_end_card = byte_for(slice_end) + 1;
-
-      if (worker_end_card > end_card)
-        worker_end_card = end_card;
-    }
-
-    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
-    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
-    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
-    // Note that worker_start_card >= worker_end_card is legal, and happens when
-    // an object spans an entire slice.
-    assert(worker_start_card <= end_card, "worker start card beyond end card");
-    assert(worker_end_card <= end_card, "worker end card beyond end card");
-
-    jbyte* current_card = worker_start_card;
-    while (current_card < worker_end_card) {
-      // Find an unclean card.
-      while (current_card < worker_end_card && card_is_clean(*current_card)) {
-        current_card++;
-      }
-      jbyte* first_unclean_card = current_card;
-
-      // Find the end of a run of contiguous unclean cards
-      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
-        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
-          current_card++;
-        }
-
-        if (current_card < worker_end_card) {
-          // Some objects may be large enough to span several cards. If such
-          // an object has more than one dirty card, separated by a clean card,
-          // we will attempt to scan it twice. The test against "last_scanned"
-          // prevents the redundant object scan, but it does not prevent newly
-          // marked cards from being cleaned.
-          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
-          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
-          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
-          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
-          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
-          if (ending_card_of_last_object > current_card) {
-            // This means the object spans the next complete card.
-            // We need to bump the current_card to ending_card_of_last_object
-            current_card = ending_card_of_last_object;
-          }
-        }
-      }
-      jbyte* following_clean_card = current_card;
-
-      if (first_unclean_card < worker_end_card) {
-        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
-        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
-        // "p" should always be >= "last_scanned" because newly GC dirtied
-        // cards are no longer scanned again (see comment at end
-        // of loop on the increment of "current_card").  Test that
-        // hypothesis before removing this code.
-        // If this code is removed, deal with the first time through
-        // the loop when the last_scanned is the object starting in
-        // the previous slice.
-        assert((p >= last_scanned) ||
-               (last_scanned == first_object_within_slice),
-               "Should no longer be possible");
-        if (p < last_scanned) {
-          // Avoid scanning more than once; this can happen because
-          // newgen cards set by GC may a different set than the
-          // originally dirty set
-          p = last_scanned;
-        }
-        oop* to = (oop*)addr_for(following_clean_card);
-
-        // Test slice_end first!
-        if ((HeapWord*)to > slice_end) {
-          to = (oop*)slice_end;
-        } else if (to > sp_top) {
-          to = sp_top;
-        }
-
-        // we know which cards to scan, now clear them
-        if (first_unclean_card <= worker_start_card+1)
-          first_unclean_card = worker_start_card+1;
-        if (following_clean_card >= worker_end_card-1)
-          following_clean_card = worker_end_card-1;
-
-        while (first_unclean_card < following_clean_card) {
-          *first_unclean_card++ = clean_card;
-        }
-
-        const int interval = PrefetchScanIntervalInBytes;
-        // scan all objects in the range
-        if (interval != 0) {
-          while (p < to) {
-            Prefetch::write(p, interval);
-            oop m = oop(p);
-            assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
-            pm->push_contents(m);
-            p += m->size();
-          }
-          pm->drain_stacks_cond_depth();
-        } else {
-          while (p < to) {
-            oop m = oop(p);
-            assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
-            pm->push_contents(m);
-            p += m->size();
-          }
-          pm->drain_stacks_cond_depth();
-        }
-        last_scanned = p;
-      }
-      // "current_card" is still the "following_clean_card" or
-      // the current_card is >= the worker_end_card so the
-      // loop will not execute again.
-      assert((current_card == following_clean_card) ||
-             (current_card >= worker_end_card),
-        "current_card should only be incremented if it still equals "
-        "following_clean_card");
-      // Increment current_card so that it is not processed again.
-      // It may now be dirty because a old-to-young pointer was
-      // found on it an updated.  If it is now dirty, it cannot be
-      // be safely cleaned in the next iteration.
-      current_card++;
-    }
-  }
-}
-
-// This should be called before a scavenge.
-void CardTableExtension::verify_all_young_refs_imprecise() {
-  CheckForUnmarkedObjects check;
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSOldGen* old_gen = heap->old_gen();
-
-  old_gen->object_iterate(&check);
-}
-
-// This should be called immediately after a scavenge, before mutators resume.
-void CardTableExtension::verify_all_young_refs_precise() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSOldGen* old_gen = heap->old_gen();
-
-  CheckForPreciseMarks check(
-    heap->young_gen(),
-    barrier_set_cast<CardTableExtension>(heap->barrier_set()));
-
-  old_gen->oop_iterate_no_header(&check);
-
-  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
-}
-
-void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
-  CardTableExtension* card_table =
-    barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
-
-  jbyte* bot = card_table->byte_for(mr.start());
-  jbyte* top = card_table->byte_for(mr.end());
-  while(bot <= top) {
-    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
-    if (*bot == verify_card)
-      *bot = youngergen_card;
-    bot++;
-  }
-}
-
-bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
-  jbyte* p = byte_for(addr);
-  jbyte val = *p;
-
-  if (card_is_dirty(val))
-    return true;
-
-  if (card_is_newgen(val))
-    return true;
-
-  if (card_is_clean(val))
-    return false;
-
-  assert(false, "Found unhandled card mark type");
-
-  return false;
-}
-
-// Also includes verify_card
-bool CardTableExtension::addr_is_marked_precise(void *addr) {
-  jbyte* p = byte_for(addr);
-  jbyte val = *p;
-
-  if (card_is_newgen(val))
-    return true;
-
-  if (card_is_verify(val))
-    return true;
-
-  if (card_is_clean(val))
-    return false;
-
-  if (card_is_dirty(val))
-    return false;
-
-  assert(false, "Found unhandled card mark type");
-
-  return false;
-}
-
-// Assumes that only the base or the end changes.  This allows indentification
-// of the region that is being resized.  The
-// CardTableModRefBS::resize_covered_region() is used for the normal case
-// where the covered regions are growing or shrinking at the high end.
-// The method resize_covered_region_by_end() is analogous to
-// CardTableModRefBS::resize_covered_region() but
-// for regions that grow or shrink at the low end.
-void CardTableExtension::resize_covered_region(MemRegion new_region) {
-
-  for (int i = 0; i < _cur_covered_regions; i++) {
-    if (_covered[i].start() == new_region.start()) {
-      // Found a covered region with the same start as the
-      // new region.  The region is growing or shrinking
-      // from the start of the region.
-      resize_covered_region_by_start(new_region);
-      return;
-    }
-    if (_covered[i].start() > new_region.start()) {
-      break;
-    }
-  }
-
-  int changed_region = -1;
-  for (int j = 0; j < _cur_covered_regions; j++) {
-    if (_covered[j].end() == new_region.end()) {
-      changed_region = j;
-      // This is a case where the covered region is growing or shrinking
-      // at the start of the region.
-      assert(changed_region != -1, "Don't expect to add a covered region");
-      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
-        "The sizes should be different here");
-      resize_covered_region_by_end(changed_region, new_region);
-      return;
-    }
-  }
-  // This should only be a new covered region (where no existing
-  // covered region matches at the start or the end).
-  assert(_cur_covered_regions < _max_covered_regions,
-    "An existing region should have been found");
-  resize_covered_region_by_start(new_region);
-}
-
-void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
-  CardTableModRefBS::resize_covered_region(new_region);
-  debug_only(verify_guard();)
-}
-
-void CardTableExtension::resize_covered_region_by_end(int changed_region,
-                                                      MemRegion new_region) {
-  assert(SafepointSynchronize::is_at_safepoint(),
-    "Only expect an expansion at the low end at a GC");
-  debug_only(verify_guard();)
-#ifdef ASSERT
-  for (int k = 0; k < _cur_covered_regions; k++) {
-    if (_covered[k].end() == new_region.end()) {
-      assert(changed_region == k, "Changed region is incorrect");
-      break;
-    }
-  }
-#endif
-
-  // Commit new or uncommit old pages, if necessary.
-  if (resize_commit_uncommit(changed_region, new_region)) {
-    // Set the new start of the committed region
-    resize_update_committed_table(changed_region, new_region);
-  }
-
-  // Update card table entries
-  resize_update_card_table_entries(changed_region, new_region);
-
-  // Update the covered region
-  resize_update_covered_table(changed_region, new_region);
-
-  int ind = changed_region;
-  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
-  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
-                ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
-  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
-                ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
-  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
-                p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
-  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
-                p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
-
-  debug_only(verify_guard();)
-}
-
-bool CardTableExtension::resize_commit_uncommit(int changed_region,
-                                                MemRegion new_region) {
-  bool result = false;
-  // Commit new or uncommit old pages, if necessary.
-  MemRegion cur_committed = _committed[changed_region];
-  assert(_covered[changed_region].end() == new_region.end(),
-    "The ends of the regions are expected to match");
-  // Extend the start of this _committed region to
-  // to cover the start of any previous _committed region.
-  // This forms overlapping regions, but never interior regions.
-  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
-  if (min_prev_start < cur_committed.start()) {
-    // Only really need to set start of "cur_committed" to
-    // the new start (min_prev_start) but assertion checking code
-    // below use cur_committed.end() so make it correct.
-    MemRegion new_committed =
-        MemRegion(min_prev_start, cur_committed.end());
-    cur_committed = new_committed;
-  }
-#ifdef ASSERT
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
-    "Starts should have proper alignment");
-#endif
-
-  jbyte* new_start = byte_for(new_region.start());
-  // Round down because this is for the start address
-  HeapWord* new_start_aligned =
-    (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
-  // The guard page is always committed and should not be committed over.
-  // This method is used in cases where the generation is growing toward
-  // lower addresses but the guard region is still at the end of the
-  // card table.  That still makes sense when looking for writes
-  // off the end of the card table.
-  if (new_start_aligned < cur_committed.start()) {
-    // Expand the committed region
-    //
-    // Case A
-    //                                          |+ guard +|
-    //                          |+ cur committed +++++++++|
-    //                  |+ new committed +++++++++++++++++|
-    //
-    // Case B
-    //                                          |+ guard +|
-    //                        |+ cur committed +|
-    //                  |+ new committed +++++++|
-    //
-    // These are not expected because the calculation of the
-    // cur committed region and the new committed region
-    // share the same end for the covered region.
-    // Case C
-    //                                          |+ guard +|
-    //                        |+ cur committed +|
-    //                  |+ new committed +++++++++++++++++|
-    // Case D
-    //                                          |+ guard +|
-    //                        |+ cur committed +++++++++++|
-    //                  |+ new committed +++++++|
-
-    HeapWord* new_end_for_commit =
-      MIN2(cur_committed.end(), _guard_region.start());
-    if(new_start_aligned < new_end_for_commit) {
-      MemRegion new_committed =
-        MemRegion(new_start_aligned, new_end_for_commit);
-      os::commit_memory_or_exit((char*)new_committed.start(),
-                                new_committed.byte_size(), !ExecMem,
-                                "card table expansion");
-    }
-    result = true;
-  } else if (new_start_aligned > cur_committed.start()) {
-    // Shrink the committed region
-#if 0 // uncommitting space is currently unsafe because of the interactions
-      // of growing and shrinking regions.  One region A can uncommit space
-      // that it owns but which is being used by another region B (maybe).
-      // Region B has not committed the space because it was already
-      // committed by region A.
-    MemRegion uncommit_region = committed_unique_to_self(changed_region,
-      MemRegion(cur_committed.start(), new_start_aligned));
-    if (!uncommit_region.is_empty()) {
-      if (!os::uncommit_memory((char*)uncommit_region.start(),
-                               uncommit_region.byte_size())) {
-        // If the uncommit fails, ignore it.  Let the
-        // committed table resizing go even though the committed
-        // table will over state the committed space.
-      }
-    }
-#else
-    assert(!result, "Should be false with current workaround");
-#endif
-  }
-  assert(_committed[changed_region].end() == cur_committed.end(),
-    "end should not change");
-  return result;
-}
-
-void CardTableExtension::resize_update_committed_table(int changed_region,
-                                                       MemRegion new_region) {
-
-  jbyte* new_start = byte_for(new_region.start());
-  // Set the new start of the committed region
-  HeapWord* new_start_aligned =
-    (HeapWord*)align_down(new_start, os::vm_page_size());
-  MemRegion new_committed = MemRegion(new_start_aligned,
-    _committed[changed_region].end());
-  _committed[changed_region] = new_committed;
-  _committed[changed_region].set_start(new_start_aligned);
-}
-
-void CardTableExtension::resize_update_card_table_entries(int changed_region,
-                                                          MemRegion new_region) {
-  debug_only(verify_guard();)
-  MemRegion original_covered = _covered[changed_region];
-  // Initialize the card entries.  Only consider the
-  // region covered by the card table (_whole_heap)
-  jbyte* entry;
-  if (new_region.start() < _whole_heap.start()) {
-    entry = byte_for(_whole_heap.start());
-  } else {
-    entry = byte_for(new_region.start());
-  }
-  jbyte* end = byte_for(original_covered.start());
-  // If _whole_heap starts at the original covered regions start,
-  // this loop will not execute.
-  while (entry < end) { *entry++ = clean_card; }
-}
-
-void CardTableExtension::resize_update_covered_table(int changed_region,
-                                                     MemRegion new_region) {
-  // Update the covered region
-  _covered[changed_region].set_start(new_region.start());
-  _covered[changed_region].set_word_size(new_region.word_size());
-
-  // reorder regions.  There should only be at most 1 out
-  // of order.
-  for (int i = _cur_covered_regions-1 ; i > 0; i--) {
-    if (_covered[i].start() < _covered[i-1].start()) {
-        MemRegion covered_mr = _covered[i-1];
-        _covered[i-1] = _covered[i];
-        _covered[i] = covered_mr;
-        MemRegion committed_mr = _committed[i-1];
-      _committed[i-1] = _committed[i];
-      _committed[i] = committed_mr;
-      break;
-    }
-  }
-#ifdef ASSERT
-  for (int m = 0; m < _cur_covered_regions-1; m++) {
-    assert(_covered[m].start() <= _covered[m+1].start(),
-      "Covered regions out of order");
-    assert(_committed[m].start() <= _committed[m+1].start(),
-      "Committed regions out of order");
-  }
-#endif
-}
-
-// Returns the start of any committed region that is lower than
-// the target committed region (index ind) and that intersects the
-// target region.  If none, return start of target region.
-//
-//      -------------
-//      |           |
-//      -------------
-//              ------------
-//              | target   |
-//              ------------
-//                               -------------
-//                               |           |
-//                               -------------
-//      ^ returns this
-//
-//      -------------
-//      |           |
-//      -------------
-//                      ------------
-//                      | target   |
-//                      ------------
-//                               -------------
-//                               |           |
-//                               -------------
-//                      ^ returns this
-
-HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
-  assert(_cur_covered_regions >= 0, "Expecting at least on region");
-  HeapWord* min_start = _committed[ind].start();
-  for (int j = 0; j < ind; j++) {
-    HeapWord* this_start = _committed[j].start();
-    if ((this_start < min_start) &&
-        !(_committed[j].intersection(_committed[ind])).is_empty()) {
-       min_start = this_start;
-    }
-  }
-  return min_start;
-}
-
-bool CardTableExtension::is_in_young(oop obj) const {
-  return ParallelScavengeHeap::heap()->is_in_young(obj);
-}
--- a/src/hotspot/share/gc/parallel/cardTableExtension.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
-#define SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
-
-#include "gc/shared/cardTableModRefBS.hpp"
-
-class MutableSpace;
-class ObjectStartArray;
-class PSPromotionManager;
-class GCTaskQueue;
-
-class CardTableExtension : public CardTableModRefBS {
- private:
-  // Support methods for resizing the card table.
-  // resize_commit_uncommit() returns true if the pages were committed or
-  // uncommitted
-  bool resize_commit_uncommit(int changed_region, MemRegion new_region);
-  void resize_update_card_table_entries(int changed_region,
-                                        MemRegion new_region);
-  void resize_update_committed_table(int changed_region, MemRegion new_region);
-  void resize_update_covered_table(int changed_region, MemRegion new_region);
-
- protected:
-
-  static void verify_all_young_refs_precise_helper(MemRegion mr);
-
- public:
-  enum ExtendedCardValue {
-    youngergen_card   = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
-    verify_card       = CardTableModRefBS::CT_MR_BS_last_reserved + 5
-  };
-
-  CardTableExtension(MemRegion whole_heap) :
-    CardTableModRefBS(
-      whole_heap,
-      BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
-    { }
-
-  // Scavenge support
-  void scavenge_contents_parallel(ObjectStartArray* start_array,
-                                  MutableSpace* sp,
-                                  HeapWord* space_top,
-                                  PSPromotionManager* pm,
-                                  uint stripe_number,
-                                  uint stripe_total);
-
-  // Verification
-  static void verify_all_young_refs_imprecise();
-  static void verify_all_young_refs_precise();
-
-  bool addr_is_marked_imprecise(void *addr);
-  bool addr_is_marked_precise(void *addr);
-
-  void set_card_newgen(void* addr)   { jbyte* p = byte_for(addr); *p = verify_card; }
-
-  // Testers for entries
-  static bool card_is_dirty(int value)      { return value == dirty_card; }
-  static bool card_is_newgen(int value)     { return value == youngergen_card; }
-  static bool card_is_clean(int value)      { return value == clean_card; }
-  static bool card_is_verify(int value)     { return value == verify_card; }
-
-  // Card marking
-  void inline_write_ref_field_gc(void* field, oop new_val) {
-    jbyte* byte = byte_for(field);
-    *byte = youngergen_card;
-  }
-
-  // Adaptive size policy support
-  // Allows adjustment of the base and size of the covered regions
-  void resize_covered_region(MemRegion new_region);
-  // Finds the covered region to resize based on the start address
-  // of the covered regions.
-  void resize_covered_region_by_start(MemRegion new_region);
-  // Finds the covered region to resize based on the end address
-  // of the covered regions.
-  void resize_covered_region_by_end(int changed_region, MemRegion new_region);
-  // Finds the lowest start address of a covered region that is
-  // previous (i.e., lower index) to the covered region with index "ind".
-  HeapWord* lowest_prev_committed_start(int ind) const;
-
-#ifdef ASSERT
-
-  bool is_valid_card_address(jbyte* addr) {
-    return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
-  }
-
-#endif // ASSERT
-
-  // ReduceInitialCardMarks support
-  virtual bool is_in_young(oop obj) const;
-
-  virtual bool card_mark_must_follow_store() const {
-    return false;
-  }
-};
-
-template<>
-struct BarrierSet::GetName<CardTableExtension> {
-  static const BarrierSet::Name value = BarrierSet::CardTableExtension;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::CardTableExtension> {
-  typedef ::CardTableExtension type;
-};
-
-#endif // SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
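The two template specializations deleted just above are the glue for BarrierSet's compile-time dispatch: they map the concrete barrier set type to its BarrierSet::Name enumerator and back. A minimal standalone sketch of that trait pattern, using simplified stand-in names rather than HotSpot's real definitions; with CardTableExtension gone, its specializations have nothing left to describe:

#include <cstdio>

// Simplified stand-ins: the enum and types model the shape of the dispatch,
// not HotSpot's actual declarations.
enum Name { CardTableModRef, CardTableExtensionName };

template <typename T> struct GetName;  // primary template: intentionally undefined

struct CardTableExtension {};          // stand-in for the class being removed
template <> struct GetName<CardTableExtension> {
  static const Name value = CardTableExtensionName;
};

int main() {
  printf("%d\n", (int)GetName<CardTableExtension>::value);
  return 0;
}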
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 void ObjectStartArray::initialize(MemRegion reserved_region) {
   // We're based on the assumption that we use the same
   // size blocks as the card table.
-  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
+  assert((int)block_size == (int)CardTable::card_size, "Sanity");
   assert((int)block_size <= 512, "block_size must be less than or equal to 512");
 
   // Calculate how much space must be reserved
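The updated assert ties ObjectStartArray's block size to the card table's card size, which in HotSpot defaults to 512 bytes (a card_shift of 9). A small standalone sketch of the byte_for()/addr_for() arithmetic that granularity implies; heap_base and field are made-up values:

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned  card_shift = 9;                           // HotSpot default
  const uintptr_t card_size  = (uintptr_t)1 << card_shift;  // 512 bytes per card

  uintptr_t heap_base = 0x100000;                 // made-up heap start
  uintptr_t field     = heap_base + 1234;         // made-up heap address

  size_t    card_index = (field - heap_base) >> card_shift;                 // ~ byte_for()
  uintptr_t card_base  = heap_base + ((uintptr_t)card_index << card_shift); // ~ addr_for()

  printf("card_size=%lu  field=%#lx -> card %zu (card base %#lx)\n",
         (unsigned long)card_size, (unsigned long)field,
         card_index, (unsigned long)card_base);
  return 0;
}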
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -26,7 +26,6 @@
 #include "code/codeCache.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/generationSizer.hpp"
 #include "gc/parallel/objectStartArray.inline.hpp"
@@ -70,7 +69,9 @@
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
+  PSCardTable* card_table = new PSCardTable(reserved_region());
+  card_table->initialize();
+  CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
   barrier_set->initialize();
   set_barrier_set(barrier_set);
 
@@ -625,6 +626,14 @@
   return (ParallelScavengeHeap*)heap;
 }
 
+CardTableModRefBS* ParallelScavengeHeap::barrier_set() {
+  return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
+}
+
+PSCardTable* ParallelScavengeHeap::card_table() {
+  return static_cast<PSCardTable*>(barrier_set()->card_table());
+}
+
 // Before delegating the resize to the young generation,
 // the reserved space for the young and old generations
 // may be changed to accommodate the desired resize.
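After this change the heap constructs a PSCardTable and hands it to a plain CardTableModRefBS, and the new barrier_set()/card_table() accessors give call sites a typed path to the card table instead of a barrier_set_cast. A standalone sketch of this has-a layering, with heavily simplified stand-ins for the HotSpot classes:

#include <cassert>

// Simplified stand-ins; only the ownership shape and accessor pattern are modeled.
struct CardTable { virtual ~CardTable() {} };
struct PSCardTable : CardTable { };          // ParallelScavenge-specific table

struct CardTableModRefBS {                   // the barrier set now *has* a card table
  CardTable* const _card_table;
  explicit CardTableModRefBS(CardTable* ct) : _card_table(ct) {}
  CardTable* card_table() const { return _card_table; }
};

struct ParallelScavengeHeap {
  CardTableModRefBS* _barrier_set;
  CardTableModRefBS* barrier_set() { return _barrier_set; }
  PSCardTable* card_table() {                // mirrors the accessor added above
    return static_cast<PSCardTable*>(barrier_set()->card_table());
  }
};

int main() {
  PSCardTable ct;
  CardTableModRefBS bs(&ct);
  ParallelScavengeHeap heap{&bs};
  assert(heap.card_table() == &ct);
  return 0;
}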
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -30,6 +30,7 @@
 #include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
@@ -46,6 +47,7 @@
 class MemoryManager;
 class MemoryPool;
 class PSAdaptiveSizePolicy;
+class PSCardTable;
 class PSHeapSummary;
 
 class ParallelScavengeHeap : public CollectedHeap {
@@ -125,6 +127,9 @@
 
   static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }
 
+  CardTableModRefBS* barrier_set();
+  PSCardTable* card_table();
+
   AdjoiningGenerations* gens() { return _gens; }
 
   // Returns JNI_OK on success
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/gcTaskManager.hpp"
+#include "gc/parallel/objectStartArray.inline.hpp"
+#include "gc/parallel/parallelScavengeHeap.inline.hpp"
+#include "gc/parallel/psCardTable.hpp"
+#include "gc/parallel/psPromotionManager.inline.hpp"
+#include "gc/parallel/psScavenge.hpp"
+#include "gc/parallel/psTasks.hpp"
+#include "gc/parallel/psYoungGen.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "utilities/align.hpp"
+
+// Checks an individual oop for missing precise marks. Mark
+// may be either dirty or newgen.
+class CheckForUnmarkedOops : public OopClosure {
+ private:
+  PSYoungGen*  _young_gen;
+  PSCardTable* _card_table;
+  HeapWord*    _unmarked_addr;
+
+ protected:
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    if (_young_gen->is_in_reserved(obj) &&
+        !_card_table->addr_is_marked_imprecise(p)) {
+      // Don't overwrite the first missing card mark
+      if (_unmarked_addr == NULL) {
+        _unmarked_addr = (HeapWord*)p;
+      }
+    }
+  }
+
+ public:
+  CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) :
+    _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
+
+  virtual void do_oop(oop* p)       { CheckForUnmarkedOops::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+
+  bool has_unmarked_oop() {
+    return _unmarked_addr != NULL;
+  }
+};
+
+// Checks all objects for the existence of some type of mark,
+// precise or imprecise, dirty or newgen.
+class CheckForUnmarkedObjects : public ObjectClosure {
+ private:
+  PSYoungGen*  _young_gen;
+  PSCardTable* _card_table;
+
+ public:
+  CheckForUnmarkedObjects() {
+    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+    _young_gen = heap->young_gen();
+    _card_table = heap->card_table();
+  }
+
+  // Card marks are not precise. The current system can leave us with
+  // a mismatch of precise marks and beginning of object marks. This means
+  // we test for missing precise marks first. If any are found, we don't
+  // fail unless the object head is also unmarked.
+  virtual void do_object(oop obj) {
+    CheckForUnmarkedOops object_check(_young_gen, _card_table);
+    obj->oop_iterate_no_header(&object_check);
+    if (object_check.has_unmarked_oop()) {
+      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
+    }
+  }
+};
+
+// Checks for precise marking of oops as newgen.
+class CheckForPreciseMarks : public OopClosure {
+ private:
+  PSYoungGen*  _young_gen;
+  PSCardTable* _card_table;
+
+ protected:
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    if (_young_gen->is_in_reserved(obj)) {
+      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
+      _card_table->set_card_newgen(p);
+    }
+  }
+
+ public:
+  CheckForPreciseMarks(PSYoungGen* young_gen, PSCardTable* card_table) :
+    _young_gen(young_gen), _card_table(card_table) { }
+
+  virtual void do_oop(oop* p)       { CheckForPreciseMarks::do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
+};
+
+// We get passed the space_top value to prevent us from traversing into
+// the old_gen promotion labs, which cannot be safely parsed.
+
+// Do not call this method if the space is empty.
+// It is a waste to start tasks and get here only to
+// do no work.  If this method needs to be called
+// when the space is empty, fix the calculation of
+// end_card to allow sp_top == sp->bottom().
+
+void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
+                                             MutableSpace* sp,
+                                             HeapWord* space_top,
+                                             PSPromotionManager* pm,
+                                             uint stripe_number,
+                                             uint stripe_total) {
+  int ssize = 128; // Naked constant!  Work unit = 64k.
+  int dirty_card_count = 0;
+
+  // It is a waste to get here if empty.
+  assert(sp->bottom() < sp->top(), "Should not be called if empty");
+  oop* sp_top = (oop*)space_top;
+  jbyte* start_card = byte_for(sp->bottom());
+  jbyte* end_card   = byte_for(sp_top - 1) + 1;
+  oop* last_scanned = NULL; // Prevent scanning objects more than once
+  // The width of the stripe ssize*stripe_total must be
+  // consistent with the number of stripes so that the complete slice
+  // is covered.
+  size_t slice_width = ssize * stripe_total;
+  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
+    jbyte* worker_start_card = slice + stripe_number * ssize;
+    if (worker_start_card >= end_card)
+      return; // We're done.
+
+    jbyte* worker_end_card = worker_start_card + ssize;
+    if (worker_end_card > end_card)
+      worker_end_card = end_card;
+
+    // We do not want to scan objects more than once. In order to accomplish
+    // this, we assert that any object with an object head inside our 'slice'
+    // belongs to us. We may need to extend the range of scanned cards if the
+    // last object continues into the next 'slice'.
+    //
+    // Note! ending cards are exclusive!
+    HeapWord* slice_start = addr_for(worker_start_card);
+    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));
+
+#ifdef ASSERT
+    if (GCWorkerDelayMillis > 0) {
+      // Delay 1 worker so that it proceeds after all the work
+      // has been completed.
+      if (stripe_number < 2) {
+        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
+      }
+    }
+#endif
+
+    // If there are no objects starting within the chunk, skip it.
+    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
+      continue;
+    }
+    // Update our beginning addr
+    HeapWord* first_object = start_array->object_start(slice_start);
+    debug_only(oop* first_object_within_slice = (oop*) first_object;)
+    if (first_object < slice_start) {
+      last_scanned = (oop*)(first_object + oop(first_object)->size());
+      debug_only(first_object_within_slice = last_scanned;)
+      worker_start_card = byte_for(last_scanned);
+    }
+
+    // Update the ending addr
+    if (slice_end < (HeapWord*)sp_top) {
+      // The subtraction is important! An object may start precisely at slice_end.
+      HeapWord* last_object = start_array->object_start(slice_end - 1);
+      slice_end = last_object + oop(last_object)->size();
+      // worker_end_card is exclusive, so bump it one past the end of last_object's
+      // covered span.
+      worker_end_card = byte_for(slice_end) + 1;
+
+      if (worker_end_card > end_card)
+        worker_end_card = end_card;
+    }
+
+    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
+    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
+    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
+    // Note that worker_start_card >= worker_end_card is legal, and happens when
+    // an object spans an entire slice.
+    assert(worker_start_card <= end_card, "worker start card beyond end card");
+    assert(worker_end_card <= end_card, "worker end card beyond end card");
+
+    jbyte* current_card = worker_start_card;
+    while (current_card < worker_end_card) {
+      // Find an unclean card.
+      while (current_card < worker_end_card && card_is_clean(*current_card)) {
+        current_card++;
+      }
+      jbyte* first_unclean_card = current_card;
+
+      // Find the end of a run of contiguous unclean cards
+      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
+        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
+          current_card++;
+        }
+
+        if (current_card < worker_end_card) {
+          // Some objects may be large enough to span several cards. If such
+          // an object has more than one dirty card, separated by a clean card,
+          // we will attempt to scan it twice. The test against "last_scanned"
+          // prevents the redundant object scan, but it does not prevent newly
+          // marked cards from being cleaned.
+          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
+          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
+          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
+          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
+          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
+          if (ending_card_of_last_object > current_card) {
+            // This means the object spans the next complete card.
+            // We need to bump the current_card to ending_card_of_last_object
+            current_card = ending_card_of_last_object;
+          }
+        }
+      }
+      jbyte* following_clean_card = current_card;
+
+      if (first_unclean_card < worker_end_card) {
+        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
+        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
+        // "p" should always be >= "last_scanned" because newly GC dirtied
+        // cards are no longer scanned again (see comment at end
+        // of loop on the increment of "current_card").  Test that
+        // hypothesis before removing this code.
+        // If this code is removed, deal with the first time through
+        // the loop when the last_scanned is the object starting in
+        // the previous slice.
+        assert((p >= last_scanned) ||
+               (last_scanned == first_object_within_slice),
+               "Should no longer be possible");
+        if (p < last_scanned) {
+          // Avoid scanning more than once; this can happen because
+          // newgen cards set by GC may be a different set than the
+          // originally dirty set.
+          p = last_scanned;
+        }
+        oop* to = (oop*)addr_for(following_clean_card);
+
+        // Test slice_end first!
+        if ((HeapWord*)to > slice_end) {
+          to = (oop*)slice_end;
+        } else if (to > sp_top) {
+          to = sp_top;
+        }
+
+        // we know which cards to scan, now clear them
+        if (first_unclean_card <= worker_start_card+1)
+          first_unclean_card = worker_start_card+1;
+        if (following_clean_card >= worker_end_card-1)
+          following_clean_card = worker_end_card-1;
+
+        while (first_unclean_card < following_clean_card) {
+          *first_unclean_card++ = clean_card;
+        }
+
+        const int interval = PrefetchScanIntervalInBytes;
+        // scan all objects in the range
+        if (interval != 0) {
+          while (p < to) {
+            Prefetch::write(p, interval);
+            oop m = oop(p);
+            assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
+            pm->push_contents(m);
+            p += m->size();
+          }
+          pm->drain_stacks_cond_depth();
+        } else {
+          while (p < to) {
+            oop m = oop(p);
+            assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
+            pm->push_contents(m);
+            p += m->size();
+          }
+          pm->drain_stacks_cond_depth();
+        }
+        last_scanned = p;
+      }
+      // "current_card" is still the "following_clean_card" or
+      // the current_card is >= the worker_end_card so the
+      // loop will not execute again.
+      assert((current_card == following_clean_card) ||
+             (current_card >= worker_end_card),
+        "current_card should only be incremented if it still equals "
+        "following_clean_card");
+      // Increment current_card so that it is not processed again.
+      // It may now be dirty because an old-to-young pointer was
+      // found on it and updated.  If it is now dirty, it cannot
+      // be safely cleaned in the next iteration.
+      current_card++;
+    }
+  }
+}
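The loop above carves the card range into slices of ssize * stripe_total cards, and each worker takes the ssize-card stripe at its stripe_number within every slice (128 cards of 512 bytes each is the 64k work unit the comment mentions). A standalone sketch of the stripe arithmetic, with made-up card counts:

#include <algorithm>
#include <cstdio>

// For each slice of ssize * stripe_total cards, worker `stripe_number` scans
// the cards [slice + stripe_number * ssize, slice + (stripe_number + 1) * ssize),
// clamped to end_card.
int main() {
  const int ssize        = 128;   // cards per stripe; 128 * 512 bytes = 64k of heap
  const int stripe_total = 4;     // e.g. four GC workers
  const int end_card     = 1000;  // exclusive end of the space's card range

  for (int stripe_number = 0; stripe_number < stripe_total; stripe_number++) {
    for (int slice = 0; slice < end_card; slice += ssize * stripe_total) {
      int start = slice + stripe_number * ssize;
      if (start >= end_card) break;                 // this worker is done
      int end = std::min(start + ssize, end_card);
      printf("worker %d scans cards [%d, %d)\n", stripe_number, start, end);
    }
  }
  return 0;
}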
+
+// This should be called before a scavenge.
+void PSCardTable::verify_all_young_refs_imprecise() {
+  CheckForUnmarkedObjects check;
+
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  PSOldGen* old_gen = heap->old_gen();
+
+  old_gen->object_iterate(&check);
+}
+
+// This should be called immediately after a scavenge, before mutators resume.
+void PSCardTable::verify_all_young_refs_precise() {
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  PSOldGen* old_gen = heap->old_gen();
+
+  CheckForPreciseMarks check(heap->young_gen(), this);
+
+  old_gen->oop_iterate_no_header(&check);
+
+  verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
+}
+
+void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
+  jbyte* bot = byte_for(mr.start());
+  jbyte* top = byte_for(mr.end());
+  while (bot <= top) {
+    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
+    if (*bot == verify_card)
+      *bot = youngergen_card;
+    bot++;
+  }
+}
+
+bool PSCardTable::addr_is_marked_imprecise(void *addr) {
+  jbyte* p = byte_for(addr);
+  jbyte val = *p;
+
+  if (card_is_dirty(val))
+    return true;
+
+  if (card_is_newgen(val))
+    return true;
+
+  if (card_is_clean(val))
+    return false;
+
+  assert(false, "Found unhandled card mark type");
+
+  return false;
+}
+
+// Also includes verify_card
+bool PSCardTable::addr_is_marked_precise(void *addr) {
+  jbyte* p = byte_for(addr);
+  jbyte val = *p;
+
+  if (card_is_newgen(val))
+    return true;
+
+  if (card_is_verify(val))
+    return true;
+
+  if (card_is_clean(val))
+    return false;
+
+  if (card_is_dirty(val))
+    return false;
+
+  assert(false, "Found unhandled card mark type");
+
+  return false;
+}
+
+// Assumes that only the base or the end changes.  This allows identification
+// of the region that is being resized.  The
+// CardTable::resize_covered_region() is used for the normal case
+// where the covered regions are growing or shrinking at the high end.
+// The method resize_covered_region_by_end() is analogous to
+// CardTable::resize_covered_region() but
+// for regions that grow or shrink at the low end.
+void PSCardTable::resize_covered_region(MemRegion new_region) {
+  for (int i = 0; i < _cur_covered_regions; i++) {
+    if (_covered[i].start() == new_region.start()) {
+      // Found a covered region with the same start as the
+      // new region.  The region is growing or shrinking
+      // from the start of the region.
+      resize_covered_region_by_start(new_region);
+      return;
+    }
+    if (_covered[i].start() > new_region.start()) {
+      break;
+    }
+  }
+
+  int changed_region = -1;
+  for (int j = 0; j < _cur_covered_regions; j++) {
+    if (_covered[j].end() == new_region.end()) {
+      changed_region = j;
+      // This is a case where the covered region is growing or shrinking
+      // at the start of the region.
+      assert(changed_region != -1, "Don't expect to add a covered region");
+      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
+        "The sizes should be different here");
+      resize_covered_region_by_end(changed_region, new_region);
+      return;
+    }
+  }
+  // This should only be a new covered region (where no existing
+  // covered region matches at the start or the end).
+  assert(_cur_covered_regions < _max_covered_regions,
+    "An existing region should have been found");
+  resize_covered_region_by_start(new_region);
+}
+
+void PSCardTable::resize_covered_region_by_start(MemRegion new_region) {
+  CardTable::resize_covered_region(new_region);
+  debug_only(verify_guard();)
+}
+
+void PSCardTable::resize_covered_region_by_end(int changed_region,
+                                               MemRegion new_region) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+    "Only expect an expansion at the low end at a GC");
+  debug_only(verify_guard();)
+#ifdef ASSERT
+  for (int k = 0; k < _cur_covered_regions; k++) {
+    if (_covered[k].end() == new_region.end()) {
+      assert(changed_region == k, "Changed region is incorrect");
+      break;
+    }
+  }
+#endif
+
+  // Commit new or uncommit old pages, if necessary.
+  if (resize_commit_uncommit(changed_region, new_region)) {
+    // Set the new start of the committed region
+    resize_update_committed_table(changed_region, new_region);
+  }
+
+  // Update card table entries
+  resize_update_card_table_entries(changed_region, new_region);
+
+  // Update the covered region
+  resize_update_covered_table(changed_region, new_region);
+
+  int ind = changed_region;
+  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
+  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
+                ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
+                ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
+                p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
+  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
+                p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
+
+  debug_only(verify_guard();)
+}
+
+bool PSCardTable::resize_commit_uncommit(int changed_region,
+                                         MemRegion new_region) {
+  bool result = false;
+  // Commit new or uncommit old pages, if necessary.
+  MemRegion cur_committed = _committed[changed_region];
+  assert(_covered[changed_region].end() == new_region.end(),
+    "The ends of the regions are expected to match");
+  // Extend the start of this _committed region to
+  // cover the start of any previous _committed region.
+  // This forms overlapping regions, but never interior regions.
+  HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
+  if (min_prev_start < cur_committed.start()) {
+    // Only really need to set start of "cur_committed" to
+    // the new start (min_prev_start) but assertion checking code
+    // below uses cur_committed.end(), so make it correct.
+    MemRegion new_committed =
+        MemRegion(min_prev_start, cur_committed.end());
+    cur_committed = new_committed;
+  }
+#ifdef ASSERT
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
+         "Starts should have proper alignment");
+#endif
+
+  jbyte* new_start = byte_for(new_region.start());
+  // Round down because this is for the start address
+  HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
+  // The guard page is always committed and should not be committed over.
+  // This method is used in cases where the generation is growing toward
+  // lower addresses but the guard region is still at the end of the
+  // card table.  That still makes sense when looking for writes
+  // off the end of the card table.
+  if (new_start_aligned < cur_committed.start()) {
+    // Expand the committed region
+    //
+    // Case A
+    //                                          |+ guard +|
+    //                          |+ cur committed +++++++++|
+    //                  |+ new committed +++++++++++++++++|
+    //
+    // Case B
+    //                                          |+ guard +|
+    //                        |+ cur committed +|
+    //                  |+ new committed +++++++|
+    //
+    // These are not expected because the calculation of the
+    // cur committed region and the new committed region
+    // share the same end for the covered region.
+    // Case C
+    //                                          |+ guard +|
+    //                        |+ cur committed +|
+    //                  |+ new committed +++++++++++++++++|
+    // Case D
+    //                                          |+ guard +|
+    //                        |+ cur committed +++++++++++|
+    //                  |+ new committed +++++++|
+
+    HeapWord* new_end_for_commit =
+      MIN2(cur_committed.end(), _guard_region.start());
+    if (new_start_aligned < new_end_for_commit) {
+      MemRegion new_committed =
+        MemRegion(new_start_aligned, new_end_for_commit);
+      os::commit_memory_or_exit((char*)new_committed.start(),
+                                new_committed.byte_size(), !ExecMem,
+                                "card table expansion");
+    }
+    result = true;
+  } else if (new_start_aligned > cur_committed.start()) {
+    // Shrink the committed region
+#if 0 // uncommitting space is currently unsafe because of the interactions
+      // of growing and shrinking regions.  One region A can uncommit space
+      // that it owns but which may be in use by another region B.
+      // Region B has not committed the space because it was already
+      // committed by region A.
+    MemRegion uncommit_region = committed_unique_to_self(changed_region,
+      MemRegion(cur_committed.start(), new_start_aligned));
+    if (!uncommit_region.is_empty()) {
+      if (!os::uncommit_memory((char*)uncommit_region.start(),
+                               uncommit_region.byte_size())) {
+        // If the uncommit fails, ignore it.  Let the
+        // committed table resizing go even though the committed
+        // table will overstate the committed space.
+      }
+    }
+#else
+    assert(!result, "Should be false with current workaround");
+#endif
+  }
+  assert(_committed[changed_region].end() == cur_committed.end(),
+    "end should not change");
+  return result;
+}
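The expansion path rounds the new start card byte down to a page boundary and commits only up to the lower of the current committed end and the guard page. A standalone sketch of that arithmetic, with invented addresses and a 4K page as a stand-in for os::vm_page_size():

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t page = 4096;                    // stand-in for os::vm_page_size()
  uintptr_t new_start           = 0x7f0000012345; // byte_for(new_region.start())
  uintptr_t cur_committed_start = 0x7f0000014000;
  uintptr_t cur_committed_end   = 0x7f0000020000;
  uintptr_t guard_start         = 0x7f000001e000; // _guard_region.start()

  uintptr_t new_start_aligned = new_start & ~(page - 1);  // align_down to a page
  if (new_start_aligned < cur_committed_start) {
    // Never commit over the guard page at the end of the card table.
    uintptr_t end_for_commit = std::min(cur_committed_end, guard_start);
    if (new_start_aligned < end_for_commit) {
      printf("commit [%#lx, %#lx)\n", (unsigned long)new_start_aligned,
             (unsigned long)end_for_commit);
    }
  }
  return 0;
}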
+
+void PSCardTable::resize_update_committed_table(int changed_region,
+                                                MemRegion new_region) {
+
+  jbyte* new_start = byte_for(new_region.start());
+  // Set the new start of the committed region
+  HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
+  MemRegion new_committed = MemRegion(new_start_aligned,
+                                      _committed[changed_region].end());
+  _committed[changed_region] = new_committed;
+  _committed[changed_region].set_start(new_start_aligned);
+}
+
+void PSCardTable::resize_update_card_table_entries(int changed_region,
+                                                   MemRegion new_region) {
+  debug_only(verify_guard();)
+  MemRegion original_covered = _covered[changed_region];
+  // Initialize the card entries.  Only consider the
+  // region covered by the card table (_whole_heap)
+  jbyte* entry;
+  if (new_region.start() < _whole_heap.start()) {
+    entry = byte_for(_whole_heap.start());
+  } else {
+    entry = byte_for(new_region.start());
+  }
+  jbyte* end = byte_for(original_covered.start());
+  // If _whole_heap starts at the original covered region's start,
+  // this loop will not execute.
+  while (entry < end) { *entry++ = clean_card; }
+}
+
+void PSCardTable::resize_update_covered_table(int changed_region,
+                                              MemRegion new_region) {
+  // Update the covered region
+  _covered[changed_region].set_start(new_region.start());
+  _covered[changed_region].set_word_size(new_region.word_size());
+
+  // Reorder regions.  There should be at most one region
+  // out of order.
+  for (int i = _cur_covered_regions - 1; i > 0; i--) {
+    if (_covered[i].start() < _covered[i-1].start()) {
+      MemRegion covered_mr = _covered[i-1];
+      _covered[i-1] = _covered[i];
+      _covered[i] = covered_mr;
+      MemRegion committed_mr = _committed[i-1];
+      _committed[i-1] = _committed[i];
+      _committed[i] = committed_mr;
+      break;
+    }
+  }
+#ifdef ASSERT
+  for (int m = 0; m < _cur_covered_regions-1; m++) {
+    assert(_covered[m].start() <= _covered[m+1].start(),
+      "Covered regions out of order");
+    assert(_committed[m].start() <= _committed[m+1].start(),
+      "Committed regions out of order");
+  }
+#endif
+}
+
+// Returns the start of any committed region that is lower than
+// the target committed region (index ind) and that intersects the
+// target region.  If none, return start of target region.
+//
+//      -------------
+//      |           |
+//      -------------
+//              ------------
+//              | target   |
+//              ------------
+//                               -------------
+//                               |           |
+//                               -------------
+//      ^ returns this
+//
+//      -------------
+//      |           |
+//      -------------
+//                      ------------
+//                      | target   |
+//                      ------------
+//                               -------------
+//                               |           |
+//                               -------------
+//                      ^ returns this
+
+HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const {
+  assert(_cur_covered_regions >= 0, "Expecting at least one region");
+  HeapWord* min_start = _committed[ind].start();
+  for (int j = 0; j < ind; j++) {
+    HeapWord* this_start = _committed[j].start();
+    if ((this_start < min_start) &&
+        !(_committed[j].intersection(_committed[ind])).is_empty()) {
+       min_start = this_start;
+    }
+  }
+  return min_start;
+}
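Matching the diagrams above, lowest_prev_committed_start() scans lower-indexed committed regions and returns the lowest start among those that intersect the target, falling back to the target's own start. A standalone sketch with half-open regions as a simplified stand-in for MemRegion:

#include <cstdio>

// Half-open [start, end) regions as a simplified stand-in for MemRegion.
struct Region { long start, end; };

static bool intersects(const Region& a, const Region& b) {
  return a.start < b.end && b.start < a.end;
}

// Among lower-indexed committed regions, return the lowest start that still
// intersects the target region; otherwise the target's own start.
static long lowest_prev_committed_start(const Region* committed, int ind) {
  long min_start = committed[ind].start;
  for (int j = 0; j < ind; j++) {
    if (committed[j].start < min_start && intersects(committed[j], committed[ind])) {
      min_start = committed[j].start;
    }
  }
  return min_start;
}

int main() {
  // Region 0 overlaps the target (region 1), so its start wins, as in the
  // first diagram; region 2 is ignored because it has a higher index.
  Region committed[] = { {0, 120}, {100, 200}, {250, 300} };
  printf("%ld\n", lowest_prev_committed_start(committed, 1));  // prints 0
  return 0;
}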
+
+bool PSCardTable::is_in_young(oop obj) const {
+  return ParallelScavengeHeap::heap()->is_in_young(obj);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psCardTable.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
+#define SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
+
+#include "gc/shared/cardTable.hpp"
+#include "oops/oop.hpp"
+
+class MutableSpace;
+class ObjectStartArray;
+class PSPromotionManager;
+class GCTaskQueue;
+
+class PSCardTable: public CardTable {
+ private:
+  // Support methods for resizing the card table.
+  // resize_commit_uncommit() returns true if the pages were committed or
+  // uncommitted
+  bool resize_commit_uncommit(int changed_region, MemRegion new_region);
+  void resize_update_card_table_entries(int changed_region,
+                                        MemRegion new_region);
+  void resize_update_committed_table(int changed_region, MemRegion new_region);
+  void resize_update_covered_table(int changed_region, MemRegion new_region);
+
+  void verify_all_young_refs_precise_helper(MemRegion mr);
+
+  enum ExtendedCardValue {
+    youngergen_card   = CT_MR_BS_last_reserved + 1,
+    verify_card       = CT_MR_BS_last_reserved + 5
+  };
+
+ public:
+  PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
+
+  static jbyte youngergen_card_val() { return youngergen_card; }
+  static jbyte verify_card_val()     { return verify_card; }
+
+  // Scavenge support
+  void scavenge_contents_parallel(ObjectStartArray* start_array,
+                                  MutableSpace* sp,
+                                  HeapWord* space_top,
+                                  PSPromotionManager* pm,
+                                  uint stripe_number,
+                                  uint stripe_total);
+
+  bool addr_is_marked_imprecise(void *addr);
+  bool addr_is_marked_precise(void *addr);
+
+  void set_card_newgen(void* addr)   { jbyte* p = byte_for(addr); *p = verify_card; }
+
+  // Testers for entries
+  static bool card_is_dirty(int value)      { return value == dirty_card; }
+  static bool card_is_newgen(int value)     { return value == youngergen_card; }
+  static bool card_is_clean(int value)      { return value == clean_card; }
+  static bool card_is_verify(int value)     { return value == verify_card; }
+
+  // Card marking
+  void inline_write_ref_field_gc(void* field, oop new_val) {
+    jbyte* byte = byte_for(field);
+    *byte = youngergen_card;
+  }
+
+  // ReduceInitialCardMarks support
+  bool is_in_young(oop obj) const;
+
+  // Adaptive size policy support
+  // Allows adjustment of the base and size of the covered regions
+  void resize_covered_region(MemRegion new_region);
+  // Finds the covered region to resize based on the start address
+  // of the covered regions.
+  void resize_covered_region_by_start(MemRegion new_region);
+  // Finds the covered region to resize based on the end address
+  // of the covered regions.
+  void resize_covered_region_by_end(int changed_region, MemRegion new_region);
+  // Finds the lowest start address of a covered region that is
+  // previous (i.e., lower index) to the covered region with index "ind".
+  HeapWord* lowest_prev_committed_start(int ind) const;
+
+#ifdef ASSERT
+  bool is_valid_card_address(jbyte* addr) {
+    return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
+  }
+#endif // ASSERT
+
+  // Verification
+  void verify_all_young_refs_imprecise();
+  void verify_all_young_refs_precise();
+};
+
+#endif // SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -236,12 +236,12 @@
                       young_gen->to_space()->is_empty();
     young_gen_empty = eden_empty && survivors_empty;
 
-    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+    PSCardTable* card_table = heap->card_table();
     MemRegion old_mr = heap->old_gen()->reserved();
     if (young_gen_empty) {
-      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
     } else {
-      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
+      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
     }
 
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
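
The clear()/invalidate() pair above maps onto the card values defined by the new CardTable class later in this patch: clear() resets every covered card in the region to clean_card, while invalidate() dirties them so the next scavenge rescans the old generation for old-to-young pointers. A short illustration of the two states involved:

// Illustration only (values from CardTable::CardValues below):
//   young gen empty -> no old->young pointers can exist:
//     card_table->clear(old_mr);      // cards := clean_card (-1)
//   young gen not empty -> old->young pointers unknown:
//     card_table->invalidate(old_mr); // cards := dirty_card (0)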
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
+#include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
@@ -111,11 +112,8 @@
   }
 
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  BarrierSet* bs = heap->barrier_set();
-
-  bs->resize_covered_region(cmr);
-
-  CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
+  PSCardTable* ct = heap->card_table();
+  ct->resize_covered_region(cmr);
 
   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than one generation,
@@ -386,7 +384,7 @@
   size_t new_word_size = new_memregion.word_size();
 
   start_array()->set_covered_region(new_memregion);
-  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
+  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);
 
   // ALWAYS do this last!!
   object_space()->initialize(new_memregion,
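
The card-alignment requirement mentioned in the first hunk above exists because a card spanning two generations would be dirtied on behalf of both. A hedged sketch of the invariant, using the is_card_aligned() helper the new CardTable class provides (the _reserved field name is assumed):

// Sketch only; assumes a '_reserved' MemRegion describing this generation.
assert(ct->is_card_aligned(_reserved.start()), "generation must start on a card boundary");
assert(ct->is_card_aligned(_reserved.end()),   "generation must end on a card boundary");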
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1017,12 +1017,12 @@
   bool young_gen_empty = eden_empty && from_space->is_empty() &&
     to_space->is_empty();
 
-  ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+  PSCardTable* ct = heap->card_table();
   MemRegion old_mr = heap->old_gen()->reserved();
   if (young_gen_empty) {
-    modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+    ct->clear(MemRegion(old_mr.start(), old_mr.end()));
   } else {
-    modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
+    ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
   }
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
 #include "code/codeCache.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
@@ -60,7 +59,7 @@
 HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
 int                        PSScavenge::_consecutive_skipped_scavenges = 0;
 ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
-CardTableExtension*        PSScavenge::_card_table = NULL;
+PSCardTable*               PSScavenge::_card_table = NULL;
 bool                       PSScavenge::_survivor_overflow = false;
 uint                       PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
@@ -322,7 +321,7 @@
 
     // Verify no unmarked old->young roots
     if (VerifyRememberedSets) {
-      CardTableExtension::verify_all_young_refs_imprecise();
+      heap->card_table()->verify_all_young_refs_imprecise();
     }
 
     assert(young_gen->to_space()->is_empty(),
@@ -617,8 +616,8 @@
     if (VerifyRememberedSets) {
       // Precise verification will give false positives. Until this is fixed,
       // use imprecise verification.
-      // CardTableExtension::verify_all_young_refs_precise();
-      CardTableExtension::verify_all_young_refs_imprecise();
+      // heap->card_table()->verify_all_young_refs_precise();
+      heap->card_table()->verify_all_young_refs_imprecise();
     }
 
     if (log_is_enabled(Debug, gc, heap, exit)) {
@@ -778,7 +777,7 @@
                            NULL);                      // header provides liveness info
 
   // Cache the cardtable
-  _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
+  _card_table = heap->card_table();
 
   _counters = new CollectorCounters("PSScavenge", 0);
 }
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP
 #define SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP
 
-#include "gc/parallel/cardTableExtension.hpp"
+#include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psVirtualspace.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -67,7 +67,7 @@
   // Flags/counters
   static ReferenceProcessor*  _ref_processor;        // Reference processor for scavenging.
   static PSIsAliveClosure     _is_alive_closure;     // Closure used for reference processing
-  static CardTableExtension*  _card_table;           // We cache the card table for fast access.
+  static PSCardTable*         _card_table;           // We cache the card table for fast access.
   static bool                 _survivor_overflow;    // Overflow this collection
   static uint                 _tenuring_threshold;   // tenuring threshold for next scavenge
   static elapsedTimer         _accumulated_time;     // total time spent on scavenge
@@ -89,7 +89,7 @@
   static inline void save_to_space_top_before_gc();
 
   // Private accessors
-  static CardTableExtension* const card_table()       { assert(_card_table != NULL, "Sanity"); return _card_table; }
+  static PSCardTable* const card_table()           { assert(_card_table != NULL, "Sanity"); return _card_table; }
   static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
 
  public:
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
 #define SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
 
-#include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
--- a/src/hotspot/share/gc/parallel/psTasks.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,9 @@
 #include "aot/aotLoader.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
 #include "gc/parallel/gcTaskManager.hpp"
 #include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
@@ -176,8 +176,7 @@
 
   {
     PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-    CardTableExtension* card_table =
-      barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
+    PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
 
     card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                            _old_gen->object_space(),
--- a/src/hotspot/share/gc/parallel/psTasks.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psTasks.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,7 +148,7 @@
 // will be covered.  In this example if 4 tasks have been created to cover
 // all the stripes and there are only 3 threads, one of the threads will
 // get the tasks with the 4th stripe.  However, there is a dependence in
-// CardTableExtension::scavenge_contents_parallel() on the number
+// PSCardTable::scavenge_contents_parallel() on the number
 // of tasks created.  In scavenge_contents_parallel the distance
 // to the next stripe is calculated based on the number of tasks.
 // If the stripe width is ssize, a task's next stripe is at
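
A worked example of the striping arithmetic this comment describes (numbers hypothetical): with stripe_total == 4 tasks and stripe width ssize, the task owning stripe 1 scans stripes 1, 5, 9, ..., so each task advances stripe_total * ssize words past the start of its current stripe.

// Hypothetical sketch of the stripe stepping described above.
static size_t next_stripe_start(size_t cur_stripe_start, size_t ssize, uint stripe_total) {
  return cur_stripe_start + stripe_total * ssize;  // skip the other tasks' stripes
}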
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
 
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
 
   if (ZapUnusedHeapArea) {
     // Mangle newly committed space immediately because it
@@ -870,7 +870,7 @@
 
   MemRegion cmr((HeapWord*)virtual_space()->low(),
                 (HeapWord*)virtual_space()->high());
-  ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+  ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
   space_invariants();
 }
 
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,7 +189,7 @@
                 (HeapWord*)_virtual_space.high());
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
-  gch->barrier_set()->resize_covered_region(cmr);
+  gch->rem_set()->resize_covered_region(cmr);
 
   _eden_space = new ContiguousSpace();
   _from_space = new ContiguousSpace();
@@ -454,7 +454,7 @@
                              SpaceDecorator::DontMangle);
     MemRegion cmr((HeapWord*)_virtual_space.low(),
                   (HeapWord*)_virtual_space.high());
-    gch->barrier_set()->resize_covered_region(cmr);
+    gch->rem_set()->resize_covered_region(cmr);
 
     log_debug(gc, ergo, heap)(
         "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
@@ -634,7 +634,7 @@
   {
     // DefNew needs to run with n_threads == 0, to make sure the serial
     // version of the card table scanning code is used.
-    // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
+    // See: CardTableRS::non_clean_card_iterate_possibly_parallel.
     StrongRootsScope srs(0);
 
     gch->young_process_roots(&srs,
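
A hedged sketch (structure assumed, not the actual CardTableRS code) of the dispatch the comment above refers to: with StrongRootsScope srs(0), the thread count reaching the card scan is zero, so the serial path is taken.

// Sketch only: how a possibly-parallel card scan might pick its path.
static void non_clean_card_iterate_possibly_parallel_sketch(MemRegion mr, uint n_threads) {
  if (n_threads > 0) {
    // parallel path: stripe mr across the GC worker threads
  } else {
    // serial path taken by DefNew: scan mr's non-clean cards in this thread
  }
}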
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -123,15 +123,6 @@
   virtual void write_ref_array_work(MemRegion mr) = 0;
 
 public:
-  // Inform the BarrierSet that the the covered heap region that starts
-  // with "base" has been changed to have the given size (possibly from 0,
-  // for initialization.)
-  virtual void resize_covered_region(MemRegion new_region) = 0;
-
-  // If the barrier set imposes any alignment restrictions on boundaries
-  // within the heap, this function tells whether they are met.
-  virtual bool is_aligned(HeapWord* addr) = 0;
-
   // Print a description of the memory for the barrier set
   virtual void print_on(outputStream* st) const = 0;
 
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -29,25 +29,31 @@
 
 #if INCLUDE_ALL_GCS
 #define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
-  f(CardTableExtension)                                    \
   f(G1SATBCTLogging)
 #else
 #define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
 #endif
 
+#if INCLUDE_ALL_GCS
+#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
+  f(G1SATBCT)
+#else
+#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+#endif
+
 // Do something for each concrete barrier set part of the build.
 #define FOR_EACH_CONCRETE_BARRIER_SET_DO(f)          \
-  f(CardTableForRS)                                  \
+  f(CardTableModRef)                                 \
   FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
 
+#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f)          \
+  f(ModRef)                                          \
+  FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+
 // Do something for each known barrier set.
 #define FOR_EACH_BARRIER_SET_DO(f)    \
-  f(ModRef)                           \
-  f(CardTableModRef)                  \
-  f(CardTableForRS)                   \
-  f(CardTableExtension)               \
-  f(G1SATBCT)                         \
-  f(G1SATBCTLogging)
+  FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
+  FOR_EACH_CONCRETE_BARRIER_SET_DO(f)
 
 // To enable runtime-resolution of GC barriers on primitives, please
 // define SUPPORT_BARRIER_ON_PRIMITIVES.
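
For readers unfamiliar with this X-macro pattern: each FOR_EACH_*_DO(f) applies the macro f to every barrier set name, so per-type boilerplate can be generated in one place. An illustrative, non-patch expansion:

// Illustration only: declare one hypothetical helper class per concrete set.
#define EXAMPLE_DECLARE(name) class name##ExampleHelper;
FOR_EACH_CONCRETE_BARRIER_SET_DO(EXAMPLE_DECLARE)
// With INCLUDE_ALL_GCS defined, this expands to:
//   class CardTableModRefExampleHelper;
//   class G1SATBCTLoggingExampleHelper;
#undef EXAMPLE_DECLARE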
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,8 @@
 
 #include "gc/shared/modRefBarrierSet.inline.hpp"
 #include "gc/shared/cardTableModRefBS.inline.hpp"
-#include "gc/shared/cardTableModRefBSForCTRS.hpp"
 
 #if INCLUDE_ALL_GCS
-#include "gc/parallel/cardTableExtension.hpp"       // Parallel support
 #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" // G1 support
 #endif
 
--- a/src/hotspot/share/gc/shared/cardGeneration.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardGeneration.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
        heap_word_size(_virtual_space.committed_size());
     MemRegion mr(space()->bottom(), new_word_size);
     // Expand card table
-    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
+    GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
     // Expand shared block offset array
     _bts->resize(new_word_size);
 
@@ -166,7 +166,7 @@
   _bts->resize(new_word_size);
   MemRegion mr(space()->bottom(), new_word_size);
   // Shrink the card table
-  GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
+  GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
 
   size_t new_mem_size = _virtual_space.committed_size();
   size_t old_mem_size = new_mem_size + size;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTable.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "logging/log.hpp"
+#include "memory/virtualspace.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/align.hpp"
+
+size_t CardTable::compute_byte_map_size() {
+  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
+                                        "uninitialized, check declaration order");
+  assert(_page_size != 0, "uninitialized, check declaration order");
+  const size_t granularity = os::vm_allocation_granularity();
+  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
+}
+
+CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
+  _scanned_concurrently(conc_scan),
+  _whole_heap(whole_heap),
+  _guard_index(0),
+  _guard_region(),
+  _last_valid_index(0),
+  _page_size(os::vm_page_size()),
+  _byte_map_size(0),
+  _covered(NULL),
+  _committed(NULL),
+  _cur_covered_regions(0),
+  _byte_map(NULL),
+  _byte_map_base(NULL)
+{
+  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
+  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
+
+  assert(card_size <= 512, "card_size must be at most 512"); // why?
+
+  _covered   = new MemRegion[_max_covered_regions];
+  if (_covered == NULL) {
+    vm_exit_during_initialization("Could not allocate card table covered region set.");
+  }
+}
+
+CardTable::~CardTable() {
+  if (_covered) {
+    delete[] _covered;
+    _covered = NULL;
+  }
+  if (_committed) {
+    delete[] _committed;
+    _committed = NULL;
+  }
+}
+
+void CardTable::initialize() {
+  _guard_index = cards_required(_whole_heap.word_size()) - 1;
+  _last_valid_index = _guard_index - 1;
+
+  _byte_map_size = compute_byte_map_size();
+
+  HeapWord* low_bound  = _whole_heap.start();
+  HeapWord* high_bound = _whole_heap.end();
+
+  _cur_covered_regions = 0;
+  _committed = new MemRegion[_max_covered_regions];
+  if (_committed == NULL) {
+    vm_exit_during_initialization("Could not allocate card table committed region set.");
+  }
+
+  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
+    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
+  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
+
+  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
+
+  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
+                       _page_size, heap_rs.base(), heap_rs.size());
+  if (!heap_rs.is_reserved()) {
+    vm_exit_during_initialization("Could not reserve enough space for the "
+                                  "card marking array");
+  }
+
+  // The assembler store_check code will do an unsigned shift of the oop,
+  // then add it to _byte_map_base, i.e.
+  //
+  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
+  _byte_map = (jbyte*) heap_rs.base();
+  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+  jbyte* guard_card = &_byte_map[_guard_index];
+  HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
+  _guard_region = MemRegion(guard_page, _page_size);
+  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
+                            !ExecMem, "card table last card");
+  *guard_card = last_card;
+
+  log_trace(gc, barrier)("CardTable::CardTable: ");
+  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+                  p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+  log_trace(gc, barrier)("    _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
+}
+
+int CardTable::find_covering_region_by_base(HeapWord* base) {
+  int i;
+  for (i = 0; i < _cur_covered_regions; i++) {
+    if (_covered[i].start() == base) return i;
+    if (_covered[i].start() > base) break;
+  }
+  // If we didn't find it, create a new one.
+  assert(_cur_covered_regions < _max_covered_regions,
+         "too many covered regions");
+  // Move the ones above up, to maintain sorted order.
+  for (int j = _cur_covered_regions; j > i; j--) {
+    _covered[j] = _covered[j-1];
+    _committed[j] = _committed[j-1];
+  }
+  int res = i;
+  _cur_covered_regions++;
+  _covered[res].set_start(base);
+  _covered[res].set_word_size(0);
+  jbyte* ct_start = byte_for(base);
+  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
+  _committed[res].set_start(ct_start_aligned);
+  _committed[res].set_word_size(0);
+  return res;
+}
+
+int CardTable::find_covering_region_containing(HeapWord* addr) {
+  for (int i = 0; i < _cur_covered_regions; i++) {
+    if (_covered[i].contains(addr)) {
+      return i;
+    }
+  }
+  assert(0, "address outside of heap?");
+  return -1;
+}
+
+HeapWord* CardTable::largest_prev_committed_end(int ind) const {
+  HeapWord* max_end = NULL;
+  for (int j = 0; j < ind; j++) {
+    HeapWord* this_end = _committed[j].end();
+    if (this_end > max_end) max_end = this_end;
+  }
+  return max_end;
+}
+
+MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
+  MemRegion result = mr;
+  for (int r = 0; r < _cur_covered_regions; r += 1) {
+    if (r != self) {
+      result = result.minus(_committed[r]);
+    }
+  }
+  // Never include the guard page.
+  result = result.minus(_guard_region);
+  return result;
+}
+
+void CardTable::resize_covered_region(MemRegion new_region) {
+  // We don't change the start of a region, only the end.
+  assert(_whole_heap.contains(new_region),
+           "attempt to cover area not in reserved area");
+  debug_only(verify_guard();)
+  // collided is true if the expansion would push into another committed region
+  debug_only(bool collided = false;)
+  int const ind = find_covering_region_by_base(new_region.start());
+  MemRegion const old_region = _covered[ind];
+  assert(old_region.start() == new_region.start(), "just checking");
+  if (new_region.word_size() != old_region.word_size()) {
+    // Commit new or uncommit old pages, if necessary.
+    MemRegion cur_committed = _committed[ind];
+    // Extend the end of this _committed region
+    // to cover the end of any lower _committed regions.
+    // This forms overlapping regions, but never interior regions.
+    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
+    if (max_prev_end > cur_committed.end()) {
+      cur_committed.set_end(max_prev_end);
+    }
+    // Align the end up to a page size (starts are already aligned).
+    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
+    HeapWord* new_end_aligned = align_up(new_end, _page_size);
+    assert(new_end_aligned >= new_end, "align up, but less");
+    // Check the other regions (excludes "ind") to ensure that
+    // the new_end_aligned does not intrude onto the committed
+    // space of another region.
+    int ri = 0;
+    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
+      if (new_end_aligned > _committed[ri].start()) {
+        assert(new_end_aligned <= _committed[ri].end(),
+               "An earlier committed region can't cover a later committed region");
+        // Any region containing the new end
+        // should start at or beyond the region found (ind)
+        // for the new end (committed regions are not expected to
+        // be proper subsets of other committed regions).
+        assert(_committed[ri].start() >= _committed[ind].start(),
+               "New end of committed region is inconsistent");
+        new_end_aligned = _committed[ri].start();
+        // new_end_aligned can be equal to the start of its
+        // committed region (i.e., of "ind") if a second
+        // region following "ind" also start at the same location
+        // as "ind".
+        assert(new_end_aligned >= _committed[ind].start(),
+          "New end of committed region is before start");
+        debug_only(collided = true;)
+        // Should only collide with 1 region
+        break;
+      }
+    }
+#ifdef ASSERT
+    for (++ri; ri < _cur_covered_regions; ri++) {
+      assert(!_committed[ri].contains(new_end_aligned),
+        "New end of committed region is in a second committed region");
+    }
+#endif
+    // The guard page is always committed and should not be committed over.
+    // "guarded" is used for assertion checking below and recalls the fact
+    // that the would-be end of the new committed region would have
+    // penetrated the guard page.
+    HeapWord* new_end_for_commit = new_end_aligned;
+
+    DEBUG_ONLY(bool guarded = false;)
+    if (new_end_for_commit > _guard_region.start()) {
+      new_end_for_commit = _guard_region.start();
+      DEBUG_ONLY(guarded = true;)
+    }
+
+    if (new_end_for_commit > cur_committed.end()) {
+      // Must commit new pages.
+      MemRegion const new_committed =
+        MemRegion(cur_committed.end(), new_end_for_commit);
+
+      assert(!new_committed.is_empty(), "Region should not be empty here");
+      os::commit_memory_or_exit((char*)new_committed.start(),
+                                new_committed.byte_size(), _page_size,
+                                !ExecMem, "card table expansion");
+    // Use new_end_aligned (as opposed to new_end_for_commit) because
+    // the cur_committed region may include the guard region.
+    } else if (new_end_aligned < cur_committed.end()) {
+      // Must uncommit pages.
+      MemRegion const uncommit_region =
+        committed_unique_to_self(ind, MemRegion(new_end_aligned,
+                                                cur_committed.end()));
+      if (!uncommit_region.is_empty()) {
+        // It is not safe to uncommit cards if the boundary between
+        // the generations is moving.  A shrink can uncommit cards
+        // owned by generation A but being used by generation B.
+        if (!UseAdaptiveGCBoundary) {
+          if (!os::uncommit_memory((char*)uncommit_region.start(),
+                                   uncommit_region.byte_size())) {
+            assert(false, "Card table contraction failed");
+            // The call failed so don't change the end of the
+            // committed region.  This is better than taking the
+            // VM down.
+            new_end_aligned = _committed[ind].end();
+          }
+        } else {
+          new_end_aligned = _committed[ind].end();
+        }
+      }
+    }
+    // In any case, we can reset the end of the current committed entry.
+    _committed[ind].set_end(new_end_aligned);
+
+#ifdef ASSERT
+    // Check that the last card in the new region is committed according
+    // to the tables.
+    bool covered = false;
+    for (int cr = 0; cr < _cur_covered_regions; cr++) {
+      if (_committed[cr].contains(new_end - 1)) {
+        covered = true;
+        break;
+      }
+    }
+    assert(covered, "Card for end of new region not committed");
+#endif
+
+    // The default of 0 is not necessarily clean cards.
+    jbyte* entry;
+    if (old_region.last() < _whole_heap.start()) {
+      entry = byte_for(_whole_heap.start());
+    } else {
+      entry = byte_after(old_region.last());
+    }
+    assert(index_for(new_region.last()) <  _guard_index,
+      "The guard card will be overwritten");
+    // The commented-out line below would clean only the newly expanded
+    // region, not the region expanded and aligned up to a page boundary.
+    // jbyte* const end = byte_after(new_region.last());
+    jbyte* const end = (jbyte*) new_end_for_commit;
+    assert((end >= byte_after(new_region.last())) || collided || guarded,
+      "Expect to be beyond new region unless impacting another region");
+    // do nothing if we resized downward.
+#ifdef ASSERT
+    for (int ri = 0; ri < _cur_covered_regions; ri++) {
+      if (ri != ind) {
+        // The end of the new committed region should not
+        // be in any existing region unless it matches
+        // the start of the next region.
+        assert(!_committed[ri].contains(end) ||
+               (_committed[ri].start() == (HeapWord*) end),
+               "Overlapping committed regions");
+      }
+    }
+#endif
+    if (entry < end) {
+      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
+    }
+  }
+  // In any case, the covered size changes.
+  _covered[ind].set_word_size(new_region.word_size());
+
+  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
+  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
+                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
+                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
+                         p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
+  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
+                         p2i(addr_for((jbyte*) _committed[ind].start())),  p2i(addr_for((jbyte*) _committed[ind].last())));
+
+  // Touch the last card of the covered region to show that it
+  // is committed (or SEGV).
+  debug_only((void) (*byte_for(_covered[ind].last()));)
+  debug_only(verify_guard();)
+}
+
+// Note that these versions are precise!  The scanning code has to handle the
+// fact that the write barrier may be either precise or imprecise.
+void CardTable::dirty_MemRegion(MemRegion mr) {
+  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+  jbyte* cur  = byte_for(mr.start());
+  jbyte* last = byte_after(mr.last());
+  while (cur < last) {
+    *cur = dirty_card;
+    cur++;
+  }
+}
+
+void CardTable::clear_MemRegion(MemRegion mr) {
+  // Be conservative: only clean cards entirely contained within the
+  // region.
+  jbyte* cur;
+  if (mr.start() == _whole_heap.start()) {
+    cur = byte_for(mr.start());
+  } else {
+    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
+    cur = byte_after(mr.start() - 1);
+  }
+  jbyte* last = byte_after(mr.last());
+  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
+}
+
+void CardTable::clear(MemRegion mr) {
+  for (int i = 0; i < _cur_covered_regions; i++) {
+    MemRegion mri = mr.intersection(_covered[i]);
+    if (!mri.is_empty()) clear_MemRegion(mri);
+  }
+}
+
+void CardTable::dirty(MemRegion mr) {
+  jbyte* first = byte_for(mr.start());
+  jbyte* last  = byte_after(mr.last());
+  memset(first, dirty_card, last-first);
+}
+
+// Unlike several other card table methods, dirty_card_iterate()
+// iterates over dirty card ranges in increasing address order.
+void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
+  for (int i = 0; i < _cur_covered_regions; i++) {
+    MemRegion mri = mr.intersection(_covered[i]);
+    if (!mri.is_empty()) {
+      jbyte *cur_entry, *next_entry, *limit;
+      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
+           cur_entry <= limit;
+           cur_entry  = next_entry) {
+        next_entry = cur_entry + 1;
+        if (*cur_entry == dirty_card) {
+          size_t dirty_cards;
+          // Accumulate maximal dirty card range, starting at cur_entry
+          for (dirty_cards = 1;
+               next_entry <= limit && *next_entry == dirty_card;
+               dirty_cards++, next_entry++);
+          MemRegion cur_cards(addr_for(cur_entry),
+                              dirty_cards*card_size_in_words);
+          cl->do_MemRegion(cur_cards);
+        }
+      }
+    }
+  }
+}
+
+MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
+                                                  bool reset,
+                                                  int reset_val) {
+  for (int i = 0; i < _cur_covered_regions; i++) {
+    MemRegion mri = mr.intersection(_covered[i]);
+    if (!mri.is_empty()) {
+      jbyte* cur_entry, *next_entry, *limit;
+      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
+           cur_entry <= limit;
+           cur_entry  = next_entry) {
+        next_entry = cur_entry + 1;
+        if (*cur_entry == dirty_card) {
+          size_t dirty_cards;
+          // Accumulate maximal dirty card range, starting at cur_entry
+          for (dirty_cards = 1;
+               next_entry <= limit && *next_entry == dirty_card;
+               dirty_cards++, next_entry++);
+          MemRegion cur_cards(addr_for(cur_entry),
+                              dirty_cards*card_size_in_words);
+          if (reset) {
+            for (size_t i = 0; i < dirty_cards; i++) {
+              cur_entry[i] = reset_val;
+            }
+          }
+          return cur_cards;
+        }
+      }
+    }
+  }
+  return MemRegion(mr.end(), mr.end());
+}
+
+uintx CardTable::ct_max_alignment_constraint() {
+  return card_size * os::vm_page_size();
+}
+
+void CardTable::verify_guard() {
+  // For product build verification
+  guarantee(_byte_map[_guard_index] == last_card,
+            "card table guard has been modified");
+}
+
+void CardTable::invalidate(MemRegion mr) {
+  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
+  for (int i = 0; i < _cur_covered_regions; i++) {
+    MemRegion mri = mr.intersection(_covered[i]);
+    if (!mri.is_empty()) dirty_MemRegion(mri);
+  }
+}
+
+void CardTable::verify() {
+  verify_guard();
+}
+
+#ifndef PRODUCT
+void CardTable::verify_region(MemRegion mr,
+                              jbyte val, bool val_equals) {
+  jbyte* start    = byte_for(mr.start());
+  jbyte* end      = byte_for(mr.last());
+  bool failures = false;
+  for (jbyte* curr = start; curr <= end; ++curr) {
+    jbyte curr_val = *curr;
+    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
+    if (failed) {
+      if (!failures) {
+        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
+        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
+        failures = true;
+      }
+      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
+                            p2i(curr), p2i(addr_for(curr)),
+                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
+                            (int) curr_val);
+    }
+  }
+  guarantee(!failures, "there should not have been any failures");
+}
+
+void CardTable::verify_not_dirty_region(MemRegion mr) {
+  verify_region(mr, dirty_card, false /* val_equals */);
+}
+
+void CardTable::verify_dirty_region(MemRegion mr) {
+  verify_region(mr, dirty_card, true /* val_equals */);
+}
+#endif
+
+void CardTable::print_on(outputStream* st) const {
+  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
+               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
+}
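
To make the biased-base arithmetic in initialize() concrete: because _byte_map_base is pre-biased by the heap's low bound, byte_for(p) can index it directly with p >> card_shift. A worked example with hypothetical addresses:

// Hypothetical numbers: heap low bound 0x100000, card_shift == 9 (512-byte cards).
//   _byte_map_base = _byte_map - (0x100000 >> 9) = _byte_map - 0x800
// For p = 0x100200 (512 bytes into the heap):
//   byte_for(p) = &_byte_map_base[0x100200 >> 9]
//               = _byte_map - 0x800 + 0x801
//               = &_byte_map[1]                   // the heap's second card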
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTable.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_CARDTABLE_HPP
+#define SHARE_VM_GC_SHARED_CARDTABLE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/align.hpp"
+
+class CardTable: public CHeapObj<mtGC> {
+  friend class VMStructs;
+protected:
+  // The declaration order of these const fields is important; see the
+  // constructor before changing.
+  const bool      _scanned_concurrently;
+  const MemRegion _whole_heap;       // the region covered by the card table
+  size_t          _guard_index;      // index of very last element in the card
+                                     // table; it is set to a guard value
+                                     // (last_card) and should never be modified
+  size_t          _last_valid_index; // index of the last valid element
+  const size_t    _page_size;        // page size used when mapping _byte_map
+  size_t          _byte_map_size;    // in bytes
+  jbyte*          _byte_map;         // the card marking array
+  jbyte*          _byte_map_base;
+
+  int _cur_covered_regions;
+
+  // The covered regions should be in address order.
+  MemRegion* _covered;
+  // The committed regions correspond one-to-one to the covered regions.
+  // They represent the card-table memory that has been committed to service
+  // the corresponding covered region.  It may be that committed region for
+  // one covered region corresponds to a larger region because of page-size
+  // roundings.  Thus, a committed region for one covered region may
+  // actually extend onto the card-table space for the next covered region.
+  MemRegion* _committed;
+
+  // The last card is a guard card, and we commit the page for it so
+  // we can use the card for verification purposes. We make sure we never
+  // uncommit the MemRegion for that page.
+  MemRegion _guard_region;
+
+  inline size_t compute_byte_map_size();
+
+  // Finds and returns the index of the region, if any, to which the given
+  // region would be contiguous.  If none exists, assigns a new region and
+  // returns its index.  Requires that no more than the maximum number of
+  // covered regions defined in the constructor are ever in use.
+  int find_covering_region_by_base(HeapWord* base);
+
+  // Same as above, but finds the region containing the given address
+  // instead of starting at a given base address.
+  int find_covering_region_containing(HeapWord* addr);
+
+  // Returns the largest end address among the committed regions that
+  // correspond to covered regions with index below "ind", or NULL if
+  // "ind" is the first covered region.
+  HeapWord* largest_prev_committed_end(int ind) const;
+
+  // Returns the part of the region mr that doesn't intersect with
+  // any committed region other than self.  Used to prevent uncommitting
+  // regions that are also committed by other regions.  Also protects
+  // against uncommitting the guard region.
+  MemRegion committed_unique_to_self(int self, MemRegion mr) const;
+
+  // The elements of the card table correspond to parts of the heap.
+  // Space for the table is reserved up front, and the parts of the table
+  // "covering" committed parts of the heap are committed as needed.  At
+  // most one covered region per generation is needed.
+  static const int _max_covered_regions = 2;
+
+  enum CardValues {
+    clean_card                  = -1,
+    // The mask contains zeros in places for all other values.
+    clean_card_mask             = clean_card - 31,
+
+    dirty_card                  =  0,
+    precleaned_card             =  1,
+    claimed_card                =  2,
+    deferred_card               =  4,
+    last_card                   =  8,
+    CT_MR_BS_last_reserved      = 16
+  };
+
+  // a word's worth (row) of clean card values
+  static const intptr_t clean_card_row = (intptr_t)(-1);
+
+public:
+  CardTable(MemRegion whole_heap, bool conc_scan);
+  virtual ~CardTable();
+  virtual void initialize();
+
+  // The kinds of precision a card table may offer.
+  enum PrecisionStyle {
+    Precise,
+    ObjHeadPreciseArray
+  };
+
+  // Tells what style of precision this card table offers.
+  PrecisionStyle precision() {
+    return ObjHeadPreciseArray; // Only one supported for now.
+  }
+
+  // *** Barrier set functions.
+
+  // Initialization utilities; covered_words is the size of the covered
+  // region in words.
+  inline size_t cards_required(size_t covered_words) {
+    // Add one for a guard card, used to detect errors.
+    const size_t words = align_up(covered_words, card_size_in_words);
+    return words / card_size_in_words + 1;
+  }
+
+  // Dirty the bytes corresponding to "mr" (not all of which must be
+  // covered.)
+  void dirty_MemRegion(MemRegion mr);
+
+  // Clear (to clean_card) the bytes entirely contained within "mr" (not
+  // all of which must be covered.)
+  void clear_MemRegion(MemRegion mr);
+
+  // Return true if "p" is at the start of a card.
+  bool is_card_aligned(HeapWord* p) {
+    jbyte* pcard = byte_for(p);
+    return (addr_for(pcard) == p);
+  }
+
+  // Mapping from address to card marking array entry
+  jbyte* byte_for(const void* p) const {
+    assert(_whole_heap.contains(p),
+           "Attempt to access p = " PTR_FORMAT " out of bounds of "
+           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
+           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
+    jbyte* result = &_byte_map_base[uintptr_t(p) >> card_shift];
+    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
+           "out of bounds accessor for card marking array");
+    return result;
+  }
+
+  // The card table byte one after the card marking array
+  // entry for the argument address.  Typically used as the upper
+  // bound for loops iterating through the card table.
+  jbyte* byte_after(const void* p) const {
+    return byte_for(p) + 1;
+  }
+
+  virtual void invalidate(MemRegion mr);
+  void clear(MemRegion mr);
+  void dirty(MemRegion mr);
+
+  // Provide read-only access to the card table array.
+  const jbyte* byte_for_const(const void* p) const {
+    return byte_for(p);
+  }
+  const jbyte* byte_after_const(const void* p) const {
+    return byte_after(p);
+  }
+
+  // Mapping from card marking array entry to address of first word
+  HeapWord* addr_for(const jbyte* p) const {
+    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
+           "out of bounds access to card marking array. p: " PTR_FORMAT
+           " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
+           p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
+    size_t delta = pointer_delta(p, _byte_map_base, sizeof(jbyte));
+    HeapWord* result = (HeapWord*) (delta << card_shift);
+    assert(_whole_heap.contains(result),
+           "Returning result = " PTR_FORMAT " out of bounds of "
+           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
+           p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
+    return result;
+  }
+
+  // Mapping from address to card marking array index.
+  size_t index_for(void* p) {
+    assert(_whole_heap.contains(p),
+           "Attempt to access p = " PTR_FORMAT " out of bounds of "
+           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
+           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
+    return byte_for(p) - _byte_map;
+  }
+
+  const jbyte* byte_for_index(const size_t card_index) const {
+    return _byte_map + card_index;
+  }
+
+  // Resize one of the regions covered by the remembered set.
+  virtual void resize_covered_region(MemRegion new_region);
+
+  // *** Card-table-RemSet-specific things.
+
+  static uintx ct_max_alignment_constraint();
+
+  // Apply closure "cl" to the dirty cards containing some part of
+  // MemRegion "mr".
+  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
+
+  // Return the MemRegion corresponding to the first maximal run
+  // of dirty cards lying completely within MemRegion mr.
+  // If reset is "true", then sets those card table entries to the given
+  // value.
+  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
+                                         int reset_val);
+
+  // Constants
+  enum SomePublicConstants {
+    card_shift                  = 9,
+    card_size                   = 1 << card_shift,
+    card_size_in_words          = card_size / sizeof(HeapWord)
+  };
+
+  static jbyte clean_card_val()          { return clean_card; }
+  static jbyte clean_card_mask_val()     { return clean_card_mask; }
+  static jbyte dirty_card_val()          { return dirty_card; }
+  static jbyte claimed_card_val()        { return claimed_card; }
+  static jbyte precleaned_card_val()     { return precleaned_card; }
+  static jbyte deferred_card_val()       { return deferred_card; }
+  static intptr_t clean_card_row_val()   { return clean_card_row; }
+
+  // Card marking array base (adjusted for heap low boundary)
+  // This would be the 0th element of _byte_map, if the heap started at 0x0.
+  // But since the heap starts at some higher address, this points to somewhere
+  // before the beginning of the actual _byte_map.
+  jbyte* byte_map_base() const { return _byte_map_base; }
+  bool scanned_concurrently() const { return _scanned_concurrently; }
+
+  virtual bool is_in_young(oop obj) const = 0;
+
+  // Print a description of the memory for the card table
+  virtual void print_on(outputStream* st) const;
+
+  void verify();
+  void verify_guard();
+
+  // val_equals -> it will check that all cards covered by mr equal val
+  // !val_equals -> it will check that all cards covered by mr do not equal val
+  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
+  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
+  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
+};
+
+#endif // SHARE_VM_GC_SHARED_CARDTABLE_HPP
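
A worked example of the sizing math above (heap size hypothetical): cards_required() adds one card beyond those covering the heap for the guard card, whose index becomes _guard_index.

// For a hypothetical 1 MB heap on a 64-bit VM (HeapWordSize == 8):
//   covered_words      = 1 MB / 8           = 131072
//   card_size_in_words = 512 / 8            = 64
//   cards_required     = 131072 / 64 + 1    = 2049  // +1 for the guard card
//   _guard_index       = cards_required - 1 = 2048  // holds last_card
//   _last_valid_index  = _guard_index - 1   = 2047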
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -39,490 +39,38 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-size_t CardTableModRefBS::compute_byte_map_size()
-{
-  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
-                                        "uninitialized, check declaration order");
-  assert(_page_size != 0, "uninitialized, check declaration order");
-  const size_t granularity = os::vm_allocation_granularity();
-  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
-}
-
 CardTableModRefBS::CardTableModRefBS(
-  MemRegion whole_heap,
+  CardTable* card_table,
   const BarrierSet::FakeRtti& fake_rtti) :
   ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
-  _whole_heap(whole_heap),
-  _guard_index(0),
-  _guard_region(),
-  _last_valid_index(0),
-  _page_size(os::vm_page_size()),
-  _byte_map_size(0),
-  _covered(NULL),
-  _committed(NULL),
-  _cur_covered_regions(0),
-  _byte_map(NULL),
-  byte_map_base(NULL),
-  _defer_initial_card_mark(false)
-{
-  assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
-  assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
+  _defer_initial_card_mark(false),
+  _card_table(card_table)
+{}
 
-  assert(card_size <= 512, "card_size must be less than 512"); // why?
-
-  _covered   = new MemRegion[_max_covered_regions];
-  if (_covered == NULL) {
-    vm_exit_during_initialization("Could not allocate card table covered region set.");
-  }
-}
+CardTableModRefBS::CardTableModRefBS(CardTable* card_table) :
+  ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)),
+  _defer_initial_card_mark(false),
+  _card_table(card_table)
+{}
 
 void CardTableModRefBS::initialize() {
   initialize_deferred_card_mark_barriers();
-  _guard_index = cards_required(_whole_heap.word_size()) - 1;
-  _last_valid_index = _guard_index - 1;
-
-  _byte_map_size = compute_byte_map_size();
-
-  HeapWord* low_bound  = _whole_heap.start();
-  HeapWord* high_bound = _whole_heap.end();
-
-  _cur_covered_regions = 0;
-  _committed = new MemRegion[_max_covered_regions];
-  if (_committed == NULL) {
-    vm_exit_during_initialization("Could not allocate card table committed region set.");
-  }
-
-  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
-    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
-  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
-
-  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
-
-  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
-                       _page_size, heap_rs.base(), heap_rs.size());
-  if (!heap_rs.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for the "
-                                  "card marking array");
-  }
-
-  // The assembler store_check code will do an unsigned shift of the oop,
-  // then add it to byte_map_base, i.e.
-  //
-  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
-  _byte_map = (jbyte*) heap_rs.base();
-  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
-  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
-  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-
-  jbyte* guard_card = &_byte_map[_guard_index];
-  uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
-  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
-  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
-                            !ExecMem, "card table last card");
-  *guard_card = last_card;
-
-  log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
-  log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
-                  p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
-  log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
 }
 
 CardTableModRefBS::~CardTableModRefBS() {
-  if (_covered) {
-    delete[] _covered;
-    _covered = NULL;
-  }
-  if (_committed) {
-    delete[] _committed;
-    _committed = NULL;
-  }
-}
-
-int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
-  int i;
-  for (i = 0; i < _cur_covered_regions; i++) {
-    if (_covered[i].start() == base) return i;
-    if (_covered[i].start() > base) break;
-  }
-  // If we didn't find it, create a new one.
-  assert(_cur_covered_regions < _max_covered_regions,
-         "too many covered regions");
-  // Move the ones above up, to maintain sorted order.
-  for (int j = _cur_covered_regions; j > i; j--) {
-    _covered[j] = _covered[j-1];
-    _committed[j] = _committed[j-1];
-  }
-  int res = i;
-  _cur_covered_regions++;
-  _covered[res].set_start(base);
-  _covered[res].set_word_size(0);
-  jbyte* ct_start = byte_for(base);
-  uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
-  _committed[res].set_start((HeapWord*)ct_start_aligned);
-  _committed[res].set_word_size(0);
-  return res;
-}
-
-int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
-  for (int i = 0; i < _cur_covered_regions; i++) {
-    if (_covered[i].contains(addr)) {
-      return i;
-    }
-  }
-  assert(0, "address outside of heap?");
-  return -1;
-}
-
-HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
-  HeapWord* max_end = NULL;
-  for (int j = 0; j < ind; j++) {
-    HeapWord* this_end = _committed[j].end();
-    if (this_end > max_end) max_end = this_end;
-  }
-  return max_end;
-}
-
-MemRegion CardTableModRefBS::committed_unique_to_self(int self,
-                                                      MemRegion mr) const {
-  MemRegion result = mr;
-  for (int r = 0; r < _cur_covered_regions; r += 1) {
-    if (r != self) {
-      result = result.minus(_committed[r]);
-    }
-  }
-  // Never include the guard page.
-  result = result.minus(_guard_region);
-  return result;
+  delete _card_table;
 }
 
-void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
-  // We don't change the start of a region, only the end.
-  assert(_whole_heap.contains(new_region),
-           "attempt to cover area not in reserved area");
-  debug_only(verify_guard();)
-  // collided is true if the expansion would push into another committed region
-  debug_only(bool collided = false;)
-  int const ind = find_covering_region_by_base(new_region.start());
-  MemRegion const old_region = _covered[ind];
-  assert(old_region.start() == new_region.start(), "just checking");
-  if (new_region.word_size() != old_region.word_size()) {
-    // Commit new or uncommit old pages, if necessary.
-    MemRegion cur_committed = _committed[ind];
-    // Extend the end of this _committed region
-    // to cover the end of any lower _committed regions.
-    // This forms overlapping regions, but never interior regions.
-    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
-    if (max_prev_end > cur_committed.end()) {
-      cur_committed.set_end(max_prev_end);
-    }
-    // Align the end up to a page size (starts are already aligned).
-    jbyte* const new_end = byte_after(new_region.last());
-    HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
-    assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
-    // Check the other regions (excludes "ind") to ensure that
-    // the new_end_aligned does not intrude onto the committed
-    // space of another region.
-    int ri = 0;
-    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
-      if (new_end_aligned > _committed[ri].start()) {
-        assert(new_end_aligned <= _committed[ri].end(),
-               "An earlier committed region can't cover a later committed region");
-        // Any region containing the new end
-        // should start at or beyond the region found (ind)
-        // for the new end (committed regions are not expected to
-        // be proper subsets of other committed regions).
-        assert(_committed[ri].start() >= _committed[ind].start(),
-               "New end of committed region is inconsistent");
-        new_end_aligned = _committed[ri].start();
-        // new_end_aligned can be equal to the start of its
-        // committed region (i.e., of "ind") if a second
-        // region following "ind" also starts at the same location
-        // as "ind".
-        assert(new_end_aligned >= _committed[ind].start(),
-          "New end of committed region is before start");
-        debug_only(collided = true;)
-        // Should only collide with 1 region
-        break;
-      }
-    }
-#ifdef ASSERT
-    for (++ri; ri < _cur_covered_regions; ri++) {
-      assert(!_committed[ri].contains(new_end_aligned),
-        "New end of committed region is in a second committed region");
-    }
-#endif
-    // The guard page is always committed and should not be committed over.
-    // "guarded" is used for assertion checking below and recalls the fact
-    // that the would-be end of the new committed region would have
-    // penetrated the guard page.
-    HeapWord* new_end_for_commit = new_end_aligned;
-
-    DEBUG_ONLY(bool guarded = false;)
-    if (new_end_for_commit > _guard_region.start()) {
-      new_end_for_commit = _guard_region.start();
-      DEBUG_ONLY(guarded = true;)
-    }
-
-    if (new_end_for_commit > cur_committed.end()) {
-      // Must commit new pages.
-      MemRegion const new_committed =
-        MemRegion(cur_committed.end(), new_end_for_commit);
-
-      assert(!new_committed.is_empty(), "Region should not be empty here");
-      os::commit_memory_or_exit((char*)new_committed.start(),
-                                new_committed.byte_size(), _page_size,
-                                !ExecMem, "card table expansion");
-    // Use new_end_aligned (as opposed to new_end_for_commit) because
-    // the cur_committed region may include the guard region.
-    } else if (new_end_aligned < cur_committed.end()) {
-      // Must uncommit pages.
-      MemRegion const uncommit_region =
-        committed_unique_to_self(ind, MemRegion(new_end_aligned,
-                                                cur_committed.end()));
-      if (!uncommit_region.is_empty()) {
-        // It is not safe to uncommit cards if the boundary between
-        // the generations is moving.  A shrink can uncommit cards
-        // owned by generation A but being used by generation B.
-        if (!UseAdaptiveGCBoundary) {
-          if (!os::uncommit_memory((char*)uncommit_region.start(),
-                                   uncommit_region.byte_size())) {
-            assert(false, "Card table contraction failed");
-            // The call failed so don't change the end of the
-            // committed region.  This is better than taking the
-            // VM down.
-            new_end_aligned = _committed[ind].end();
-          }
-        } else {
-          new_end_aligned = _committed[ind].end();
-        }
-      }
-    }
-    // In any case, we can reset the end of the current committed entry.
-    _committed[ind].set_end(new_end_aligned);
-
-#ifdef ASSERT
-    // Check that the last card in the new region is committed according
-    // to the tables.
-    bool covered = false;
-    for (int cr = 0; cr < _cur_covered_regions; cr++) {
-      if (_committed[cr].contains(new_end - 1)) {
-        covered = true;
-        break;
-      }
-    }
-    assert(covered, "Card for end of new region not committed");
-#endif
-
-    // The default of 0 is not necessarily clean cards.
-    jbyte* entry;
-    if (old_region.last() < _whole_heap.start()) {
-      entry = byte_for(_whole_heap.start());
-    } else {
-      entry = byte_after(old_region.last());
-    }
-    assert(index_for(new_region.last()) <  _guard_index,
-      "The guard card will be overwritten");
-    // This line commented out cleans the newly expanded region and
-    // not the aligned up expanded region.
-    // jbyte* const end = byte_after(new_region.last());
-    jbyte* const end = (jbyte*) new_end_for_commit;
-    assert((end >= byte_after(new_region.last())) || collided || guarded,
-      "Expect to be beyond new region unless impacting another region");
-    // do nothing if we resized downward.
-#ifdef ASSERT
-    for (int ri = 0; ri < _cur_covered_regions; ri++) {
-      if (ri != ind) {
-        // The end of the new committed region should not
-        // be in any existing region unless it matches
-        // the start of the next region.
-        assert(!_committed[ri].contains(end) ||
-               (_committed[ri].start() == (HeapWord*) end),
-               "Overlapping committed regions");
-      }
-    }
-#endif
-    if (entry < end) {
-      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
-    }
-  }
-  // In any case, the covered size changes.
-  _covered[ind].set_word_size(new_region.word_size());
-
-  log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
-  log_trace(gc, barrier)("    _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
-                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
-  log_trace(gc, barrier)("    _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
-                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
-  log_trace(gc, barrier)("    byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
-                         p2i(byte_for(_covered[ind].start())),  p2i(byte_for(_covered[ind].last())));
-  log_trace(gc, barrier)("    addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
-                         p2i(addr_for((jbyte*) _committed[ind].start())),  p2i(addr_for((jbyte*) _committed[ind].last())));
-
-  // Touch the last card of the covered region to show that it
-  // is committed (or SEGV).
-  debug_only((void) (*byte_for(_covered[ind].last()));)
-  debug_only(verify_guard();)
-}
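
The heart of the commit-boundary computation in the removed
resize_covered_region(): align the requested end up to a page, then
clamp it below the always-committed guard page. A sketch under assumed
names (page_size is a power of two; guard_start is the guard region's
start):

#include <cstdint>

inline uintptr_t align_up_pow2(uintptr_t p, uintptr_t alignment) {
  return (p + alignment - 1) & ~(alignment - 1);   // alignment: power of two
}

inline uintptr_t commit_end_for(uintptr_t requested_end,
                                uintptr_t page_size,
                                uintptr_t guard_start) {
  uintptr_t end = align_up_pow2(requested_end, page_size);
  return (end > guard_start) ? guard_start : end;  // never commit the guard
}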
-
-// Note that these versions are precise!  The scanning code has to handle the
-// fact that the write barrier may be either precise or imprecise.
-
-void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
-  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
-  jbyte* cur  = byte_for(mr.start());
-  jbyte* last = byte_after(mr.last());
-  while (cur < last) {
-    *cur = dirty_card;
-    cur++;
-  }
+void CardTableModRefBS::write_ref_array_work(MemRegion mr) {
+  _card_table->dirty_MemRegion(mr);
 }
 
 void CardTableModRefBS::invalidate(MemRegion mr) {
-  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
-  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
-  for (int i = 0; i < _cur_covered_regions; i++) {
-    MemRegion mri = mr.intersection(_covered[i]);
-    if (!mri.is_empty()) dirty_MemRegion(mri);
-  }
-}
-
-void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
-  // Be conservative: only clean cards entirely contained within the
-  // region.
-  jbyte* cur;
-  if (mr.start() == _whole_heap.start()) {
-    cur = byte_for(mr.start());
-  } else {
-    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
-    cur = byte_after(mr.start() - 1);
-  }
-  jbyte* last = byte_after(mr.last());
-  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
-}
-
-void CardTableModRefBS::clear(MemRegion mr) {
-  for (int i = 0; i < _cur_covered_regions; i++) {
-    MemRegion mri = mr.intersection(_covered[i]);
-    if (!mri.is_empty()) clear_MemRegion(mri);
-  }
-}
-
-void CardTableModRefBS::dirty(MemRegion mr) {
-  jbyte* first = byte_for(mr.start());
-  jbyte* last  = byte_after(mr.last());
-  memset(first, dirty_card, last-first);
-}
-
-// Unlike several other card table methods, dirty_card_iterate()
-// iterates over dirty card ranges in increasing address order.
-void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
-                                           MemRegionClosure* cl) {
-  for (int i = 0; i < _cur_covered_regions; i++) {
-    MemRegion mri = mr.intersection(_covered[i]);
-    if (!mri.is_empty()) {
-      jbyte *cur_entry, *next_entry, *limit;
-      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
-           cur_entry <= limit;
-           cur_entry  = next_entry) {
-        next_entry = cur_entry + 1;
-        if (*cur_entry == dirty_card) {
-          size_t dirty_cards;
-          // Accumulate maximal dirty card range, starting at cur_entry
-          for (dirty_cards = 1;
-               next_entry <= limit && *next_entry == dirty_card;
-               dirty_cards++, next_entry++);
-          MemRegion cur_cards(addr_for(cur_entry),
-                              dirty_cards*card_size_in_words);
-          cl->do_MemRegion(cur_cards);
-        }
-      }
-    }
-  }
+  _card_table->invalidate(mr);
 }
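
The removed dirty_card_iterate() and dirty_card_range_after_reset()
both accumulate maximal runs of dirty entries before handing them to a
closure. A stand-alone illustration of that run accumulation (DIRTY and
the callback shape are assumptions, not patch code):

#include <cstddef>
#include <cstdint>

const uint8_t DIRTY = 0;   // stand-in for dirty_card

template <typename Callback>
void for_each_dirty_run(const uint8_t* cards, size_t n, Callback cb) {
  for (size_t i = 0; i < n; ) {
    if (cards[i] != DIRTY) { ++i; continue; }
    size_t run_start = i;
    while (i < n && cards[i] == DIRTY) ++i;   // extend the run maximally
    cb(run_start, i - run_start);             // one maximal run per callback
  }
}

Usage would look like for_each_dirty_run(map, n, [](size_t s, size_t len)
{ /* convert to a MemRegion and apply the closure */ });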
 
-MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
-                                                          bool reset,
-                                                          int reset_val) {
-  for (int i = 0; i < _cur_covered_regions; i++) {
-    MemRegion mri = mr.intersection(_covered[i]);
-    if (!mri.is_empty()) {
-      jbyte* cur_entry, *next_entry, *limit;
-      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
-           cur_entry <= limit;
-           cur_entry  = next_entry) {
-        next_entry = cur_entry + 1;
-        if (*cur_entry == dirty_card) {
-          size_t dirty_cards;
-          // Accumulate maximal dirty card range, starting at cur_entry
-          for (dirty_cards = 1;
-               next_entry <= limit && *next_entry == dirty_card;
-               dirty_cards++, next_entry++);
-          MemRegion cur_cards(addr_for(cur_entry),
-                              dirty_cards*card_size_in_words);
-          if (reset) {
-            for (size_t i = 0; i < dirty_cards; i++) {
-              cur_entry[i] = reset_val;
-            }
-          }
-          return cur_cards;
-        }
-      }
-    }
-  }
-  return MemRegion(mr.end(), mr.end());
-}
-
-uintx CardTableModRefBS::ct_max_alignment_constraint() {
-  return card_size * os::vm_page_size();
-}
-
-void CardTableModRefBS::verify_guard() {
-  // For product build verification
-  guarantee(_byte_map[_guard_index] == last_card,
-            "card table guard has been modified");
-}
-
-void CardTableModRefBS::verify() {
-  verify_guard();
-}
-
-#ifndef PRODUCT
-void CardTableModRefBS::verify_region(MemRegion mr,
-                                      jbyte val, bool val_equals) {
-  jbyte* start    = byte_for(mr.start());
-  jbyte* end      = byte_for(mr.last());
-  bool failures = false;
-  for (jbyte* curr = start; curr <= end; ++curr) {
-    jbyte curr_val = *curr;
-    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
-    if (failed) {
-      if (!failures) {
-        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
-        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
-        failures = true;
-      }
-      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
-                            p2i(curr), p2i(addr_for(curr)),
-                            p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
-                            (int) curr_val);
-    }
-  }
-  guarantee(!failures, "there should not have been any failures");
-}
-
-void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
-  verify_region(mr, dirty_card, false /* val_equals */);
-}
-
-void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
-  verify_region(mr, dirty_card, true /* val_equals */);
-}
-#endif
-
 void CardTableModRefBS::print_on(outputStream* st) const {
-  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
-               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
+  _card_table->print_on(st);
 }
 
 // Helper for ReduceInitialCardMarks. For performance,
@@ -573,7 +121,7 @@
   }
   // If a previous card-mark was deferred, flush it now.
   flush_deferred_card_mark_barrier(thread);
-  if (new_obj->is_typeArray() || is_in_young(new_obj)) {
+  if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
     // Arrays of non-references don't need a post-barrier.
     // The deferred_card_mark region should be empty
     // following the flush above.
@@ -586,7 +134,7 @@
       thread->set_deferred_card_mark(mr);
     } else {
       // Do the card mark
-      write_region(mr);
+      invalidate(mr);
     }
   }
 }
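
The branch structure above, restated as a hedged decision helper (the
names are stand-ins; the patch's logic lives in on_slowpath_allocation_exit):
a freshly allocated object needs a post-barrier only if it is neither a
primitive array nor young, and the mark is either deferred on the thread
or issued immediately.

enum class MarkAction { None, Defer, MarkNow };

inline MarkAction post_allocation_action(bool is_type_array,
                                         bool in_young,
                                         bool defer_initial_card_mark) {
  if (is_type_array || in_young) return MarkAction::None;  // no post-barrier
  return defer_initial_card_mark ? MarkAction::Defer : MarkAction::MarkNow;
}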
@@ -610,7 +158,7 @@
     {
       // Verify that the storage points to a parsable object in heap
       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
-      assert(!is_in_young(old_obj),
+      assert(!_card_table->is_in_young(old_obj),
              "Else should have been filtered in on_slowpath_allocation_exit()");
       assert(oopDesc::is_oop(old_obj, true), "Not an oop");
       assert(deferred.word_size() == (size_t)(old_obj->size()),
@@ -633,3 +181,7 @@
   // processing the card-table (or other remembered set).
   flush_deferred_card_mark_barrier(thread);
 }
+
+bool CardTableModRefBS::card_mark_must_follow_store() const {
+  return _card_table->scanned_concurrently();
+}
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -28,6 +28,8 @@
 #include "gc/shared/modRefBarrierSet.hpp"
 #include "utilities/align.hpp"
 
+class CardTable;
+
 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
@@ -45,162 +47,29 @@
   friend class VMStructs;
  protected:
 
-  enum CardValues {
-    clean_card                  = -1,
-    // The mask contains zeros in places for all other values.
-    clean_card_mask             = clean_card - 31,
-
-    dirty_card                  =  0,
-    precleaned_card             =  1,
-    claimed_card                =  2,
-    deferred_card               =  4,
-    last_card                   =  8,
-    CT_MR_BS_last_reserved      = 16
-  };
-
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
   // or INCLUDE_JVMCI is being used
-  bool _defer_initial_card_mark;
-
-  // a word's worth (row) of clean card values
-  static const intptr_t clean_card_row = (intptr_t)(-1);
-
-  // The declaration order of these const fields is important; see the
-  // constructor before changing.
-  const MemRegion _whole_heap;       // the region covered by the card table
-  size_t          _guard_index;      // index of very last element in the card
-                                     // table; it is set to a guard value
-                                     // (last_card) and should never be modified
-  size_t          _last_valid_index; // index of the last valid element
-  const size_t    _page_size;        // page size used when mapping _byte_map
-  size_t          _byte_map_size;    // in bytes
-  jbyte*          _byte_map;         // the card marking array
-
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed. At most one covered
-  // region per generation is needed.
-  static const int _max_covered_regions = 2;
-
-  int _cur_covered_regions;
-
-  // The covered regions should be in address order.
-  MemRegion* _covered;
-  // The committed regions correspond one-to-one to the covered regions.
-  // They represent the card-table memory that has been committed to service
-  // the corresponding covered region.  It may be that committed region for
-  // one covered region corresponds to a larger region because of page-size
-  // roundings.  Thus, a committed region for one covered region may
-  // actually extend onto the card-table space for the next covered region.
-  MemRegion* _committed;
-
-  // The last card is a guard card, and we commit the page for it so
-  // we can use the card for verification purposes. We make sure we never
-  // uncommit the MemRegion for that page.
-  MemRegion _guard_region;
-
-  inline size_t compute_byte_map_size();
+  bool       _defer_initial_card_mark;
+  CardTable* _card_table;
 
-  // Finds and returns the index of the region, if any, to which the given
-  // region would be contiguous.  If none exists, assigns a new region and
-  // returns its index.  Requires that no more than the maximum number of
-  // covered regions defined in the constructor are ever in use.
-  int find_covering_region_by_base(HeapWord* base);
-
-  // Same as above, but finds the region containing the given address
-  // instead of starting at a given base address.
-  int find_covering_region_containing(HeapWord* addr);
-
-  // Resize one of the regions covered by the remembered set.
-  virtual void resize_covered_region(MemRegion new_region);
-
-  // Returns the leftmost end of a committed region corresponding to a
-  // covered region before covered region "ind", or else "NULL" if "ind" is
-  // the first covered region.
-  HeapWord* largest_prev_committed_end(int ind) const;
-
-  // Returns the part of the region mr that doesn't intersect with
-  // any committed region other than self.  Used to prevent uncommitting
-  // regions that are also committed by other regions.  Also protects
-  // against uncommitting the guard region.
-  MemRegion committed_unique_to_self(int self, MemRegion mr) const;
-
-  // Mapping from address to card marking array entry
-  jbyte* byte_for(const void* p) const {
-    assert(_whole_heap.contains(p),
-           "Attempt to access p = " PTR_FORMAT " out of bounds of "
-           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
-    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
-           "out of bounds accessor for card marking array");
-    return result;
-  }
-
-  // The card table byte one after the card marking array
-  // entry for argument address. Typically used for higher bounds
-  // for loops iterating through the card table.
-  jbyte* byte_after(const void* p) const {
-    return byte_for(p) + 1;
-  }
-
-  // Dirty the bytes corresponding to "mr" (not all of which must be
-  // covered.)
-  void dirty_MemRegion(MemRegion mr);
-
-  // Clear (to clean_card) the bytes entirely contained within "mr" (not
-  // all of which must be covered.)
-  void clear_MemRegion(MemRegion mr);
+  CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
 
  public:
-  // Constants
-  enum SomePublicConstants {
-    card_shift                  = 9,
-    card_size                   = 1 << card_shift,
-    card_size_in_words          = card_size / sizeof(HeapWord)
-  };
+  CardTableModRefBS(CardTable* card_table);
+  ~CardTableModRefBS();
 
-  static int clean_card_val()      { return clean_card; }
-  static int clean_card_mask_val() { return clean_card_mask; }
-  static int dirty_card_val()      { return dirty_card; }
-  static int claimed_card_val()    { return claimed_card; }
-  static int precleaned_card_val() { return precleaned_card; }
-  static int deferred_card_val()   { return deferred_card; }
+  CardTable* card_table() const { return _card_table; }
 
   virtual void initialize();
 
-  // *** Barrier set functions.
-
-  // Initialization utilities; covered_words is the size of the covered region
-  // in, um, words.
-  inline size_t cards_required(size_t covered_words) {
-    // Add one for a guard card, used to detect errors.
-    const size_t words = align_up(covered_words, card_size_in_words);
-    return words / card_size_in_words + 1;
+  void write_region(MemRegion mr) {
+    invalidate(mr);
   }
 
  protected:
-  CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
-  ~CardTableModRefBS();
+  void write_ref_array_work(MemRegion mr);
 
  public:
-  void write_region(MemRegion mr) {
-    dirty_MemRegion(mr);
-  }
-
- protected:
-  void write_ref_array_work(MemRegion mr) {
-    dirty_MemRegion(mr);
-  }
-
- public:
-  bool is_aligned(HeapWord* addr) {
-    return is_card_aligned(addr);
-  }
-
-  // *** Card-table-barrier-specific things.
-
   // Record a reference update. Note that these versions are precise!
   // The scanning code has to handle the fact that the write barrier may be
   // either precise or imprecise. We make non-virtual inline variants of
@@ -208,115 +77,7 @@
   template <DecoratorSet decorators, typename T>
   void write_ref_field_post(T* field, oop newVal);
 
-  // These are used by G1, when it uses the card table as a temporary data
-  // structure for card claiming.
-  bool is_card_dirty(size_t card_index) {
-    return _byte_map[card_index] == dirty_card_val();
-  }
-
-  void mark_card_dirty(size_t card_index) {
-    _byte_map[card_index] = dirty_card_val();
-  }
-
-  bool is_card_clean(size_t card_index) {
-    return _byte_map[card_index] == clean_card_val();
-  }
-
-  // Card marking array base (adjusted for heap low boundary)
-  // This would be the 0th element of _byte_map, if the heap started at 0x0.
-  // But since the heap starts at some higher address, this points to somewhere
-  // before the beginning of the actual _byte_map.
-  jbyte* byte_map_base;
-
-  // Return true if "p" is at the start of a card.
-  bool is_card_aligned(HeapWord* p) {
-    jbyte* pcard = byte_for(p);
-    return (addr_for(pcard) == p);
-  }
-
-  HeapWord* align_to_card_boundary(HeapWord* p) {
-    jbyte* pcard = byte_for(p + card_size_in_words - 1);
-    return addr_for(pcard);
-  }
-
-  // The kinds of precision a CardTableModRefBS may offer.
-  enum PrecisionStyle {
-    Precise,
-    ObjHeadPreciseArray
-  };
-
-  // Tells what style of precision this card table offers.
-  PrecisionStyle precision() {
-    return ObjHeadPreciseArray; // Only one supported for now.
-  }
-
-  // ModRefBS functions.
   virtual void invalidate(MemRegion mr);
-  void clear(MemRegion mr);
-  void dirty(MemRegion mr);
-
-  // *** Card-table-RemSet-specific things.
-
-  static uintx ct_max_alignment_constraint();
-
-  // Apply closure "cl" to the dirty cards containing some part of
-  // MemRegion "mr".
-  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
-
-  // Return the MemRegion corresponding to the first maximal run
-  // of dirty cards lying completely within MemRegion mr.
-  // If reset is "true", then sets those card table entries to the given
-  // value.
-  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
-                                         int reset_val);
-
-  // Provide read-only access to the card table array.
-  const jbyte* byte_for_const(const void* p) const {
-    return byte_for(p);
-  }
-  const jbyte* byte_after_const(const void* p) const {
-    return byte_after(p);
-  }
-
-  // Mapping from card marking array entry to address of first word
-  HeapWord* addr_for(const jbyte* p) const {
-    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
-           "out of bounds access to card marking array. p: " PTR_FORMAT
-           " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
-           p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
-    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
-    HeapWord* result = (HeapWord*) (delta << card_shift);
-    assert(_whole_heap.contains(result),
-           "Returning result = " PTR_FORMAT " out of bounds of "
-           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    return result;
-  }
-
-  // Mapping from address to card marking array index.
-  size_t index_for(void* p) {
-    assert(_whole_heap.contains(p),
-           "Attempt to access p = " PTR_FORMAT " out of bounds of "
-           " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
-    return byte_for(p) - _byte_map;
-  }
-
-  const jbyte* byte_for_index(const size_t card_index) const {
-    return _byte_map + card_index;
-  }
-
-  // Print a description of the memory for the barrier set
-  virtual void print_on(outputStream* st) const;
-
-  void verify();
-  void verify_guard();
-
-  // val_equals -> it will check that all cards covered by mr equal val
-  // !val_equals -> it will check that all cards covered by mr do not equal val
-  void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
-  void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
-  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
 
   // ReduceInitialCardMarks
   void initialize_deferred_card_mark_barriers();
@@ -352,15 +113,15 @@
   // barrier until the next slow-path allocation or gc-related safepoint.)
   // This interface answers whether a particular barrier type needs the card
   // mark to be thus strictly sequenced after the stores.
-  virtual bool card_mark_must_follow_store() const = 0;
-
-  virtual bool is_in_young(oop obj) const = 0;
+  virtual bool card_mark_must_follow_store() const;
 
   virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
   virtual void on_thread_detach(JavaThread* thread);
 
   virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
 
+  virtual void print_on(outputStream* st) const;
+
   template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
   class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
 };
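
The refactor in miniature (hypothetical simplified types, not patch
code): the barrier set keeps only a pointer to a card table and
forwards mod-ref operations to it, so collectors now vary the table
rather than subclassing the barrier.

class Table {
 public:
  virtual ~Table() {}
  virtual void invalidate_range(char* from, char* to) = 0;
};

class Barrier {
  Table* _table;   // owned: freed in the destructor, as in ~CardTableModRefBS()
 public:
  explicit Barrier(Table* t) : _table(t) {}
  ~Barrier() { delete _table; }
  // Mod-ref operations forward to the table, as in the patch.
  void write_region(char* from, char* to) { _table->invalidate_range(from, to); }
};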
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,17 +26,18 @@
 #define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
 
 #include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "runtime/orderAccess.inline.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) {
-  volatile jbyte* byte = byte_for(field);
+  volatile jbyte* byte = _card_table->byte_for(field);
   if (UseConcMarkSweepGC) {
     // Perform a releasing store if using CMS so that it may
     // scan and clear the cards concurrently during pre-cleaning.
-    OrderAccess::release_store(byte, jbyte(dirty_card));
+    OrderAccess::release_store(byte, CardTable::dirty_card_val());
   } else {
-    *byte = dirty_card;
+    *byte = CardTable::dirty_card_val();
   }
 }
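
Why the CMS branch above uses a releasing store, as a stand-alone
analogue (std::atomic replaces OrderAccess; the globals and the value 0
for dirty are assumptions): when a precleaning thread may read the card
and then the field it guards, the field store must become visible
before the card turns dirty.

#include <atomic>
#include <cstdint>

std::atomic<uint8_t>   card{1};    // one card entry; 1 = clean, 0 = dirty
std::atomic<uintptr_t> field{0};   // the reference field the card covers

void post_barrier(uintptr_t new_val, bool concurrent_scanner) {
  field.store(new_val, std::memory_order_relaxed);
  if (concurrent_scanner) {
    card.store(0, std::memory_order_release);  // orders field before card
  } else {
    card.store(0, std::memory_order_relaxed);  // plain store suffices
  }
}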
 
--- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "memory/allocation.inline.hpp"
-#include "gc/shared/space.inline.hpp"
-
-CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
-  CardTableModRefBS(
-    whole_heap,
-    BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
-  // LNC functionality
-  _lowest_non_clean(NULL),
-  _lowest_non_clean_chunk_size(NULL),
-  _lowest_non_clean_base_chunk_index(NULL),
-  _last_LNC_resizing_collection(NULL)
-{ }
-
-void CardTableModRefBSForCTRS::initialize() {
-  CardTableModRefBS::initialize();
-  _lowest_non_clean =
-    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
-  _lowest_non_clean_chunk_size =
-    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
-  _lowest_non_clean_base_chunk_index =
-    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
-  _last_LNC_resizing_collection =
-    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
-  if (_lowest_non_clean == NULL
-      || _lowest_non_clean_chunk_size == NULL
-      || _lowest_non_clean_base_chunk_index == NULL
-      || _last_LNC_resizing_collection == NULL)
-    vm_exit_during_initialization("couldn't allocate an LNC array.");
-  for (int i = 0; i < _max_covered_regions; i++) {
-    _lowest_non_clean[i] = NULL;
-    _lowest_non_clean_chunk_size[i] = 0;
-    _last_LNC_resizing_collection[i] = -1;
-  }
-}
-
-CardTableModRefBSForCTRS::~CardTableModRefBSForCTRS() {
-  if (_lowest_non_clean) {
-    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
-    _lowest_non_clean = NULL;
-  }
-  if (_lowest_non_clean_chunk_size) {
-    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
-    _lowest_non_clean_chunk_size = NULL;
-  }
-  if (_lowest_non_clean_base_chunk_index) {
-    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
-    _lowest_non_clean_base_chunk_index = NULL;
-  }
-  if (_last_LNC_resizing_collection) {
-    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
-    _last_LNC_resizing_collection = NULL;
-  }
-}
-
-bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
-  return
-    card_is_dirty_wrt_gen_iter(cv) ||
-    _rs->is_prev_nonclean_card_val(cv);
-}
-
-bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
-  return
-    cv != clean_card &&
-    (card_is_dirty_wrt_gen_iter(cv) ||
-     CardTableRS::youngergen_may_have_been_dirty(cv));
-}
-
-void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
-  Space* sp,
-  MemRegion mr,
-  OopsInGenClosure* cl,
-  CardTableRS* ct,
-  uint n_threads)
-{
-  if (!mr.is_empty()) {
-    if (n_threads > 0) {
-#if INCLUDE_ALL_GCS
-      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
-#else  // INCLUDE_ALL_GCS
-      fatal("Parallel gc not supported here.");
-#endif // INCLUDE_ALL_GCS
-    } else {
-      // clear_cl finds contiguous dirty ranges of cards to process and clear.
-
-      // This is the single-threaded version used by DefNew.
-      const bool parallel = false;
-
-      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
-      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
-      clear_cl.do_MemRegion(mr);
-    }
-  }
-}
-
-bool CardTableModRefBSForCTRS::is_in_young(oop obj) const {
-  return GenCollectedHeap::heap()->is_in_young(obj);
-}
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,41 +75,6 @@
 }
 
 
-CardTableRS::CardTableRS(MemRegion whole_heap) :
-  _bs(NULL),
-  _cur_youngergen_card_val(youngergenP1_card)
-{
-  _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
-  _ct_bs->initialize();
-  set_bs(_ct_bs);
-  // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
-  // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
-  uint max_gens = 2;
-  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
-                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-  if (_last_cur_val_in_gen == NULL) {
-    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
-  }
-  for (uint i = 0; i < max_gens + 1; i++) {
-    _last_cur_val_in_gen[i] = clean_card_val();
-  }
-  _ct_bs->set_CTRS(this);
-}
-
-CardTableRS::~CardTableRS() {
-  if (_ct_bs) {
-    delete _ct_bs;
-    _ct_bs = NULL;
-  }
-  if (_last_cur_val_in_gen) {
-    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
-  }
-}
-
-void CardTableRS::resize_covered_region(MemRegion new_region) {
-  _ct_bs->resize_covered_region(new_region);
-}
-
 jbyte CardTableRS::find_unused_youngergenP_card_value() {
   for (jbyte v = youngergenP1_card;
        v < cur_youngergen_and_prev_nonclean_card;
@@ -247,7 +212,7 @@
       // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
       if (is_word_aligned(cur_entry)) {
         jbyte* cur_row = cur_entry - BytesPerWord;
-        while (cur_row >= limit && *((intptr_t*)cur_row) ==  CardTableRS::clean_card_row()) {
+        while (cur_row >= limit && *((intptr_t*)cur_row) ==  CardTableRS::clean_card_row_val()) {
           cur_row -= BytesPerWord;
         }
         cur_entry = cur_row + BytesPerWord;
@@ -283,7 +248,7 @@
 // cur-younger-gen                ==> cur_younger_gen
 // cur_youngergen_and_prev_nonclean_card ==> no change.
 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
-  volatile jbyte* entry = _ct_bs->byte_for(field);
+  volatile jbyte* entry = byte_for(field);
   do {
     jbyte entry_val = *entry;
     // We put this first because it's probably the most common case.
@@ -341,7 +306,7 @@
     ShouldNotReachHere();
   }
 #endif
-  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
+  non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 }
 
 void CardTableRS::clear_into_younger(Generation* old_gen) {
@@ -642,5 +607,115 @@
   // generational heaps.
   VerifyCTGenClosure blk(this);
   GenCollectedHeap::heap()->generation_iterate(&blk, false);
-  _ct_bs->verify();
+  CardTable::verify();
+}
+
+CardTableRS::CardTableRS(MemRegion whole_heap) :
+  CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled),
+  _cur_youngergen_card_val(youngergenP1_card),
+  // LNC functionality
+  _lowest_non_clean(NULL),
+  _lowest_non_clean_chunk_size(NULL),
+  _lowest_non_clean_base_chunk_index(NULL),
+  _last_LNC_resizing_collection(NULL)
+{
+  // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
+  // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
+  uint max_gens = 2;
+  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
+                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
+  if (_last_cur_val_in_gen == NULL) {
+    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
+  }
+  for (uint i = 0; i < max_gens + 1; i++) {
+    _last_cur_val_in_gen[i] = clean_card_val();
+  }
+}
+
+CardTableRS::~CardTableRS() {
+  if (_last_cur_val_in_gen) {
+    FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
+    _last_cur_val_in_gen = NULL;
+  }
+  if (_lowest_non_clean) {
+    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
+    _lowest_non_clean = NULL;
+  }
+  if (_lowest_non_clean_chunk_size) {
+    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
+    _lowest_non_clean_chunk_size = NULL;
+  }
+  if (_lowest_non_clean_base_chunk_index) {
+    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
+    _lowest_non_clean_base_chunk_index = NULL;
+  }
+  if (_last_LNC_resizing_collection) {
+    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
+    _last_LNC_resizing_collection = NULL;
+  }
 }
+
+void CardTableRS::initialize() {
+  CardTable::initialize();
+  _lowest_non_clean =
+    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
+  _lowest_non_clean_chunk_size =
+    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
+  _lowest_non_clean_base_chunk_index =
+    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
+  _last_LNC_resizing_collection =
+    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
+  if (_lowest_non_clean == NULL
+      || _lowest_non_clean_chunk_size == NULL
+      || _lowest_non_clean_base_chunk_index == NULL
+      || _last_LNC_resizing_collection == NULL)
+    vm_exit_during_initialization("couldn't allocate an LNC array.");
+  for (int i = 0; i < _max_covered_regions; i++) {
+    _lowest_non_clean[i] = NULL;
+    _lowest_non_clean_chunk_size[i] = 0;
+    _last_LNC_resizing_collection[i] = -1;
+  }
+}
+
+bool CardTableRS::card_will_be_scanned(jbyte cv) {
+  return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
+}
+
+bool CardTableRS::card_may_have_been_dirty(jbyte cv) {
+  return
+    cv != clean_card &&
+    (card_is_dirty_wrt_gen_iter(cv) ||
+     CardTableRS::youngergen_may_have_been_dirty(cv));
+}
+
+void CardTableRS::non_clean_card_iterate_possibly_parallel(
+  Space* sp,
+  MemRegion mr,
+  OopsInGenClosure* cl,
+  CardTableRS* ct,
+  uint n_threads)
+{
+  if (!mr.is_empty()) {
+    if (n_threads > 0) {
+#if INCLUDE_ALL_GCS
+      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
+#else  // INCLUDE_ALL_GCS
+      fatal("Parallel gc not supported here.");
+#endif // INCLUDE_ALL_GCS
+    } else {
+      // clear_cl finds contiguous dirty ranges of cards to process and clear.
+
+      // This is the single-threaded version used by DefNew.
+      const bool parallel = false;
+
+      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
+      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
+
+      clear_cl.do_MemRegion(mr);
+    }
+  }
+}
+
+bool CardTableRS::is_in_young(oop obj) const {
+  return GenCollectedHeap::heap()->is_in_young(obj);
+}
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,11 @@
 #ifndef SHARE_VM_GC_SHARED_CARDTABLERS_HPP
 #define SHARE_VM_GC_SHARED_CARDTABLERS_HPP
 
-#include "gc/shared/cardTableModRefBSForCTRS.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "memory/memRegion.hpp"
+#include "oops/oop.hpp"
 
+class DirtyCardToOopClosure;
 class Generation;
 class Space;
 class OopsInGenClosure;
@@ -46,44 +48,28 @@
 // This RemSet uses a card table both as shared data structure
 // for a mod ref barrier set and for the rem set information.
 
-class CardTableRS: public CHeapObj<mtGC> {
+class CardTableRS: public CardTable {
   friend class VMStructs;
   // Below are private classes used in impl.
   friend class VerifyCTSpaceClosure;
   friend class ClearNoncleanCardWrapper;
 
-  static jbyte clean_card_val() {
-    return CardTableModRefBSForCTRS::clean_card;
-  }
-
-  static intptr_t clean_card_row() {
-    return CardTableModRefBSForCTRS::clean_card_row;
-  }
-
-  static bool
-  card_is_dirty_wrt_gen_iter(jbyte cv) {
-    return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
-  }
-
   CLDRemSet _cld_rem_set;
-  BarrierSet* _bs;
-
-  CardTableModRefBSForCTRS* _ct_bs;
 
   void verify_space(Space* s, HeapWord* gen_start);
 
   enum ExtendedCardValue {
-    youngergen_card   = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 1,
+    youngergen_card   = CT_MR_BS_last_reserved + 1,
     // These are for parallel collection.
     // There are three P (parallel) youngergen card values.  In general, this
     // needs to be more than the number of generations (including the perm
     // gen) that might have younger_refs_do invoked on them separately.  So
     // if we add more gens, we have to add more values.
-    youngergenP1_card  = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 2,
-    youngergenP2_card  = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 3,
-    youngergenP3_card  = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 4,
+    youngergenP1_card  = CT_MR_BS_last_reserved + 2,
+    youngergenP2_card  = CT_MR_BS_last_reserved + 3,
+    youngergenP3_card  = CT_MR_BS_last_reserved + 4,
     cur_youngergen_and_prev_nonclean_card =
-      CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 5
+      CT_MR_BS_last_reserved + 5
   };
 
   // An array that contains, for each generation, the card table value last
@@ -116,16 +102,8 @@
   CardTableRS(MemRegion whole_heap);
   ~CardTableRS();
 
-  // Return the barrier set associated with "this."
-  BarrierSet* bs() { return _bs; }
-
-  // Set the barrier set.
-  void set_bs(BarrierSet* bs) { _bs = bs; }
-
   CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
 
-  CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }
-
   void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
 
   // Override.
@@ -137,7 +115,7 @@
   void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);
 
   void inline_write_ref_field_gc(void* field, oop new_val) {
-    jbyte* byte = _ct_bs->byte_for(field);
+    jbyte* byte = byte_for(field);
     *byte = youngergen_card;
   }
   void write_ref_field_gc_work(void* field, oop new_val) {
@@ -149,30 +127,17 @@
   // a younger card in the current collection.
   virtual void write_ref_field_gc_par(void* field, oop new_val);
 
-  void resize_covered_region(MemRegion new_region);
-
   bool is_aligned(HeapWord* addr) {
-    return _ct_bs->is_card_aligned(addr);
+    return is_card_aligned(addr);
   }
 
   void verify();
+  void initialize();
 
-  void clear(MemRegion mr) { _ct_bs->clear(mr); }
   void clear_into_younger(Generation* old_gen);
 
-  void invalidate(MemRegion mr) {
-    _ct_bs->invalidate(mr);
-  }
   void invalidate_or_clear(Generation* old_gen);
 
-  static uintx ct_max_alignment_constraint() {
-    return CardTableModRefBSForCTRS::ct_max_alignment_constraint();
-  }
-
-  jbyte* byte_for(void* p)     { return _ct_bs->byte_for(p); }
-  jbyte* byte_after(void* p)   { return _ct_bs->byte_after(p); }
-  HeapWord* addr_for(jbyte* p) { return _ct_bs->addr_for(p); }
-
   bool is_prev_nonclean_card_val(jbyte v) {
     return
       youngergen_card <= v &&
@@ -184,6 +149,94 @@
     return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card;
   }
 
+  // *** Support for parallel card scanning.
+
+  // dirty and precleaned are equivalent wrt younger_refs_iter.
+  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
+    return cv == dirty_card || cv == precleaned_card;
+  }
+
+  // Returns "true" iff the value "cv" will cause the card containing it
+  // to be scanned in the current traversal.  May be overridden by
+  // subtypes.
+  bool card_will_be_scanned(jbyte cv);
+
+  // Returns "true" iff the value "cv" may have represented a dirty card at
+  // some point.
+  bool card_may_have_been_dirty(jbyte cv);
+
+  // Iterate over the portion of the card-table which covers the given
+  // region mr in the given space and apply cl to any dirty sub-regions
+  // of mr. Clears the dirty cards as they are processed.
+  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
+                                                OopsInGenClosure* cl, CardTableRS* ct,
+                                                uint n_threads);
+
+  // Work method used to implement non_clean_card_iterate_possibly_parallel()
+  // above in the parallel case.
+  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+                                            OopsInGenClosure* cl, CardTableRS* ct,
+                                            uint n_threads);
+
+  // This is an array, one element per covered region of the card table.
+  // Each entry is itself an array, with one element per chunk in the
+  // covered region.  Each entry of these arrays is the lowest non-clean
+  // card of the corresponding chunk containing part of an object from the
+  // previous chunk, or else NULL.
+  typedef jbyte*  CardPtr;
+  typedef CardPtr* CardArr;
+  CardArr* _lowest_non_clean;
+  size_t*  _lowest_non_clean_chunk_size;
+  uintptr_t* _lowest_non_clean_base_chunk_index;
+  volatile int* _last_LNC_resizing_collection;
+
+  // Initializes "lowest_non_clean" to point to the array for the region
+  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
+  // index corresponding to the first element of that array.
+  // Ensures that these arrays are of sufficient size, allocating if necessary.
+  // May be called by several threads concurrently.
+  void get_LNC_array_for_space(Space* sp,
+                               jbyte**& lowest_non_clean,
+                               uintptr_t& lowest_non_clean_base_chunk_index,
+                               size_t& lowest_non_clean_chunk_size);
+
+  // Returns the number of chunks necessary to cover "mr".
+  size_t chunks_to_cover(MemRegion mr) {
+    return (size_t)(addr_to_chunk_index(mr.last()) -
+                    addr_to_chunk_index(mr.start()) + 1);
+  }
+
+  // Returns the index of the chunk in a stride which
+  // covers the given address.
+  uintptr_t addr_to_chunk_index(const void* addr) {
+    uintptr_t card = (uintptr_t) byte_for(addr);
+    return card / ParGCCardsPerStrideChunk;
+  }
+
+  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
+  // to the cards in the stride (of n_strides) within the given space.
+  void process_stride(Space* sp,
+                      MemRegion used,
+                      jint stride, int n_strides,
+                      OopsInGenClosure* cl,
+                      CardTableRS* ct,
+                      jbyte** lowest_non_clean,
+                      uintptr_t lowest_non_clean_base_chunk_index,
+                      size_t lowest_non_clean_chunk_size);
+
+  // Makes sure that chunk boundaries are handled appropriately, by
+  // adjusting the min_done of dcto_cl, and by using a special card-table
+  // value to indicate how min_done should be set.
+  void process_chunk_boundaries(Space* sp,
+                                DirtyCardToOopClosure* dcto_cl,
+                                MemRegion chunk_mr,
+                                MemRegion used,
+                                jbyte** lowest_non_clean,
+                                uintptr_t lowest_non_clean_base_chunk_index,
+                                size_t    lowest_non_clean_chunk_size);
+
+  virtual bool is_in_young(oop obj) const;
+
 };
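
The stride/chunk arithmetic declared above, in a hedged stand-alone
form (CARDS_PER_CHUNK stands in for ParGCCardsPerStrideChunk;
round-robin assignment of chunks to strides is an assumption drawn from
the process_stride() interface):

#include <cstdint>

const uintptr_t CARDS_PER_CHUNK = 256;  // stand-in for ParGCCardsPerStrideChunk

inline uintptr_t chunk_index_for_card(uintptr_t card_addr) {
  return card_addr / CARDS_PER_CHUNK;     // mirrors addr_to_chunk_index()
}

inline int stride_for_chunk(uintptr_t chunk_index, int n_strides) {
  return (int)(chunk_index % n_strides);  // interleaves work across threads
}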
 
 class ClearNoncleanCardWrapper: public MemRegionClosure {
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -31,6 +31,7 @@
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/shared/adaptiveSizePolicy.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
@@ -110,7 +111,10 @@
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   _rem_set = new CardTableRS(reserved_region());
-  set_barrier_set(rem_set()->bs());
+  _rem_set->initialize();
+  CardTableModRefBS* bs = new CardTableModRefBS(_rem_set);
+  bs->initialize();
+  set_barrier_set(bs);
 
   ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
   _young_gen = _young_gen_spec->init(young_rs, rem_set());
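
The new wiring order, restated as annotated pseudocode drawn from the
hunk above (error handling elided):

// CardTableRS* rs = new CardTableRS(reserved_region());  // geometry only
// rs->initialize();                     // reserves/commits the byte map
// CardTableModRefBS* bs = new CardTableModRefBS(rs);     // barrier wraps table
// bs->initialize();                     // deferred-card-mark setup
// set_barrier_set(bs);                  // barrier becomes globally visible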
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -47,10 +47,6 @@
   virtual void invalidate(MemRegion mr) = 0;
   virtual void write_region(MemRegion mr) = 0;
 
-  // The caller guarantees that "mr" contains no references.  (Perhaps its
-  // objects have been moved elsewhere.)
-  virtual void clear(MemRegion mr) = 0;
-
   // The ModRef abstraction introduces pre and post barriers
   template <DecoratorSet decorators, typename BarrierSetT>
   class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
--- a/src/hotspot/share/gc/shared/space.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/space.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
                                                 HeapWord* top_obj) {
   if (top_obj != NULL) {
     if (_sp->block_is_obj(top_obj)) {
-      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
+      if (_precision == CardTable::ObjHeadPreciseArray) {
         if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
           // An arrayOop is starting on the dirty card - since we do exact
           // store checks for objArrays we are done.
@@ -125,11 +125,11 @@
   HeapWord* bottom_obj;
   HeapWord* top_obj;
 
-  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
-         _precision == CardTableModRefBS::Precise,
+  assert(_precision == CardTable::ObjHeadPreciseArray ||
+         _precision == CardTable::Precise,
          "Only ones we deal with for now.");
 
-  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
+  assert(_precision != CardTable::ObjHeadPreciseArray ||
          _cl->idempotent() || _last_bottom == NULL ||
          top <= _last_bottom,
          "Not decreasing");
@@ -147,7 +147,7 @@
   top = get_actual_top(top, top_obj);
 
   // If the previous call did some part of this region, don't redo.
-  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
+  if (_precision == CardTable::ObjHeadPreciseArray &&
       _min_done != NULL &&
       _min_done < top) {
     top = _min_done;
@@ -159,7 +159,7 @@
   bottom = MIN2(bottom, top);
   MemRegion extended_mr = MemRegion(bottom, top);
   assert(bottom <= top &&
-         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
+         (_precision != CardTable::ObjHeadPreciseArray ||
           _min_done == NULL ||
           top <= _min_done),
          "overlap!");
@@ -180,7 +180,7 @@
 }
 
 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
-                                          CardTableModRefBS::PrecisionStyle precision,
+                                          CardTable::PrecisionStyle precision,
                                           HeapWord* boundary,
                                           bool parallel) {
   return new DirtyCardToOopClosure(this, cl, precision, boundary);
@@ -189,7 +189,7 @@
 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
   if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
-    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
+    if (_precision == CardTable::ObjHeadPreciseArray) {
       if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
         // An arrayOop is starting on the dirty card - since we do exact
         // store checks for objArrays we are done.
@@ -260,7 +260,7 @@
 
 DirtyCardToOopClosure*
 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
-                             CardTableModRefBS::PrecisionStyle precision,
+                             CardTable::PrecisionStyle precision,
                              HeapWord* boundary,
                              bool parallel) {
   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
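
A hedged reading of the two PrecisionStyle values now hosted by
CardTable (the enum below is a stand-in; the array special-casing
matches the is_objArray/is_typeArray checks above):

enum class PrecisionStyle { Precise, ObjHeadPreciseArray };

// Precise: a dirty card covers exactly the modified words.
// ObjHeadPreciseArray: plain objects dirty only the card of the object
// header (imprecise), while array element stores are carded precisely,
// so a DCTOC can stop when an array starts on the dirty card.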
--- a/src/hotspot/share/gc/shared/space.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/gc/shared/space.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_SHARED_SPACE_HPP
 
 #include "gc/shared/blockOffsetTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
@@ -181,7 +181,7 @@
   // depending on the type of space in which the closure will
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
-                                             CardTableModRefBS::PrecisionStyle precision,
+                                             CardTable::PrecisionStyle precision,
                                              HeapWord* boundary,
                                              bool parallel);
 
@@ -253,7 +253,7 @@
 protected:
   ExtendedOopClosure* _cl;
   Space* _sp;
-  CardTableModRefBS::PrecisionStyle _precision;
+  CardTable::PrecisionStyle _precision;
   HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                 // pointing below boundary.
   HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
@@ -282,7 +282,7 @@
 
 public:
   DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
-                        CardTableModRefBS::PrecisionStyle precision,
+                        CardTable::PrecisionStyle precision,
                         HeapWord* boundary) :
     _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
     _min_done(NULL) {
@@ -619,7 +619,7 @@
 
   // Override.
   DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
-                                     CardTableModRefBS::PrecisionStyle precision,
+                                     CardTable::PrecisionStyle precision,
                                      HeapWord* boundary,
                                      bool parallel);
 
@@ -694,7 +694,7 @@
 
 public:
   FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
-                  CardTableModRefBS::PrecisionStyle precision,
+                  CardTable::PrecisionStyle precision,
                   HeapWord* boundary) :
     DirtyCardToOopClosure(sp, cl, precision, boundary) {}
 };
@@ -723,7 +723,7 @@
 
 public:
   ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
-                       CardTableModRefBS::PrecisionStyle precision,
+                       CardTable::PrecisionStyle precision,
                        HeapWord* boundary) :
     FilteringDCTOC(sp, cl, precision, boundary)
   {}
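
The space.hpp hunks retarget the same enum in the new_dcto_cl() factory and the closure constructors. The factory shape itself is ordinary virtual dispatch, sketched below with hypothetical stand-in types rather than the real closure classes:

    #include <cstdio>
    #include <memory>

    // Hypothetical stand-in types; only the virtual-factory shape matters:
    // each space kind returns its own closure type.
    struct DCTOC {
        virtual ~DCTOC() = default;
        virtual const char* kind() const { return "DirtyCardToOopClosure"; }
    };
    struct ContiguousDCTOC : DCTOC {
        const char* kind() const override { return "ContiguousSpaceDCTOC"; }
    };

    struct Space {
        virtual ~Space() = default;
        virtual std::unique_ptr<DCTOC> new_dcto_cl() { return std::make_unique<DCTOC>(); }
    };
    struct ContiguousSpace : Space {
        std::unique_ptr<DCTOC> new_dcto_cl() override {
            return std::make_unique<ContiguousDCTOC>();
        }
    };

    int main() {
        ContiguousSpace cs;
        std::printf("%s\n", cs.new_dcto_cl()->kind());
        return 0;
    }
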
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
@@ -48,6 +49,7 @@
 #include "jvmci/jvmciCodeInstaller.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
 #include "gc/g1/heapRegion.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/timerTrace.hpp"
@@ -205,10 +207,10 @@
 
   BarrierSet* bs = Universe::heap()->barrier_set();
   if (bs->is_a(BarrierSet::CardTableModRef)) {
-    jbyte* base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
-    assert(base != 0, "unexpected byte_map_base");
+    jbyte* base = ci_card_table_address();
+    assert(base != NULL, "unexpected byte_map_base");
     cardtable_start_address = base;
-    cardtable_shift = CardTableModRefBS::card_shift;
+    cardtable_shift = CardTable::card_shift;
   } else {
     // No card mark barriers
     cardtable_start_address = 0;
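
The JVMCI hunk above replaces a barrier_set_cast with the new ci_card_table_address() helper but still hands the compiler the same two values: the card-table base and CardTable::card_shift. A self-contained toy model of how a consumer uses that pair follows; the values are HotSpot's usual ones (card_shift = 9, dirty_card_val() = 0), but the zero heap base and array size are simplifications:

    #include <cstdint>
    #include <cstdio>

    static const int     card_shift = 9;   // 512-byte cards
    static const uint8_t dirty_card = 0;

    int main() {
        static uint8_t byte_map[1024];                       // toy card table
        for (int i = 0; i < 1024; i++) byte_map[i] = 0xff;   // clean_card is all ones

        uintptr_t heap_base     = 0;                         // simplification
        uint8_t*  byte_map_base = byte_map - (heap_base >> card_shift);

        uintptr_t store_addr = 5 * 512 + 40;                 // a store into card 5
        byte_map_base[store_addr >> card_shift] = dirty_card;

        std::printf("card %d is now %u\n", 5, (unsigned)byte_map[5]);
        return 0;
    }
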
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -426,7 +426,7 @@
   declare_constant(BitData::null_seen_flag)                               \
   declare_constant(BranchData::not_taken_off_set)                         \
                                                                           \
-  declare_constant_with_value("CardTableModRefBS::dirty_card", CardTableModRefBS::dirty_card_val()) \
+  declare_constant_with_value("CardTable::dirty_card", CardTable::dirty_card_val()) \
                                                                           \
   declare_constant(CodeInstaller::VERIFIED_ENTRY)                         \
   declare_constant(CodeInstaller::UNVERIFIED_ENTRY)                       \
@@ -653,7 +653,7 @@
   static_field(HeapRegion, LogOfHRGrainBytes, int)
 
 #define VM_INT_CONSTANTS_G1(declare_constant, declare_constant_with_value, declare_preprocessor_constant) \
-  declare_constant_with_value("G1SATBCardTableModRefBS::g1_young_gen", G1SATBCardTableModRefBS::g1_young_card_val())
+  declare_constant_with_value("G1CardTable::g1_young_gen", G1CardTable::g1_young_card_val())
 
 #endif // INCLUDE_ALL_GCS
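
vmStructs_jvmci.cpp only renames the exported constants; the mechanism is essentially a string-keyed table that JVMCI consumers look up, as in this toy model (the young-gen value is a placeholder, not the real constant):

    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
        std::map<std::string, long> vm_constants;
        vm_constants["CardTable::dirty_card"]     = 0;  // dirty_card_val()
        vm_constants["G1CardTable::g1_young_gen"] = 2;  // placeholder value
        std::printf("dirty card value: %ld\n", vm_constants.at("CardTable::dirty_card"));
        return 0;
    }
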
 
--- a/src/hotspot/share/opto/graphKit.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/opto/graphKit.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -23,10 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
 #include "compiler/compileLog.hpp"
+#include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/resourceArea.hpp"
@@ -1562,9 +1565,7 @@
       g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
       break;
 
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
+    case BarrierSet::CardTableModRef:
       break;
 
     default      :
@@ -1579,9 +1580,7 @@
     case BarrierSet::G1SATBCTLogging:
       return true; // Can move it if no safepoint
 
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
-    case BarrierSet::ModRef:
+    case BarrierSet::CardTableModRef:
       return true; // There is no pre-barrier
 
     default      :
@@ -1605,14 +1604,10 @@
       g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;
 
-    case BarrierSet::CardTableForRS:
-    case BarrierSet::CardTableExtension:
+    case BarrierSet::CardTableModRef:
       write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
       break;
 
-    case BarrierSet::ModRef:
-      break;
-
     default      :
       ShouldNotReachHere();
 
@@ -3827,11 +3822,9 @@
 
 Node* GraphKit::byte_map_base_node() {
   // Get base of card map
-  CardTableModRefBS* ct =
-    barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
-  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
-  if (ct->byte_map_base != NULL) {
-    return makecon(TypeRawPtr::make((address)ct->byte_map_base));
+  jbyte* card_table_base = ci_card_table_address();
+  if (card_table_base != NULL) {
+    return makecon(TypeRawPtr::make((address)card_table_base));
   } else {
     return null();
   }
@@ -3883,7 +3876,7 @@
   // Divide by card size
   assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
          "Only one we handle so far.");
-  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
 
   // Combine card table base and card offset
   Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
@@ -4275,8 +4268,8 @@
   Node* no_base = __ top();
   float likely  = PROB_LIKELY(0.999);
   float unlikely  = PROB_UNLIKELY(0.999);
-  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
-  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
+  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
+  Node* dirty_card = __ ConI((jint)CardTable::dirty_card_val());
   Node* zeroX = __ ConX(0);
 
   // Get the alias_index for raw card-mark memory
@@ -4306,7 +4299,7 @@
   Node* cast =  __ CastPX(__ ctrl(), adr);
 
   // Divide pointer by card size
-  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
 
   // Combine card table base and card offset
   Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
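
In graphKit.cpp the collapsed enum cases all reach the same card-table post barrier, and the G1 path now takes its card values from G1CardTable and CardTable. The hunk above builds C2 IR; the sketch below restates that young-card filter and conditional card mark as ordinary C++ (dirty_card_val() really is 0; the young-card value is a placeholder):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static const uint8_t dirty_card    = 0;
    static const uint8_t g1_young_card = 2;  // placeholder for g1_young_card_val()

    // Ordinary C++ restatement of the control flow built as IR above: skip the
    // mark for young-gen cards, otherwise fence and dirty the card only if it
    // is not already dirty.
    void g1_post_barrier(std::atomic<uint8_t>& card) {
        if (card.load(std::memory_order_relaxed) == g1_young_card) {
            return;                                          // young card: no mark needed
        }
        std::atomic_thread_fence(std::memory_order_seq_cst); // the MemBarVolatile in the IR
        if (card.load(std::memory_order_relaxed) != dirty_card) {
            card.store(dirty_card, std::memory_order_relaxed);
        }
    }

    int main() {
        std::atomic<uint8_t> card(0xff);                     // clean card (all ones)
        g1_post_barrier(card);
        std::printf("card = %u\n", (unsigned)card.load());   // prints 0
        return 0;
    }
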
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -375,8 +375,8 @@
   if (UseConcMarkSweepGC) {
     // ParGCCardsPerStrideChunk must not exceed the card table size.
     size_t heap_size = Universe::heap()->reserved_region().word_size();
-    CardTableModRefBS* bs = (CardTableModRefBS*)GenCollectedHeap::heap()->rem_set()->bs();
-    size_t card_table_size = bs->cards_required(heap_size) - 1; // Valid card table size
+    CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
+    size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
 
     if ((size_t)value > card_table_size) {
       CommandLineError::print(verbose,
@@ -387,7 +387,7 @@
     }
 
     // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread)
-    // from CardTableModRefBSForCTRS::process_stride(). Note that ParGCStridesPerThread is already checked
+    // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
     // by its constraint function not to overflow when multiplied by ParallelGCThreads.
     uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
     uintx ergo_max = max_uintx / n_strides;
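
The constraint now asks CardTableRS directly for cards_required() instead of casting the remembered set's barrier set. Roughly, the computation is cards to cover the heap plus a guard card, which is why the code above subtracts one to get the valid size; the sketch below is an illustrative restatement, not the HotSpot definition:

    #include <cstddef>
    #include <cstdio>

    // One card covers card_size_in_words heap words (64 on 64-bit:
    // 512-byte cards, 8-byte words), plus one guard card.
    static const std::size_t card_size_in_words = 64;

    std::size_t cards_required(std::size_t heap_words) {
        return (heap_words + card_size_in_words - 1) / card_size_in_words + 1;
    }

    int main() {
        std::size_t heap_words = static_cast<std::size_t>(1) << 20;
        std::printf("valid card table size: %zu\n", cards_required(heap_words) - 1);
        return 0;
    }
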
@@ -469,9 +469,9 @@
 #if INCLUDE_ALL_GCS
   if (status == Flag::SUCCESS && UseConcMarkSweepGC) {
     // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
-    // to be aligned to CardTableModRefBS::card_size * BitsPerWord.
+    // to be aligned to CardTable::card_size * BitsPerWord.
     // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
-    // because rescan_task_size() is CardTableModRefBS::card_size / HeapWordSize * BitsPerWord.
+    // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
     if (value % HeapWordSize != 0) {
       CommandLineError::print(verbose,
                               "CMSRescanMultiple (" SIZE_FORMAT ") must be "
--- a/src/hotspot/share/runtime/globals.hpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/runtime/globals.hpp	Mon Feb 26 09:34:12 2018 +0100
@@ -1723,13 +1723,13 @@
           "enough work per iteration")                                      \
           range(0, max_intx)                                                \
                                                                             \
-  /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */          \
+  /* 4096 = CardTable::card_size_in_words * BitsPerWord */                  \
   product(size_t, CMSRescanMultiple, 32,                                    \
           "Size (in cards) of CMS parallel rescan task")                    \
           range(1, SIZE_MAX / 4096)                                         \
           constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit)       \
                                                                             \
-  /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */          \
+  /* 4096 = CardTable::card_size_in_words * BitsPerWord */                  \
   product(size_t, CMSConcMarkMultiple, 32,                                  \
           "Size (in cards) of CMS concurrent MT marking task")              \
           range(1, SIZE_MAX / 4096)                                         \
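
The 4096 in these flag comments follows from the usual 64-bit values: 512-byte cards over 8-byte heap words give card_size_in_words = 64, and 64 words times 64 bits per word is 4096. A compile-time check under those assumed values:

    #include <cstddef>

    static const std::size_t card_size          = 512;
    static const std::size_t HeapWordSize       = 8;
    static const std::size_t BitsPerWord        = 64;
    static const std::size_t card_size_in_words = card_size / HeapWordSize;  // 64

    static_assert(card_size_in_words * BitsPerWord == 4096,
                  "the range divisor in the flag definitions matches");

    int main() { return 0; }
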
--- a/src/hotspot/share/runtime/vmStructs.cpp	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Mon Feb 26 09:34:12 2018 +0100
@@ -466,20 +466,19 @@
   nonstatic_field(CardGeneration,              _capacity_at_prologue,                         size_t)                                \
   nonstatic_field(CardGeneration,              _used_at_prologue,                             size_t)                                \
                                                                                                                                      \
+  nonstatic_field(CardTable,                   _whole_heap,                                   const MemRegion)                       \
+  nonstatic_field(CardTable,                   _guard_index,                                  const size_t)                          \
+  nonstatic_field(CardTable,                   _last_valid_index,                             const size_t)                          \
+  nonstatic_field(CardTable,                   _page_size,                                    const size_t)                          \
+  nonstatic_field(CardTable,                   _byte_map_size,                                const size_t)                          \
+  nonstatic_field(CardTable,                   _byte_map,                                     jbyte*)                                \
+  nonstatic_field(CardTable,                   _cur_covered_regions,                          int)                                   \
+  nonstatic_field(CardTable,                   _covered,                                      MemRegion*)                            \
+  nonstatic_field(CardTable,                   _committed,                                    MemRegion*)                            \
+  nonstatic_field(CardTable,                   _guard_region,                                 MemRegion)                             \
+  nonstatic_field(CardTable,                   _byte_map_base,                                jbyte*)                                \
   nonstatic_field(CardTableModRefBS,           _defer_initial_card_mark,                      bool)                                  \
-  nonstatic_field(CardTableModRefBS,           _whole_heap,                                   const MemRegion)                       \
-  nonstatic_field(CardTableModRefBS,           _guard_index,                                  const size_t)                          \
-  nonstatic_field(CardTableModRefBS,           _last_valid_index,                             const size_t)                          \
-  nonstatic_field(CardTableModRefBS,           _page_size,                                    const size_t)                          \
-  nonstatic_field(CardTableModRefBS,           _byte_map_size,                                const size_t)                          \
-  nonstatic_field(CardTableModRefBS,           _byte_map,                                     jbyte*)                                \
-  nonstatic_field(CardTableModRefBS,           _cur_covered_regions,                          int)                                   \
-  nonstatic_field(CardTableModRefBS,           _covered,                                      MemRegion*)                            \
-  nonstatic_field(CardTableModRefBS,           _committed,                                    MemRegion*)                            \
-  nonstatic_field(CardTableModRefBS,           _guard_region,                                 MemRegion)                             \
-  nonstatic_field(CardTableModRefBS,           byte_map_base,                                 jbyte*)                                \
-                                                                                                                                     \
-  nonstatic_field(CardTableRS,                 _ct_bs,                                        CardTableModRefBSForCTRS*)             \
+  nonstatic_field(CardTableModRefBS,           _card_table,                                   CardTable*)                            \
                                                                                                                                      \
   nonstatic_field(CollectedHeap,               _reserved,                                     MemRegion)                             \
   nonstatic_field(CollectedHeap,               _barrier_set,                                  BarrierSet*)                           \
@@ -1477,9 +1476,9 @@
   declare_toplevel_type(BarrierSet)                                       \
            declare_type(ModRefBarrierSet,             BarrierSet)         \
            declare_type(CardTableModRefBS,            ModRefBarrierSet)   \
-           declare_type(CardTableModRefBSForCTRS,     CardTableModRefBS)  \
+  declare_toplevel_type(CardTable)                                        \
+           declare_type(CardTableRS,                  CardTable)          \
   declare_toplevel_type(BarrierSet::Name)                                 \
-  declare_toplevel_type(CardTableRS)                                      \
   declare_toplevel_type(BlockOffsetSharedArray)                           \
   declare_toplevel_type(BlockOffsetTable)                                 \
            declare_type(BlockOffsetArray,             BlockOffsetTable)   \
@@ -1502,11 +1501,11 @@
                                                                           \
   declare_toplevel_type(BarrierSet*)                                      \
   declare_toplevel_type(BlockOffsetSharedArray*)                          \
+  declare_toplevel_type(CardTable*)                                       \
+  declare_toplevel_type(CardTable*const)                                  \
   declare_toplevel_type(CardTableRS*)                                     \
   declare_toplevel_type(CardTableModRefBS*)                               \
   declare_toplevel_type(CardTableModRefBS**)                              \
-  declare_toplevel_type(CardTableModRefBSForCTRS*)                        \
-  declare_toplevel_type(CardTableModRefBSForCTRS**)                       \
   declare_toplevel_type(CollectedHeap*)                                   \
   declare_toplevel_type(ContiguousSpace*)                                 \
   declare_toplevel_type(DefNewGeneration*)                                \
@@ -2240,8 +2239,6 @@
                                                                           \
   declare_constant(BarrierSet::ModRef)                                    \
   declare_constant(BarrierSet::CardTableModRef)                           \
-  declare_constant(BarrierSet::CardTableForRS)                            \
-  declare_constant(BarrierSet::CardTableExtension)                        \
   declare_constant(BarrierSet::G1SATBCT)                                  \
   declare_constant(BarrierSet::G1SATBCTLogging)                           \
                                                                           \
@@ -2253,18 +2250,18 @@
   declare_constant(BOTConstants::Base)                                    \
   declare_constant(BOTConstants::N_powers)                                \
                                                                           \
-  declare_constant(CardTableModRefBS::clean_card)                         \
-  declare_constant(CardTableModRefBS::last_card)                          \
-  declare_constant(CardTableModRefBS::dirty_card)                         \
-  declare_constant(CardTableModRefBS::Precise)                            \
-  declare_constant(CardTableModRefBS::ObjHeadPreciseArray)                \
-  declare_constant(CardTableModRefBS::card_shift)                         \
-  declare_constant(CardTableModRefBS::card_size)                          \
-  declare_constant(CardTableModRefBS::card_size_in_words)                 \
+  declare_constant(CardTable::clean_card)                                 \
+  declare_constant(CardTable::last_card)                                  \
+  declare_constant(CardTable::dirty_card)                                 \
+  declare_constant(CardTable::Precise)                                    \
+  declare_constant(CardTable::ObjHeadPreciseArray)                        \
+  declare_constant(CardTable::card_shift)                                 \
+  declare_constant(CardTable::card_size)                                  \
+  declare_constant(CardTable::card_size_in_words)                         \
                                                                           \
   declare_constant(CardTableRS::youngergen_card)                          \
                                                                           \
-  declare_constant(G1SATBCardTableModRefBS::g1_young_gen)                 \
+  declare_constant(G1CardTable::g1_young_gen)                             \
                                                                           \
   declare_constant(CollectedHeap::SerialHeap)                             \
   declare_constant(CollectedHeap::CMSHeap)                                \
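
These vmStructs changes record the heart of the refactoring: the mapping state (_byte_map, _covered, _committed, the guard region, the base pointer) moves to a new top-level CardTable, CardTableRS becomes a CardTable subtype, and CardTableModRefBS keeps only a _card_table pointer. A minimal shape sketch with hypothetical simplified declarations:

    #include <cstdio>

    // Hypothetical simplified declarations, not the HotSpot headers.
    struct CardTable {
        unsigned char* _byte_map_base;          // moved here from the barrier set
        CardTable() : _byte_map_base(nullptr) {}
        virtual ~CardTable() {}
    };
    struct CardTableRS : CardTable {};          // generational remembered set

    struct BarrierSet { virtual ~BarrierSet() {} };
    struct CardTableModRefBS : BarrierSet {
        CardTable* _card_table;                 // composition replaces inheritance
        explicit CardTableModRefBS(CardTable* ct) : _card_table(ct) {}
    };

    int main() {
        CardTableRS rs;
        CardTableModRefBS bs(&rs);
        std::printf("barrier set delegates to a card table: %p\n",
                    static_cast<void*>(bs._card_table));
        return 0;
    }
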
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java	Mon Feb 26 09:34:20 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java	Mon Feb 26 09:34:12 2018 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
     public static final GraalHotSpotVMConfig INJECTED_VMCONFIG = null;
 
     private final boolean isJDK8 = System.getProperty("java.specification.version").compareTo("1.9") < 0;
+    private final int JDKVersion = isJDK8 ? 8 : Integer.parseInt(System.getProperty("java.specification.version"));
     public final String osName = getHostOSName();
     public final String osArch = getHostArchitectureName();
     public final boolean windowsOs = System.getProperty("os.name", "").startsWith("Windows");
@@ -554,8 +555,12 @@
 
     public final int logOfHRGrainBytes = getFieldValue("HeapRegion::LogOfHRGrainBytes", Integer.class, "int");
 
-    public final byte dirtyCardValue = isJDK8 ? getFieldValue("CompilerToVM::Data::dirty_card", Byte.class, "int") : getConstant("CardTableModRefBS::dirty_card", Byte.class);
-    public final byte g1YoungCardValue = isJDK8 ? getFieldValue("CompilerToVM::Data::g1_young_card", Byte.class, "int") : getConstant("G1SATBCardTableModRefBS::g1_young_gen", Byte.class);
+    public final byte dirtyCardValue = JDKVersion >= 11 ? getConstant("CardTable::dirty_card", Byte.class) :
+                                         (JDKVersion > 8 ? getConstant("CardTableModRefBS::dirty_card", Byte.class) :
+                                         getFieldValue("CompilerToVM::Data::dirty_card", Byte.class, "int"));
+    public final byte g1YoungCardValue = JDKVersion >= 11 ? getConstant("G1CardTable::g1_young_gen", Byte.class) :
+                                           (JDKVersion > 8 ? getConstant("G1SATBCardTableModRefBS::g1_young_gen", Byte.class) :
+                                           getFieldValue("CompilerToVM::Data::g1_young_card", Byte.class, "int"));
 
     public final long cardtableStartAddress = getFieldValue("CompilerToVM::Data::cardtable_start_address", Long.class, "jbyte*");
     public final int cardtableShift = getFieldValue("CompilerToVM::Data::cardtable_shift", Integer.class, "int");
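
The Graal config replaces its JDK 8 boolean with an explicit version ladder because the exporting symbol has now changed twice. The dispatch reduces to picking a lookup key per release, as in this sketch (stand-in function, not the Graal API):

    #include <cstdio>
    #include <string>

    // Stand-in, not Graal: choose the dirty-card lookup key by release.
    std::string dirty_card_key(int jdk) {
        if (jdk >= 11) return "CardTable::dirty_card";          // after this change
        if (jdk > 8)   return "CardTableModRefBS::dirty_card";  // JDK 9/10 constant
        return "CompilerToVM::Data::dirty_card";                // JDK 8 field
    }

    int main() {
        std::printf("%s\n", dirty_card_key(11).c_str());
        return 0;
    }
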