Merge

changeset 47910:b4d2929683b6
author    jwilhelm
date      Fri, 17 Nov 2017 02:50:51 +0100
parent    47909:da4fb7d2f917
parent    47826:7f06714e7f0b
child     47911:af1361361585

--- a/make/hotspot/lib/JvmFeatures.gmk	Thu Nov 16 11:07:44 2017 -0800
+++ b/make/hotspot/lib/JvmFeatures.gmk	Fri Nov 17 02:50:51 2017 +0100
@@ -153,7 +153,7 @@
   # like the old build, but it's probably not right.
   JVM_OPTIMIZATION :=
   JVM_CFLAGS_FEATURES += -O3 -flto
-  JVM_LDFLAGS_FEATURES += -O3 -flto -fwhole-program -fno-strict-aliasing
+  JVM_LDFLAGS_FEATURES += -O3 -flto -fuse-linker-plugin -fno-strict-aliasing
 endif
 
 ifeq ($(call check-jvm-feature, minimal), true)
--- a/make/test/JtregNativeHotspot.gmk	Thu Nov 16 11:07:44 2017 -0800
+++ b/make/test/JtregNativeHotspot.gmk	Fri Nov 17 02:50:51 2017 +0100
@@ -60,6 +60,7 @@
     $(TOPDIR)/test/hotspot/jtreg/runtime/SameObject \
     $(TOPDIR)/test/hotspot/jtreg/runtime/BoolReturn \
     $(TOPDIR)/test/hotspot/jtreg/runtime/noClassDefFoundMsg \
+    $(TOPDIR)/test/hotspot/jtreg/runtime/handshake \
     $(TOPDIR)/test/hotspot/jtreg/runtime/RedefineTests \
     $(TOPDIR)/test/hotspot/jtreg/compiler/floatingpoint/ \
     $(TOPDIR)/test/hotspot/jtreg/compiler/calls \
@@ -108,6 +109,7 @@
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMAAThreadStart := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAllowedFunctions := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libRedefineDoubleDelete := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libHandshakeTransitionTest := -lc
 endif
 
 ifeq ($(OPENJDK_TARGET_OS), linux)
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -79,6 +79,8 @@
 // Clear short arrays bigger than one word in an arch-specific way
 define_pd_global(intx, InitArrayShortSize, BytesPerLong);
 
+define_pd_global(bool, ThreadLocalHandshakes, false);
+
 #if defined(COMPILER1) || defined(COMPILER2)
 define_pd_global(intx, InlineSmallCode,          1000);
 #endif
--- a/src/hotspot/cpu/arm/globals_arm.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/arm/globals_arm.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,6 +79,8 @@
 
 define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
 
+define_pd_global(bool, ThreadLocalHandshakes, false);
+
 #define ARCH_FLAGS(develop, \
                    product, \
                    diagnostic, \
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -83,6 +83,8 @@
 // 2x unrolled loop is shorter with more than 9 HeapWords.
 define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
 
+define_pd_global(bool, ThreadLocalHandshakes, false);
+
 // Platform dependent flag handling: flags only defined on this platform.
 #define ARCH_FLAGS(develop, \
                    product, \
--- a/src/hotspot/cpu/s390/globals_s390.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/s390/globals_s390.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -85,6 +85,8 @@
 // 8146801 (Short Array Allocation): No performance work done here yet.
 define_pd_global(intx, InitArrayShortSize, 1*BytesPerLong);
 
+define_pd_global(bool, ThreadLocalHandshakes, false);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct, range, constraint, writeable) \
                                                                               \
   /* Reoptimize code-sequences of calls at runtime, e.g. replace an */        \
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -35,6 +35,7 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/objArrayKlass.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 
 #define __ _masm->
@@ -1415,7 +1416,11 @@
   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
     __ reserved_stack_check();
   }
-  __ set((intptr_t)os::get_polling_page(), L0);
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
+  } else {
+    __ set((intptr_t)os::get_polling_page(), L0);
+  }
   __ relocate(relocInfo::poll_return_type);
   __ ld_ptr(L0, 0, G0);
   __ ret();
@@ -1424,11 +1429,16 @@
 
 
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  __ set((intptr_t)os::get_polling_page(), tmp->as_register());
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), tmp->as_register());
+  } else {
+    __ set((intptr_t)os::get_polling_page(), tmp->as_register());
+  }
   if (info != NULL) {
     add_debug_info_for_branch(info);
   }
   int offset = __ offset();
+
   __ relocate(relocInfo::poll_type);
   __ ld_ptr(tmp->as_register(), 0, G0);
   return offset;
--- a/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -33,6 +33,7 @@
 #include "ci/ciArray.hpp"
 #include "ci/ciObjArrayKlass.hpp"
 #include "ci/ciTypeArrayKlass.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "vmreg_sparc.inline.hpp"
@@ -1304,7 +1305,7 @@
   if (x->is_safepoint()) {
     // increment backedge counter if needed
     increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
-    __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
+    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
   }
 
   __ cmp(lir_cond(cond), left, right);
--- a/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -52,4 +52,7 @@
 #define SUPPORT_RESERVED_STACK_AREA
 #endif
 
+// SPARC has implemented local polling
+#define THREAD_LOCAL_POLL
+
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -87,6 +87,8 @@
 
 define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
 
+define_pd_global(bool, ThreadLocalHandshakes, true);
+
 #define ARCH_FLAGS(develop, \
                    product, \
                    diagnostic, \
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -36,6 +36,7 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/basicLock.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/align.hpp"
@@ -95,12 +96,11 @@
   else                delayed()->nop();
 }
 
-
-void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
+void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr, bool generate_poll) {
   // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
   assert_not_delayed();
   ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
-  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
+  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, true, generate_poll);
 }
 
 
@@ -261,15 +261,34 @@
 // common code to dispatch and dispatch_only
 // dispatch value in Lbyte_code and increment Lbcp
 
-void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
+void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify, bool generate_poll) {
   verify_FPU(1, state);
   // %%%%% maybe implement +VerifyActivationFrameSize here
   //verify_thread(); //too slow; we will just verify on method entry & exit
   if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
   // dispatch table to use
   AddressLiteral tbl(table);
+  Label dispatch;
+
+  if (SafepointMechanism::uses_thread_local_poll() && generate_poll) {
+    AddressLiteral sfpt_tbl(Interpreter::safept_table(state));
+    Label no_safepoint;
+
+    if (tbl.value() != sfpt_tbl.value()) {
+      ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
+      // Armed page has poll_bit set, if poll bit is cleared just continue.
+      and3(G3_scratch, SafepointMechanism::poll_bit(), G3_scratch);
+
+      br_null_short(G3_scratch, Assembler::pt, no_safepoint);
+      set(sfpt_tbl, G3_scratch);
+      ba_short(dispatch);
+    }
+    bind(no_safepoint);
+  }
+
+  set(tbl, G3_scratch);                               // compute addr of table
+  bind(dispatch);
   sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
-  set(tbl, G3_scratch);                               // compute addr of table
   ld_ptr(G3_scratch, Lbyte_code, G3_scratch);         // get entry addr
   jmp( G3_scratch, 0 );
   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
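
Note on the hunk above: the SPARC interpreter dispatch now consults the per-thread poll word before jumping through the dispatch table; if the poll bit is armed, the next bytecode is dispatched through Interpreter::safept_table(state) so the runtime gets control first. A standalone sketch of that table selection (model code only, not HotSpot source; the table pointers and poll word are stand-ins):

    #include <cstdint>

    typedef void (*BytecodeHandler)();

    // Model of the dispatch-table choice made when generate_poll is true:
    // an armed poll word routes the next bytecode through the safepoint table.
    static const BytecodeHandler* select_dispatch_table(
        const BytecodeHandler* normal_table,     // stands in for Interpreter::dispatch_table(state)
        const BytecodeHandler* safepoint_table,  // stands in for Interpreter::safept_table(state)
        intptr_t polling_word,                   // value loaded at Thread::polling_page_offset()
        intptr_t poll_bit) {                     // stands in for SafepointMechanism::poll_bit()
      if ((polling_word & poll_bit) != 0) {
        return safepoint_table;
      }
      return normal_table;
    }
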
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -98,7 +98,7 @@
   void dispatch_epilog(TosState state, int step = 0);
   void dispatch_only(TosState state);
   void dispatch_normal(TosState state);
-  void dispatch_next(TosState state, int step = 0);
+  void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
   void dispatch_next_noverify_oop(TosState state, int step = 0);
   void dispatch_via (TosState state, address* table);
 
@@ -113,7 +113,7 @@
                          bool install_monitor_exception = true);
 
  protected:
-  void dispatch_Lbyte_code(TosState state, address* table, int bcp_incr = 0, bool verify = true);
+  void dispatch_Lbyte_code(TosState state, address* table, int bcp_incr = 0, bool verify = true, bool generate_poll = false);
 
  public:
   // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -37,6 +37,8 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/os.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/align.hpp"
@@ -236,6 +238,20 @@
 }
 
 
+void MacroAssembler::safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg) {
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    ldx(Address(thread_reg, Thread::polling_page_offset()), temp_reg, 0);
+    // Armed page has poll bit set.
+    and3(temp_reg, SafepointMechanism::poll_bit(), temp_reg);
+    br_notnull(temp_reg, a, Assembler::pn, slow_path);
+  } else {
+    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
+
+    load_contents(sync_state, temp_reg);
+    cmp(temp_reg, SafepointSynchronize::_not_synchronized);
+    br(Assembler::notEqual, a, Assembler::pn, slow_path);
+  }
+}
 
 void MacroAssembler::enter() {
   Unimplemented();
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -986,6 +986,8 @@
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp1, Register tmp2);
 
+  void safepoint_poll(Label& slow_path, bool a, Register thread_reg, Register temp_reg);
+
   // Stack frame creation/removal
   void enter();
   void leave();
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -2359,7 +2359,6 @@
   // Block, if necessary, before resuming in _thread_in_Java state.
   // In order for GC to work, don't clear the last_Java_sp until after blocking.
   { Label no_block;
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
 
     // Switch thread to "native transition" state before reading the synchronization state.
     // This additional state is necessary because reading and testing the synchronization
@@ -2382,12 +2381,10 @@
         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
       }
     }
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
 
     Label L;
     Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
-    __ br(Assembler::notEqual, false, Assembler::pn, L);
+    __ safepoint_poll(L, false, G2_thread, G3_scratch);
     __ delayed()->ld(suspend_state, G3_scratch);
     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
     __ bind(L);
@@ -3118,7 +3115,7 @@
   } else {
     // Make it look like we were called via the poll
     // so that frame constructor always sees a valid return address
-    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
+    __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_pc_offset()), O7);
     __ sub(O7, frame::pc_return_offset, O7);
   }
 
@@ -3127,6 +3124,15 @@
   // setup last_Java_sp (blows G4)
   __ set_last_Java_frame(SP, noreg);
 
+  Register saved_O7 = O7->after_save();
+  if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
+    // Keep a copy of the return pc in L0 to detect if it gets modified
+    __ mov(saved_O7, L0);
+    // Adjust and keep a copy of our npc saved by the signal handler
+    __ ld_ptr(Address(G2_thread, JavaThread::saved_exception_npc_offset()), L1);
+    __ sub(L1, frame::pc_return_offset, L1);
+  }
+
   // call into the runtime to handle illegal instructions exception
   // Do not use call_VM_leaf, because we need to make a GC map at this call site.
   __ mov(G2_thread, O0);
@@ -3150,6 +3156,12 @@
   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
   __ br_notnull_short(O1, Assembler::pn, pending);
 
+  if (!cause_return && SafepointMechanism::uses_thread_local_poll()) {
+    // If nobody modified our return pc then we must return to the npc, which we saved in L1
+    __ cmp(saved_O7, L0);
+    __ movcc(Assembler::equal, false, Assembler::ptr_cc, L1, saved_O7);
+  }
+
   RegisterSaver::restore_live_registers(masm);
 
  // We are back to the original state on entry and ready to go.
--- a/src/hotspot/cpu/sparc/sparc.ad	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/sparc.ad	Fri Nov 17 02:50:51 2017 +0100
@@ -1206,7 +1206,11 @@
   Compile* C = ra_->C;
 
   if(do_polling() && ra_->C->is_method_compilation()) {
-    st->print("SETHI  #PollAddr,L0\t! Load Polling address\n\t");
+    if (SafepointMechanism::uses_global_page_poll()) {
+      st->print("SETHI  #PollAddr,L0\t! Load Polling address\n\t");
+    } else {
+      st->print("LDX    [R_G2 + #poll_offset],L0\t! Load local polling address\n\t");
+    }
     st->print("LDX    [L0],G0\t!Poll for Safepointing\n\t");
   }
 
@@ -1233,8 +1237,12 @@
 
   // If this does safepoint polling, then do it here
   if(do_polling() && ra_->C->is_method_compilation()) {
-    AddressLiteral polling_page(os::get_polling_page());
-    __ sethi(polling_page, L0);
+    if (SafepointMechanism::uses_thread_local_poll()) {
+      __ ld_ptr(Address(G2_thread, Thread::polling_page_offset()), L0);
+    } else {
+      AddressLiteral polling_page(os::get_polling_page());
+      __ sethi(polling_page, L0);
+    }
     __ relocate(relocInfo::poll_return_type);
     __ ld_ptr(L0, 0, G0);
   }
@@ -1266,6 +1274,7 @@
 }
 
 int MachEpilogNode::safepoint_offset() const {
+  assert(SafepointMechanism::uses_global_page_poll(), "sanity");
   assert( do_polling(), "no return for this epilog node");
   return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
 }
--- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -912,10 +912,8 @@
 
     Label L_slow_path;
     // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
-    __ set(SafepointSynchronize::_not_synchronized, O3);
-    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
+    __ safepoint_poll(L_slow_path, false, G2_thread, O2);
+    __ delayed()->nop();
 
     // Load parameters
     const Register crc   = O0; // initial crc
@@ -956,10 +954,9 @@
 
     Label L_slow_path;
     // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ set(ExternalAddress(SafepointSynchronize::address_of_state()), O2);
-    __ set(SafepointSynchronize::_not_synchronized, O3);
-    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);
+
+    __ safepoint_poll(L_slow_path, false, G2_thread, O2);
+    __ delayed()->nop();
 
     // Load parameters from the stack
     const Register crc    = O0; // initial crc
@@ -1397,7 +1394,6 @@
   // Block, if necessary, before resuming in _thread_in_Java state.
   // In order for GC to work, don't clear the last_Java_sp until after blocking.
   { Label no_block;
-    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
 
     // Switch thread to "native transition" state before reading the synchronization state.
     // This additional state is necessary because reading and testing the synchronization
@@ -1420,11 +1416,9 @@
         __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
       }
     }
-    __ load_contents(sync_state, G3_scratch);
-    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
 
     Label L;
-    __ br(Assembler::notEqual, false, Assembler::pn, L);
+    __ safepoint_poll(L, false, G2_thread, G3_scratch);
     __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
     __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
     __ bind(L);
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1499,7 +1499,7 @@
     // Push returnAddress for "ret" on stack
     __ push_ptr(Otos_i);
     // And away we go!
-    __ dispatch_next(vtos);
+    __ dispatch_next(vtos, 0, true);
     return;
   }
 
@@ -1607,7 +1607,7 @@
   // continue with bytecode @ target
   // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
   // %%%%% and changing dispatch_next to dispatch_only
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 
@@ -1676,7 +1676,7 @@
   __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
   __ add(G3_scratch, Otos_i, G3_scratch);
   __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 
@@ -1691,7 +1691,7 @@
   __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
   __ add(G3_scratch, Otos_i, G3_scratch);
   __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 
@@ -1727,7 +1727,7 @@
   // continue execution
   __ bind(continue_execution);
   __ add(Lbcp, O2, Lbcp);
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 
@@ -1779,7 +1779,7 @@
     __ bind(continue_execution);
   }
   __ add(Lbcp, O4, Lbcp);
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 
@@ -1888,7 +1888,7 @@
 
   __ bind(continue_execution);
   __ add( Lbcp, Rj, Lbcp );
-  __ dispatch_next( vtos );
+  __ dispatch_next(vtos, 0, true);
 }
 
 
@@ -1914,6 +1914,18 @@
     __ bind(skip_register_finalizer);
   }
 
+  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
+    Label no_safepoint;
+    __ ldx(Address(G2_thread, Thread::polling_page_offset()), G3_scratch, 0);
+    __ btst(SafepointMechanism::poll_bit(), G3_scratch);
+    __ br(Assembler::zero, false, Assembler::pt, no_safepoint);
+    __ delayed()->nop();
+    __ push(state);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
+    __ pop(state);
+    __ bind(no_safepoint);
+  }
+
   // Narrow result if state is itos but result type is smaller.
   // Need to narrow in the return bytecode rather than in generate_return_entry
   // since compiled code callers expect the result to already be narrowed.
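
Note on the hunk above: both the SPARC code here and the x86 version later in this changeset add the same check to TemplateTable::_return, a single bit test on the thread's poll word, with a call into InterpreterRuntime::at_safepoint only when the poll is armed. A minimal model of that fast-path/slow-path split (illustrative only; the callback stands in for the VM call):

    #include <cstdint>

    // Model of the poll added to the interpreter's _return bytecodes:
    // cheap bit test on the common path, runtime call only when armed.
    static void return_poll(intptr_t polling_word, intptr_t poll_bit,
                            void (*at_safepoint)()) {
      if ((polling_word & poll_bit) != 0) {
        at_safepoint();  // stands in for call_VM(..., InterpreterRuntime::at_safepoint)
      }
    }
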
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -526,32 +526,57 @@
 
   // Note: we do not need to round double result; float result has the right precision
   // the poll sets the condition code, but no data registers
-  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
-
-  if (Assembler::is_polling_page_far()) {
-    __ lea(rscratch1, polling_page);
+
+  if (SafepointMechanism::uses_thread_local_poll()) {
+#ifdef _LP64
+    __ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
     __ relocate(relocInfo::poll_return_type);
     __ testl(rax, Address(rscratch1, 0));
+#else
+    ShouldNotReachHere();
+#endif
   } else {
-    __ testl(rax, polling_page);
+    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
+
+    if (Assembler::is_polling_page_far()) {
+      __ lea(rscratch1, polling_page);
+      __ relocate(relocInfo::poll_return_type);
+      __ testl(rax, Address(rscratch1, 0));
+    } else {
+      __ testl(rax, polling_page);
+    }
   }
   __ ret(0);
 }
 
 
 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
-  AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
   guarantee(info != NULL, "Shouldn't be NULL");
   int offset = __ offset();
-  if (Assembler::is_polling_page_far()) {
-    __ lea(rscratch1, polling_page);
-    offset = __ offset();
+  if (SafepointMechanism::uses_thread_local_poll()) {
+#ifdef _LP64
+    __ movptr(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
     add_debug_info_for_branch(info);
     __ relocate(relocInfo::poll_type);
+    address pre_pc = __ pc();
     __ testl(rax, Address(rscratch1, 0));
+    address post_pc = __ pc();
+    guarantee(pointer_delta(post_pc, pre_pc, 1) == 3, "must be exact length");
+#else
+    ShouldNotReachHere();
+#endif
   } else {
-    add_debug_info_for_branch(info);
-    __ testl(rax, polling_page);
+    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type);
+    if (Assembler::is_polling_page_far()) {
+      __ lea(rscratch1, polling_page);
+      offset = __ offset();
+      add_debug_info_for_branch(info);
+      __ relocate(relocInfo::poll_type);
+      __ testl(rax, Address(rscratch1, 0));
+    } else {
+      add_debug_info_for_branch(info);
+      __ testl(rax, polling_page);
+    }
   }
   return offset;
 }
--- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,4 +65,9 @@
 #define SUPPORT_RESERVED_STACK_AREA
 #endif
 
+#ifdef _LP64
+// X64 has implemented local polling
+#define THREAD_LOCAL_POLL
+#endif
+
 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -97,6 +97,12 @@
 
 define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
 
+#ifdef _LP64
+define_pd_global(bool, ThreadLocalHandshakes, true);
+#else
+define_pd_global(bool, ThreadLocalHandshakes, false);
+#endif
+
 #define ARCH_FLAGS(develop, \
                    product, \
                    diagnostic, \
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -35,6 +35,7 @@
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/basicLock.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
 
@@ -809,7 +810,8 @@
 
 void InterpreterMacroAssembler::dispatch_base(TosState state,
                                               address* table,
-                                              bool verifyoop) {
+                                              bool verifyoop,
+                                              bool generate_poll) {
   verify_FPU(1, state);
   if (VerifyActivationFrameSize) {
     Label L;
@@ -827,8 +829,24 @@
     verify_oop(rax, state);
   }
 #ifdef _LP64
+
+  Label no_safepoint, dispatch;
+  address* const safepoint_table = Interpreter::safept_table(state);
+  if (SafepointMechanism::uses_thread_local_poll() && table != safepoint_table && generate_poll) {
+    NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
+
+    testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+
+    jccb(Assembler::zero, no_safepoint);
+    lea(rscratch1, ExternalAddress((address)safepoint_table));
+    jmpb(dispatch);
+  }
+
+  bind(no_safepoint);
   lea(rscratch1, ExternalAddress((address)table));
+  bind(dispatch);
   jmp(Address(rscratch1, rbx, Address::times_8));
+
 #else
   Address index(noreg, rbx, Address::times_ptr);
   ExternalAddress tbl((address)table);
@@ -837,8 +855,8 @@
 #endif // _LP64
 }
 
-void InterpreterMacroAssembler::dispatch_only(TosState state) {
-  dispatch_base(state, Interpreter::dispatch_table(state));
+void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
+  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
 }
 
 void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
@@ -850,12 +868,12 @@
 }
 
 
-void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
+void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
   // load next bytecode (load before advancing _bcp_register to prevent AGI)
   load_unsigned_byte(rbx, Address(_bcp_register, step));
   // advance _bcp_register
   increment(_bcp_register, step);
-  dispatch_base(state, Interpreter::dispatch_table(state));
+  dispatch_base(state, Interpreter::dispatch_table(state), true, generate_poll);
 }
 
 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
--- a/src/hotspot/cpu/x86/interp_masm_x86.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -49,7 +49,7 @@
                             bool check_exceptions);
 
   // base routine for all dispatches
-  void dispatch_base(TosState state, address* table, bool verifyoop = true);
+  void dispatch_base(TosState state, address* table, bool verifyoop = true, bool generate_poll = false);
 
  public:
   InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code),
@@ -184,12 +184,12 @@
   void dispatch_prolog(TosState state, int step = 0);
   void dispatch_epilog(TosState state, int step = 0);
   // dispatch via rbx (assume rbx is loaded already)
-  void dispatch_only(TosState state);
+  void dispatch_only(TosState state, bool generate_poll = false);
   // dispatch normal table via rbx (assume rbx is loaded already)
   void dispatch_only_normal(TosState state);
   void dispatch_only_noverify(TosState state);
   // load rbx from [_bcp_register + step] and dispatch via rbx
-  void dispatch_next(TosState state, int step = 0);
+  void dispatch_next(TosState state, int step = 0, bool generate_poll = false);
   // load rbx from [_bcp_register] and dispatch via rbx and table
   void dispatch_via (TosState state, address* table);
 
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -38,6 +38,8 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.hpp"
@@ -3759,6 +3761,25 @@
   movl(as_Address(ArrayAddress(page, index)), tmp);
 }
 
+#ifdef _LP64
+void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) {
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+    jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
+  } else {
+    cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+        SafepointSynchronize::_not_synchronized);
+    jcc(Assembler::notEqual, slow_path);
+  }
+}
+#else
+void MacroAssembler::safepoint_poll(Label& slow_path) {
+  cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+      SafepointSynchronize::_not_synchronized);
+  jcc(Assembler::notEqual, slow_path);
+}
+#endif
+
 // Calls to C land
 //
 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded
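
Note on the hunk above: the new MacroAssembler::safepoint_poll() chooses between two schemes, a per-thread poll word on LP64 when ThreadLocalHandshakes is enabled, and the classic comparison against the global SafepointSynchronize state otherwise. Roughly, the generated code decides as in the following standalone model (not HotSpot code; the constants and fields are stand-ins for poll_bit(), the word at Thread::polling_page_offset(), and _not_synchronized):

    #include <cstdint>

    static const intptr_t kPollBit          = 1;  // models SafepointMechanism::poll_bit()
    static const int      kNotSynchronized  = 0;  // models SafepointSynchronize::_not_synchronized
    static volatile int   g_safepoint_state = kNotSynchronized;

    struct ModelThread {
      intptr_t polling_word;  // models the word at Thread::polling_page_offset()
    };

    // Thread-local scheme: one load from the current thread plus a bit test.
    static bool thread_local_poll_armed(const ModelThread* self) {
      return (self->polling_word & kPollBit) != 0;
    }

    // Global scheme: compare the shared synchronization state word.
    static bool global_poll_armed() {
      return g_safepoint_state != kNotSynchronized;
    }
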
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -656,6 +656,12 @@
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp);
 
+#ifdef _LP64
+  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
+#else
+  void safepoint_poll(Label& slow_path);
+#endif
+
   void verify_tlab();
 
   // Biased locking support
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -29,6 +29,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointMechanism.hpp"
 
 // We have interfaces for the following instructions:
 // - NativeInstruction
@@ -678,6 +679,7 @@
   enum Intel_specific_constants {
     instruction_rex_prefix_mask = 0xF0,
     instruction_rex_prefix      = Assembler::REX,
+    instruction_rex_b_prefix    = Assembler::REX_B,
     instruction_code_memXregl   = 0x85,
     modrm_mask                  = 0x38, // select reg from the ModRM byte
     modrm_reg                   = 0x00  // rax
@@ -703,6 +705,16 @@
                                                           (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
 #ifdef AMD64
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    // We know that the poll must have a REX_B prefix since we enforce its source to be
+    // a rex-register and the destination to be rax.
+    const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
+    const bool is_test_opcode = ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl;
+    const bool is_rax_target = (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
+    if (has_rex_prefix && is_test_opcode && is_rax_target) {
+      return true;
+    }
+  }
   // Try decoding a near safepoint first:
   if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
       ubyte_at(1) == 0x05) { // 00 rax 101
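
Note on the hunk above: the thread-local poll recognized here is always emitted as the 3-byte instruction test %eax, (reg) with reg drawn from R8-R15, which is why a REX.B prefix can be assumed (the emitters later in this changeset assert the bytes 0x41 0x85 and a 3-byte length). A standalone sketch of the byte check, assuming only the encoding constants mirrored from NativeTstRegMem:

    #include <cstdint>

    // Sketch of the byte-level test for a thread-local safepoint poll,
    // mirroring the constants declared in NativeTstRegMem above.
    static bool looks_like_thread_local_poll(const uint8_t* pc) {
      const uint8_t rex_b_prefix = 0x41;  // Assembler::REX_B
      const uint8_t test_opcode  = 0x85;  // instruction_code_memXregl
      const uint8_t modrm_mask   = 0x38;  // reg field of the ModRM byte
      const uint8_t modrm_rax    = 0x00;  // rax selected as the register operand
      return pc[0] == rex_b_prefix &&
             pc[1] == test_opcode &&
             (pc[2] & modrm_mask) == modrm_rax;
    }
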
--- a/src/hotspot/cpu/x86/relocInfo_x86.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointMechanism.hpp"
 
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -183,9 +184,12 @@
   typedef Assembler::WhichOperand WhichOperand;
   WhichOperand which = (WhichOperand) format();
 #if !INCLUDE_JVMCI
-  assert((which == Assembler::disp32_operand) == !Assembler::is_polling_page_far(), "format not set correctly");
+  if (SafepointMechanism::uses_global_page_poll()) {
+    assert((which == Assembler::disp32_operand) == !Assembler::is_polling_page_far(), "format not set correctly");
+  }
 #endif
   if (which == Assembler::disp32_operand) {
+    assert(SafepointMechanism::uses_global_page_poll(), "should only have generated such a poll if global polling enabled");
     address orig_addr = old_addr_for(addr(), src, dest);
     NativeInstruction* oni = nativeInstruction_at(orig_addr);
     int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -30,6 +30,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "code/debugInfoRec.hpp"
 #include "code/icBuffer.hpp"
+#include "code/nativeInst.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
 #include "logging/log.hpp"
@@ -2474,15 +2475,13 @@
   // check for safepoint operation in progress and/or pending suspend requests
   {
     Label Continue;
-
-    __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-
-    Label L;
-    __ jcc(Assembler::notEqual, L);
+    Label slow_path;
+
+    __ safepoint_poll(slow_path, r15_thread, rscratch1);
+
     __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
     __ jcc(Assembler::equal, Continue);
-    __ bind(L);
+    __ bind(slow_path);
 
     // Don't use call_VM as it will see a possible pending exception and forward it
     // and never return here preventing us from clearing _last_native_pc down below.
@@ -3355,9 +3354,11 @@
   // sees an invalid pc.
 
   if (!cause_return) {
-    // overwrite the dummy value we pushed on entry
-    __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
-    __ movptr(Address(rbp, wordSize), c_rarg0);
+    // Get the return pc saved by the signal handler and stash it in its appropriate place on the stack.
+    // Additionally, rbx is a callee saved register and we can look at it later to determine
+    // if someone changed the return address for us!
+    __ movptr(rbx, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
+    __ movptr(Address(rbp, wordSize), rbx);
   }
 
   // Do the call
@@ -3387,11 +3388,38 @@
   // No exception case
   __ bind(noException);
 
+  Label no_adjust, bail;
+  if (SafepointMechanism::uses_thread_local_poll() && !cause_return) {
+    // If our stashed return pc was modified by the runtime we avoid touching it
+    __ cmpptr(rbx, Address(rbp, wordSize));
+    __ jccb(Assembler::notEqual, no_adjust);
+
+#ifdef ASSERT
+    // Verify the correct encoding of the poll we're about to skip.
+    // See NativeInstruction::is_safepoint_poll()
+    __ cmpb(Address(rbx, 0), NativeTstRegMem::instruction_rex_b_prefix);
+    __ jcc(Assembler::notEqual, bail);
+    __ cmpb(Address(rbx, 1), NativeTstRegMem::instruction_code_memXregl);
+    __ jcc(Assembler::notEqual, bail);
+    // Mask out the modrm bits
+    __ testb(Address(rbx, 2), NativeTstRegMem::modrm_mask);
+    // rax encodes to 0, so if the bits are nonzero it's incorrect
+    __ jcc(Assembler::notZero, bail);
+#endif
+    // Adjust return pc forward to step over the safepoint poll instruction
+    __ addptr(Address(rbp, wordSize), 3);
+  }
+
+  __ bind(no_adjust);
   // Normal exit, restore registers and exit.
   RegisterSaver::restore_live_registers(masm, save_vectors);
-
   __ ret(0);
 
+#ifdef ASSERT
+  __ bind(bail);
+  __ stop("Attempting to adjust pc to skip safepoint poll but the return point is not what we expected");
+#endif
+
   // Make sure all code is generated
   masm->flush();
 
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1141,14 +1141,17 @@
   // check for safepoint operation in progress and/or pending suspend requests
   {
     Label Continue;
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
+    Label slow_path;
 
-    Label L;
-    __ jcc(Assembler::notEqual, L);
+#ifndef _LP64
+    __ safepoint_poll(slow_path);
+#else
+    __ safepoint_poll(slow_path, r15_thread, rscratch1);
+#endif
+
     __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
     __ jcc(Assembler::equal, Continue);
-    __ bind(L);
+    __ bind(slow_path);
 
     // Don't use call_VM as it will see a possible pending exception
     // and forward it and never return here preventing us from
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,11 +190,7 @@
     // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)
 
     Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
+    __ safepoint_poll(slow_path, r15_thread, rscratch1);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.
@@ -240,11 +236,7 @@
     // r13: senderSP must preserved for slow path, set SP to it on fast path
 
     Label slow_path;
-    // If we need a safepoint check, generate full interpreter entry.
-    ExternalAddress state(SafepointSynchronize::address_of_state());
-    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
-             SafepointSynchronize::_not_synchronized);
-    __ jcc(Assembler::notEqual, slow_path);
+    __ safepoint_poll(slow_path, r15_thread, rscratch1);
 
     // We don't generate local frame and don't align stack because
     // we call stub code and there is no safepoint on this path.
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -2084,7 +2084,7 @@
     __ addptr(rbcp, rdx);
     // jsr returns atos that is not an oop
     __ push_i(rax);
-    __ dispatch_only(vtos);
+    __ dispatch_only(vtos, true);
     return;
   }
 
@@ -2203,7 +2203,7 @@
   // rax: return bci for jsr's, unused otherwise
   // rbx: target bytecode
   // r13: target bcp
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, true);
 
   if (UseLoopCounter) {
     if (ProfileInterpreter) {
@@ -2332,7 +2332,7 @@
   __ movptr(rbcp, Address(rax, Method::const_offset()));
   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
                       ConstMethod::codes_offset()));
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 void TemplateTable::wide_ret() {
@@ -2343,7 +2343,7 @@
   __ get_method(rax);
   __ movptr(rbcp, Address(rax, Method::const_offset()));
   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
-  __ dispatch_next(vtos);
+  __ dispatch_next(vtos, 0, true);
 }
 
 void TemplateTable::tableswitch() {
@@ -2373,7 +2373,7 @@
   LP64_ONLY(__ movl2ptr(rdx, rdx));
   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
   __ addptr(rbcp, rdx);
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, true);
   // handle default
   __ bind(default_case);
   __ profile_switch_default(rax);
@@ -2421,7 +2421,7 @@
   __ movl2ptr(rdx, rdx);
   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
   __ addptr(rbcp, rdx);
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, true);
 }
 
 void TemplateTable::fast_binaryswitch() {
@@ -2525,7 +2525,7 @@
 
   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
   __ addptr(rbcp, j);
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, true);
 
   // default case -> j = default offset
   __ bind(default_case);
@@ -2539,7 +2539,7 @@
 
   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
   __ addptr(rbcp, j);
-  __ dispatch_only(vtos);
+  __ dispatch_only(vtos, true);
 }
 
 void TemplateTable::_return(TosState state) {
@@ -2570,6 +2570,20 @@
   }
 #endif
 
+#ifdef _LP64
+  if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
+    Label no_safepoint;
+    NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
+    __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
+    __ jcc(Assembler::zero, no_safepoint);
+    __ push(state);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
+                                    InterpreterRuntime::at_safepoint));
+    __ pop(state);
+    __ bind(no_safepoint);
+  }
+#endif
+
   // Narrow result if state is itos but result type is smaller.
   // Need to narrow in the return bytecode rather than in generate_return_entry
   // since compiled code callers expect the result to already be narrowed.
--- a/src/hotspot/cpu/x86/x86_64.ad	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/x86/x86_64.ad	Fri Nov 17 02:50:51 2017 +0100
@@ -317,6 +317,18 @@
 // Singleton class for TLS pointer
 reg_class ptr_r15_reg(R15, R15_H);
 
+// The registers which can be used for
+// a thread local safepoint poll
+// * R12 is reserved for heap base
+// * R13 cannot be encoded for addressing without an offset byte
+// * R15 is reserved for the JavaThread
+reg_class ptr_rex_reg(R8,  R8_H,
+                      R9,  R9_H,
+                      R10, R10_H,
+                      R11, R11_H,
+                      R14, R14_H);
+
+
 // Class for all long registers (excluding RSP)
 reg_class long_reg_with_rbp(RAX, RAX_H,
                             RDX, RDX_H,
@@ -566,7 +578,7 @@
 // it does if the polling page is more than disp32 away.
 bool SafePointNode::needs_polling_address_input()
 {
-  return Assembler::is_polling_page_far();
+  return SafepointMechanism::uses_thread_local_poll() || Assembler::is_polling_page_far();
 }
 
 //
@@ -938,7 +950,11 @@
   st->print_cr("popq   rbp");
   if (do_polling() && C->is_method_compilation()) {
     st->print("\t");
-    if (Assembler::is_polling_page_far()) {
+    if (SafepointMechanism::uses_thread_local_poll()) {
+      st->print_cr("movq   rscratch1, poll_offset[r15_thread] #polling_page_address\n\t"
+                   "testl  rax, [rscratch1]\t"
+                   "# Safepoint: poll for GC");
+    } else if (Assembler::is_polling_page_far()) {
       st->print_cr("movq   rscratch1, #polling_page_address\n\t"
                    "testl  rax, [rscratch1]\t"
                    "# Safepoint: poll for GC");
@@ -989,13 +1005,19 @@
 
   if (do_polling() && C->is_method_compilation()) {
     MacroAssembler _masm(&cbuf);
-    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
-    if (Assembler::is_polling_page_far()) {
-      __ lea(rscratch1, polling_page);
+    if (SafepointMechanism::uses_thread_local_poll()) {
+      __ movq(rscratch1, Address(r15_thread, Thread::polling_page_offset()));
       __ relocate(relocInfo::poll_return_type);
       __ testl(rax, Address(rscratch1, 0));
     } else {
-      __ testl(rax, polling_page);
+      AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
+      if (Assembler::is_polling_page_far()) {
+        __ lea(rscratch1, polling_page);
+        __ relocate(relocInfo::poll_return_type);
+        __ testl(rax, Address(rscratch1, 0));
+      } else {
+        __ testl(rax, polling_page);
+      }
     }
   }
 }
@@ -3511,6 +3533,16 @@
   interface(REG_INTER);
 %}
 
+operand rex_RegP()
+%{
+  constraint(ALLOC_IN_RC(ptr_rex_reg));
+  match(RegP);
+  match(rRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 operand rRegL()
 %{
   constraint(ALLOC_IN_RC(long_reg));
@@ -12060,7 +12092,7 @@
 // Safepoint Instructions
 instruct safePoint_poll(rFlagsReg cr)
 %{
-  predicate(!Assembler::is_polling_page_far());
+  predicate(!Assembler::is_polling_page_far() && SafepointMechanism::uses_global_page_poll());
   match(SafePoint);
   effect(KILL cr);
 
@@ -12076,7 +12108,7 @@
 
 instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
 %{
-  predicate(Assembler::is_polling_page_far());
+  predicate(Assembler::is_polling_page_far() && SafepointMechanism::uses_global_page_poll());
   match(SafePoint poll);
   effect(KILL cr, USE poll);
 
@@ -12090,6 +12122,26 @@
   ins_pipe(ialu_reg_mem);
 %}
 
+instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll)
+%{
+  predicate(SafepointMechanism::uses_thread_local_poll());
+  match(SafePoint poll);
+  effect(KILL cr, USE poll);
+
+  format %{ "testl  rax, [$poll]\t"
+            "# Safepoint: poll for GC" %}
+  ins_cost(125);
+  size(3); /* setting an explicit size will cause debug builds to assert if size is incorrect */
+  ins_encode %{
+    __ relocate(relocInfo::poll_type);
+    address pre_pc = __ pc();
+    __ testl(rax, Address($poll$$Register, 0));
+    address post_pc = __ pc();
+    guarantee(pre_pc[0] == 0x41 && pre_pc[1] == 0x85, "must emit #rex test-ax [reg]");
+  %}
+  ins_pipe(ialu_reg_mem);
+%}
+
 // ============================================================================
 // Procedure Call/Return Instructions
 // Call Java Static Instruction
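
Note on the hunk above: the size(3) constraint and byte guarantees in safePoint_poll_tls (and the matching guarantee in LIR_Assembler::safepoint_poll) exist so the safepoint handler blob in sharedRuntime_x86_64.cpp can step the trapped return pc forward by exactly the poll's length when the runtime did not install a different continuation. A standalone model of that adjustment (illustrative only; the parameters stand in for the saved exception pc, the copy kept in rbx, and the return slot at [rbp + wordSize]):

    #include <cstdint>

    static const uintptr_t kPollInstructionSize = 3;  // "test %eax, (%r8..%r15)" is always 3 bytes

    // Model of the return-pc fixup in the poll handler blob: if the stashed
    // return slot is unchanged after the VM call, resume just past the poll;
    // otherwise honor whatever continuation the runtime installed.
    static uintptr_t resume_pc(uintptr_t saved_exception_pc,  // pc of the faulting poll
                               uintptr_t stashed_pc,          // copy kept in a callee-saved reg
                               uintptr_t return_slot_now) {   // current value in the return slot
      if (return_slot_now == stashed_pc) {
        return saved_exception_pc + kPollInstructionSize;
      }
      return return_slot_now;
    }
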
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -379,7 +379,7 @@
 
   // Handle safepoint operations, pending suspend requests,
   // and pending asynchronous exceptions.
-  if (SafepointSynchronize::do_call_back() ||
+  if (SafepointMechanism::poll(thread) ||
       thread->has_special_condition_for_native_trans()) {
     JavaThread::check_special_condition_for_native_trans(thread);
     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops());
@@ -511,7 +511,7 @@
   intptr_t *locals = stack->sp();
 
   // Drop into the slow path if we need a safepoint check
-  if (SafepointSynchronize::do_call_back()) {
+  if (SafepointMechanism::poll(THREAD)) {
     return normal_entry(method, 0, THREAD);
   }
 
@@ -643,7 +643,7 @@
   ZeroStack *stack = thread->zero_stack();
 
   // Drop into the slow path if we need a safepoint check
-  if (SafepointSynchronize::do_call_back()) {
+  if (SafepointMechanism::poll(THREAD)) {
     return normal_entry(method, 0, THREAD);
   }
 
--- a/src/hotspot/cpu/zero/globals_zero.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/cpu/zero/globals_zero.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -81,6 +81,8 @@
 // No performance work done here yet.
 define_pd_global(bool, CompactStrings, false);
 
+define_pd_global(bool, ThreadLocalHandshakes, false);
+
 #define ARCH_FLAGS(develop, \
                    product, \
                    diagnostic, \
--- a/src/hotspot/os/aix/os_aix.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/aix/os_aix.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -3477,75 +3477,6 @@
     LoadedLibraries::print(tty);
   }
 
-  const int page_size = Aix::page_size();
-  const int map_size = page_size;
-
-  address map_address = (address) MAP_FAILED;
-  const int prot  = PROT_READ;
-  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
-
-  // Use optimized addresses for the polling page,
-  // e.g. map it to a special 32-bit address.
-  if (OptimizePollingPageLocation) {
-    // architecture-specific list of address wishes:
-    address address_wishes[] = {
-      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
-      // PPC64: all address wishes are non-negative 32 bit values where
-      // the lower 16 bits are all zero. we can load these addresses
-      // with a single ppc_lis instruction.
-      (address) 0x30000000, (address) 0x31000000,
-      (address) 0x32000000, (address) 0x33000000,
-      (address) 0x40000000, (address) 0x41000000,
-      (address) 0x42000000, (address) 0x43000000,
-      (address) 0x50000000, (address) 0x51000000,
-      (address) 0x52000000, (address) 0x53000000,
-      (address) 0x60000000, (address) 0x61000000,
-      (address) 0x62000000, (address) 0x63000000
-    };
-    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
-
-    // iterate over the list of address wishes:
-    for (int i=0; i<address_wishes_length; i++) {
-      // Try to map with current address wish.
-      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
-      // fail if the address is already mapped.
-      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
-                                     map_size, prot,
-                                     flags | MAP_FIXED,
-                                     -1, 0);
-      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
-                   address_wishes[i], map_address + (ssize_t)page_size);
-
-      if (map_address + (ssize_t)page_size == address_wishes[i]) {
-        // Map succeeded and map_address is at wished address, exit loop.
-        break;
-      }
-
-      if (map_address != (address) MAP_FAILED) {
-        // Map succeeded, but polling_page is not at wished address, unmap and continue.
-        ::munmap(map_address, map_size);
-        map_address = (address) MAP_FAILED;
-      }
-      // Map failed, continue loop.
-    }
-  } // end OptimizePollingPageLocation
-
-  if (map_address == (address) MAP_FAILED) {
-    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
-  }
-  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
-  os::set_polling_page(map_address);
-
-  if (!UseMembar) {
-    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
-    os::set_memory_serialize_page(mem_serialize_page);
-
-    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
-        mem_serialize_page, mem_serialize_page + Aix::page_size(),
-        Aix::page_size(), Aix::page_size());
-  }
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -3614,6 +3545,14 @@
 };
 
 int os::active_processor_count() {
+  // User has overridden the number of active processors
+  if (ActiveProcessorCount > 0) {
+    log_trace(os)("active_processor_count: "
+                  "active processor count set by user : %d",
+                  ActiveProcessorCount);
+    return ActiveProcessorCount;
+  }
+
   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
   return online_cpus;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/aix/safepointMechanism_aix.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepointMechanism.hpp"
+#include <sys/mman.h>
+
+void SafepointMechanism::pd_initialize() {
+  char* map_address = (char*)MAP_FAILED;
+  const size_t page_size = os::vm_page_size();
+  // Use optimized addresses for the polling page,
+  // e.g. map it to a special 32-bit address.
+  if (OptimizePollingPageLocation) {
+    // architecture-specific list of address wishes:
+    char* address_wishes[] = {
+        // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
+        // PPC64: all address wishes are non-negative 32 bit values where
+        // the lower 16 bits are all zero. we can load these addresses
+        // with a single ppc_lis instruction.
+        (char*) 0x30000000, (char*) 0x31000000,
+        (char*) 0x32000000, (char*) 0x33000000,
+        (char*) 0x40000000, (char*) 0x41000000,
+        (char*) 0x42000000, (char*) 0x43000000,
+        (char*) 0x50000000, (char*) 0x51000000,
+        (char*) 0x52000000, (char*) 0x53000000,
+        (char*) 0x60000000, (char*) 0x61000000,
+        (char*) 0x62000000, (char*) 0x63000000
+    };
+    int address_wishes_length = sizeof(address_wishes)/sizeof(char*);
+
+    // iterate over the list of address wishes:
+    for (int i = 0; i < address_wishes_length; i++) {
+      // Try to map with current address wish.
+      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
+      // fail if the address is already mapped.
+      map_address = (char*) ::mmap(address_wishes[i] - (ssize_t)page_size,
+                                   page_size, PROT_READ,
+                                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                                   -1, 0);
+      log_debug(os)("SafePoint Polling  Page address: %p (wish) => %p",
+                    address_wishes[i], map_address + (ssize_t)page_size);
+
+      if (map_address + (ssize_t)page_size == address_wishes[i]) {
+        // Map succeeded and map_address is at wished address, exit loop.
+        break;
+      }
+
+      if (map_address != (char*)MAP_FAILED) {
+        // Map succeeded, but polling_page is not at wished address, unmap and continue.
+        ::munmap(map_address, page_size);
+        map_address = (char*)MAP_FAILED;
+      }
+      // Map failed, continue loop.
+    }
+  }
+  if (map_address == (char*)MAP_FAILED) {
+    map_address = os::reserve_memory(page_size, NULL, page_size);
+  }
+  guarantee(map_address != (char*)MAP_FAILED, "SafepointMechanism::pd_initialize: failed to allocate polling page");
+  os::set_polling_page((address)(map_address));
+}
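
The wish-address loop above is easier to follow in isolation. Below is a minimal standalone sketch (not part of this changeset) of the same pattern: try mmap at a preferred address with MAP_FIXED, keep the mapping only if it landed exactly there, otherwise discard it, and finally fall back to letting the kernel choose. The page-below-the-wish adjustment used by the HotSpot code is simplified away, and the wish addresses are purely illustrative.

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int main() {
      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      // Illustrative wish addresses only. Note: on Linux MAP_FIXED may replace
      // existing mappings; the AIX code relies on mmap failing instead.
      char* wishes[] = { (char*)0x30000000, (char*)0x40000000 };
      char* mapped = (char*)MAP_FAILED;

      for (size_t i = 0; i < sizeof(wishes) / sizeof(wishes[0]); i++) {
        mapped = (char*)mmap(wishes[i], page, PROT_READ,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        if (mapped == wishes[i]) break;       // got the wished-for address
        if (mapped != (char*)MAP_FAILED) {    // mapped, but elsewhere: discard
          munmap(mapped, page);
          mapped = (char*)MAP_FAILED;
        }
      }
      if (mapped == (char*)MAP_FAILED) {      // fall back: let the kernel choose
        mapped = (char*)mmap(NULL, page, PROT_READ,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      }
      printf("polling page at %p\n", (void*)mapped);
      return mapped == (char*)MAP_FAILED ? 1 : 0;
    }
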
--- a/src/hotspot/os/bsd/os_bsd.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -3391,20 +3391,6 @@
 
   os::Posix::init_2();
 
-  // Allocate a single page and mark it as readable for safepoint polling
-  address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-  guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
-
-  os::set_polling_page(polling_page);
-  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
-
-  if (!UseMembar) {
-    address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
-    os::set_memory_serialize_page(mem_serialize_page);
-    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
-  }
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -3491,6 +3477,14 @@
 }
 
 int os::active_processor_count() {
+  // User has overridden the number of active processors
+  if (ActiveProcessorCount > 0) {
+    log_trace(os)("active_processor_count: "
+                  "active processor count set by user : %d",
+                  ActiveProcessorCount);
+    return ActiveProcessorCount;
+  }
+
   return _processor_count;
 }
 
--- a/src/hotspot/os/linux/globals_linux.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/linux/globals_linux.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -59,6 +59,9 @@
   product(bool, UseSHM, false,                                          \
           "Use SYSV shared memory for large pages")                     \
                                                                         \
+  product(bool, UseContainerSupport, true,                              \
+          "Enable detection and runtime container configuration support") \
+                                                                        \
   diagnostic(bool, UseCpuAllocPath, false,                              \
              "Use CPU_ALLOC code path in os::active_processor_count ")
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/osContainer_linux.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <string.h>
+#include <math.h>
+#include <errno.h>
+#include "utilities/globalDefinitions.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/os.hpp"
+#include "logging/log.hpp"
+#include "osContainer_linux.hpp"
+
+/*
+ * Warning: Some Linux distros use 0x7FFFFFFFFFFFF000
+ * and others use 0x7FFFFFFFFFFFFFFF for unlimited.
+ */
+#define UNLIMITED_MEM CONST64(0x7FFFFFFFFFFFF000)
+
+#define PER_CPU_SHARES 1024
+
+bool  OSContainer::_is_initialized   = false;
+bool  OSContainer::_is_containerized = false;
+
+class CgroupSubsystem: CHeapObj<mtInternal> {
+ friend class OSContainer;
+
+ private:
+    /* mountinfo contents */
+    char *_root;
+    char *_mount_point;
+
+    /* Constructed subsystem directory */
+    char *_path;
+
+ public:
+    CgroupSubsystem(char *root, char *mountpoint) {
+      _root = os::strdup(root);
+      _mount_point = os::strdup(mountpoint);
+      _path = NULL;
+    }
+
+    /*
+     * Set directory to subsystem specific files based
+     * on the contents of the mountinfo and cgroup files.
+     */
+    void set_subsystem_path(char *cgroup_path) {
+      char buf[MAXPATHLEN+1];
+      if (_root != NULL && cgroup_path != NULL) {
+        if (strcmp(_root, "/") == 0) {
+          int buflen;
+          strncpy(buf, _mount_point, MAXPATHLEN);
+          buf[MAXPATHLEN-1] = '\0';
+          if (strcmp(cgroup_path,"/") != 0) {
+            buflen = strlen(buf);
+            if ((buflen + strlen(cgroup_path)) > (MAXPATHLEN-1)) {
+              return;
+            }
+            strncat(buf, cgroup_path, MAXPATHLEN-buflen);
+            buf[MAXPATHLEN-1] = '\0';
+          }
+          _path = os::strdup(buf);
+        } else {
+          if (strcmp(_root, cgroup_path) == 0) {
+            strncpy(buf, _mount_point, MAXPATHLEN);
+            buf[MAXPATHLEN-1] = '\0';
+            _path = os::strdup(buf);
+          } else {
+            char *p = strstr(_root, cgroup_path);
+            if (p != NULL && p == _root) {
+              if (strlen(cgroup_path) > strlen(_root)) {
+                int buflen;
+                strncpy(buf, _mount_point, MAXPATHLEN);
+                buf[MAXPATHLEN-1] = '\0';
+                buflen = strlen(buf);
+                if ((buflen + strlen(cgroup_path)) > (MAXPATHLEN-1)) {
+                  return;
+                }
+                strncat(buf, cgroup_path + strlen(_root), MAXPATHLEN-buflen);
+                buf[MAXPATHLEN-1] = '\0';
+                _path = os::strdup(buf);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    char *subsystem_path() { return _path; }
+};
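
To illustrate what set_subsystem_path() computes, here is a simplified standalone sketch (std::string instead of the fixed buffers above, corner cases omitted) of the two common situations: on a host the cgroup path from /proc/self/cgroup is appended to the mount point, while in a container the mount root already equals the cgroup path and the mount point is used as-is.

    #include <iostream>
    #include <string>

    // Simplified model of CgroupSubsystem::set_subsystem_path().
    static std::string subsystem_dir(const std::string& root,
                                     const std::string& mount_point,
                                     const std::string& cgroup_path) {
      if (root == "/") {                  // host: append the cgroup path
        return cgroup_path == "/" ? mount_point : mount_point + cgroup_path;
      }
      if (root == cgroup_path) {          // container: paths coincide
        return mount_point;
      }
      return "";                          // remaining cases omitted in this sketch
    }

    int main() {
      // Host:   mountinfo root "/", cgroup line "5:memory:/user.slice"
      std::cout << subsystem_dir("/", "/sys/fs/cgroup/memory", "/user.slice") << "\n";
      // Docker: mountinfo root and cgroup path are both "/docker/<id>"
      std::cout << subsystem_dir("/docker/<id>", "/sys/fs/cgroup/memory", "/docker/<id>") << "\n";
      // Prints /sys/fs/cgroup/memory/user.slice and /sys/fs/cgroup/memory.
    }
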
+
+CgroupSubsystem* memory = NULL;
+CgroupSubsystem* cpuset = NULL;
+CgroupSubsystem* cpu = NULL;
+CgroupSubsystem* cpuacct = NULL;
+
+typedef char * cptr;
+
+PRAGMA_DIAG_PUSH
+PRAGMA_FORMAT_NONLITERAL_IGNORED
+template <typename T> int subsystem_file_contents(CgroupSubsystem* c,
+                                              const char *filename,
+                                              const char *scan_fmt,
+                                              T returnval) {
+  FILE *fp = NULL;
+  char *p;
+  char file[MAXPATHLEN+1];
+  char buf[MAXPATHLEN+1];
+
+  if (c != NULL && c->subsystem_path() != NULL) {
+    strncpy(file, c->subsystem_path(), MAXPATHLEN);
+    file[MAXPATHLEN-1] = '\0';
+    int filelen = strlen(file);
+    if ((filelen + strlen(filename)) > (MAXPATHLEN-1)) {
+       log_debug(os, container)("File path too long %s, %s", file, filename);
+       return OSCONTAINER_ERROR;
+    }
+    strncat(file, filename, MAXPATHLEN-filelen);
+    log_trace(os, container)("Path to %s is %s", filename, file);
+    fp = fopen(file, "r");
+    if (fp != NULL) {
+      p = fgets(buf, MAXPATHLEN, fp);
+      if (p != NULL) {
+        int matched = sscanf(p, scan_fmt, returnval);
+        if (matched == 1) {
+          fclose(fp);
+          return 0;
+        } else {
+          log_debug(os, container)("Type %s not found in file %s",
+                                     scan_fmt , file);
+        }
+      } else {
+        log_debug(os, container)("Empty file %s", file);
+      }
+    } else {
+      log_debug(os, container)("Open of file %s failed, %s", file,
+                               os::strerror(errno));
+    }
+  }
+  if (fp != NULL)
+    fclose(fp);
+  return OSCONTAINER_ERROR;
+}
+PRAGMA_DIAG_POP
+
+#define GET_CONTAINER_INFO(return_type, subsystem, filename,              \
+                           logstring, scan_fmt, variable)                 \
+  return_type variable;                                                   \
+{                                                                         \
+  int err;                                                                \
+  err = subsystem_file_contents(subsystem,                                \
+                                filename,                                 \
+                                scan_fmt,                                 \
+                                &variable);                               \
+  if (err != 0)                                                           \
+    return (return_type) OSCONTAINER_ERROR;                               \
+                                                                          \
+  log_trace(os, container)(logstring, variable);                          \
+}
+
+#define GET_CONTAINER_INFO_CPTR(return_type, subsystem, filename,         \
+                               logstring, scan_fmt, variable, bufsize)    \
+  char variable[bufsize];                                                 \
+{                                                                         \
+  int err;                                                                \
+  err = subsystem_file_contents(subsystem,                                \
+                                filename,                                 \
+                                scan_fmt,                                 \
+                                variable);                                \
+  if (err != 0)                                                           \
+    return (return_type) NULL;                                            \
+                                                                          \
+  log_trace(os, container)(logstring, variable);                          \
+}
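
For reference, a GET_CONTAINER_INFO use such as the one in memory_limit_in_bytes() below expands to roughly the following; this is shown only as an illustration of the macro, not as additional code in the file.

    // GET_CONTAINER_INFO(jlong, memory, "/memory.limit_in_bytes",
    //                    "Memory Limit is: " JLONG_FORMAT, JLONG_FORMAT, memlimit);
    // becomes:
    jlong memlimit;
    {
      int err;
      err = subsystem_file_contents(memory, "/memory.limit_in_bytes",
                                    JLONG_FORMAT, &memlimit);
      if (err != 0)
        return (jlong) OSCONTAINER_ERROR;
      log_trace(os, container)("Memory Limit is: " JLONG_FORMAT, memlimit);
    }
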
+
+/* init
+ *
+ * Initialize the container support and determine if
+ * we are running under cgroup control.
+ */
+void OSContainer::init() {
+  int mountid;
+  int parentid;
+  int major;
+  int minor;
+  FILE *mntinfo = NULL;
+  FILE *cgroup = NULL;
+  char buf[MAXPATHLEN+1];
+  char tmproot[MAXPATHLEN+1];
+  char tmpmount[MAXPATHLEN+1];
+  char tmpbase[MAXPATHLEN+1];
+  char *p;
+  jlong mem_limit;
+
+  assert(!_is_initialized, "Initializing OSContainer more than once");
+
+  _is_initialized = true;
+  _is_containerized = false;
+
+  log_trace(os, container)("OSContainer::init: Initializing Container Support");
+  if (!UseContainerSupport) {
+    log_trace(os, container)("Container Support not enabled");
+    return;
+  }
+
+  /*
+   * Find the cgroup mount point for memory and cpuset
+   * by reading /proc/self/mountinfo
+   *
+   * Example for docker:
+   * 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
+   *
+   * Example for host:
+   * 34 28 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory
+   */
+  mntinfo = fopen("/proc/self/mountinfo", "r");
+  if (mntinfo == NULL) {
+      log_debug(os, container)("Can't open /proc/self/mountinfo, %s",
+                               os::strerror(errno));
+      return;
+  }
+
+  while ( (p = fgets(buf, MAXPATHLEN, mntinfo)) != NULL) {
+    // Look for the filesystem type and see if it's cgroup
+    char fstype[MAXPATHLEN+1];
+    fstype[0] = '\0';
+    char *s =  strstr(p, " - ");
+    if (s != NULL &&
+        sscanf(s, " - %s", fstype) == 1 &&
+        strcmp(fstype, "cgroup") == 0) {
+
+      if (strstr(p, "memory") != NULL) {
+        int matched = sscanf(p, "%d %d %d:%d %s %s",
+                             &mountid,
+                             &parentid,
+                             &major,
+                             &minor,
+                             tmproot,
+                             tmpmount);
+        if (matched == 6) {
+          memory = new CgroupSubsystem(tmproot, tmpmount);
+        }
+        else {
+          log_debug(os, container)("Incompatible str containing cgroup and memory: %s", p);
+        }
+      } else if (strstr(p, "cpuset") != NULL) {
+        int matched = sscanf(p, "%d %d %d:%d %s %s",
+                             &mountid,
+                             &parentid,
+                             &major,
+                             &minor,
+                             tmproot,
+                             tmpmount);
+        if (matched == 6) {
+          cpuset = new CgroupSubsystem(tmproot, tmpmount);
+        }
+        else {
+          log_debug(os, container)("Incompatible str containing cgroup and cpuset: %s", p);
+        }
+      } else if (strstr(p, "cpu,cpuacct") != NULL) {
+        int matched = sscanf(p, "%d %d %d:%d %s %s",
+                             &mountid,
+                             &parentid,
+                             &major,
+                             &minor,
+                             tmproot,
+                             tmpmount);
+        if (matched == 6) {
+          cpu = new CgroupSubsystem(tmproot, tmpmount);
+          cpuacct = new CgroupSubsystem(tmproot, tmpmount);
+        }
+        else {
+          log_debug(os, container)("Incompatible str containing cgroup and cpu,cpuacct: %s", p);
+        }
+      } else if (strstr(p, "cpuacct") != NULL) {
+        int matched = sscanf(p, "%d %d %d:%d %s %s",
+                             &mountid,
+                             &parentid,
+                             &major,
+                             &minor,
+                             tmproot,
+                             tmpmount);
+        if (matched == 6) {
+          cpuacct = new CgroupSubsystem(tmproot, tmpmount);
+        }
+        else {
+          log_debug(os, container)("Incompatible str containing cgroup and cpuacct: %s", p);
+        }
+      } else if (strstr(p, "cpu") != NULL) {
+        int matched = sscanf(p, "%d %d %d:%d %s %s",
+                             &mountid,
+                             &parentid,
+                             &major,
+                             &minor,
+                             tmproot,
+                             tmpmount);
+        if (matched == 6) {
+          cpu = new CgroupSubsystem(tmproot, tmpmount);
+        }
+        else {
+          log_debug(os, container)("Incompatible str containing cgroup and cpu: %s", p);
+        }
+      }
+    }
+  }
+
+  if (mntinfo != NULL) fclose(mntinfo);
+
+  /*
+   * Read /proc/self/cgroup and map host mount point to
+   * local one via /proc/self/mountinfo content above
+   *
+   * Docker example:
+   * 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044
+   *
+   * Host example:
+   * 5:memory:/user.slice
+   *
+   * Construct a path to the process specific memory and cpuset
+   * cgroup directory.
+   *
+   * For a container running under Docker from memory example above
+   * the paths would be:
+   *
+   * /sys/fs/cgroup/memory
+   *
+   * For a Host from memory example above the path would be:
+   *
+   * /sys/fs/cgroup/memory/user.slice
+   *
+   */
+  cgroup = fopen("/proc/self/cgroup", "r");
+  if (cgroup == NULL) {
+    log_debug(os, container)("Can't open /proc/self/cgroup, %s",
+                             os::strerror(errno));
+    return;
+  }
+
+  while ( (p = fgets(buf, MAXPATHLEN, cgroup)) != NULL) {
+    int cgno;
+    int matched;
+    char *controller;
+    char *base;
+
+    /* Skip cgroup number */
+    strsep(&p, ":");
+    /* Get controller and base */
+    controller = strsep(&p, ":");
+    base = strsep(&p, "\n");
+
+    if (controller != NULL) {
+      if (strstr(controller, "memory") != NULL) {
+        memory->set_subsystem_path(base);
+      } else if (strstr(controller, "cpuset") != NULL) {
+        cpuset->set_subsystem_path(base);
+      } else if (strstr(controller, "cpu,cpuacct") != NULL) {
+        cpu->set_subsystem_path(base);
+        cpuacct->set_subsystem_path(base);
+      } else if (strstr(controller, "cpuacct") != NULL) {
+        cpuacct->set_subsystem_path(base);
+      } else if (strstr(controller, "cpu") != NULL) {
+        cpu->set_subsystem_path(base);
+      }
+    }
+  }
+
+  if (cgroup != NULL) fclose(cgroup);
+
+  if (memory == NULL || cpuset == NULL || cpu == NULL) {
+    log_debug(os, container)("Required cgroup subsystems not found");
+    return;
+  }
+
+  // We need to update the amount of physical memory now that
+  // command line arguments have been processed.
+  if ((mem_limit = memory_limit_in_bytes()) > 0) {
+    os::Linux::set_physical_memory(mem_limit);
+  }
+
+  _is_containerized = true;
+
+}
+
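
To make the mountinfo parsing in init() concrete, this small standalone sketch runs the same "%d %d %d:%d %s %s" scan against the Docker example line quoted in the comment above; the six fields are the mount id, parent id, device major:minor, the cgroup root and the local mount point. Buffer sizes are illustrative.

    #include <cstdio>

    int main() {
      const char* line =
        "219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34"
        " /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory";
      int mountid, parentid, major, minor;
      char root[4096], mount[4096];
      if (sscanf(line, "%d %d %d:%d %s %s",
                 &mountid, &parentid, &major, &minor, root, mount) == 6) {
        printf("root:  %s\n", root);   // /docker/7208ce...be8a34
        printf("mount: %s\n", mount);  // /sys/fs/cgroup/memory
      }
      return 0;
    }
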
+char * OSContainer::container_type() {
+  if (is_containerized()) {
+    return (char *)"cgroupv1";
+  } else {
+    return NULL;
+  }
+}
+
+
+/* memory_limit_in_bytes
+ *
+ * Return the limit of available memory for this process.
+ *
+ * return:
+ *    memory limit in bytes or
+ *    -1 for unlimited
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong OSContainer::memory_limit_in_bytes() {
+  GET_CONTAINER_INFO(jlong, memory, "/memory.limit_in_bytes",
+                     "Memory Limit is: " JLONG_FORMAT, JLONG_FORMAT, memlimit);
+
+  if (memlimit >= UNLIMITED_MEM) {
+    log_trace(os, container)("Memory Limit is: Unlimited");
+    return (jlong)-1;
+  }
+  else {
+    return memlimit;
+  }
+}
+
+jlong OSContainer::memory_and_swap_limit_in_bytes() {
+  GET_CONTAINER_INFO(jlong, memory, "/memory.memsw.limit_in_bytes",
+                     "Memory and Swap Limit is: " JLONG_FORMAT, JLONG_FORMAT, memswlimit);
+  if (memswlimit >= UNLIMITED_MEM) {
+    log_trace(os, container)("Memory and Swap Limit is: Unlimited");
+    return (jlong)-1;
+  } else {
+    return memswlimit;
+  }
+}
+
+jlong OSContainer::memory_soft_limit_in_bytes() {
+  GET_CONTAINER_INFO(jlong, memory, "/memory.soft_limit_in_bytes",
+                     "Memory Soft Limit is: " JLONG_FORMAT, JLONG_FORMAT, memsoftlimit);
+  if (memsoftlimit >= UNLIMITED_MEM) {
+    log_trace(os, container)("Memory Soft Limit is: Unlimited");
+    return (jlong)-1;
+  } else {
+    return memsoftlimit;
+  }
+}
+
+/* memory_usage_in_bytes
+ *
+ * Return the amount of used memory for this process.
+ *
+ * return:
+ *    memory usage in bytes or
+ *    -1 for unlimited
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong OSContainer::memory_usage_in_bytes() {
+  GET_CONTAINER_INFO(jlong, memory, "/memory.usage_in_bytes",
+                     "Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memusage);
+  return memusage;
+}
+
+/* memory_max_usage_in_bytes
+ *
+ * Return the maximum amount of used memory for this process.
+ *
+ * return:
+ *    max memory usage in bytes or
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong OSContainer::memory_max_usage_in_bytes() {
+  GET_CONTAINER_INFO(jlong, memory, "/memory.max_usage_in_bytes",
+                     "Maximum Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memmaxusage);
+  return memmaxusage;
+}
+
+/* active_processor_count
+ *
+ * Calculate an appropriate number of active processors for the
+ * VM to use based on these three cgroup options.
+ *
+ * cpu affinity
+ * cpu quota & cpu period
+ * cpu shares
+ *
+ * Algorithm:
+ *
+ * Determine the number of available CPUs from sched_getaffinity
+ *
+ * If user specified a quota (quota != -1), calculate the number of
+ * required CPUs by dividing quota by period.
+ *
+ * If shares are in effect (shares != -1), calculate the number
+ * of cpus required for the shares by dividing the share value
+ * by PER_CPU_SHARES.
+ *
+ * All results of division are rounded up to the next whole number.
+ *
+ * Return the smaller number from the three different settings.
+ *
+ * return:
+ *    number of cpus
+ *    OSCONTAINER_ERROR if a failure occurred during extraction of cpuset info
+ */
+int OSContainer::active_processor_count() {
+  int cpu_count, share_count, quota_count;
+  int share, quota, period;
+  int result;
+
+  cpu_count = os::Linux::active_processor_count();
+
+  share = cpu_shares();
+  if (share > -1) {
+    share_count = ceilf((float)share / (float)PER_CPU_SHARES);
+    log_trace(os, container)("cpu_share count: %d", share_count);
+  } else {
+    share_count = cpu_count;
+  }
+
+  quota = cpu_quota();
+  period = cpu_period();
+  if (quota > -1 && period > 0) {
+    quota_count = ceilf((float)quota / (float)period);
+    log_trace(os, container)("quota_count: %d", quota_count);
+  } else {
+    quota_count = cpu_count;
+  }
+
+  result = MIN2(cpu_count, MIN2(share_count, quota_count));
+  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+  return result;
+}
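
A worked example of the computation above, with purely illustrative values: 8 CPUs visible to sched_getaffinity, cpu.cfs_quota_us=150000 with cpu.cfs_period_us=100000, and cpu.shares=4096. The quota allows ceil(150000/100000) = 2 CPUs, the shares allow ceil(4096/1024) = 4, and the minimum of (8, 4, 2) is 2.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
      int cpu_count = 8;                    // from sched_getaffinity (illustrative)
      int quota = 150000, period = 100000;  // cpu.cfs_quota_us / cpu.cfs_period_us
      int share = 4096;                     // cpu.shares, PER_CPU_SHARES = 1024

      int quota_count = (int)std::ceil((double)quota / (double)period);  // 2
      int share_count = (int)std::ceil((double)share / 1024.0);          // 4
      int result = std::min(cpu_count, std::min(share_count, quota_count));
      printf("active processors: %d\n", result);                         // 2
      return 0;
    }
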
+
+char * OSContainer::cpu_cpuset_cpus() {
+  GET_CONTAINER_INFO_CPTR(cptr, cpuset, "/cpuset.cpus",
+                     "cpuset.cpus is: %s", "%1023s", cpus, 1024);
+  return os::strdup(cpus);
+}
+
+char * OSContainer::cpu_cpuset_memory_nodes() {
+  GET_CONTAINER_INFO_CPTR(cptr, cpuset, "/cpuset.mems",
+                     "cpuset.mems is: %s", "%1023s", mems, 1024);
+  return os::strdup(mems);
+}
+
+/* cpu_quota
+ *
+ * Return the number of microseconds per period
+ * the process is guaranteed to run.
+ *
+ * return:
+ *    quota time in microseconds
+ *    -1 for no quota
+ *    OSCONTAINER_ERROR for not supported
+ */
+int OSContainer::cpu_quota() {
+  GET_CONTAINER_INFO(int, cpu, "/cpu.cfs_quota_us",
+                     "CPU Quota is: %d", "%d", quota);
+  return quota;
+}
+
+int OSContainer::cpu_period() {
+  GET_CONTAINER_INFO(int, cpu, "/cpu.cfs_period_us",
+                     "CPU Period is: %d", "%d", period);
+  return period;
+}
+
+/* cpu_shares
+ *
+ * Return the amount of cpu shares available to the process
+ *
+ * return:
+ *    Share number (typically a number relative to 1024)
+ *                 (2048 typically expresses 2 CPUs worth of processing)
+ *    -1 for no share setup
+ *    OSCONTAINER_ERROR for not supported
+ */
+int OSContainer::cpu_shares() {
+  GET_CONTAINER_INFO(int, cpu, "/cpu.shares",
+                     "CPU Shares is: %d", "%d", shares);
+  // Convert 1024 to no shares setup
+  if (shares == 1024) return -1;
+
+  return shares;
+}
+
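
The return convention used throughout this file (> 0 is a real limit, -1 means unlimited, OSCONTAINER_ERROR = -2 means the value could not be read) is what the callers in os_linux.cpp further down rely on. A standalone sketch, with illustrative values, of how such a caller interprets it:

    #include <cstdio>

    // Models how os::physical_memory() (see the os_linux.cpp hunk below) picks
    // between the container limit and the host value.
    long effective_memory_limit(long container_limit, long host_physical_memory) {
      if (container_limit > 0) {
        return container_limit;            // container-imposed limit wins
      }
      if (container_limit == -2) {         // OSCONTAINER_ERROR
        printf("container memory limit call failed\n");
      }
      // -1 (unlimited) or error: fall back to the host value
      return host_physical_memory;
    }

    int main() {
      long host = 8L * 1024 * 1024 * 1024;                                // 8 GB host
      printf("%ld\n", effective_memory_limit(512L * 1024 * 1024, host));  // 512 MB
      printf("%ld\n", effective_memory_limit(-1, host));                  // 8 GB
      return 0;
    }
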
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/osContainer_linux.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_LINUX_VM_OSCONTAINER_LINUX_HPP
+#define OS_LINUX_VM_OSCONTAINER_LINUX_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "memory/allocation.hpp"
+
+#define OSCONTAINER_ERROR (-2)
+
+class OSContainer: AllStatic {
+
+ private:
+  static bool   _is_initialized;
+  static bool   _is_containerized;
+
+ public:
+  static void init();
+  static inline bool is_containerized();
+  static char * container_type();
+
+  static jlong memory_limit_in_bytes();
+  static jlong memory_and_swap_limit_in_bytes();
+  static jlong memory_soft_limit_in_bytes();
+  static jlong memory_usage_in_bytes();
+  static jlong memory_max_usage_in_bytes();
+
+  static int active_processor_count();
+
+  static char * cpu_cpuset_cpus();
+  static char * cpu_cpuset_memory_nodes();
+
+  static int cpu_quota();
+  static int cpu_period();
+
+  static int cpu_shares();
+
+};
+
+inline bool OSContainer::is_containerized() {
+  assert(_is_initialized, "OSContainer not initialized");
+  return _is_containerized;
+}
+
+#endif // OS_LINUX_VM_OSCONTAINER_LINUX_HPP
--- a/src/hotspot/os/linux/os_linux.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/linux/os_linux.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -38,6 +38,7 @@
 #include "oops/oop.inline.hpp"
 #include "os_linux.inline.hpp"
 #include "os_share_linux.hpp"
+#include "osContainer_linux.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
@@ -171,13 +172,52 @@
 julong os::Linux::available_memory() {
   // values in struct sysinfo are "unsigned long"
   struct sysinfo si;
+  julong avail_mem;
+
+  if (OSContainer::is_containerized()) {
+    jlong mem_limit, mem_usage;
+    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
+      if ((mem_usage = OSContainer::memory_usage_in_bytes()) > 0) {
+        if (mem_limit > mem_usage) {
+          avail_mem = (julong)mem_limit - (julong)mem_usage;
+        } else {
+          avail_mem = 0;
+        }
+        log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
+        return avail_mem;
+      } else {
+        log_debug(os,container)("container memory usage call failed: " JLONG_FORMAT, mem_usage);
+      }
+    } else {
+      log_debug(os,container)("container memory unlimited or failed: " JLONG_FORMAT, mem_limit);
+    }
+  }
+
   sysinfo(&si);
-
-  return (julong)si.freeram * si.mem_unit;
+  avail_mem = (julong)si.freeram * si.mem_unit;
+  log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
+  return avail_mem;
 }
 
 julong os::physical_memory() {
-  return Linux::physical_memory();
+  if (OSContainer::is_containerized()) {
+    jlong mem_limit;
+    if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
+      log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
+      return (julong)mem_limit;
+    } else {
+      if (mem_limit == OSCONTAINER_ERROR) {
+        log_debug(os,container)("container memory limit call failed");
+      }
+      if (mem_limit == -1) {
+        log_debug(os,container)("container memory unlimited, using host value");
+      }
+    }
+  }
+
+  jlong phys_mem = Linux::physical_memory();
+  log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
+  return phys_mem;
 }
 
 // Return true if user is running as root.
@@ -1950,6 +1990,8 @@
   os::Posix::print_load_average(st);
 
   os::Linux::print_full_memory_info(st);
+
+  os::Linux::print_container_info(st);
 }
 
 // Try to identify popular distros.
@@ -2087,6 +2129,66 @@
   st->cr();
 }
 
+void os::Linux::print_container_info(outputStream* st) {
+  if (OSContainer::is_containerized()) {
+    st->print("container (cgroup) information:\n");
+
+    char *p = OSContainer::container_type();
+    if (p == NULL)
+      st->print("container_type() failed\n");
+    else {
+      st->print("container_type: %s\n", p);
+    }
+
+    p = OSContainer::cpu_cpuset_cpus();
+    if (p == NULL)
+      st->print("cpu_cpuset_cpus() failed\n");
+    else {
+      st->print("cpu_cpuset_cpus: %s\n", p);
+      free(p);
+    }
+
+    p = OSContainer::cpu_cpuset_memory_nodes();
+    if (p == NULL)
+      st->print("cpu_memory_nodes() failed\n");
+    else {
+      st->print("cpu_memory_nodes: %s\n", p);
+      free(p);
+    }
+
+    int i = OSContainer::active_processor_count();
+    if (i < 0)
+      st->print("active_processor_count() failed\n");
+    else
+      st->print("active_processor_count: %d\n", i);
+
+    i = OSContainer::cpu_quota();
+    st->print("cpu_quota: %d\n", i);
+
+    i = OSContainer::cpu_period();
+    st->print("cpu_period: %d\n", i);
+
+    i = OSContainer::cpu_shares();
+    st->print("cpu_shares: %d\n", i);
+
+    jlong j = OSContainer::memory_limit_in_bytes();
+    st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);
+
+    j = OSContainer::memory_and_swap_limit_in_bytes();
+    st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);
+
+    j = OSContainer::memory_soft_limit_in_bytes();
+    st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);
+
+    j = OSContainer::memory_usage_in_bytes();
+    st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);
+
+    j = OSContainer::memory_max_usage_in_bytes();
+    st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
+    st->cr();
+  }
+}
+
 void os::print_memory_info(outputStream* st) {
 
   st->print("Memory:");
@@ -4798,6 +4900,10 @@
   }
 }
 
+void os::pd_init_container_support() {
+  OSContainer::init();
+}
+
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
 
@@ -4805,20 +4911,6 @@
 
   Linux::fast_thread_clock_init();
 
-  // Allocate a single page and mark it as readable for safepoint polling
-  address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-  guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
-
-  os::set_polling_page(polling_page);
-  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
-
-  if (!UseMembar) {
-    address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-    guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
-    os::set_memory_serialize_page(mem_serialize_page);
-    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
-  }
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -4960,12 +5052,12 @@
 // dynamic check - see 6515172 for details.
 // If anything goes wrong we fallback to returning the number of online
 // processors - which can be greater than the number available to the process.
-int os::active_processor_count() {
+int os::Linux::active_processor_count() {
   cpu_set_t cpus;  // can represent at most 1024 (CPU_SETSIZE) processors
   cpu_set_t* cpus_p = &cpus;
   int cpus_size = sizeof(cpu_set_t);
 
-  int configured_cpus = processor_count();  // upper bound on available cpus
+  int configured_cpus = os::processor_count();  // upper bound on available cpus
   int cpu_count = 0;
 
 // old build platforms may not support dynamic cpu sets
@@ -5028,10 +5120,44 @@
     CPU_FREE(cpus_p);
   }
 
-  assert(cpu_count > 0 && cpu_count <= processor_count(), "sanity check");
+  assert(cpu_count > 0 && cpu_count <= os::processor_count(), "sanity check");
   return cpu_count;
 }
 
+// Determine the active processor count from one of
+// three different sources:
+//
+// 1. User option -XX:ActiveProcessorCount
+// 2. kernel OS calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
+// 3. extracted from cgroup cpu subsystem (shares and quotas)
+//
+// Option 1, if specified, will always override.
+// If the cgroup subsystem is active and configured, we
+// will return the min of the cgroup and option 2 results.
+// This is required since tools, such as numactl, that
+// alter cpu affinity do not update cgroup subsystem
+// cpuset configuration files.
+int os::active_processor_count() {
+  // User has overridden the number of active processors
+  if (ActiveProcessorCount > 0) {
+    log_trace(os)("active_processor_count: "
+                  "active processor count set by user : %d",
+                  ActiveProcessorCount);
+    return ActiveProcessorCount;
+  }
+
+  int active_cpus;
+  if (OSContainer::is_containerized()) {
+    active_cpus = OSContainer::active_processor_count();
+    log_trace(os)("active_processor_count: determined by OSContainer: %d",
+                   active_cpus);
+  } else {
+    active_cpus = os::Linux::active_processor_count();
+  }
+
+  return active_cpus;
+}
+
 void os::set_native_thread_name(const char *name) {
   if (Linux::_pthread_setname_np) {
     char buf [16]; // according to glibc manpage, 16 chars incl. '/0'
--- a/src/hotspot/os/linux/os_linux.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/linux/os_linux.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -32,6 +32,7 @@
 
 class Linux {
   friend class os;
+  friend class OSContainer;
   friend class TestReserveMemorySpecial;
 
   static bool libjsig_is_loaded;        // libjsig that interposes sigaction(),
@@ -75,6 +76,9 @@
 
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
+  static void set_physical_memory(julong phys_mem) { _physical_memory = phys_mem; }
+  static int active_processor_count();
+
   static void initialize_system_info();
 
   static int commit_memory_impl(char* addr, size_t bytes, bool exec);
@@ -106,6 +110,7 @@
   static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);
 
   static void print_full_memory_info(outputStream* st);
+  static void print_container_info(outputStream* st);
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
 
--- a/src/hotspot/os/solaris/os_solaris.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -290,6 +290,14 @@
 }
 
 int os::active_processor_count() {
+  // User has overridden the number of active processors
+  if (ActiveProcessorCount > 0) {
+    log_trace(os)("active_processor_count: "
+                  "active processor count set by user : %d",
+                  ActiveProcessorCount);
+    return ActiveProcessorCount;
+  }
+
   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
   pid_t pid = getpid();
   psetid_t pset = PS_NONE;
@@ -2190,10 +2198,6 @@
 
 static int page_size = -1;
 
-// The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
-// clear this var if support is not available.
-static bool has_map_align = true;
-
 int os::vm_page_size() {
   assert(page_size != -1, "must call os::init");
   return page_size;
@@ -2560,7 +2564,7 @@
 
   if (fixed) {
     flags |= MAP_FIXED;
-  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
+  } else if (alignment_hint > (size_t) vm_page_size()) {
     flags |= MAP_ALIGN;
     addr = (char*) alignment_hint;
   }
@@ -4222,28 +4226,6 @@
   // try to enable extended file IO ASAP, see 6431278
   os::Solaris::try_enable_extended_io();
 
-  // Allocate a single page and mark it as readable for safepoint polling.  Also
-  // use this first mmap call to check support for MAP_ALIGN.
-  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
-                                                      page_size,
-                                                      MAP_PRIVATE | MAP_ALIGN,
-                                                      PROT_READ);
-  if (polling_page == NULL) {
-    has_map_align = false;
-    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
-                                                PROT_READ);
-  }
-
-  os::set_polling_page(polling_page);
-  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
-
-  if (!UseMembar) {
-    address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
-    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
-    os::set_memory_serialize_page(mem_serialize_page);
-    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
-  }
-
   // Check and sets minimum stack sizes against command line options
   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
--- a/src/hotspot/os/windows/os_windows.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os/windows/os_windows.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -723,6 +723,14 @@
 }
 
 int os::active_processor_count() {
+  // User has overridden the number of active processors
+  if (ActiveProcessorCount > 0) {
+    log_trace(os)("active_processor_count: "
+                  "active processor count set by user : %d",
+                  ActiveProcessorCount);
+    return ActiveProcessorCount;
+  }
+
   DWORD_PTR lpProcessAffinityMask = 0;
   DWORD_PTR lpSystemAffinityMask = 0;
   int proc_count = processor_count();
@@ -2487,6 +2495,20 @@
     } // /EXCEPTION_ACCESS_VIOLATION
     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 
+    if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
+      CompiledMethod* nm = NULL;
+      JavaThread* thread = (JavaThread*)t;
+      if (in_java) {
+        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+        nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
+      }
+      if ((thread->thread_state() == _thread_in_vm &&
+          thread->doing_unsafe_access()) ||
+          (nm != NULL && nm->has_unsafe_access())) {
+        return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc)));
+      }
+    }
+
     if (in_java) {
       switch (exception_code) {
       case EXCEPTION_INT_DIVIDE_BY_ZERO:
@@ -3911,27 +3933,6 @@
 
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
-  // Allocate a single page and mark it as readable for safepoint polling
-  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
-  guarantee(polling_page != NULL, "Reserve Failed for polling page");
-
-  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
-  guarantee(return_page != NULL, "Commit Failed for polling page");
-
-  os::set_polling_page(polling_page);
-  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
-
-  if (!UseMembar) {
-    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
-    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
-
-    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
-    guarantee(return_page != NULL, "Commit Failed for memory serialize page");
-
-    os::set_memory_serialize_page(mem_serialize_page);
-    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
-  }
-
   // Setup Windows Exceptions
 
   // for debugging float code generation bugs
--- a/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
   address o_reg_temps(int i) { return (address)&_o_reg_temps[i]; }
 #endif
 
-  static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); }
+  static ByteSize saved_exception_npc_offset() { return byte_offset_of(JavaThread,_saved_exception_npc); }
 
   address  saved_exception_npc()             { return _saved_exception_npc; }
   void set_saved_exception_npc(address a)    { _saved_exception_npc = a; }
--- a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -447,7 +447,7 @@
       // a fault inside compiled code, the interpreter, or a stub
 
       // Support Safepoint Polling
-      if ( sig == SIGSEGV && (address)info->si_addr == os::get_polling_page() ) {
+      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
         stub = SharedRuntime::get_poll_stub(pc);
       }
 
--- a/src/hotspot/os_cpu/solaris_sparc/thread_solaris_sparc.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/os_cpu/solaris_sparc/thread_solaris_sparc.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -63,7 +63,7 @@
 
   static int o_reg_temps_offset_in_bytes() { return offset_of(JavaThread, _o_reg_temps); }
 
-  static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); }
+  static ByteSize saved_exception_npc_offset() { return byte_offset_of(JavaThread,_saved_exception_npc); }
 
   address  saved_exception_npc()             { return _saved_exception_npc; }
   void set_saved_exception_npc(address a)    { _saved_exception_npc = a; }
--- a/src/hotspot/share/ci/ciEnv.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/ci/ciEnv.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -187,10 +187,6 @@
     }
   }
 
-  void ensure_metadata_alive(ciMetadata* m) {
-    _factory->ensure_metadata_alive(m);
-  }
-
   ciInstance* get_instance(oop o) {
     if (o == NULL) return NULL;
     return get_object(o)->as_instance();
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -34,6 +34,9 @@
 #include "oops/oop.inline.hpp"
 #include "oops/fieldStreams.hpp"
 #include "runtime/fieldDescriptor.hpp"
+#if INCLUDE_ALL_GCS
+# include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 // ciInstanceKlass
 //
@@ -41,6 +44,27 @@
 // whose Klass part in an InstanceKlass.
 
 // ------------------------------------------------------------------
+// ensure_metadata_alive
+//
+// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
+// This is primarily useful for metadata which is considered a weak root
+// by the GC but needs to be a strong root if reachable from a current compilation.
+// InstanceKlasses are created for both weak and strong metadata.  Keeping this
+// metadata alive covers the cases where there are weak roots without performance cost.
+//
+static void ensure_metadata_alive(oop metadata_holder) {
+#if INCLUDE_ALL_GCS
+  if (!UseG1GC) {
+    return;
+  }
+  if (metadata_holder != NULL) {
+    G1SATBCardTableModRefBS::enqueue(metadata_holder);
+  }
+#endif
+}
+
+
+// ------------------------------------------------------------------
 // ciInstanceKlass::ciInstanceKlass
 //
 // Loaded instance klass.
@@ -64,6 +88,18 @@
   _has_injected_fields = -1;
   _implementor = NULL; // we will fill these lazily
 
+  oop holder = ik->klass_holder();
+  ensure_metadata_alive(holder);
+  if (ik->is_anonymous()) {
+    // Though ciInstanceKlass records class loader oop, it's not enough to keep
+    // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
+    // It is enough to record a ciObject, since cached elements are never removed
+    // during ciObjectFactory lifetime. ciObjectFactory itself is created for
+    // every compilation and lives for the whole duration of the compilation.
+    assert(holder != NULL, "holder of anonymous class is the mirror which is never null");
+    (void)CURRENT_ENV->get_object(holder);
+  }
+
   Thread *thread = Thread::current();
   if (ciObjectFactory::is_initialized()) {
     _loader = JNIHandles::make_local(thread, ik->class_loader());
--- a/src/hotspot/share/ci/ciMethodData.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/ci/ciMethodData.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -188,7 +188,6 @@
     Klass* k = data->as_ReceiverTypeData()->receiver(row);
     if (k != NULL) {
       ciKlass* klass = CURRENT_ENV->get_klass(k);
-      CURRENT_ENV->ensure_metadata_alive(klass);
       set_receiver(row, klass);
     }
   }
@@ -210,7 +209,6 @@
 void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
   Method* m = data->as_SpeculativeTrapData()->method();
   ciMethod* ci_m = CURRENT_ENV->get_method(m);
-  CURRENT_ENV->ensure_metadata_alive(ci_m);
   set_method(ci_m);
 }
 
--- a/src/hotspot/share/ci/ciMethodData.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/ci/ciMethodData.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,6 @@
     Klass* v = TypeEntries::valid_klass(k);
     if (v != NULL) {
       ciKlass* klass = CURRENT_ENV->get_klass(v);
-      CURRENT_ENV->ensure_metadata_alive(klass);
       return with_status(klass, k);
     }
     return with_status(NULL, k);
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -47,9 +47,6 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/fieldType.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-# include "gc/g1/g1SATBCardTableModRefBS.hpp"
-#endif
 
 // ciObjectFactory
 //
@@ -363,19 +360,6 @@
 ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
   EXCEPTION_CONTEXT;
 
-  // Hold metadata from unloading by keeping it's holder alive.
-  if (_initialized && o->is_klass()) {
-    Klass* holder = ((Klass*)o);
-    if (holder->is_instance_klass() && InstanceKlass::cast(holder)->is_anonymous()) {
-      // Though ciInstanceKlass records class loader oop, it's not enough to keep
-      // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
-      // It is enough to record a ciObject, since cached elements are never removed
-      // during ciObjectFactory lifetime. ciObjectFactory itself is created for
-      // every compilation and lives for the whole duration of the compilation.
-      ciObject* h = get(holder->klass_holder());
-    }
-  }
-
   if (o->is_klass()) {
     Klass* k = (Klass*)o;
     if (k->is_instance_klass()) {
@@ -401,38 +385,6 @@
   return NULL;
 }
 
-// ------------------------------------------------------------------
-// ciObjectFactory::ensure_metadata_alive
-//
-// Ensure that the metadata wrapped by the ciMetadata is kept alive by GC.
-// This is primarily useful for metadata which is considered as weak roots
-// by the GC but need to be strong roots if reachable from a current compilation.
-//
-void ciObjectFactory::ensure_metadata_alive(ciMetadata* m) {
-  ASSERT_IN_VM; // We're handling raw oops here.
-
-#if INCLUDE_ALL_GCS
-  if (!UseG1GC) {
-    return;
-  }
-  Klass* metadata_owner_klass;
-  if (m->is_klass()) {
-    metadata_owner_klass = m->as_klass()->get_Klass();
-  } else if (m->is_method()) {
-    metadata_owner_klass = m->as_method()->get_Method()->constants()->pool_holder();
-  } else {
-    fatal("Not implemented for other types of metadata");
-    return;
-  }
-
-  oop metadata_holder = metadata_owner_klass->klass_holder();
-  if (metadata_holder != NULL) {
-    G1SATBCardTableModRefBS::enqueue(metadata_holder);
-  }
-
-#endif
-}
-
 //------------------------------------------------------------------
 // ciObjectFactory::get_unloaded_method
 //
--- a/src/hotspot/share/ci/ciObjectFactory.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/ci/ciObjectFactory.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,8 +73,6 @@
   ciObject* create_new_object(oop o);
   ciMetadata* create_new_metadata(Metadata* o);
 
-  void ensure_metadata_alive(ciMetadata* m);
-
   static bool is_equal(NonPermObject* p, oop key) {
     return p->object()->get_oop() == key;
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsArguments.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/cmsArguments.hpp"
+#include "gc/cms/compactibleFreeListSpace.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/vm_version.hpp"
+#include "utilities/defaultStream.hpp"
+
+size_t CMSArguments::conservative_max_heap_alignment() {
+  return GenCollectedHeap::conservative_max_heap_alignment();
+}
+
+void CMSArguments::set_parnew_gc_flags() {
+  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
+         "control point invariant");
+  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
+
+  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
+    FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
+    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
+  } else if (ParallelGCThreads == 0) {
+    jio_fprintf(defaultStream::error_stream(),
+        "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
+    vm_exit(1);
+  }
+
+  // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively;
+  // these are the defaults for the Parallel Scavenger. For the ParNew+Tenured
+  // configuration we set both to 1024.
+  // See CR 6362902.
+  if (FLAG_IS_DEFAULT(YoungPLABSize)) {
+    FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
+  }
+  if (FLAG_IS_DEFAULT(OldPLABSize)) {
+    FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
+  }
+
+  // When using compressed oops, we use local overflow stacks,
+  // rather than using a global overflow list chained through
+  // the klass word of the object's pre-image.
+  if (UseCompressedOops && !ParGCUseLocalOverflow) {
+    if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
+      warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
+    }
+    FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
+  }
+  assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
+}
+
+// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
+// sparc/solaris for certain applications, but would gain from
+// further optimization and tuning efforts, and would almost
+// certainly gain from analysis of platform and environment.
+void CMSArguments::initialize_flags() {
+  GCArguments::initialize_flags();
+  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
+  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
+
+  // Set CMS global values
+  CompactibleFreeListSpace::set_cms_values();
+
+  // Turn off AdaptiveSizePolicy by default for cms until it is complete.
+  disable_adaptive_size_policy("UseConcMarkSweepGC");
+
+  set_parnew_gc_flags();
+
+  size_t max_heap = align_down(MaxHeapSize,
+                               CardTableRS::ct_max_alignment_constraint());
+
+  // Now make adjustments for CMS
+  intx   tenuring_default = (intx)6;
+  size_t young_gen_per_worker = CMSYoungGenPerWorker;
+
+  // Preferred young gen size for "short" pauses:
+  // upper bound depends on # of threads and NewRatio.
+  const size_t preferred_max_new_size_unaligned =
+    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
+  size_t preferred_max_new_size =
+    align_up(preferred_max_new_size_unaligned, os::vm_page_size());
+
+  // Unless explicitly requested otherwise, size young gen
+  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
+
+  // If either MaxNewSize or NewRatio is set on the command line,
+  // assume the user is trying to set the size of the young gen.
+  if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
+
+    // Set MaxNewSize to our calculated preferred_max_new_size unless
+    // NewSize was set on the command line and it is larger than
+    // preferred_max_new_size.
+    if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
+      FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+    } else {
+      FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
+    }
+    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
+
+    // Code along this path potentially sets NewSize and OldSize
+    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size:  " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
+                        Arguments::min_heap_size(), InitialHeapSize, max_heap);
+    size_t min_new = preferred_max_new_size;
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      min_new = NewSize;
+    }
+    if (max_heap > min_new && Arguments::min_heap_size() > min_new) {
+      // Unless explicitly requested otherwise, make young gen
+      // at least min_new, and at most preferred_max_new_size.
+      if (FLAG_IS_DEFAULT(NewSize)) {
+        FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
+        FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
+        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
+      }
+      // Unless explicitly requested otherwise, size old gen
+      // so it's NewRatio x of NewSize.
+      if (FLAG_IS_DEFAULT(OldSize)) {
+        if (max_heap > NewSize) {
+          FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
+          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
+        }
+      }
+    }
+  }
+  // Unless explicitly requested otherwise, definitely
+  // promote all objects surviving "tenuring_default" scavenges.
+  if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
+      FLAG_IS_DEFAULT(SurvivorRatio)) {
+    FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
+  }
+  // If we decided above (or user explicitly requested)
+  // `promote all' (via MaxTenuringThreshold := 0),
+  // prefer minuscule survivor spaces so as not to waste
+  // space for (non-existent) survivors
+  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
+    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
+  }
+
+  // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
+  // but rather the number of free blocks of a given size that are used when
+  // replenishing the local per-worker free list caches.
+  if (FLAG_IS_DEFAULT(OldPLABSize)) {
+    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
+      // OldPLAB sizing manually turned off: Use a larger default setting,
+      // unless it was manually specified. This is because a too-low value
+      // will slow down scavenges.
+      FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
+    } else {
+      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
+    }
+  }
+
+  // If either of the static initialization defaults has changed, note this
+  // modification.
+  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
+    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
+  }
+
+  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+}
+
+void CMSArguments::disable_adaptive_size_policy(const char* collector_name) {
+  if (UseAdaptiveSizePolicy) {
+    if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
+      warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
+              collector_name);
+    }
+    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
+  }
+}
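When MaxNewSize, NewSize, NewRatio and OldSize are all left at their defaults, the CMS young-gen ergonomics above reduce to a few clamps around preferred_max_new_size. The following is a minimal standalone sketch of that arithmetic only (plain C++; the names mirror the flags but are ordinary variables, not the real FLAG_SET_ERGO machinery, and it assumes preferred_max_new_size was already derived from CMSYoungGenPerWorker and ParallelGCThreads and page-aligned as above):

#include <algorithm>
#include <cstddef>

// Illustrative only: all flags assumed to be at their defaults.
static void cms_young_gen_ergo(size_t preferred_max_new_size, size_t max_heap,
                               size_t new_ratio,
                               size_t& max_new_size, size_t& new_size,
                               size_t& old_size) {
  max_new_size = preferred_max_new_size;                          // MaxNewSize
  size_t min_new = preferred_max_new_size;
  new_size = std::max(new_size, min_new);                         // at least min_new
  new_size = std::min(preferred_max_new_size, new_size);          // at most preferred
  old_size = std::min(new_ratio * new_size, max_heap - new_size); // OldSize
}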
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsArguments.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_CMS_CMSARGUMENTS_HPP
+#define SHARE_GC_CMS_CMSARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class CMSArguments : public GCArguments {
+private:
+  void disable_adaptive_size_policy(const char* collector_name);
+  void set_parnew_gc_flags();
+public:
+  virtual void initialize_flags();
+  virtual size_t conservative_max_heap_alignment();
+};
+
+#endif // SHARE_GC_CMS_CMSARGUMENTS_HPP
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,8 +247,8 @@
       _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
 
   void work(uint worker_id) {
-    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
-    _g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
+    ParKnownGarbageHRClosure par_known_garbage_cl(_hrSorted, _chunk_size);
+    _g1->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
   }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1Arguments.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/vm_version.hpp"
+
+size_t G1Arguments::conservative_max_heap_alignment() {
+  return HeapRegion::max_region_size();
+}
+
+void G1Arguments::initialize_flags() {
+  GCArguments::initialize_flags();
+  assert(UseG1GC, "Error");
+#if defined(COMPILER1) || INCLUDE_JVMCI
+  FastTLABRefill = false;
+#endif
+  FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
+  if (ParallelGCThreads == 0) {
+    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
+    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
+  }
+
+#if INCLUDE_ALL_GCS
+  if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
+    FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
+  }
+#endif
+
+  // MarkStackSize will be set (if it hasn't been set by the user)
+  // when concurrent marking is initialized.
+  // Its value will be based upon the number of parallel marking threads.
+  // But we do set the maximum mark stack size here.
+  if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
+    FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
+  }
+
+  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
+    // In G1, we want the default GC overhead goal to be higher than
+    // it is for PS, or the heap might be expanded too aggressively.
+    // We set it here to ~8%.
+    FLAG_SET_DEFAULT(GCTimeRatio, 12);
+  }
+
+  // Below, we might need to calculate the pause time interval based on
+  // the pause target. When we do so, we are going to give G1 maximum
+  // flexibility and allow it to do pauses when it needs to. So, we'll
+  // arrange for the pause interval to be the pause time target + 1 to
+  // ensure that a) the pause time target is maximized with respect to
+  // the pause interval and b) we maintain the invariant that pause
+  // time target < pause interval. If the user does not want this
+  // maximum flexibility, they will have to set the pause interval
+  // explicitly.
+
+  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+    // The default pause time target in G1 is 200ms
+    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
+  }
+
+  // Then, if the interval parameter was not set, set it according to
+  // the pause time target (this will also deal with the case when the
+  // pause time target is the default value).
+  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
+    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
+  }
+
+  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+}
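The defaulting rules described in the comments above come down to: pick 200 ms as the pause target if it was not set, then derive the interval as target + 1 so the target stays strictly below the interval. A hedged sketch of just that logic (standalone C++; 'is_default' stands in for FLAG_IS_DEFAULT and the values are plain integers rather than VM flags):

#include <cstdint>

// Illustrative defaulting of the G1 pause goals.
static void default_pause_goals(bool pause_is_default, uint64_t& max_pause_ms,
                                bool interval_is_default, uint64_t& interval_ms) {
  if (pause_is_default) {
    max_pause_ms = 200;              // default pause time target in G1
  }
  if (interval_is_default) {
    interval_ms = max_pause_ms + 1;  // keep pause target < pause interval
  }
}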
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1Arguments.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1ARGUMENTS_HPP
+#define SHARE_GC_G1_G1ARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class G1Arguments : public GCArguments {
+public:
+  virtual void initialize_flags();
+  virtual size_t conservative_max_heap_alignment();
+};
+
+#endif // SHARE_GC_G1_G1ARGUMENTS_HPP
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -314,7 +314,7 @@
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
     G1ConcurrentMark* cm = g1h->concurrent_mark();
     G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
-    g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
+    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
   }
 };
 
@@ -381,7 +381,7 @@
   void work(uint worker_id) {
     G1FinalizeCardLiveDataClosure cl(G1CollectedHeap::heap(), _bitmap, _live_data);
 
-    G1CollectedHeap::heap()->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
+    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
   }
 };
 
@@ -560,7 +560,7 @@
                                    _mark_bitmap,
                                    _act_live_data,
                                    &_exp_live_data);
-    _g1h->heap_region_par_iterate(&cl, worker_id, &_hr_claimer);
+    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
 
     Atomic::add(cl.failures(), &_failures);
   }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -38,6 +38,7 @@
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
+#include "gc/g1/g1FullCollector.hpp"
 #include "gc/g1/g1FullGCScope.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1HeapSizingPolicy.hpp"
@@ -48,10 +49,9 @@
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
-#include "gc/g1/g1SerialFullCollector.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@@ -143,6 +143,12 @@
   reset_from_card_cache(start_idx, num_regions);
 }
 
+
+HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
+                                             MemRegion mr) {
+  return new HeapRegion(hrs_index, bot(), mr);
+}
+
 // Private methods.
 
 HeapRegion*
@@ -1155,7 +1161,6 @@
 
 void G1CollectedHeap::abort_refinement() {
   if (_hot_card_cache->use_cache()) {
-    _hot_card_cache->reset_card_counts();
     _hot_card_cache->reset_hot_cache();
   }
 
@@ -1199,6 +1204,10 @@
 }
 
 void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
+  // Post collection logging.
+  // We should do this after we potentially resize the heap so
+  // that all the COMMIT / UNCOMMIT events are generated before
+  // the compaction events.
   print_hrm_post_compaction();
   heap_transition->print();
   print_heap_after_gc();
@@ -1221,23 +1230,18 @@
   gc_prologue(true);
   prepare_heap_for_full_collection();
 
-  G1SerialFullCollector serial(scope, ref_processor_stw());
-  serial.prepare_collection();
-  serial.collect();
-  serial.complete_collection();
+  G1FullCollector collector(scope, ref_processor_stw(), concurrent_mark()->next_mark_bitmap(), workers()->active_workers());
+  collector.prepare_collection();
+  collector.collect();
+  collector.complete_collection();
 
   prepare_heap_for_mutators();
 
   g1_policy()->record_full_collection_end();
   gc_epilogue(true);
 
-  // Post collection verification.
   verify_after_full_collection();
 
-  // Post collection logging.
-  // We should do this after we potentially resize the heap so
-  // that all the COMMIT / UNCOMMIT events are generated before
-  // the compaction events.
   print_heap_after_full_collection(scope->heap_transition());
 }
 
@@ -1269,10 +1273,10 @@
 }
 
 void G1CollectedHeap::resize_if_necessary_after_full_collection() {
-  // Include bytes that will be pre-allocated to support collections, as "used".
-  const size_t used_after_gc = used();
+  // Capacity, free and used after the GC are counted at full-region
+  // granularity so that the waste is included in the following calculations.
   const size_t capacity_after_gc = capacity();
-  const size_t free_after_gc = capacity_after_gc - used_after_gc;
+  const size_t used_after_gc = capacity_after_gc - unused_committed_regions_in_bytes();
 
   // This is enforced in arguments.cpp.
   assert(MinHeapFreeRatio <= MaxHeapFreeRatio,
@@ -1326,8 +1330,9 @@
     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
 
     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). "
-                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
-                              capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
+                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
+                              "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
+                              capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
 
     expand(expand_bytes, _workers);
 
@@ -1337,8 +1342,9 @@
     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
 
     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). "
-                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
-                              capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio);
+                              "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
+                              "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
+                              capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
 
     shrink(shrink_bytes);
   }
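The expand/shrink decision in this hunk compares the committed capacity against bounds derived from MinHeapFreeRatio and MaxHeapFreeRatio applied to the used bytes. A small sketch of roughly that arithmetic (standalone C++; names are illustrative and the real code additionally clamps against the maximum heap size):

#include <cmath>
#include <cstddef>

// Illustrative: used_after_gc corresponds to capacity minus unused committed regions.
static void desired_capacity_bounds(size_t used_after_gc,
                                    unsigned min_heap_free_ratio,  // MinHeapFreeRatio
                                    unsigned max_heap_free_ratio,  // MaxHeapFreeRatio
                                    size_t& minimum_desired,
                                    size_t& maximum_desired) {
  const double max_used_pct = (100.0 - min_heap_free_ratio) / 100.0;
  const double min_used_pct = (100.0 - max_heap_free_ratio) / 100.0;
  minimum_desired = (size_t)std::ceil(used_after_gc / max_used_pct);
  maximum_desired = (size_t)std::ceil(used_after_gc / min_used_pct);
  // capacity < minimum_desired -> expand by the difference
  // capacity > maximum_desired -> shrink by the difference
}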
@@ -1959,6 +1965,10 @@
   return _hrm.length() * HeapRegion::GrainBytes;
 }
 
+size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
+  return _hrm.total_free_bytes();
+}
+
 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
   hr->reset_gc_time_stamp();
 }
@@ -2262,10 +2272,15 @@
   _hrm.iterate(cl);
 }
 
-void G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
-                                              uint worker_id,
-                                              HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, worker_id, hrclaimer);
+void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
+                                                                 HeapRegionClaimer *hrclaimer,
+                                                                 uint worker_id) const {
+  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
+}
+
+void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
+                                                         HeapRegionClaimer *hrclaimer) const {
+  _hrm.par_iterate(cl, hrclaimer, 0);
 }
 
 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
@@ -2276,14 +2291,6 @@
   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
 }
 
-HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
-  HeapRegion* result = _hrm.next_region_in_heap(from);
-  while (result != NULL && result->is_pinned()) {
-    result = _hrm.next_region_in_heap(result);
-  }
-  return result;
-}
-
 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
   HeapRegion* hr = heap_region_containing(addr);
   return hr->block_start(addr);
@@ -2375,7 +2382,7 @@
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
-  case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked() && !hr->is_archive();
+  case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
@@ -2386,10 +2393,7 @@
   switch (vo) {
   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
-  case VerifyOption_G1UseMarkWord: {
-    HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
-    return !obj->is_gc_marked() && !hr->is_archive();
-  }
+  case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj);
   default:                            ShouldNotReachHere();
   }
   return false; // keep some compilers happy
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1046,6 +1046,7 @@
   // The Concurrent Marking reference processor...
   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
+  size_t unused_committed_regions_in_bytes() const;
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
@@ -1181,6 +1182,8 @@
     return barrier_set_cast<G1SATBCardTableLoggingModRefBS>(barrier_set());
   }
 
+  G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
+
   // Iteration functions.
 
   // Iterate over all objects, calling "cl.do_object" on each.
@@ -1207,15 +1210,18 @@
 
   inline HeapWord* bottom_addr_for_region(uint index) const;
 
-  // Iterate over the heap regions in parallel. Assumes that this will be called
-  // in parallel by a number of worker threads with distinct worker ids
-  // in the range passed to the HeapRegionClaimer. Applies "blk->doHeapRegion"
-  // to each of the regions, by attempting to claim the region using the
-  // HeapRegionClaimer and, if successful, applying the closure to the claimed
-  // region.
-  void heap_region_par_iterate(HeapRegionClosure* cl,
-                               uint worker_id,
-                               HeapRegionClaimer* hrclaimer) const;
+  // Two functions to iterate over the heap regions in parallel. Threads
+  // compete using the HeapRegionClaimer to claim the regions before
+  // applying the closure on them.
+  // The _from_worker_offset version uses the HeapRegionClaimer and
+  // the worker id to calculate a start offset to prevent all workers from
+  // starting at the same point.
+  void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
+                                                  HeapRegionClaimer* hrclaimer,
+                                                  uint worker_id) const;
+
+  void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
+                                          HeapRegionClaimer* hrclaimer) const;
 
   // Iterate over the regions (if any) in the current collection set.
   void collection_set_iterate(HeapRegionClosure* blk);
@@ -1226,8 +1232,6 @@
   // collection set regions.
   void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
 
-  HeapRegion* next_compaction_region(const HeapRegion* from) const;
-
   // Returns the HeapRegion that contains addr. addr must not be NULL.
   template <class T>
   inline HeapRegion* heap_region_containing(const T addr) const;
@@ -1391,6 +1395,9 @@
 
   inline bool is_obj_ill(const oop obj) const;
 
+  inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
+  inline bool is_obj_dead_full(const oop obj) const;
+
   G1ConcurrentMark* concurrent_mark() const { return _cm; }
 
   // Refinement
@@ -1435,9 +1442,9 @@
 
   // Perform verification.
 
-  // vo == UsePrevMarking  -> use "prev" marking information,
+  // vo == UsePrevMarking -> use "prev" marking information,
   // vo == UseNextMarking -> use "next" marking information
-  // vo == UseMarkWord    -> use the mark word in the object header
+  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
   //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
@@ -1446,7 +1453,7 @@
   // vo == UseNextMarking, which is to verify the "next" marking
   // information at the end of remark.
   // Currently there is only one place where this is called with
-  // vo == UseMarkWord, which is to verify the marking during a
+  // vo == UseFullMarking, which is to verify the marking during a
   // full GC.
   void verify(VerifyOption vo);
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -264,6 +264,14 @@
   return is_obj_ill(obj, heap_region_containing(obj));
 }
 
+inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
+  return !isMarkedNext(obj) && !hr->is_archive();
+}
+
+inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
+  return is_obj_dead_full(obj, heap_region_containing(obj));
+}
+
 inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
   assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
   _humongous_reclaim_candidates.set_candidate(region, value);
--- a/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap_ext.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,11 +37,6 @@
   return false;
 }
 
-HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
-                                             MemRegion mr) {
-  return new HeapRegion(hrs_index, bot(), mr);
-}
-
 G1Policy* G1CollectedHeap::create_g1_policy(STWGCTimer* gc_timer) {
   return new G1DefaultPolicy(gc_timer);
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -634,7 +634,7 @@
 
   void work(uint worker_id) {
     SuspendibleThreadSetJoiner sts_join(_suspendible);
-    G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer);
+    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hr_claimer, worker_id);
   }
 
   bool is_complete() {
@@ -1140,7 +1140,7 @@
     HRRSCleanupTask hrrs_cleanup_task;
     G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                            &hrrs_cleanup_task);
-    _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
+    _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id);
     assert(g1_note_end.complete(), "Shouldn't have yielded!");
 
     // Now update the lists
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/heapRegion.hpp"
 #include "memory/virtualspace.hpp"
 
 void G1CMBitMap::print_on_error(outputStream* st, const char* prefix) const {
@@ -65,3 +66,10 @@
   _bm.at_put_range(addr_to_offset(intersection.start()),
                    addr_to_offset(intersection.end()), false);
 }
+
+void G1CMBitMap::clear_region(HeapRegion* region) {
+  if (!region->is_empty()) {
+    MemRegion mr(region->bottom(), region->top());
+    clear_range(mr);
+  }
+}
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -28,6 +28,7 @@
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "utilities/bitMap.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -35,6 +36,7 @@
 class G1CMBitMap;
 class G1CMTask;
 class G1ConcurrentMark;
+class HeapRegion;
 
 // Closure for iteration over bitmaps
 class G1CMBitMapClosure VALUE_OBJ_CLASS_SPEC {
@@ -96,6 +98,7 @@
   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
 
   // Read marks
+  bool is_marked(oop obj) const;
   bool is_marked(HeapWord* addr) const {
     assert(_covered.contains(addr),
            "Address " PTR_FORMAT " is outside underlying space from " PTR_FORMAT " to " PTR_FORMAT,
@@ -120,9 +123,12 @@
   // Write marks.
   inline void mark(HeapWord* addr);
   inline void clear(HeapWord* addr);
+  inline void clear(oop obj);
   inline bool par_mark(HeapWord* addr);
+  inline bool par_mark(oop obj);
 
   void clear_range(MemRegion mr);
+  void clear_region(HeapRegion* hr);
 };
 
 #endif // SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_HPP
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -83,4 +83,16 @@
   return _bm.par_set_bit(addr_to_offset(addr));
 }
 
+inline bool G1CMBitMap::par_mark(oop obj) {
+  return par_mark((HeapWord*) obj);
+}
+
+inline bool G1CMBitMap::is_marked(oop obj) const {
+  return is_marked((HeapWord*) obj);
+}
+
+inline void G1CMBitMap::clear(oop obj) {
+  clear((HeapWord*) obj);
+}
+
 #endif // SHARE_VM_GC_G1_G1CONCURRENTMARKBITMAP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullGCAdjustTask.hpp"
+#include "gc/g1/g1FullGCCompactTask.hpp"
+#include "gc/g1/g1FullGCMarker.inline.hpp"
+#include "gc/g1/g1FullGCMarkTask.hpp"
+#include "gc/g1/g1FullGCPrepareTask.hpp"
+#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+#include "gc/g1/g1OopClosures.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/preservedMarks.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "logging/log.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "utilities/debug.hpp"
+
+static void clear_and_activate_derived_pointers() {
+#if COMPILER2_OR_JVMCI
+  DerivedPointerTable::clear();
+#endif
+}
+
+static void deactivate_derived_pointers() {
+#if COMPILER2_OR_JVMCI
+  DerivedPointerTable::set_active(false);
+#endif
+}
+
+static void update_derived_pointers() {
+#if COMPILER2_OR_JVMCI
+  DerivedPointerTable::update_pointers();
+#endif
+}
+
+G1FullCollector::G1FullCollector(G1FullGCScope* scope,
+                                 ReferenceProcessor* reference_processor,
+                                 G1CMBitMap* bitmap,
+                                 uint workers) :
+    _scope(scope),
+    _num_workers(workers),
+    _mark_bitmap(bitmap),
+    _oop_queue_set(_num_workers),
+    _array_queue_set(_num_workers),
+    _preserved_marks_set(true),
+    _reference_processor(reference_processor),
+    _serial_compaction_point(),
+    _is_alive(_mark_bitmap),
+    _is_alive_mutator(_reference_processor, &_is_alive) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
+
+  _preserved_marks_set.init(_num_workers);
+  _markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
+  _compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
+  for (uint i = 0; i < _num_workers; i++) {
+    _markers[i] = new G1FullGCMarker(i, _preserved_marks_set.get(i), mark_bitmap());
+    _compaction_points[i] = new G1FullGCCompactionPoint();
+    _oop_queue_set.register_queue(i, marker(i)->oop_stack());
+    _array_queue_set.register_queue(i, marker(i)->objarray_stack());
+  }
+}
+
+G1FullCollector::~G1FullCollector() {
+  for (uint i = 0; i < _num_workers; i++) {
+    delete _markers[i];
+    delete _compaction_points[i];
+  }
+  FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
+  FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
+}
+
+void G1FullCollector::prepare_collection() {
+  _reference_processor->enable_discovery();
+  _reference_processor->setup_policy(scope()->should_clear_soft_refs());
+
+  // When collecting the permanent generation Method*s may be moving,
+  // so we either have to flush all bcp data or convert it into bci.
+  CodeCache::gc_prologue();
+
+  // We should save the marks of the currently locked biased monitors.
+  // The marking doesn't preserve the marks of biased objects.
+  BiasedLocking::preserve_marks();
+
+  // Clear and activate derived pointer collection.
+  clear_and_activate_derived_pointers();
+}
+
+void G1FullCollector::collect() {
+  phase1_mark_live_objects();
+  verify_after_marking();
+
+  // Don't add any more derived pointers during later phases
+  deactivate_derived_pointers();
+
+  phase2_prepare_compaction();
+
+  phase3_adjust_pointers();
+
+  phase4_do_compaction();
+}
+
+void G1FullCollector::complete_collection() {
+  // Restore all marks.
+  restore_marks();
+
+  // When the pointers have been adjusted and moved, we can
+  // update the derived pointer table.
+  update_derived_pointers();
+
+  BiasedLocking::restore_marks();
+  CodeCache::gc_epilogue();
+  JvmtiExport::gc_epilogue();
+}
+
+void G1FullCollector::phase1_mark_live_objects() {
+  // Recursively traverse all live objects and mark them.
+  GCTraceTime(Info, gc, phases) info("Phase 1: Mark live objects", scope()->timer());
+
+  // Do the actual marking.
+  G1FullGCMarkTask marking_task(this);
+  run_task(&marking_task);
+
+  // Process references discovered during marking.
+  G1FullGCReferenceProcessingExecutor reference_processing(this);
+  reference_processing.execute(scope()->timer(), scope()->tracer());
+
+  // Weak oops cleanup.
+  {
+    GCTraceTime(Debug, gc, phases) trace("Phase 1: Weak Processing", scope()->timer());
+    WeakProcessor::weak_oops_do(&_is_alive, &do_nothing_cl);
+  }
+
+  // Class unloading and cleanup.
+  if (ClassUnloading) {
+    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
+    // Unload classes and purge the SystemDictionary.
+    bool purged_class = SystemDictionary::do_unloading(&_is_alive, scope()->timer());
+    G1CollectedHeap::heap()->complete_cleaning(&_is_alive, purged_class);
+  } else {
+    GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
+    // If no class unloading just clean out strings and symbols.
+    G1CollectedHeap::heap()->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
+  }
+
+  scope()->tracer()->report_object_count_after_gc(&_is_alive);
+}
+
+void G1FullCollector::prepare_compaction_common() {
+  G1FullGCPrepareTask task(this);
+  run_task(&task);
+
+  // If no regions were freed, prepare for serial compaction to avoid OOM while memory is still left.
+  if (!task.has_freed_regions()) {
+    task.prepare_serial_compaction();
+  }
+}
+
+void G1FullCollector::phase2_prepare_compaction() {
+  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare for compaction", scope()->timer());
+  prepare_compaction_ext(); // Will call prepare_compaction_common() above.
+}
+
+void G1FullCollector::phase3_adjust_pointers() {
+  // Adjust the pointers to reflect the new locations
+  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers and remembered sets", scope()->timer());
+
+  G1FullGCAdjustTask task(this);
+  run_task(&task);
+}
+
+void G1FullCollector::phase4_do_compaction() {
+  // Compact the heap using the compaction queues created in phase 2.
+  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
+  G1FullGCCompactTask task(this);
+  run_task(&task);
+
+  // Fall back to serial compaction to avoid OOM when there are very few free regions.
+  if (serial_compaction_point()->has_regions()) {
+    task.serial_compaction();
+  }
+}
+
+void G1FullCollector::restore_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(G1CollectedHeap::heap()->workers());
+  _preserved_marks_set.restore(&task_executor);
+  _preserved_marks_set.reclaim();
+}
+
+void G1FullCollector::run_task(AbstractGangTask* task) {
+  G1CollectedHeap::heap()->workers()->run_task(task, _num_workers);
+}
+
+void G1FullCollector::verify_after_marking() {
+  if (!VerifyDuringGC) {
+    // Only do verification if VerifyDuringGC is set.
+    return;
+  }
+
+  HandleMark hm;  // handle scope
+#if COMPILER2_OR_JVMCI
+  DerivedPointerTableDeactivate dpt_deact;
+#endif
+  G1CollectedHeap::heap()->prepare_for_verify();
+  // Note: we can verify only the heap here. When an object is
+  // marked, the previous value of the mark word (including
+  // identity hash values, ages, etc) is preserved, and the mark
+  // word is set to markOop::marked_value - effectively removing
+  // any hash values from the mark word. These hash values are
+  // used when verifying the dictionaries and so removing them
+  // from the mark word can make verification of the dictionaries
+  // fail. At the end of the GC, the original mark word values
+  // (including hash values) are restored to the appropriate
+  // objects.
+  GCTraceTime(Info, gc, verify)("During GC (full)");
+  G1CollectedHeap::heap()->verify(VerifyOption_G1UseFullMarking);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLCOLLECTOR_HPP
+#define SHARE_GC_G1_G1FULLCOLLECTOR_HPP
+
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCMarker.hpp"
+#include "gc/g1/g1FullGCOopClosures.hpp"
+#include "gc/shared/preservedMarks.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/taskqueue.hpp"
+#include "memory/allocation.hpp"
+
+class AbstractGangTask;
+class G1CMBitMap;
+class G1FullGCMarker;
+class G1FullGCScope;
+class G1FullGCCompactionPoint;
+class ReferenceProcessor;
+
+// The G1FullCollector holds data associated with the current Full GC.
+class G1FullCollector : StackObj {
+  G1FullGCScope*            _scope;
+  uint                      _num_workers;
+  G1FullGCMarker**          _markers;
+  G1FullGCCompactionPoint** _compaction_points;
+  G1CMBitMap*               _mark_bitmap;
+  OopQueueSet               _oop_queue_set;
+  ObjArrayTaskQueueSet      _array_queue_set;
+  PreservedMarksSet         _preserved_marks_set;
+  ReferenceProcessor*       _reference_processor;
+  G1FullGCCompactionPoint   _serial_compaction_point;
+
+  G1IsAliveClosure          _is_alive;
+  ReferenceProcessorIsAliveMutator _is_alive_mutator;
+
+public:
+  G1FullCollector(G1FullGCScope* scope,
+                  ReferenceProcessor* reference_processor,
+                  G1CMBitMap* mark_bitmap,
+                  uint workers);
+  ~G1FullCollector();
+
+  void prepare_collection();
+  void collect();
+  void complete_collection();
+
+  G1FullGCScope*           scope() { return _scope; }
+  uint                     workers() { return _num_workers; }
+  G1FullGCMarker*          marker(uint id) { return _markers[id]; }
+  G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
+  G1CMBitMap*              mark_bitmap() { return _mark_bitmap; }
+  OopQueueSet*             oop_queue_set() { return &_oop_queue_set; }
+  ObjArrayTaskQueueSet*    array_queue_set() { return &_array_queue_set; }
+  PreservedMarksSet*       preserved_mark_set() { return &_preserved_marks_set; }
+  ReferenceProcessor*      reference_processor() { return _reference_processor; }
+  G1FullGCCompactionPoint* serial_compaction_point() { return &_serial_compaction_point; }
+
+private:
+  void phase1_mark_live_objects();
+  void phase2_prepare_compaction();
+  void phase3_adjust_pointers();
+  void phase4_do_compaction();
+
+  void restore_marks();
+  void verify_after_marking();
+
+  void run_task(AbstractGangTask* task);
+
+  // Prepare compaction extension support.
+  void prepare_compaction_ext();
+  void prepare_compaction_common();
+};
+
+
+#endif // SHARE_GC_G1_G1FULLCOLLECTOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullCollector_ext.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+
+void G1FullCollector::prepare_compaction_ext() {
+  prepare_compaction_common();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullGCAdjustTask.hpp"
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCMarker.hpp"
+#include "gc/g1/g1FullGCOopClosures.inline.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "logging/log.hpp"
+#include "utilities/ticks.inline.hpp"
+
+class G1AdjustLiveClosure : public StackObj {
+  G1AdjustAndRebuildClosure* _adjust_closure;
+public:
+  G1AdjustLiveClosure(G1AdjustAndRebuildClosure* cl) :
+    _adjust_closure(cl) { }
+
+  size_t apply(oop object) {
+    _adjust_closure->update_compaction_delta(object);
+    return object->oop_iterate_size(_adjust_closure);
+  }
+};
+
+class G1AdjustRegionClosure : public HeapRegionClosure {
+  G1CMBitMap* _bitmap;
+  uint _worker_id;
+ public:
+  G1AdjustRegionClosure(G1CMBitMap* bitmap, uint worker_id) :
+    _bitmap(bitmap),
+    _worker_id(worker_id) { }
+
+  bool doHeapRegion(HeapRegion* r) {
+    G1AdjustAndRebuildClosure cl(_worker_id);
+    if (r->is_humongous()) {
+      oop obj = oop(r->humongous_start_region()->bottom());
+      cl.update_compaction_delta(obj);
+      obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top()));
+    } else if (r->is_open_archive()) {
+      // Only adjust the open archive regions; the closed ones
+      // never change.
+      G1AdjustLiveClosure adjust(&cl);
+      r->apply_to_marked_objects(_bitmap, &adjust);
+      // Open archive regions will not be compacted and the marking information is
+      // no longer needed. Clear it here to avoid having to do it later.
+      _bitmap->clear_region(r);
+    } else {
+      G1AdjustLiveClosure adjust(&cl);
+      r->apply_to_marked_objects(_bitmap, &adjust);
+    }
+    return false;
+  }
+};
+
+G1FullGCAdjustTask::G1FullGCAdjustTask(G1FullCollector* collector) :
+    G1FullGCTask("G1 Adjust and Rebuild", collector),
+    _root_processor(G1CollectedHeap::heap(), collector->workers()),
+    _hrclaimer(collector->workers()),
+    _adjust(),
+    _adjust_string_dedup(NULL, &_adjust, G1StringDedup::is_enabled()) {
+  // Need cleared claim bits for the roots processing
+  ClassLoaderDataGraph::clear_claimed_marks();
+}
+
+void G1FullGCAdjustTask::work(uint worker_id) {
+  Ticks start = Ticks::now();
+  ResourceMark rm;
+
+  // Adjust preserved marks first since they are not balanced.
+  G1FullGCMarker* marker = collector()->marker(worker_id);
+  marker->preserved_stack()->adjust_during_full_gc();
+
+  // Adjust the weak_roots.
+  CLDToOopClosure adjust_cld(&_adjust);
+  CodeBlobToOopClosure adjust_code(&_adjust, CodeBlobToOopClosure::FixRelocations);
+  _root_processor.process_full_gc_weak_roots(&_adjust);
+
+  // Needs to be last; process_all_roots calls all_tasks_completed(...).
+  _root_processor.process_all_roots(
+      &_adjust,
+      &adjust_cld,
+      &adjust_code);
+
+  // Adjust string dedup if enabled.
+  if (G1StringDedup::is_enabled()) {
+    G1StringDedup::parallel_unlink(&_adjust_string_dedup, worker_id);
+  }
+
+  // Now adjust pointers region by region
+  G1AdjustRegionClosure blk(collector()->mark_bitmap(), worker_id);
+  G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
+  log_task("Adjust and Rebuild task", worker_id, start);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCADJUSTTASK_HPP
+#define SHARE_GC_G1_G1FULLGCADJUSTTASK_HPP
+
+#include "gc/g1/g1FullGCOopClosures.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/heapRegionManager.hpp"
+#include "utilities/ticks.hpp"
+
+class G1CollectedHeap;
+
+class G1FullGCAdjustTask : public G1FullGCTask {
+  G1RootProcessor          _root_processor;
+  HeapRegionClaimer        _hrclaimer;
+  G1AdjustClosure          _adjust;
+  G1StringDedupUnlinkOrOopsDoClosure _adjust_string_dedup;
+
+public:
+  G1FullGCAdjustTask(G1FullCollector* collector);
+  void work(uint worker_id);
+};
+
+#endif // SHARE_GC_G1_G1FULLGCADJUSTTASK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCCompactTask.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "logging/log.hpp"
+#include "utilities/ticks.inline.hpp"
+
+class G1ResetHumongousClosure : public HeapRegionClosure {
+  G1CMBitMap* _bitmap;
+
+public:
+  G1ResetHumongousClosure(G1CMBitMap* bitmap) :
+      _bitmap(bitmap) { }
+
+  bool doHeapRegion(HeapRegion* current) {
+    if (current->is_humongous()) {
+      if (current->is_starts_humongous()) {
+        oop obj = oop(current->bottom());
+        if (_bitmap->is_marked(obj)) {
+          // Clear bitmap and fix mark word.
+          _bitmap->clear(obj);
+          obj->init_mark();
+        } else {
+          assert(current->is_empty(), "Should have been cleared in phase 2.");
+        }
+      }
+      current->reset_during_compaction();
+    }
+    return false;
+  }
+};
+
+size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
+  size_t size = obj->size();
+  HeapWord* destination = (HeapWord*)obj->forwardee();
+  if (destination == NULL) {
+    // Object not moving
+    return size;
+  }
+
+  // copy object and reinit its mark
+  HeapWord* obj_addr = (HeapWord*) obj;
+  assert(obj_addr != destination, "everything in this pass should be moving");
+  Copy::aligned_conjoint_words(obj_addr, destination, size);
+  oop(destination)->init_mark();
+  assert(oop(destination)->klass() != NULL, "should have a class");
+
+  return size;
+}
+
+void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
+  assert(!hr->is_humongous(), "Should be no humongous regions in compaction queue");
+  G1CompactRegionClosure compact(collector()->mark_bitmap());
+  hr->apply_to_marked_objects(collector()->mark_bitmap(), &compact);
+  // Once all objects have been moved, the liveness information
+  // needs to be cleared.
+  collector()->mark_bitmap()->clear_region(hr);
+  hr->complete_compaction();
+}
+
+void G1FullGCCompactTask::work(uint worker_id) {
+  Ticks start = Ticks::now();
+  GrowableArray<HeapRegion*>* compaction_queue = collector()->compaction_point(worker_id)->regions();
+  for (GrowableArrayIterator<HeapRegion*> it = compaction_queue->begin();
+       it != compaction_queue->end();
+       ++it) {
+    compact_region(*it);
+  }
+
+  G1ResetHumongousClosure hc(collector()->mark_bitmap());
+  G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&hc, &_claimer, worker_id);
+  log_task("Compaction task", worker_id, start);
+}
+
+void G1FullGCCompactTask::serial_compaction() {
+  GCTraceTime(Debug, gc, phases) tm("Phase 4: Serial Compaction", collector()->scope()->timer());
+  GrowableArray<HeapRegion*>* compaction_queue = collector()->serial_compaction_point()->regions();
+  for (GrowableArrayIterator<HeapRegion*> it = compaction_queue->begin();
+       it != compaction_queue->end();
+       ++it) {
+    compact_region(*it);
+  }
+}
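The compact phase above simply walks each worker's compaction queue and slides every marked object to the forwarding address computed in the prepare phase. A simplified sketch of that copy step (standalone C++; 'Obj' is a hypothetical stand-in for the oop/mark-word machinery, so the mark-word reinitialization is omitted):

#include <cstddef>
#include <cstring>

// Hypothetical object header: size in words and the forwarding address
// assigned in the prepare phase (nullptr means "does not move").
struct Obj {
  size_t size_words;
  void*  forwardee;
};

// Slide one object to its new location, mirroring G1CompactRegionClosure::apply.
static size_t compact_one(Obj* obj) {
  size_t size = obj->size_words;
  void* destination = obj->forwardee;
  if (destination == nullptr) {
    return size;  // object is not moving
  }
  // Regions are compacted bottom-up, so source and destination may overlap;
  // memmove handles the overlapping copy, like Copy::aligned_conjoint_words.
  std::memmove(destination, obj, size * sizeof(void*));
  return size;
}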
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCCOMPACTTASK_HPP
+#define SHARE_GC_G1_G1FULLGCCOMPACTTASK_HPP
+
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/heapRegionManager.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "utilities/ticks.hpp"
+
+class G1CollectedHeap;
+class G1CMBitMap;
+
+class G1FullGCCompactTask : public G1FullGCTask {
+protected:
+  HeapRegionClaimer _claimer;
+
+private:
+  void compact_region(HeapRegion* hr);
+
+public:
+  G1FullGCCompactTask(G1FullCollector* collector) :
+    G1FullGCTask("G1 Compact Task", collector),
+    _claimer(collector->workers()) { }
+  void work(uint worker_id);
+  void serial_compaction();
+
+  class G1CompactRegionClosure : public StackObj {
+    G1CMBitMap* _bitmap;
+
+  public:
+    G1CompactRegionClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { }
+    size_t apply(oop object);
+  };
+};
+
+#endif // SHARE_GC_G1_G1FULLGCCOMPACTTASK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+
+G1FullGCCompactionPoint::G1FullGCCompactionPoint() :
+    _current_region(NULL),
+    _threshold(NULL),
+    _compaction_top(NULL) {
+  _compaction_regions = new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(32, true, mtGC);
+  _compaction_region_iterator = _compaction_regions->begin();
+}
+
+G1FullGCCompactionPoint::~G1FullGCCompactionPoint() {
+  delete _compaction_regions;
+}
+
+void G1FullGCCompactionPoint::update() {
+  if (is_initialized()) {
+    _current_region->set_compaction_top(_compaction_top);
+  }
+}
+
+void G1FullGCCompactionPoint::initialize_values(bool init_threshold) {
+  _compaction_top = _current_region->compaction_top();
+  if (init_threshold) {
+    _threshold = _current_region->initialize_threshold();
+  }
+}
+
+bool G1FullGCCompactionPoint::has_regions() {
+  return !_compaction_regions->is_empty();
+}
+
+bool G1FullGCCompactionPoint::is_initialized() {
+  return _current_region != NULL;
+}
+
+void G1FullGCCompactionPoint::initialize(HeapRegion* hr, bool init_threshold) {
+  _current_region = hr;
+  initialize_values(init_threshold);
+}
+
+HeapRegion* G1FullGCCompactionPoint::current_region() {
+  return *_compaction_region_iterator;
+}
+
+HeapRegion* G1FullGCCompactionPoint::next_region() {
+  HeapRegion* next = *(++_compaction_region_iterator);
+  assert(next != NULL, "Must return valid region");
+  return next;
+}
+
+GrowableArray<HeapRegion*>* G1FullGCCompactionPoint::regions() {
+  return _compaction_regions;
+}
+
+bool G1FullGCCompactionPoint::object_will_fit(size_t size) {
+  size_t space_left = pointer_delta(_current_region->end(), _compaction_top);
+  return size <= space_left;
+}
+
+void G1FullGCCompactionPoint::switch_region() {
+  // Save compaction top in the region.
+  _current_region->set_compaction_top(_compaction_top);
+  // Get the next region and re-initialize the values.
+  _current_region = next_region();
+  initialize_values(true);
+}
+
+void G1FullGCCompactionPoint::forward(oop object, size_t size) {
+  assert(_current_region != NULL, "Must have been initialized");
+
+  // Ensure the object fit in the current region.
+  while (!object_will_fit(size)) {
+    switch_region();
+  }
+
+  // Store a forwarding pointer if the object should be moved.
+  if ((HeapWord*)object != _compaction_top) {
+    object->forward_to(oop(_compaction_top));
+  } else {
+    if (object->forwardee() != NULL) {
+      // The object should not move, but its mark-word is in use, which makes
+      // it look forwarded. Clearing the mark is safe here because the
+      // original mark-word will be restored by the preserved marks. There is
+      // one exception: with BiasedLocking, forwardee() returns NULL even
+      // though the mark-word is in use. That is also fine, because
+      // forwardee() returns NULL in the compaction phase as well.
+      object->init_mark();
+    } else {
+      // Make sure object has the correct mark-word set or that it will be
+      // fixed when restoring the preserved marks.
+      assert(object->mark() == markOopDesc::prototype_for_object(object) || // Correct mark
+             object->mark()->must_be_preserved(object) || // Will be restored by PreservedMarksSet
+             (UseBiasedLocking && object->has_bias_pattern()), // Will be restored by BiasedLocking
+             "should have correct prototype obj: " PTR_FORMAT " mark: " PTR_FORMAT " prototype: " PTR_FORMAT,
+             p2i(object), p2i(object->mark()), p2i(markOopDesc::prototype_for_object(object)));
+    }
+    assert(object->forwardee() == NULL, "should be forwarded to NULL");
+  }
+
+  // Update compaction values.
+  _compaction_top += size;
+  if (_compaction_top > _threshold) {
+    _threshold = _current_region->cross_threshold(_compaction_top - size, _compaction_top);
+  }
+}
+
+void G1FullGCCompactionPoint::add(HeapRegion* hr) {
+  _compaction_regions->append(hr);
+}
+
+void G1FullGCCompactionPoint::merge(G1FullGCCompactionPoint* other) {
+  _compaction_regions->appendAll(other->regions());
+}
+
+HeapRegion* G1FullGCCompactionPoint::remove_last() {
+  return _compaction_regions->pop();
+}
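
forward() above is the planning half of the compaction: it walks the regions queued on the compaction point, switches to the next region once the current one cannot fit the object, and records the destination by installing a forwarding pointer. A minimal standalone sketch of that planning step, with illustrative types and without the cross_threshold/BOT bookkeeping or the mark-word handling for objects that stay put:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Simplified model of a compaction point; names are illustrative only.
    struct Region {
      std::size_t end;              // exclusive upper bound of the region
      std::size_t compaction_top;   // next free destination slot
    };

    class CompactionPoint {
      std::vector<Region*> _regions;
      std::size_t _current = 0;

    public:
      void add(Region* r) { _regions.push_back(r); }

      // Returns the destination offset for a live object of the given size,
      // switching to the next queued region when the current one is full.
      std::size_t forward(std::size_t size) {
        assert(!_regions.empty() && "must have at least one region queued");
        while (_regions[_current]->end - _regions[_current]->compaction_top < size) {
          ++_current;
          assert(_current < _regions.size() && "ran out of destination regions");
        }
        std::size_t dest = _regions[_current]->compaction_top;
        _regions[_current]->compaction_top += size;
        return dest;
      }
    };

    int main() {
      Region r0{100, 0}, r1{300, 200};   // {end, compaction_top}
      CompactionPoint cp;
      cp.add(&r0);
      cp.add(&r1);
      std::size_t a = cp.forward(60);    // fits in r0, lands at 0
      std::size_t b = cp.forward(60);    // does not fit in r0, lands at 200 in r1
      assert(a == 0 && b == 200);
    }

Applied to live objects in address order, this packs them one after another into the queued regions, which is what lets the later compact phase simply copy each object to its forwardee.
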
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCCOMPACTIONPOINT_HPP
+#define SHARE_GC_G1_G1FULLGCCOMPACTIONPOINT_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/growableArray.hpp"
+
+class HeapRegion;
+
+class G1FullGCCompactionPoint : public CHeapObj<mtGC> {
+  HeapRegion* _current_region;
+  HeapWord*   _threshold;
+  HeapWord*   _compaction_top;
+  GrowableArray<HeapRegion*>* _compaction_regions;
+  GrowableArrayIterator<HeapRegion*> _compaction_region_iterator;
+
+  bool object_will_fit(size_t size);
+  void initialize_values(bool init_threshold);
+  void switch_region();
+  HeapRegion* next_region();
+
+public:
+  G1FullGCCompactionPoint();
+  ~G1FullGCCompactionPoint();
+
+  bool has_regions();
+  bool is_initialized();
+  void initialize(HeapRegion* hr, bool init_threshold);
+  void update();
+  void forward(oop object, size_t size);
+  void add(HeapRegion* hr);
+  void merge(G1FullGCCompactionPoint* other);
+
+  HeapRegion* remove_last();
+  HeapRegion* current_region();
+
+  GrowableArray<HeapRegion*>* regions();
+};
+
+#endif // SHARE_GC_G1_G1FULLGCCOMPACTIONPOINT_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullGCMarker.hpp"
+#include "gc/g1/g1FullGCMarkTask.hpp"
+#include "gc/g1/g1FullGCOopClosures.inline.hpp"
+#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+
+G1FullGCMarkTask::G1FullGCMarkTask(G1FullCollector* collector) :
+    G1FullGCTask("G1 Parallel Marking Task", collector),
+    _root_processor(G1CollectedHeap::heap(), collector->workers()),
+    _terminator(collector->workers(), collector->array_queue_set()) {
+  // Need cleared claim bits for processing the roots
+  ClassLoaderDataGraph::clear_claimed_marks();
+}
+
+void G1FullGCMarkTask::work(uint worker_id) {
+  Ticks start = Ticks::now();
+  ResourceMark rm;
+  G1FullGCMarker* marker = collector()->marker(worker_id);
+  MarkingCodeBlobClosure code_closure(marker->mark_closure(), !CodeBlobToOopClosure::FixRelocations);
+
+  if (ClassUnloading) {
+    _root_processor.process_strong_roots(
+        marker->mark_closure(),
+        marker->cld_closure(),
+        &code_closure);
+  } else {
+    _root_processor.process_all_roots_no_string_table(
+        marker->mark_closure(),
+        marker->cld_closure(),
+        &code_closure);
+  }
+
+  // Mark stack is populated, now process and drain it.
+  marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator);
+
+  // This is the point where the entire marking should have completed.
+  assert(marker->oop_stack()->is_empty(), "Marking should have completed");
+  assert(marker->objarray_stack()->is_empty(), "Array marking should have completed");
+  log_task("Marking task", worker_id, start);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCMARKTASK_HPP
+#define SHARE_GC_G1_G1FULLGCMARKTASK_HPP
+
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/heapRegionManager.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "utilities/ticks.hpp"
+
+class G1FullGCMarkTask : public G1FullGCTask {
+  G1RootProcessor          _root_processor;
+  ParallelTaskTerminator   _terminator;
+
+public:
+  G1FullGCMarkTask(G1FullCollector* collector);
+  void work(uint worker_id);
+};
+
+#endif // SHARE_GC_G1_G1FULLGCMARKTASK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1FullGCMarker.inline.hpp"
+
+G1FullGCMarker::G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap) :
+    _worker_id(worker_id),
+    _mark_closure(worker_id, this, G1CollectedHeap::heap()->ref_processor_stw()),
+    _verify_closure(VerifyOption_G1UseFullMarking),
+    _cld_closure(mark_closure()),
+    _stack_closure(this),
+    _preserved_stack(preserved_stack),
+    _bitmap(bitmap) {
+  _oop_stack.initialize();
+  _objarray_stack.initialize();
+}
+
+G1FullGCMarker::~G1FullGCMarker() {
+  assert(is_empty(), "Must be empty at this point");
+}
+
+void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
+                                      ObjArrayTaskQueueSet* array_stacks,
+                                      ParallelTaskTerminator* terminator) {
+  int hash_seed = 17;
+  do {
+    drain_stack();
+    ObjArrayTask steal_array;
+    if (array_stacks->steal(_worker_id, &hash_seed, steal_array)) {
+      follow_array_chunk(objArrayOop(steal_array.obj()), steal_array.index());
+    } else {
+      oop steal_oop;
+      if (oop_stacks->steal(_worker_id, &hash_seed, steal_oop)) {
+        follow_object(steal_oop);
+      }
+    }
+  } while (!is_empty() || !terminator->offer_termination());
+}
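
complete_marking() above alternates between draining the worker's own stacks and stealing from other workers until the terminator agrees that everyone is out of work. The shape of that loop can be illustrated with a small standalone program; it uses a plain "tasks in flight" counter for termination instead of HotSpot's ParallelTaskTerminator, and every name in it is made up:

    #include <atomic>
    #include <cstdio>
    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Toy drain-then-steal marker; the loop shape is the point, not the details.
    struct Queues {
      std::vector<std::vector<int>> stacks;   // one "mark stack" per worker
      std::vector<std::mutex> locks;          // coarse locking, for simplicity
      std::atomic<long> in_flight{0};

      explicit Queues(unsigned n) : stacks(n), locks(n) {}

      void push(unsigned w, int task) {
        std::lock_guard<std::mutex> g(locks[w]);
        stacks[w].push_back(task);
        in_flight.fetch_add(1);
      }
      bool pop(unsigned w, int& task) {
        std::lock_guard<std::mutex> g(locks[w]);
        if (stacks[w].empty()) return false;
        task = stacks[w].back();
        stacks[w].pop_back();
        return true;
      }
      bool steal(unsigned self, int& task) {
        for (unsigned i = 0; i < stacks.size(); ++i) {
          if (i != self && pop(i, task)) return true;
        }
        return false;
      }
    };

    void worker(Queues& q, unsigned id) {
      int task;
      while (q.in_flight.load() > 0) {          // spin until all work is done
        if (q.pop(id, task) || q.steal(id, task)) {
          if (task > 1) {                       // "follow" the object: push children
            q.push(id, task / 2);
            q.push(id, task - 1);
          }
          q.in_flight.fetch_sub(1);             // parent done after children pushed
        }
      }
    }

    int main() {
      const unsigned n = 4;
      Queues q(n);
      q.push(0, 20);                            // the "root" object
      std::vector<std::thread> threads;
      for (unsigned i = 0; i < n; ++i) threads.emplace_back(worker, std::ref(q), i);
      for (std::thread& t : threads) t.join();
      std::puts("marking complete");
    }
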
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCMARKER_HPP
+#define SHARE_GC_G1_G1FULLGCMARKER_HPP
+
+#include "gc/g1/g1FullGCOopClosures.hpp"
+#include "gc/shared/preservedMarks.hpp"
+#include "gc/shared/taskqueue.hpp"
+#include "memory/iterator.hpp"
+#include "oops/markOop.hpp"
+#include "oops/oop.hpp"
+#include "runtime/timer.hpp"
+#include "utilities/chunkedList.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/stack.hpp"
+
+typedef OverflowTaskQueue<oop, mtGC>                 OopQueue;
+typedef OverflowTaskQueue<ObjArrayTask, mtGC>        ObjArrayTaskQueue;
+
+typedef GenericTaskQueueSet<OopQueue, mtGC>          OopQueueSet;
+typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
+
+class G1CMBitMap;
+
+class G1FullGCMarker : public CHeapObj<mtGC> {
+private:
+  uint               _worker_id;
+  // Backing mark bitmap
+  G1CMBitMap*        _bitmap;
+
+  // Mark stack
+  OopQueue           _oop_stack;
+  ObjArrayTaskQueue  _objarray_stack;
+  PreservedMarks*    _preserved_stack;
+
+  // Marking closures
+  G1MarkAndPushClosure _mark_closure;
+  G1VerifyOopClosure   _verify_closure;
+  G1FollowStackClosure _stack_closure;
+  CLDToOopClosure      _cld_closure;
+
+  inline bool is_empty();
+  inline bool pop_object(oop& obj);
+  inline bool pop_objarray(ObjArrayTask& array);
+  inline void push_objarray(oop obj, size_t index);
+  inline bool mark_object(oop obj);
+
+  // Marking helpers
+  inline void follow_object(oop obj);
+  inline void follow_array(objArrayOop array);
+  inline void follow_array_chunk(objArrayOop array, int index);
+public:
+  G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap);
+  ~G1FullGCMarker();
+
+  // Stack getters
+  OopQueue*          oop_stack()       { return &_oop_stack; }
+  ObjArrayTaskQueue* objarray_stack()  { return &_objarray_stack; }
+  PreservedMarks*    preserved_stack() { return _preserved_stack; }
+
+  // Marking entry points
+  template <class T> inline void mark_and_push(T* p);
+  inline void follow_klass(Klass* k);
+  inline void follow_cld(ClassLoaderData* cld);
+
+  inline void drain_stack();
+  void complete_marking(OopQueueSet* oop_stacks,
+                        ObjArrayTaskQueueSet* array_stacks,
+                        ParallelTaskTerminator* terminator);
+
+  // Closure getters
+  CLDToOopClosure*      cld_closure()   { return &_cld_closure; }
+  G1MarkAndPushClosure* mark_closure()  { return &_mark_closure; }
+  G1FollowStackClosure* stack_closure() { return &_stack_closure; }
+};
+
+#endif // SHARE_GC_G1_G1FULLGCMARKER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
+#define SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
+
+#include "gc/g1/g1Allocator.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/g1FullGCMarker.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/g1StringDedupQueue.hpp"
+#include "gc/shared/preservedMarks.inline.hpp"
+#include "utilities/debug.hpp"
+
+inline bool G1FullGCMarker::mark_object(oop obj) {
+  // Not marking closed archive objects.
+  if (G1ArchiveAllocator::is_closed_archive_object(obj)) {
+    return false;
+  }
+
+  // Try to mark.
+  if (!_bitmap->par_mark(obj)) {
+    // Lost mark race.
+    return false;
+  }
+
+  // Marked by us, preserve if needed.
+  markOop mark = obj->mark();
+  if (mark->must_be_preserved(obj) &&
+      !G1ArchiveAllocator::is_open_archive_object(obj)) {
+    preserved_stack()->push(obj, mark);
+  }
+
+  // Check if deduplicatable string.
+  if (G1StringDedup::is_enabled()) {
+    G1StringDedup::enqueue_from_mark(obj, _worker_id);
+  }
+  return true;
+}
+
+template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (mark_object(obj)) {
+      _oop_stack.push(obj);
+      assert(_bitmap->is_marked(obj), "Must be marked now - marked by self");
+    } else {
+      assert(_bitmap->is_marked(obj) || G1ArchiveAllocator::is_closed_archive_object(obj),
+             "Must be marked by other or closed archive object");
+    }
+  }
+}
+
+inline bool G1FullGCMarker::is_empty() {
+  return _oop_stack.is_empty() && _objarray_stack.is_empty();
+}
+
+inline bool G1FullGCMarker::pop_object(oop& obj) {
+  return _oop_stack.pop_overflow(obj) || _oop_stack.pop_local(obj);
+}
+
+inline void G1FullGCMarker::push_objarray(oop obj, size_t index) {
+  ObjArrayTask task(obj, index);
+  assert(task.is_valid(), "bad ObjArrayTask");
+  _objarray_stack.push(task);
+}
+
+inline bool G1FullGCMarker::pop_objarray(ObjArrayTask& arr) {
+  return _objarray_stack.pop_overflow(arr) || _objarray_stack.pop_local(arr);
+}
+
+inline void G1FullGCMarker::follow_array(objArrayOop array) {
+  follow_klass(array->klass());
+  // Don't push empty arrays to avoid unnecessary work.
+  if (array->length() > 0) {
+    push_objarray(array, 0);
+  }
+}
+
+void G1FullGCMarker::follow_array_chunk(objArrayOop array, int index) {
+  const int len = array->length();
+  const int beg_index = index;
+  assert(beg_index < len || len == 0, "index too large");
+
+  const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
+  const int end_index = beg_index + stride;
+
+  array->oop_iterate_range(mark_closure(), beg_index, end_index);
+
+  if (VerifyDuringGC) {
+    _verify_closure.set_containing_obj(array);
+    NoHeaderExtendedOopClosure no(&_verify_closure);
+    array->oop_iterate_range(&no, beg_index, end_index);
+    if (_verify_closure.failures()) {
+      assert(false, "Failed");
+    }
+  }
+
+  if (end_index < len) {
+    push_objarray(array, end_index); // Push the continuation.
+  }
+}
+
+inline void G1FullGCMarker::follow_object(oop obj) {
+  assert(_bitmap->is_marked(obj), "should be marked");
+  if (obj->is_objArray()) {
+    // Handle object arrays explicitly to allow them to
+    // be split into chunks if needed.
+    follow_array((objArrayOop)obj);
+  } else {
+    obj->oop_iterate(mark_closure());
+    if (VerifyDuringGC) {
+      if (obj->is_instance() && InstanceKlass::cast(obj->klass())->is_reference_instance_klass()) {
+        return;
+      }
+      _verify_closure.set_containing_obj(obj);
+      obj->oop_iterate_no_header(&_verify_closure);
+      if (_verify_closure.failures()) {
+        log_warning(gc, verify)("Failed after %d", _verify_closure._cc);
+        assert(false, "Failed");
+      }
+    }
+  }
+}
+
+void G1FullGCMarker::drain_stack() {
+  do {
+    oop obj;
+    while (pop_object(obj)) {
+      assert(_bitmap->is_marked(obj), "must be marked");
+      follow_object(obj);
+    }
+    // Process ObjArrays one at a time to avoid marking stack bloat.
+    ObjArrayTask task;
+    if (pop_objarray(task)) {
+      follow_array_chunk(objArrayOop(task.obj()), task.index());
+    }
+  } while (!is_empty());
+}
+
+inline void G1FullGCMarker::follow_klass(Klass* k) {
+  oop op = k->klass_holder();
+  mark_and_push(&op);
+}
+
+inline void G1FullGCMarker::follow_cld(ClassLoaderData* cld) {
+  _cld_closure.do_cld(cld);
+}
+
+#endif // SHARE_GC_G1_G1FULLGCMARKER_INLINE_HPP
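
follow_array_chunk() above scans at most ObjArrayMarkingStride elements per task and pushes a continuation for the remainder, which keeps large object arrays from monopolizing one worker and keeps the tail stealable. A tiny standalone sketch of the same chunking idea (the stride value and all names are illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <stack>
    #include <vector>

    static const std::size_t kStride = 4;       // stand-in for ObjArrayMarkingStride

    void mark_array_in_chunks(const std::vector<int>& array) {
      std::stack<std::size_t> continuations;    // pending start indices
      continuations.push(0);
      while (!continuations.empty()) {
        std::size_t begin = continuations.top();
        continuations.pop();
        std::size_t end = std::min(begin + kStride, array.size());
        for (std::size_t i = begin; i < end; ++i) {
          std::printf("visit element %zu -> %d\n", i, array[i]);
        }
        if (end < array.size()) {
          continuations.push(end);               // push the continuation chunk
        }
      }
    }

    int main() {
      mark_array_in_chunks(std::vector<int>(10, 42));
    }
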
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1FullGCMarker.inline.hpp"
+#include "gc/g1/g1FullGCOopClosures.inline.hpp"
+#include "gc/g1/g1_specialized_oop_closures.hpp"
+#include "logging/logStream.hpp"
+
+void G1MarkAndPushClosure::do_oop(oop* p) {
+  do_oop_nv(p);
+}
+
+void G1MarkAndPushClosure::do_oop(narrowOop* p) {
+  do_oop_nv(p);
+}
+
+bool G1MarkAndPushClosure::do_metadata() {
+  return do_metadata_nv();
+}
+
+void G1MarkAndPushClosure::do_klass(Klass* k) {
+  do_klass_nv(k);
+}
+
+void G1MarkAndPushClosure::do_cld(ClassLoaderData* cld) {
+  do_cld_nv(cld);
+}
+
+G1AdjustAndRebuildClosure::G1AdjustAndRebuildClosure(uint worker_id) :
+  _worker_id(worker_id),
+  _compaction_delta(0),
+  _g1h(G1CollectedHeap::heap()) { }
+
+void G1AdjustAndRebuildClosure::update_compaction_delta(oop obj) {
+  if (G1ArchiveAllocator::is_open_archive_object(obj)) {
+    _compaction_delta = 0;
+    return;
+  }
+  oop forwardee = obj->forwardee();
+  if (forwardee == NULL) {
+    // Object not moved.
+    _compaction_delta = 0;
+  } else {
+    // Object moved to forwardee, calculate delta.
+    _compaction_delta = calculate_compaction_delta(obj, forwardee);
+  }
+}
+
+void G1AdjustClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void G1AdjustClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
+
+void G1AdjustAndRebuildClosure::do_oop(oop* p)       { do_oop_nv(p); }
+void G1AdjustAndRebuildClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+void G1FollowStackClosure::do_void() { _marker->drain_stack(); }
+
+void G1FullKeepAliveClosure::do_oop(oop* p) { do_oop_work(p); }
+void G1FullKeepAliveClosure::do_oop(narrowOop* p) { do_oop_work(p); }
+
+G1VerifyOopClosure::G1VerifyOopClosure(VerifyOption option) :
+   _g1h(G1CollectedHeap::heap()),
+   _containing_obj(NULL),
+   _verify_option(option),
+   _cc(0),
+   _failures(false) {
+}
+
+void G1VerifyOopClosure::print_object(outputStream* out, oop obj) {
+#ifdef PRODUCT
+  Klass* k = obj->klass();
+  const char* class_name = InstanceKlass::cast(k)->external_name();
+  out->print_cr("class name %s", class_name);
+#else // PRODUCT
+  obj->print_on(out);
+#endif // PRODUCT
+}
+
+template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    _cc++;
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    bool failed = false;
+    if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) {
+      MutexLockerEx x(ParGCRareEvent_lock,
+          Mutex::_no_safepoint_check_flag);
+      LogStreamHandle(Error, gc, verify) yy;
+      if (!_failures) {
+        yy.cr();
+        yy.print_cr("----------");
+      }
+      if (!_g1h->is_in_closed_subset(obj)) {
+        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+        yy.print_cr("Field " PTR_FORMAT
+            " of live obj " PTR_FORMAT " in region "
+            "[" PTR_FORMAT ", " PTR_FORMAT ")",
+            p2i(p), p2i(_containing_obj),
+            p2i(from->bottom()), p2i(from->end()));
+        print_object(&yy, _containing_obj);
+        yy.print_cr("points to obj " PTR_FORMAT " not in the heap",
+            p2i(obj));
+      } else {
+        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
+        HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
+        yy.print_cr("Field " PTR_FORMAT
+            " of live obj " PTR_FORMAT " in region "
+            "[" PTR_FORMAT ", " PTR_FORMAT ")",
+            p2i(p), p2i(_containing_obj),
+            p2i(from->bottom()), p2i(from->end()));
+        print_object(&yy, _containing_obj);
+        yy.print_cr("points to dead obj " PTR_FORMAT " in region "
+            "[" PTR_FORMAT ", " PTR_FORMAT ")",
+            p2i(obj), p2i(to->bottom()), p2i(to->end()));
+        print_object(&yy, obj);
+      }
+      yy.print_cr("----------");
+      yy.flush();
+      _failures = true;
+      failed = true;
+    }
+  }
+}
+
+// Generate G1 full GC specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCOOPCLOSURES_HPP
+#define SHARE_GC_G1_G1FULLGCOOPCLOSURES_HPP
+
+#include "memory/iterator.hpp"
+#include "memory/universe.hpp"
+
+class G1CollectedHeap;
+class G1FullCollector;
+class G1CMBitMap;
+class G1FullGCMarker;
+
+// Below are closures used by the G1 Full GC.
+class G1IsAliveClosure : public BoolObjectClosure {
+  G1CMBitMap* _bitmap;
+
+public:
+  G1IsAliveClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { }
+
+  virtual bool do_object_b(oop p);
+};
+
+class G1FullKeepAliveClosure: public OopClosure {
+  G1FullGCMarker* _marker;
+  template <class T>
+  inline void do_oop_work(T* p);
+
+public:
+  G1FullKeepAliveClosure(G1FullGCMarker* pm) : _marker(pm) { }
+
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+class G1MarkAndPushClosure : public ExtendedOopClosure {
+  G1FullGCMarker* _marker;
+  uint _worker_id;
+
+public:
+  G1MarkAndPushClosure(uint worker, G1FullGCMarker* marker, ReferenceProcessor* ref) :
+    _marker(marker),
+    _worker_id(worker),
+    ExtendedOopClosure(ref) { }
+
+  template <class T> inline void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+
+  virtual bool do_metadata();
+  bool do_metadata_nv();
+
+  virtual void do_klass(Klass* k);
+  void do_klass_nv(Klass* k);
+
+  virtual void do_cld(ClassLoaderData* cld);
+  void do_cld_nv(ClassLoaderData* cld);
+};
+
+class G1AdjustClosure : public OopClosure {
+public:
+  template <class T> static inline oop adjust_pointer(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+};
+
+class G1AdjustAndRebuildClosure : public ExtendedOopClosure {
+  uint _worker_id;
+  size_t _compaction_delta;
+  G1CollectedHeap* _g1h;
+
+  inline size_t calculate_compaction_delta(oop current, oop forwardee);
+  template <class T> inline T* add_compaction_delta(T* p);
+
+public:
+  G1AdjustAndRebuildClosure(uint worker_id);
+
+  void update_compaction_delta(oop obj);
+
+  template <class T> inline void add_reference(T* from_field, oop reference, uint worker_id);
+  template <class T> void do_oop_nv(T* p);
+  virtual void do_oop(oop* p);
+  virtual void do_oop(narrowOop* p);
+
+  virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
+};
+
+class G1AdjustObjectClosure {
+  G1AdjustAndRebuildClosure* _closure;
+
+public:
+  G1AdjustObjectClosure(G1AdjustAndRebuildClosure* cl) : _closure(cl) { }
+
+  inline int adjust_object(oop obj);
+};
+
+class G1VerifyOopClosure: public OopClosure {
+private:
+  G1CollectedHeap* _g1h;
+  bool             _failures;
+  oop              _containing_obj;
+  VerifyOption     _verify_option;
+
+public:
+  int _cc;
+  G1VerifyOopClosure(VerifyOption option);
+
+  void set_containing_obj(oop obj) {
+    _containing_obj = obj;
+  }
+
+  bool failures() { return _failures; }
+  void print_object(outputStream* out, oop obj);
+
+  template <class T> void do_oop_nv(T* p);
+
+  void do_oop(oop* p)       { do_oop_nv(p); }
+  void do_oop(narrowOop* p) { do_oop_nv(p); }
+};
+
+class G1FollowStackClosure: public VoidClosure {
+  G1FullGCMarker* _marker;
+
+public:
+  G1FollowStackClosure(G1FullGCMarker* marker) : _marker(marker) {}
+  virtual void do_void();
+};
+
+#endif // SHARE_GC_G1_G1FULLGCOOPCLOSURES_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCOOPCLOSURES_INLINE_HPP
+#define SHARE_GC_G1_G1FULLGCOOPCLOSURES_INLINE_HPP
+
+#include "gc/g1/g1Allocator.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/g1FullGCMarker.inline.hpp"
+#include "gc/g1/g1FullGCOopClosures.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "memory/iterator.inline.hpp"
+
+template <typename T>
+inline void G1MarkAndPushClosure::do_oop_nv(T* p) {
+  _marker->mark_and_push(p);
+}
+
+inline bool G1MarkAndPushClosure::do_metadata_nv() {
+  return true;
+}
+
+inline void G1MarkAndPushClosure::do_klass_nv(Klass* k) {
+  _marker->follow_klass(k);
+}
+
+inline void G1MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) {
+  _marker->follow_cld(cld);
+}
+
+template <class T> inline oop G1AdjustClosure::adjust_pointer(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (oopDesc::is_null(heap_oop)) {
+    // NULL reference, return NULL.
+    return NULL;
+  }
+
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  assert(Universe::heap()->is_in(obj), "should be in heap");
+  if (G1ArchiveAllocator::is_archive_object(obj)) {
+    // Never forwarding archive objects, return current reference.
+    return obj;
+  }
+
+  oop forwardee = obj->forwardee();
+  if (forwardee == NULL) {
+    // Not forwarded, return current reference.
+    assert(obj->mark() == markOopDesc::prototype_for_object(obj) || // Correct mark
+           obj->mark()->must_be_preserved(obj) || // Will be restored by PreservedMarksSet
+           (UseBiasedLocking && obj->has_bias_pattern()), // Will be restored by BiasedLocking
+           "Must have correct prototype or be preserved, obj: " PTR_FORMAT ", mark: " PTR_FORMAT ", prototype: " PTR_FORMAT,
+           p2i(obj), p2i(obj->mark()), p2i(markOopDesc::prototype_for_object(obj)));
+    return obj;
+  }
+
+  // Forwarded, update and return new reference.
+  assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
+  oopDesc::encode_store_heap_oop_not_null(p, forwardee);
+  return forwardee;
+}
+
+template <class T>
+inline void G1AdjustAndRebuildClosure::add_reference(T* from_field, oop reference, uint worker_id) {
+  if (HeapRegion::is_in_same_region(from_field, reference)) {
+    return;
+  }
+  _g1h->heap_region_containing(reference)->rem_set()->add_reference(from_field, worker_id);
+}
+
+inline size_t G1AdjustAndRebuildClosure::calculate_compaction_delta(oop current, oop forwardee) {
+  return pointer_delta((HeapWord*)forwardee, (HeapWord*)current);
+}
+
+template <class T>
+inline T* G1AdjustAndRebuildClosure::add_compaction_delta(T* p) {
+  return (T*)((HeapWord*)p + _compaction_delta);
+}
+
+template<typename T>
+void G1AdjustAndRebuildClosure::do_oop_nv(T* p) {
+  oop new_reference = G1AdjustClosure::adjust_pointer(p);
+  if (new_reference == NULL) {
+    return;
+  }
+
+  // Update p using the calculated compaction delta to
+  // get the new field address.
+  T* new_field = add_compaction_delta(p);
+  // Update the remembered set.
+  add_reference(new_field, new_reference, _worker_id);
+}
+
+inline int G1AdjustObjectClosure::adjust_object(oop obj) {
+  _closure->update_compaction_delta(obj);
+  return obj->oop_iterate_size(_closure);
+}
+
+inline bool G1IsAliveClosure::do_object_b(oop p) {
+  return _bitmap->is_marked(p) || G1ArchiveAllocator::is_closed_archive_object(p);
+}
+
+template<typename T>
+inline void G1FullKeepAliveClosure::do_oop_work(T* p) {
+  _marker->mark_and_push(p);
+}
+
+#endif // SHARE_GC_G1_G1FULLGCOOPCLOSURES_INLINE_HPP
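
Two pieces of arithmetic above are easy to lose in the template plumbing: adjust_pointer() rewrites a reference to its forwardee (or leaves it alone if the target did not move), and G1AdjustAndRebuildClosure shifts the field's own address by its containing object's compaction delta before handing it to the remembered set. A standalone sketch of both, using plain integers as addresses (illustrative only, and using a signed delta rather than pointer_delta()):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    // Plain integers stand in for heap addresses; none of this is HotSpot API.
    using Addr = std::uintptr_t;

    // Rewrite a reference to the destination of the object it points to, or
    // leave it untouched if that object did not move.
    Addr adjust_pointer(Addr referent, const std::unordered_map<Addr, Addr>& forwardee) {
      auto it = forwardee.find(referent);
      return it == forwardee.end() ? referent : it->second;
    }

    // A field moves by the same (possibly negative) delta as its containing object.
    Addr new_field_address(Addr field, Addr obj, Addr obj_destination) {
      std::ptrdiff_t delta = static_cast<std::ptrdiff_t>(obj_destination) -
                             static_cast<std::ptrdiff_t>(obj);
      return static_cast<Addr>(static_cast<std::ptrdiff_t>(field) + delta);
    }

    int main() {
      std::unordered_map<Addr, Addr> forwardee = {{0x1000, 0x0800}};
      Addr obj = 0x2000, obj_destination = 0x1800, field = 0x2010;
      Addr new_referent = adjust_pointer(0x1000, forwardee);              // 0x0800
      Addr new_field    = new_field_address(field, obj, obj_destination); // 0x1810
      assert(new_referent == 0x0800 && new_field == 0x1810);
      std::printf("referent %#zx, field %#zx\n",
                  (std::size_t)new_referent, (std::size_t)new_field);
    }

In the closure above, the adjusted field address and the new referent are what end up in add_reference() whenever they land in different regions.
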
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCMarker.hpp"
+#include "gc/g1/g1FullGCOopClosures.inline.hpp"
+#include "gc/g1/g1FullGCPrepareTask.hpp"
+#include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "logging/log.hpp"
+#include "utilities/ticks.inline.hpp"
+
+bool G1FullGCPrepareTask::G1CalculatePointersClosure::doHeapRegion(HeapRegion* hr) {
+  if (hr->is_humongous()) {
+    oop obj = oop(hr->humongous_start_region()->bottom());
+    if (_bitmap->is_marked(obj)) {
+      if (hr->is_starts_humongous()) {
+        obj->forward_to(obj);
+      }
+    } else {
+      free_humongous_region(hr);
+    }
+  } else if (!hr->is_pinned()) {
+    prepare_for_compaction(hr);
+  }
+
+  // Reset data structures not valid after Full GC.
+  reset_region_metadata(hr);
+
+  return false;
+}
+
+G1FullGCPrepareTask::G1FullGCPrepareTask(G1FullCollector* collector) :
+    G1FullGCTask("G1 Prepare Compact Task", collector),
+    _hrclaimer(collector->workers()),
+    _freed_regions(false) {
+}
+
+void G1FullGCPrepareTask::set_freed_regions() {
+  if (!_freed_regions) {
+    _freed_regions = true;
+  }
+}
+
+bool G1FullGCPrepareTask::has_freed_regions() {
+  return _freed_regions;
+}
+
+void G1FullGCPrepareTask::work(uint worker_id) {
+  Ticks start = Ticks::now();
+  G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
+  G1CalculatePointersClosure closure(collector()->mark_bitmap(), compaction_point);
+  G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);
+
+  // Update humongous region sets
+  closure.update_sets();
+  compaction_point->update();
+
+  // Check if any regions were freed by this worker and store in task.
+  if (closure.freed_regions()) {
+    set_freed_regions();
+  }
+  log_task("Prepare compaction task", worker_id, start);
+}
+
+G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1CMBitMap* bitmap,
+                                                                            G1FullGCCompactionPoint* cp) :
+    _g1h(G1CollectedHeap::heap()),
+    _bitmap(bitmap),
+    _cp(cp),
+    _humongous_regions_removed(0) { }
+
+void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(HeapRegion* hr) {
+  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
+
+  hr->set_containing_set(NULL);
+  _humongous_regions_removed++;
+
+  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
+  prepare_for_compaction(hr);
+  dummy_free_list.remove_all();
+}
+
+void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
+  hr->reset_gc_time_stamp();
+  hr->rem_set()->clear();
+
+  _g1h->g1_barrier_set()->clear(MemRegion(hr->bottom(), hr->end()));
+
+  if (_g1h->g1_hot_card_cache()->use_cache()) {
+    _g1h->g1_hot_card_cache()->reset_card_counts(hr);
+  }
+}
+
+G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
+    _cp(cp) { }
+
+size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
+  size_t size = object->size();
+  _cp->forward(object, size);
+  return size;
+}
+
+size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
+  // Only re-prepare objects that are not forwarded, or are forwarded within
+  // the current region. Skip objects already forwarded to another region.
+  oop forwarded_to = obj->forwardee();
+  if (forwarded_to != NULL && !_current->is_in(forwarded_to)) {
+    return obj->size();
+  }
+
+  // Get size and forward.
+  size_t size = obj->size();
+  _cp->forward(obj, size);
+
+  return size;
+}
+
+void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
+                                                                                  HeapRegion* hr) {
+  G1PrepareCompactLiveClosure prepare_compact(cp);
+  hr->set_compaction_top(hr->bottom());
+  hr->apply_to_marked_objects(_bitmap, &prepare_compact);
+}
+
+void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
+  if (!_cp->is_initialized()) {
+    hr->set_compaction_top(hr->bottom());
+    _cp->initialize(hr, true);
+  }
+  // Add region to the compaction queue and prepare it.
+  _cp->add(hr);
+  prepare_for_compaction_work(_cp, hr);
+}
+
+void G1FullGCPrepareTask::prepare_serial_compaction() {
+  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
+  // At this point we know that no regions were completely freed by
+  // the parallel compaction. That means that the last region of each
+  // compaction queue still has data in it. We compact these regions
+  // serially to avoid a premature OOM.
+  for (uint i = 0; i < collector()->workers(); i++) {
+    G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
+    if (cp->has_regions()) {
+      collector()->serial_compaction_point()->add(cp->remove_last());
+    }
+  }
+
+  // Update the forwarding information for the regions in the serial
+  // compaction point.
+  G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
+  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
+    HeapRegion* current = *it;
+    if (!cp->is_initialized()) {
+      // Initialize the compaction point. Nothing more is needed for the first heap region
+      // since it is already prepared for compaction.
+      cp->initialize(current, false);
+    } else {
+      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
+      G1RePrepareClosure re_prepare(cp, current);
+      current->set_compaction_top(current->bottom());
+      current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
+    }
+  }
+  cp->update();
+}
+
+void G1FullGCPrepareTask::G1CalculatePointersClosure::update_sets() {
+  // We'll recalculate total used bytes and recreate the free list
+  // at the end of the GC, so no point in updating those values here.
+  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
+}
+
+bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
+  if (_humongous_regions_removed > 0) {
+    // Free regions from dead humongous regions.
+    return true;
+  }
+
+  if (!_cp->has_regions()) {
+    // No regions in queue, so no free ones either.
+    return false;
+  }
+
+  if (_cp->current_region() != _cp->regions()->last()) {
+    // The current region used for compaction is not the last in the
+    // queue. That means there is at least one free region in the queue.
+    return true;
+  }
+
+  // No free regions in the queue.
+  return false;
+}
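
prepare_serial_compaction() above implements the fallback for the case where the parallel pass freed no region: the tail region of every per-worker compaction queue may be only partly filled, so those tails are handed to a single serial compaction point and re-planned back to back. A small standalone sketch of that hand-over (types and names are illustrative):

    #include <cstddef>
    #include <vector>

    // Illustrative types only.
    struct Region { std::size_t used_bytes; };

    struct CompactionQueue {
      std::vector<Region*> regions;
      bool has_regions() const { return !regions.empty(); }
      Region* remove_last() { Region* r = regions.back(); regions.pop_back(); return r; }
    };

    // Move the (possibly half-empty) tail region of every worker queue to a
    // single serial queue, which will re-plan them one after another.
    void prepare_serial_compaction(std::vector<CompactionQueue>& per_worker,
                                   CompactionQueue& serial) {
      for (CompactionQueue& q : per_worker) {
        if (q.has_regions()) {
          serial.regions.push_back(q.remove_last());
        }
      }
    }

    int main() {
      Region a{10}, b{20}, c{5};
      std::vector<CompactionQueue> per_worker(2);
      per_worker[0].regions = {&a, &b};
      per_worker[1].regions = {&c};
      CompactionQueue serial;
      prepare_serial_compaction(per_worker, serial);
      return serial.regions.size() == 2 ? 0 : 1;   // b and c were handed over
    }
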
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCPREPARETASK_HPP
+#define SHARE_GC_G1_G1FULLGCPREPARETASK_HPP
+
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/heapRegionManager.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "utilities/ticks.hpp"
+
+class G1CMBitMap;
+
+class G1FullGCPrepareTask : public G1FullGCTask {
+protected:
+  volatile bool     _freed_regions;
+  HeapRegionClaimer _hrclaimer;
+
+  void set_freed_regions();
+
+public:
+  G1FullGCPrepareTask(G1FullCollector* collector);
+  void work(uint worker_id);
+  void prepare_serial_compaction();
+  bool has_freed_regions();
+
+protected:
+  class G1CalculatePointersClosure : public HeapRegionClosure {
+  protected:
+    G1CollectedHeap* _g1h;
+    G1CMBitMap* _bitmap;
+    G1FullGCCompactionPoint* _cp;
+    uint _humongous_regions_removed;
+
+    virtual void prepare_for_compaction(HeapRegion* hr);
+    void prepare_for_compaction_work(G1FullGCCompactionPoint* cp, HeapRegion* hr);
+    void free_humongous_region(HeapRegion* hr);
+    void reset_region_metadata(HeapRegion* hr);
+
+  public:
+    G1CalculatePointersClosure(G1CMBitMap* bitmap,
+                               G1FullGCCompactionPoint* cp);
+
+    void update_sets();
+    bool doHeapRegion(HeapRegion* hr);
+    bool freed_regions();
+  };
+
+  class G1PrepareCompactLiveClosure : public StackObj {
+    G1FullGCCompactionPoint* _cp;
+
+  public:
+    G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp);
+    size_t apply(oop object);
+  };
+
+  class G1RePrepareClosure : public StackObj {
+    G1FullGCCompactionPoint* _cp;
+    HeapRegion* _current;
+
+  public:
+    G1RePrepareClosure(G1FullGCCompactionPoint* hrcp,
+                       HeapRegion* hr) :
+        _cp(hrcp),
+        _current(hr) { }
+
+    size_t apply(oop object);
+  };
+};
+
+#endif // SHARE_GC_G1_G1FULLGCPREPARETASK_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1FullCollector.hpp"
+#include "gc/g1/g1FullGCMarker.hpp"
+#include "gc/g1/g1FullGCOopClosures.inline.hpp"
+#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+
+G1FullGCReferenceProcessingExecutor::G1FullGCReferenceProcessingExecutor(G1FullCollector* collector) :
+    _collector(collector),
+    _reference_processor(collector->reference_processor()),
+    _old_mt_degree(_reference_processor->num_q()) {
+  if (_reference_processor->processing_is_mt()) {
+    _reference_processor->set_active_mt_degree(_collector->workers());
+  }
+}
+
+G1FullGCReferenceProcessingExecutor::~G1FullGCReferenceProcessingExecutor() {
+  if (_reference_processor->processing_is_mt()) {
+    _reference_processor->set_active_mt_degree(_old_mt_degree);
+  }
+}
+
+G1FullGCReferenceProcessingExecutor::G1RefProcTaskProxy::G1RefProcTaskProxy(ProcessTask& proc_task,
+                                                                      G1FullCollector* collector) :
+     AbstractGangTask("G1 reference processing task"),
+     _proc_task(proc_task),
+     _collector(collector),
+     _terminator(_collector->workers(), _collector->oop_queue_set()) { }
+
+void G1FullGCReferenceProcessingExecutor::G1RefProcTaskProxy::work(uint worker_id) {
+  G1FullGCMarker* marker = _collector->marker(worker_id);
+  G1IsAliveClosure is_alive(_collector->mark_bitmap());
+  G1FullKeepAliveClosure keep_alive(marker);
+  _proc_task.work(worker_id,
+                  is_alive,
+                  keep_alive,
+                  *marker->stack_closure());
+}
+
+G1FullGCReferenceProcessingExecutor::G1RefEnqueueTaskProxy::G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
+  AbstractGangTask("G1 reference enqueue task"),
+  _enq_task(enq_task) { }
+
+void G1FullGCReferenceProcessingExecutor::G1RefEnqueueTaskProxy::work(uint worker_id) {
+  _enq_task.work(worker_id);
+}
+
+void G1FullGCReferenceProcessingExecutor::run_task(AbstractGangTask* task) {
+  G1CollectedHeap::heap()->workers()->run_task(task, _collector->workers());
+}
+
+void G1FullGCReferenceProcessingExecutor::execute(ProcessTask& proc_task) {
+  G1RefProcTaskProxy proc_task_proxy(proc_task, _collector);
+  run_task(&proc_task_proxy);
+}
+
+// Driver routine for parallel reference processing.
+void G1FullGCReferenceProcessingExecutor::execute(EnqueueTask& enq_task) {
+  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
+  run_task(&enq_task_proxy);
+}
+
+void G1FullGCReferenceProcessingExecutor::execute(STWGCTimer* timer, G1FullGCTracer* tracer) {
+  GCTraceTime(Debug, gc, phases) debug("Phase 1: Reference Processing", timer);
+  // Process reference objects found during marking.
+  G1FullGCMarker* marker = _collector->marker(0);
+  G1IsAliveClosure is_alive(_collector->mark_bitmap());
+  G1FullKeepAliveClosure keep_alive(marker);
+  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_q());
+  AbstractRefProcTaskExecutor* executor = _reference_processor->processing_is_mt() ? this : NULL;
+
+  // Process discovered references, using this executor if multi-threaded
+  // processing is enabled.
+  const ReferenceProcessorStats& stats =
+      _reference_processor->process_discovered_references(&is_alive,
+                                                          &keep_alive,
+                                                          marker->stack_closure(),
+                                                          executor,
+                                                          &pt);
+
+  tracer->report_gc_reference_stats(stats);
+  pt.print_all_references();
+
+  assert(marker->oop_stack()->is_empty(), "Should be no oops on the stack");
+
+  // Now enqueue the references.
+  _reference_processor->enqueue_discovered_references(executor, &pt);
+  pt.print_enqueue_phase();
+}
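
The two proxy classes above exist only to adapt reference-processing tasks to the work-gang interface: execute() wraps the task and run_task() fans it out to the workers. The pattern itself is tiny; here is a standalone sketch using std::thread and a per-worker callable (none of this is HotSpot API):

    #include <cstdio>
    #include <functional>
    #include <thread>
    #include <vector>

    // Minimal fan-out executor in the spirit of run_task(); illustrative only.
    class SimpleExecutor {
      unsigned _workers;
    public:
      explicit SimpleExecutor(unsigned workers) : _workers(workers) {}

      void execute(const std::function<void(unsigned)>& per_worker_task) {
        std::vector<std::thread> threads;
        for (unsigned i = 0; i < _workers; ++i) {
          threads.emplace_back(per_worker_task, i);   // pass the worker_id through
        }
        for (std::thread& t : threads) {
          t.join();
        }
      }
    };

    int main() {
      SimpleExecutor executor(4);
      executor.execute([](unsigned worker_id) {
        std::printf("worker %u processing its reference sub-list\n", worker_id);
      });
    }

In the real executor the per-worker body is G1RefProcTaskProxy::work(), which additionally wires in the per-worker marker together with the is-alive and keep-alive closures.
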
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCREFERENCEPROCESSOREXECUTOR_HPP
+#define SHARE_GC_G1_G1FULLGCREFERENCEPROCESSOREXECUTOR_HPP
+
+#include "gc/g1/g1FullGCCompactionPoint.hpp"
+#include "gc/g1/g1FullGCScope.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/heapRegionManager.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "utilities/ticks.hpp"
+
+class G1FullGCTracer;
+class STWGCTimer;
+
+class G1FullGCReferenceProcessingExecutor: public AbstractRefProcTaskExecutor {
+  G1FullCollector*    _collector;
+  ReferenceProcessor* _reference_processor;
+  uint                _old_mt_degree;
+
+public:
+  G1FullGCReferenceProcessingExecutor(G1FullCollector* collector);
+  ~G1FullGCReferenceProcessingExecutor();
+
+  // Do reference processing.
+  void execute(STWGCTimer* timer, G1FullGCTracer* tracer);
+
+  // Executes the given task using concurrent marking worker threads.
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+
+private:
+  void run_task(AbstractGangTask* task);
+
+  class G1RefProcTaskProxy : public AbstractGangTask {
+    typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+    ProcessTask&             _proc_task;
+    G1FullCollector*         _collector;
+    ParallelTaskTerminator   _terminator;
+
+  public:
+    G1RefProcTaskProxy(ProcessTask& proc_task,
+                       G1FullCollector* collector);
+
+    virtual void work(uint worker_id);
+  };
+
+  class G1RefEnqueueTaskProxy: public AbstractGangTask {
+    typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+    EnqueueTask& _enq_task;
+
+  public:
+    G1RefEnqueueTaskProxy(EnqueueTask& enq_task);
+    virtual void work(uint worker_id);
+  };
+};
+
+#endif // SHARE_GC_G1_G1FULLGCREFERENCEPROCESSOREXECUTOR_HPP
--- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -25,13 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1FullGCScope.hpp"
 
-G1FullGCScope* G1FullGCScope::_instance = NULL;
-
-G1FullGCScope* G1FullGCScope::instance() {
-  assert(_instance != NULL, "Must be setup already");
-  return _instance;
-}
-
 G1FullGCScope::G1FullGCScope(bool explicit_gc, bool clear_soft) :
     _rm(),
     _explicit_gc(explicit_gc),
@@ -46,12 +39,10 @@
     _memory_stats(true, _g1h->gc_cause()),
     _collector_stats(_g1h->g1mm()->full_collection_counters()),
     _heap_transition(_g1h) {
-  assert(_instance == NULL, "Only one scope at a time");
   _timer.register_gc_start();
   _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
   _g1h->pre_full_gc_dump(&_timer);
   _g1h->trace_heap_before_gc(&_tracer);
-  _instance = this;
 }
 
 G1FullGCScope::~G1FullGCScope() {
@@ -64,7 +55,6 @@
   _g1h->post_full_gc_dump(&_timer);
   _timer.register_gc_end();
   _tracer.report_gc_end(_timer.gc_end(), _timer.time_partitions());
-  _instance = NULL;
 }
 
 bool G1FullGCScope::is_explicit_gc() {
@@ -79,7 +69,7 @@
   return &_timer;
 }
 
-SerialOldTracer* G1FullGCScope::tracer() {
+G1FullGCTracer* G1FullGCScope::tracer() {
   return &_tracer;
 }
 
--- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -45,7 +45,7 @@
   GCIdMark                _gc_id;
   SvcGCMarker             _svc_marker;
   STWGCTimer              _timer;
-  SerialOldTracer         _tracer;
+  G1FullGCTracer          _tracer;
   IsGCActiveMark          _active;
   GCTraceCPUTime          _cpu_time;
   ClearedAllSoftRefs      _soft_refs;
@@ -53,11 +53,7 @@
   TraceMemoryManagerStats _memory_stats;
   G1HeapTransition        _heap_transition;
 
-  // Singleton instance.
-  static G1FullGCScope* _instance;
 public:
-  static G1FullGCScope* instance();
-
   G1FullGCScope(bool explicit_gc, bool clear_soft);
   ~G1FullGCScope();
 
@@ -65,7 +61,7 @@
   bool should_clear_soft_refs();
 
   STWGCTimer* timer();
-  SerialOldTracer* tracer();
+  G1FullGCTracer* tracer();
   G1HeapTransition* heap_transition();
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCTask.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1FullGCTask.hpp"
+#include "logging/log.hpp"
+#include "utilities/ticks.inline.hpp"
+
+void G1FullGCTask::log_task(const char* name, uint worker_id, const Ticks& start, const Ticks& stop) {
+  Tickspan duration = stop - start;
+  double duration_ms = TimeHelper::counter_to_millis(duration.value());
+  log_trace(gc, phases)("%s (%u) %.3fms", name, worker_id, duration_ms);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCTask.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1FULLGCTASK_HPP
+#define SHARE_GC_G1_G1FULLGCTASK_HPP
+
+#include "gc/shared/workgroup.hpp"
+#include "utilities/ticks.hpp"
+
+class G1FullCollector;
+
+class G1FullGCTask : public AbstractGangTask {
+  G1FullCollector* _collector;
+
+protected:
+  G1FullGCTask(const char* name, G1FullCollector* collector) :
+    AbstractGangTask(name),
+    _collector(collector) { }
+
+  G1FullCollector* collector() { return _collector; }
+  void log_task(const char* name, uint worker_id, const Ticks& start, const Ticks& stop = Ticks::now());
+};
+
+#endif // SHARE_GC_G1_G1FULLGCTASK_HPP
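[Editor's note] log_task() above is meant to be paired with Ticks-based timing inside a subclass's work() method; the stop argument defaults to Ticks::now(). A hedged sketch with a hypothetical subclass, not part of this changeset:

// Illustrative sketch only; G1FullGCExampleTask is a made-up subclass.
class G1FullGCExampleTask : public G1FullGCTask {
public:
  G1FullGCExampleTask(G1FullCollector* collector) :
    G1FullGCTask("G1 Full GC Example Task", collector) { }

  void work(uint worker_id) {
    Ticks start = Ticks::now();                  // capture the per-worker start time
    // ... this worker's share of a full GC phase would run here ...
    log_task("Example Task", worker_id, start);  // logs the duration at trace level (gc+phases)
  }
};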
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -48,7 +48,7 @@
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
+  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
   VerifyRootsClosure(VerifyOption vo) :
     _g1h(G1CollectedHeap::heap()),
     _vo(vo),
@@ -63,9 +63,6 @@
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
         Log(gc, verify) log;
         log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
-        if (_vo == VerifyOption_G1UseMarkWord) {
-          log.error("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
-        }
         ResourceMark rm;
         LogStream ls(log.error());
         obj->print_on(&ls);
@@ -95,7 +92,7 @@
     }
 
     // Don't check the code roots during marking verification in a full GC
-    if (_vo == VerifyOption_G1UseMarkWord) {
+    if (_vo == VerifyOption_G1UseFullMarking) {
       return;
     }
 
@@ -203,7 +200,7 @@
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
+  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
   VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
     : _live_bytes(0), _hr(hr), _vo(vo) {
     _g1h = G1CollectedHeap::heap();
@@ -212,15 +209,15 @@
     VerifyLivenessOopClosure isLive(_g1h, _vo);
     assert(o != NULL, "Huh?");
     if (!_g1h->is_obj_dead_cond(o, _vo)) {
-      // If the object is alive according to the mark word,
+      // If the object is alive according to the full gc mark,
       // then verify that the marking information agrees.
       // Note we can't verify the contra-positive of the
       // above: if the object is dead (according to the mark
       // word), it may not be marked, or may have been marked
       // but has since became dead, or may have been allocated
       // since the last marking.
-      if (_vo == VerifyOption_G1UseMarkWord) {
-        guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
+      if (_vo == VerifyOption_G1UseFullMarking) {
+        guarantee(!_g1h->is_obj_dead(o), "Full GC marking and concurrent mark mismatch");
       }
 
       o->oop_iterate_no_header(&isLive);
@@ -299,7 +296,7 @@
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
+  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
   VerifyRegionClosure(bool par, VerifyOption vo)
     : _par(par),
       _vo(vo),
@@ -357,7 +354,7 @@
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
+  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS
   G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
       AbstractGangTask("Parallel verify task"),
       _g1h(g1h),
@@ -372,7 +369,7 @@
   void work(uint worker_id) {
     HandleMark hm;
     VerifyRegionClosure blk(true, _vo);
-    _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
+    _g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id);
     if (blk.failures()) {
       _failures = true;
     }
@@ -407,7 +404,7 @@
 
   bool failures = rootsCl.failures() || codeRootsCl.failures();
 
-  if (vo != VerifyOption_G1UseMarkWord) {
+  if (!_g1h->g1_policy()->collector_state()->full_collection()) {
     // If we're verifying during a full GC then the region sets
     // will have been torn down at the start of the GC. Therefore
     // verifying the region sets will fail. So we only verify
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -46,9 +46,9 @@
 
   // Perform verification.
 
-  // vo == UsePrevMarking  -> use "prev" marking information,
+  // vo == UsePrevMarking -> use "prev" marking information,
   // vo == UseNextMarking -> use "next" marking information
-  // vo == UseMarkWord    -> use the mark word in the object header
+  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
   //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
@@ -57,7 +57,7 @@
   // vo == UseNextMarking, which is to verify the "next" marking
   // information at the end of remark.
   // Currently there is only one place where this is called with
-  // vo == UseMarkWord, which is to verify the marking during a
+  // vo == UseFullMarking, which is to verify the marking during a
   // full GC.
   void verify(VerifyOption vo);
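[Editor's note] With VerifyOption_G1UseMarkWord gone, marking verification during a full GC goes through the new VerifyOption_G1UseFullMarking value. A hedged sketch of the call shape, mirroring the old call site this changeset removes; the actual new caller lives in the full GC code, outside these hunks:

// Illustrative sketch only.
if (VerifyDuringGC) {
  HandleMark hm;                                 // handle scope for verification
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->prepare_for_verify();
  g1h->verify(VerifyOption_G1UseFullMarking);    // "next" bitmap, ignoring TAMS
}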
 
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -110,7 +110,3 @@
 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
   _card_counts.clear_region(hr);
 }
-
-void G1HotCardCache::reset_card_counts() {
-  _card_counts.clear_all();
-}
--- a/src/hotspot/share/gc/g1/g1HotCardCache.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -128,9 +128,6 @@
     }
   }
 
-  // Zeros the values in the card counts table for entire committed heap
-  void reset_card_counts();
-
   // Zeros the values in the card counts table for the given region
   void reset_card_counts(HeapRegion* hr);
 
--- a/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,387 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/g1/g1FullGCScope.hpp"
-#include "gc/g1/g1MarkSweep.hpp"
-#include "gc/g1/g1RootProcessor.hpp"
-#include "gc/g1/g1StringDedup.hpp"
-#include "gc/serial/markSweep.inline.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/modRefBarrierSet.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/space.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "oops/instanceRefKlass.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/thread.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/events.hpp"
-
-class HeapRegion;
-
-void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
-                                      bool clear_all_softrefs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  HandleMark hm;  // Discard invalid handles created during gc
-
-#if COMPILER2_OR_JVMCI
-  DerivedPointerTable::clear();
-#endif
-#ifdef ASSERT
-  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
-    assert(clear_all_softrefs, "Policy should have been checked earler");
-  }
-#endif
-  // hook up weak ref data so it can be used during Mark-Sweep
-  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
-  assert(rp != NULL, "should be non-NULL");
-  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
-
-  GenMarkSweep::set_ref_processor(rp);
-  rp->setup_policy(clear_all_softrefs);
-
-  // When collecting the permanent generation Method*s may be moving,
-  // so we either have to flush all bcp data or convert it into bci.
-  CodeCache::gc_prologue();
-
-  bool marked_for_unloading = false;
-
-  allocate_stacks();
-
-  // We should save the marks of the currently locked biased monitors.
-  // The marking doesn't preserve the marks of biased objects.
-  BiasedLocking::preserve_marks();
-
-  // Process roots and do the marking.
-  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
-
-  // Prepare compaction.
-  mark_sweep_phase2();
-
-#if COMPILER2_OR_JVMCI
-  // Don't add any more derived pointers during phase3
-  DerivedPointerTable::set_active(false);
-#endif
-
-  // Adjust all pointers.
-  mark_sweep_phase3();
-
-  // Do the actual compaction.
-  mark_sweep_phase4();
-
-  GenMarkSweep::restore_marks();
-  BiasedLocking::restore_marks();
-  GenMarkSweep::deallocate_stacks();
-
-#if COMPILER2_OR_JVMCI
-  // Now update the derived pointers.
-  DerivedPointerTable::update_pointers();
-#endif
-
-  CodeCache::gc_epilogue();
-  JvmtiExport::gc_epilogue();
-
-  // refs processing: clean slate
-  GenMarkSweep::set_ref_processor(NULL);
-}
-
-STWGCTimer* G1MarkSweep::gc_timer() {
-  return G1FullGCScope::instance()->timer();
-}
-
-SerialOldTracer* G1MarkSweep::gc_tracer() {
-  return G1FullGCScope::instance()->tracer();
-}
-
-void G1MarkSweep::allocate_stacks() {
-  GenMarkSweep::_preserved_count_max = 0;
-  GenMarkSweep::_preserved_marks = NULL;
-  GenMarkSweep::_preserved_count = 0;
-}
-
-void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
-                                    bool clear_all_softrefs) {
-  // Recursively traverse all live objects and mark them
-  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", gc_timer());
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  // Need cleared claim bits for the roots processing
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
-  {
-    G1RootProcessor root_processor(g1h, 1);
-    if (ClassUnloading) {
-      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
-                                          &GenMarkSweep::follow_cld_closure,
-                                          &follow_code_closure);
-    } else {
-      root_processor.process_all_roots_no_string_table(
-                                          &GenMarkSweep::follow_root_closure,
-                                          &GenMarkSweep::follow_cld_closure,
-                                          &follow_code_closure);
-    }
-  }
-
-  {
-    GCTraceTime(Debug, gc, phases) trace("Reference Processing", gc_timer());
-
-    // Process reference objects found during marking
-    ReferenceProcessor* rp = GenMarkSweep::ref_processor();
-    assert(rp == g1h->ref_processor_stw(), "Sanity");
-
-    rp->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(gc_timer(), rp->num_q());
-
-    const ReferenceProcessorStats& stats =
-        rp->process_discovered_references(&GenMarkSweep::is_alive,
-                                          &GenMarkSweep::keep_alive,
-                                          &GenMarkSweep::follow_stack_closure,
-                                          NULL,
-                                          &pt);
-    gc_tracer()->report_gc_reference_stats(stats);
-    pt.print_all_references();
-  }
-
-  // This is the point where the entire marking should have completed.
-  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
-
-  {
-    GCTraceTime(Debug, gc, phases) trace("Weak Processing", gc_timer());
-    WeakProcessor::weak_oops_do(&GenMarkSweep::is_alive, &do_nothing_cl);
-  }
-
-  if (ClassUnloading) {
-    GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());
-
-    // Unload classes and purge the SystemDictionary.
-    bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive, gc_timer());
-
-    g1h->complete_cleaning(&GenMarkSweep::is_alive, purged_class);
-  } else {
-    GCTraceTime(Debug, gc, phases) trace("Cleanup", gc_timer());
-    g1h->partial_cleaning(&GenMarkSweep::is_alive, true, true, G1StringDedup::is_enabled());
-  }
-
-  if (VerifyDuringGC) {
-    HandleMark hm;  // handle scope
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTableDeactivate dpt_deact;
-#endif
-    g1h->prepare_for_verify();
-    // Note: we can verify only the heap here. When an object is
-    // marked, the previous value of the mark word (including
-    // identity hash values, ages, etc) is preserved, and the mark
-    // word is set to markOop::marked_value - effectively removing
-    // any hash values from the mark word. These hash values are
-    // used when verifying the dictionaries and so removing them
-    // from the mark word can make verification of the dictionaries
-    // fail. At the end of the GC, the original mark word values
-    // (including hash values) are restored to the appropriate
-    // objects.
-    GCTraceTime(Info, gc, verify)("During GC (full)");
-    g1h->verify(VerifyOption_G1UseMarkWord);
-  }
-
-  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
-}
-
-
-void G1MarkSweep::mark_sweep_phase2() {
-  // Now all live objects are marked, compute the new object addresses.
-
-  // It is not required that we traverse spaces in the same order in
-  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
-  // tracking expects us to do so. See comment under phase4.
-
-  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", gc_timer());
-
-  prepare_compaction();
-}
-
-class G1AdjustPointersClosure: public HeapRegionClosure {
- public:
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->is_humongous()) {
-      if (r->is_starts_humongous()) {
-        // We must adjust the pointers on the single H object.
-        oop obj = oop(r->bottom());
-        // point all the oops to the new location
-        MarkSweep::adjust_pointers(obj);
-      }
-    } else if (!r->is_closed_archive()) {
-      // This really ought to be "as_CompactibleSpace"...
-      r->adjust_pointers();
-    }
-    return false;
-  }
-};
-
-void G1MarkSweep::mark_sweep_phase3() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", gc_timer());
-
-  // Need cleared claim bits for the roots processing
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  CodeBlobToOopClosure adjust_code_closure(&GenMarkSweep::adjust_pointer_closure, CodeBlobToOopClosure::FixRelocations);
-  {
-    G1RootProcessor root_processor(g1h, 1);
-    root_processor.process_all_roots(&GenMarkSweep::adjust_pointer_closure,
-                                     &GenMarkSweep::adjust_cld_closure,
-                                     &adjust_code_closure);
-  }
-
-  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
-  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
-
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  WeakProcessor::oops_do(&GenMarkSweep::adjust_pointer_closure);
-
-  if (G1StringDedup::is_enabled()) {
-    G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
-  }
-
-  GenMarkSweep::adjust_marks();
-
-  G1AdjustPointersClosure blk;
-  g1h->heap_region_iterate(&blk);
-}
-
-class G1SpaceCompactClosure: public HeapRegionClosure {
-public:
-  G1SpaceCompactClosure() {}
-
-  bool doHeapRegion(HeapRegion* hr) {
-    if (hr->is_humongous()) {
-      if (hr->is_starts_humongous()) {
-        oop obj = oop(hr->bottom());
-        if (obj->is_gc_marked()) {
-          obj->init_mark();
-        } else {
-          assert(hr->is_empty(), "Should have been cleared in phase 2.");
-        }
-      }
-      hr->reset_during_compaction();
-    } else if (!hr->is_pinned()) {
-      hr->compact();
-    }
-    return false;
-  }
-};
-
-void G1MarkSweep::mark_sweep_phase4() {
-  // All pointers are now adjusted, move objects accordingly
-
-  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
-  // in the same order in phase2, phase3 and phase4. We don't quite do that
-  // here (code and comment not fixed for perm removal), so we tell the validate code
-  // to use a higher index (saved from phase2) when verifying perm_gen.
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", gc_timer());
-
-  G1SpaceCompactClosure blk;
-  g1h->heap_region_iterate(&blk);
-
-}
-
-void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  g1h->heap_region_iterate(blk);
-  blk->update_sets();
-}
-
-void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
-  HeapWord* end = hr->end();
-  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
-
-  hr->set_containing_set(NULL);
-  _humongous_regions_removed++;
-
-  _g1h->free_humongous_region(hr, &dummy_free_list, false /* skip_remset */);
-  prepare_for_compaction(hr, end);
-  dummy_free_list.remove_all();
-}
-
-void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
-  // If this is the first live region that we came across which we can compact,
-  // initialize the CompactPoint.
-  if (!is_cp_initialized()) {
-    _cp.space = hr;
-    _cp.threshold = hr->initialize_threshold();
-  }
-  prepare_for_compaction_work(&_cp, hr, end);
-}
-
-void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
-                                                          HeapRegion* hr,
-                                                          HeapWord* end) {
-  hr->prepare_for_compaction(cp);
-  // Also clear the part of the card table that will be unused after
-  // compaction.
-  _mrbs->clear(MemRegion(hr->compaction_top(), end));
-}
-
-void G1PrepareCompactClosure::update_sets() {
-  // We'll recalculate total used bytes and recreate the free list
-  // at the end of the GC, so no point in updating those values here.
-  _g1h->remove_from_old_sets(0, _humongous_regions_removed);
-}
-
-bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
-  if (hr->is_humongous()) {
-    oop obj = oop(hr->humongous_start_region()->bottom());
-    if (hr->is_starts_humongous() && obj->is_gc_marked()) {
-      obj->forward_to(obj);
-    }
-    if (!obj->is_gc_marked()) {
-      free_humongous_region(hr);
-    }
-  } else if (!hr->is_pinned()) {
-    prepare_for_compaction(hr, hr->end());
-  }
-  return false;
-}
--- a/src/hotspot/share/gc/g1/g1MarkSweep.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1MARKSWEEP_HPP
-#define SHARE_VM_GC_G1_G1MARKSWEEP_HPP
-
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "gc/serial/genMarkSweep.hpp"
-#include "gc/shared/generation.hpp"
-#include "memory/universe.hpp"
-#include "oops/markOop.hpp"
-#include "oops/oop.hpp"
-#include "runtime/timer.hpp"
-#include "utilities/growableArray.hpp"
-
-class ReferenceProcessor;
-
-// G1MarkSweep takes care of global mark-compact garbage collection for a
-// G1CollectedHeap using a four-phase pointer forwarding algorithm.  All
-// generations are assumed to support marking; those that can also support
-// compaction.
-//
-// Class unloading will only occur when a full gc is invoked.
-class G1PrepareCompactClosure;
-class G1ArchiveRegionMap;
-
-class G1MarkSweep : AllStatic {
- public:
-
-  static void invoke_at_safepoint(ReferenceProcessor* rp,
-                                  bool clear_all_softrefs);
-
-  static STWGCTimer* gc_timer();
-  static SerialOldTracer* gc_tracer();
-
-private:
-  // Mark live objects
-  static void mark_sweep_phase1(bool& marked_for_deopt,
-                                bool clear_all_softrefs);
-  // Calculate new addresses
-  static void mark_sweep_phase2();
-  // Update pointers
-  static void mark_sweep_phase3();
-  // Move objects to new positions
-  static void mark_sweep_phase4();
-
-  static void allocate_stacks();
-  static void prepare_compaction();
-  static void prepare_compaction_work(G1PrepareCompactClosure* blk);
-};
-
-class G1PrepareCompactClosure : public HeapRegionClosure {
- protected:
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mrbs;
-  CompactPoint _cp;
-  uint _humongous_regions_removed;
-
-  virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
-  void prepare_for_compaction_work(CompactPoint* cp, HeapRegion* hr, HeapWord* end);
-  void free_humongous_region(HeapRegion* hr);
-  bool is_cp_initialized() const { return _cp.space != NULL; }
-
- public:
-  G1PrepareCompactClosure() :
-    _g1h(G1CollectedHeap::heap()),
-    _mrbs(_g1h->g1_barrier_set()),
-    _humongous_regions_removed(0) { }
-
-  void update_sets();
-  bool doHeapRegion(HeapRegion* hr);
-};
-
-#endif // SHARE_VM_GC_G1_G1MARKSWEEP_HPP
--- a/src/hotspot/share/gc/g1/g1MarkSweep_ext.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1MarkSweep.hpp"
-
-void G1MarkSweep::prepare_compaction() {
-  G1PrepareCompactClosure blk;
-  G1MarkSweep::prepare_compaction_work(&blk);
-}
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -30,7 +30,6 @@
 #include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
 
 #include "gc/g1/g1ParScanThreadState.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RemSet.hpp"
 #include "oops/oop.inline.hpp"
 
 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -31,7 +31,7 @@
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
@@ -532,7 +532,7 @@
 
 void G1RemSet::scrub(uint worker_num, HeapRegionClaimer *hrclaimer) {
   G1ScrubRSClosure scrub_cl(&_card_live_data);
-  _g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
+  _g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
 }
 
 inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -114,10 +114,6 @@
 
   G1RemSetScanState* scan_state() const { return _scan_state; }
 
-  // Record, if necessary, the fact that *p (where "p" is in region "from",
-  // which is required to be non-NULL) has changed to a new non-NULL value.
-  template <class T> void par_write_ref(HeapRegion* from, T* p, uint tid);
-
   // Eliminates any remembered set entries that correspond to dead heap ranges.
   void scrub(uint worker_num, HeapRegionClaimer* hrclaimer);
 
@@ -191,25 +187,4 @@
   size_t cards_skipped() const { return _cards_skipped; }
 };
 
-class RebuildRSOopClosure: public ExtendedOopClosure {
-  HeapRegion* _from;
-  G1RemSet* _rs;
-  uint _worker_i;
-
-  template <class T> void do_oop_work(T* p);
-
-public:
-  RebuildRSOopClosure(G1RemSet* rs, uint worker_i = 0) :
-    _from(NULL), _rs(rs), _worker_i(worker_i)
-  {}
-
-  void set_from(HeapRegion* from) {
-    assert(from != NULL, "from region must be non-NULL");
-    _from = from;
-  }
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-};
-
 #endif // SHARE_VM_GC_G1_G1REMSET_HPP
--- a/src/hotspot/share/gc/g1/g1RemSet.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1REMSET_INLINE_HPP
-#define SHARE_VM_GC_G1_G1REMSET_INLINE_HPP
-
-#include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/heapRegion.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
-#include "oops/oop.inline.hpp"
-
-template <class T>
-inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
-  if (obj == NULL) {
-    return;
-  }
-
-#ifdef ASSERT
-  // can't do because of races
-  // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
-  assert(check_obj_alignment(obj), "not oop aligned");
-  assert(_g1->is_in_reserved(obj), "must be in heap");
-#endif // ASSERT
-
-  assert(from->is_in_reserved(p) || from->is_starts_humongous(), "p is not in from");
-
-  HeapRegion* to = _g1->heap_region_containing(obj);
-  if (from != to) {
-    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
-    to->rem_set()->add_reference(p, tid);
-  }
-}
-
-template <class T>
-inline void RebuildRSOopClosure::do_oop_work(T* p) {
-  assert(_from != NULL, "from region must be non-NULL");
-  _rs->par_write_ref(_from, p, _worker_i);
-}
-
-#endif // SHARE_VM_GC_G1_G1REMSET_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -26,7 +26,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1RemSetSummary.hpp"
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "gc/g1/heapRegion.hpp"
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -37,6 +37,7 @@
 #include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/mutex.hpp"
 #include "services/management.hpp"
@@ -319,6 +320,16 @@
   }
 }
 
+void G1RootProcessor::process_full_gc_weak_roots(OopClosure* oops) {
+  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
+    _g1h->ref_processor_stw()->weak_oops_do(oops);
+  }
+
+  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_weakProcessor_oops_do)) {
+    WeakProcessor::oops_do(oops);
+  }
+}
+
 uint G1RootProcessor::n_workers() const {
   return _srs.n_threads();
 }
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -65,6 +65,7 @@
     G1RP_PS_aot_oops_do,
     G1RP_PS_filter_satb_buffers,
     G1RP_PS_refProcessor_oops_do,
+    G1RP_PS_weakProcessor_oops_do,
     // Leave this one last.
     G1RP_PS_NumElements
   };
@@ -118,6 +119,10 @@
                                          CLDClosure* clds,
                                          CodeBlobClosure* blobs);
 
+  // Apply closure to weak roots in the system. Used during the adjust phase
+  // for the Full GC.
+  void process_full_gc_weak_roots(OopClosure* oops);
+
   // Number of worker threads used by the root processor.
   uint n_workers() const;
 };
--- a/src/hotspot/share/gc/g1/g1SerialFullCollector.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1FullGCScope.hpp"
-#include "gc/g1/g1MarkSweep.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
-#include "gc/g1/g1SerialFullCollector.hpp"
-#include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-
-G1SerialFullCollector::G1SerialFullCollector(G1FullGCScope* scope,
-                                             ReferenceProcessor* reference_processor) :
-    _scope(scope),
-    _reference_processor(reference_processor),
-    _is_alive_mutator(_reference_processor, NULL),
-    _mt_discovery_mutator(_reference_processor, false) {
-  // Temporarily make discovery by the STW ref processor single threaded (non-MT)
-  // and clear the STW ref processor's _is_alive_non_header field.
-}
-
-void G1SerialFullCollector::prepare_collection() {
-  _reference_processor->enable_discovery();
-  _reference_processor->setup_policy(_scope->should_clear_soft_refs());
-}
-
-void G1SerialFullCollector::complete_collection() {
-  // Enqueue any discovered reference objects that have
-  // not been removed from the discovered lists.
-  ReferenceProcessorPhaseTimes pt(NULL, _reference_processor->num_q());
-  _reference_processor->enqueue_discovered_references(NULL, &pt);
-  pt.print_enqueue_phase();
-
-  // Iterate the heap and rebuild the remembered sets.
-  rebuild_remembered_sets();
-}
-
-void G1SerialFullCollector::collect() {
-  // Do the actual collection work.
-  G1MarkSweep::invoke_at_safepoint(_reference_processor, _scope->should_clear_soft_refs());
-}
-
-class PostMCRemSetClearClosure: public HeapRegionClosure {
-  G1CollectedHeap* _g1h;
-  ModRefBarrierSet* _mr_bs;
-public:
-  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
-    _g1h(g1h), _mr_bs(mr_bs) {}
-
-  bool doHeapRegion(HeapRegion* r) {
-    HeapRegionRemSet* hrrs = r->rem_set();
-
-    _g1h->reset_gc_time_stamps(r);
-
-    if (r->is_continues_humongous()) {
-      // We'll assert that the strong code root list and RSet is empty
-      assert(hrrs->strong_code_roots_list_length() == 0, "sanity");
-      assert(hrrs->occupied() == 0, "RSet should be empty");
-    } else {
-      hrrs->clear();
-    }
-    // You might think here that we could clear just the cards
-    // corresponding to the used region.  But no: if we leave a dirty card
-    // in a region we might allocate into, then it would prevent that card
-    // from being enqueued, and cause it to be missed.
-    // Re: the performance cost: we shouldn't be doing full GC anyway!
-    _mr_bs->clear(MemRegion(r->bottom(), r->end()));
-
-    return false;
-  }
-};
-
-
-class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
-  G1CollectedHeap*   _g1h;
-  RebuildRSOopClosure _cl;
-public:
-  RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, uint worker_i = 0) :
-    _cl(g1->g1_rem_set(), worker_i),
-    _g1h(g1)
-  { }
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->is_continues_humongous()) {
-      _cl.set_from(r);
-      r->oop_iterate(&_cl);
-    }
-    return false;
-  }
-};
-
-class ParRebuildRSTask: public AbstractGangTask {
-  G1CollectedHeap* _g1;
-  HeapRegionClaimer _hrclaimer;
-
-public:
-  ParRebuildRSTask(G1CollectedHeap* g1) :
-      AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
-
-  void work(uint worker_id) {
-    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
-    _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
-  }
-};
-
-void G1SerialFullCollector::rebuild_remembered_sets() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  // First clear the stale remembered sets.
-  PostMCRemSetClearClosure rs_clear(g1h, g1h->g1_barrier_set());
-  g1h->heap_region_iterate(&rs_clear);
-
-  // Rebuild remembered sets of all regions.
-  uint n_workers = AdaptiveSizePolicy::calc_active_workers(g1h->workers()->total_workers(),
-                                                           g1h->workers()->active_workers(),
-                                                           Threads::number_of_non_daemon_threads());
-  g1h->workers()->update_active_workers(n_workers);
-  log_info(gc,task)("Using %u workers of %u to rebuild remembered set", n_workers, g1h->workers()->total_workers());
-
-  ParRebuildRSTask rebuild_rs_task(g1h);
-  g1h->workers()->run_task(&rebuild_rs_task);
-}
--- a/src/hotspot/share/gc/g1/g1SerialFullCollector.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1SERIALCOLLECTOR_HPP
-#define SHARE_VM_GC_G1_G1SERIALCOLLECTOR_HPP
-
-#include "memory/allocation.hpp"
-
-class G1FullGCScope;
-class ReferenceProcessor;
-
-class G1SerialFullCollector : StackObj {
-  G1FullGCScope*                       _scope;
-  ReferenceProcessor*                  _reference_processor;
-  ReferenceProcessorIsAliveMutator     _is_alive_mutator;
-  ReferenceProcessorMTDiscoveryMutator _mt_discovery_mutator;
-
-  void rebuild_remembered_sets();
-
-public:
-  G1SerialFullCollector(G1FullGCScope* scope, ReferenceProcessor* reference_processor);
-
-  void prepare_collection();
-  void collect();
-  void complete_collection();
-};
-
-#endif // SHARE_VM_GC_G1_G1SERIALCOLLECTOR_HPP
--- a/src/hotspot/share/gc/g1/g1StringDedup.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1StringDedup.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -65,10 +65,10 @@
   return false;
 }
 
-void G1StringDedup::enqueue_from_mark(oop java_string) {
+void G1StringDedup::enqueue_from_mark(oop java_string, uint worker_id) {
   assert(is_enabled(), "String deduplication not enabled");
   if (is_candidate_from_mark(java_string)) {
-    G1StringDedupQueue::push(0 /* worker_id */, java_string);
+    G1StringDedupQueue::push(worker_id, java_string);
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1StringDedup.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1StringDedup.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -125,7 +125,7 @@
   // Enqueues a deduplication candidate for later processing by the deduplication
   // thread. Before enqueuing, these functions apply the appropriate candidate
   // selection policy to filters out non-candidates.
-  static void enqueue_from_mark(oop java_string);
+  static void enqueue_from_mark(oop java_string, uint worker_id);
   static void enqueue_from_evacuation(bool from_young, bool to_young,
                                       unsigned int queue, oop java_string);
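[Editor's note] enqueue_from_mark() now takes the calling worker's id, so deduplication candidates found during parallel full GC marking are pushed onto that worker's queue rather than always onto queue 0. A hedged sketch of a call from a marking context; obj and worker_id stand in for values the marking closure would supply:

// Illustrative sketch only; candidate filtering still happens inside enqueue_from_mark().
if (G1StringDedup::is_enabled()) {
  G1StringDedup::enqueue_from_mark(obj, worker_id);   // lands on this worker's own queue
}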
 
--- a/src/hotspot/share/gc/g1/g1_specialized_oop_closures.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/g1_specialized_oop_closures.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -41,6 +41,9 @@
 class G1CMOopClosure;
 class G1RootRegionScanClosure;
 
+class G1MarkAndPushClosure;
+class G1AdjustAndRebuildClosure;
+
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f) \
       f(G1ScanEvacuatedObjClosure,_nv)             \
       f(G1ScanObjsDuringUpdateRSClosure,_nv)       \
@@ -49,4 +52,8 @@
       f(G1CMOopClosure,_nv)                        \
       f(G1RootRegionScanClosure,_nv)
 
+#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f) \
+      f(G1MarkAndPushClosure,_nv)                      \
+      f(G1AdjustAndRebuildClosure,_nv)
+
 #endif // SHARE_VM_GC_G1_G1_SPECIALIZED_OOP_CLOSURES_HPP
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -42,6 +42,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "utilities/growableArray.hpp"
 
 int    HeapRegion::LogOfHRGrainBytes = 0;
 int    HeapRegion::LogOfHRGrainWords = 0;
@@ -106,14 +107,6 @@
   }
 }
 
-void HeapRegion::reset_after_compaction() {
-  G1ContiguousSpace::reset_after_compaction();
-  // After a compaction the mark bitmap is invalid, so we must
-  // treat all objects as being inside the unmarked area.
-  zero_marked_bytes();
-  init_top_at_mark_start();
-}
-
 void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
@@ -278,10 +271,6 @@
                                             (uint)allocation_context());
 }
 
-CompactibleSpace* HeapRegion::next_compaction_space() const {
-  return G1CollectedHeap::heap()->next_compaction_region(this);
-}
-
 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                     bool during_conc_mark) {
   // We always recreate the prev marking info and we'll explicitly
@@ -411,7 +400,7 @@
     // We're not verifying code roots.
     return;
   }
-  if (vo == VerifyOption_G1UseMarkWord) {
+  if (vo == VerifyOption_G1UseFullMarking) {
     // Marking verification during a full GC is performed after class
     // unloading, code cache unloading, etc so the strong code roots
     // attached to each heap region are in an inconsistent state. They won't
@@ -482,7 +471,7 @@
 public:
   // _vo == UsePrevMarking -> use "prev" marking information,
   // _vo == UseNextMarking -> use "next" marking information,
-  // _vo == UseMarkWord    -> use mark word from object header.
+  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
   G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
     _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
     _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
@@ -833,7 +822,8 @@
 }
 
 void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
-  scan_and_forward(this, cp);
+  // Not used for G1 anymore, but pure virtual in Space.
+  ShouldNotReachHere();
 }
 
 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -57,6 +57,7 @@
 
 class G1CollectedHeap;
 class G1CMBitMap;
+class G1IsAliveAndApplyClosure;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -355,8 +356,14 @@
   // and the amount of unallocated words if called on top()
   size_t block_size(const HeapWord* p) const;
 
+  // Scans through the region using the bitmap to determine what
+  // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
+  template<typename ApplyToMarkedClosure>
+  inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
   // Override for scan_and_forward support.
   void prepare_for_compaction(CompactPoint* cp);
+  // Update heap region to be consistent after compaction.
+  void complete_compaction();
 
   inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
   inline HeapWord* allocate_no_bot_updates(size_t word_size);
@@ -672,10 +679,6 @@
     _predicted_elapsed_time_ms = ms;
   }
 
-  virtual CompactibleSpace* next_compaction_space() const;
-
-  virtual void reset_after_compaction();
-
   // Routines for managing a list of code roots (attached to the
   // this region's RSet) that point into this heap region.
   void add_strong_code_root(nmethod* nm);
@@ -693,9 +696,9 @@
   void print() const;
   void print_on(outputStream* st) const;
 
-  // vo == UsePrevMarking  -> use "prev" marking information,
+  // vo == UsePrevMarking -> use "prev" marking information,
   // vo == UseNextMarking -> use "next" marking information
-  // vo == UseMarkWord    -> use the mark word in the object header
+  // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
   //
   // NOTE: Only the "prev" marking information is guaranteed to be
   // consistent most of the time, so most calls to this should use
@@ -704,7 +707,7 @@
   // vo == UseNextMarking, which is to verify the "next" marking
   // information at the end of remark.
   // Currently there is only one place where this is called with
-  // vo == UseMarkWord, which is to verify the marking during a
+  // vo == UseFullMarking, which is to verify the marking during a
   // full GC.
   void verify(VerifyOption vo, bool *failures) const;
 
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -27,10 +27,12 @@
 
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/shared/space.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/prefetch.inline.hpp"
 #include "utilities/align.hpp"
 
 inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
@@ -180,6 +182,45 @@
   return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
 }
 
+inline void HeapRegion::complete_compaction() {
+  // Reset space and bot after compaction is complete if needed.
+  reset_after_compaction();
+  if (used_region().is_empty()) {
+    reset_bot();
+  }
+
+  // After a compaction the mark bitmap is invalid, so we must
+  // treat all objects as being inside the unmarked area.
+  zero_marked_bytes();
+  init_top_at_mark_start();
+
+  // Clear unused heap memory in debug builds.
+  if (ZapUnusedHeapArea) {
+    mangle_unused_area();
+  }
+}
+
+template<typename ApplyToMarkedClosure>
+inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
+  HeapWord* limit = scan_limit();
+  HeapWord* next_addr = bottom();
+
+  while (next_addr < limit) {
+    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
+    // This explicit is_marked check is a way to avoid
+    // some extra work done by get_next_marked_addr for
+    // the case where next_addr is marked.
+    if (bitmap->is_marked(next_addr)) {
+      oop current = oop(next_addr);
+      next_addr += closure->apply(current);
+    } else {
+      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
+    }
+  }
+
+  assert(next_addr == limit, "Should stop the scan at the limit.");
+}
+
 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                          size_t desired_word_size,
                                                          size_t* actual_word_size) {
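[Editor's note] apply_to_marked_objects() above expects a closure whose apply(oop) returns the number of heap words to advance past the object. A hedged usage sketch; the closure here is illustrative, the changeset's real full GC closures are defined outside these hunks:

// Illustrative sketch only.
class CountLiveWordsClosure {
  size_t _live_words;
public:
  CountLiveWordsClosure() : _live_words(0) { }
  size_t apply(oop obj) {
    size_t words = obj->size();   // object size in heap words, also the scan advance
    _live_words += words;
    return words;
  }
  size_t live_words() const { return _live_words; }
};
// Per region, driven by the full GC marking bitmap:
//   CountLiveWordsClosure cl;
//   region->apply_to_marked_objects(bitmap, &cl);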
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -327,9 +327,7 @@
   return true;
 }
 
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
-  const uint start_index = hrclaimer->start_region_for_worker(worker_id);
-
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
   // Every worker will actually look at all regions, skipping over regions that
   // are currently not committed.
   // This also (potentially) iterates over regions newly allocated during GC. This
@@ -493,7 +491,7 @@
   }
 }
 
-uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
+uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
   assert(worker_id < _n_workers, "Invalid worker_id.");
   return _n_regions * worker_id / _n_workers;
 }
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -189,7 +189,7 @@
     return _free_list.length();
   }
 
-  size_t total_capacity_bytes() const {
+  size_t total_free_bytes() const {
     return num_free_regions() * HeapRegion::GrainBytes;
   }
 
@@ -240,7 +240,7 @@
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
+  void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const;
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
@@ -274,9 +274,8 @@
     return _n_regions;
   }
 
-  // Calculate the starting region for given worker so
-  // that they do not all start from the same region.
-  uint start_region_for_worker(uint worker_id) const;
+  // Return a start offset given a worker id.
+  uint offset_for_worker(uint worker_id) const;
 
   // Check if region has been claimed with this HRClaimer.
   bool is_region_claimed(uint region_index) const;
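
With the new signature the caller, not par_iterate() itself, derives the
starting region from the claimer. A sketch of the expected calling pattern
(the wrapper function and its name are assumptions for illustration; only
par_iterate() and offset_for_worker() come from this change):

    // Each worker starts at a different offset so the workers do not all
    // begin claiming from region 0; the claimer still arbitrates ownership.
    static void iterate_regions_in_parallel(const HeapRegionManager* hrm,
                                            HeapRegionClosure* blk,
                                            HeapRegionClaimer* hrclaimer,
                                            uint worker_id) {
      const uint start_index = hrclaimer->offset_for_worker(worker_id);
      hrm->par_iterate(blk, hrclaimer, start_index);
    }
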
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/parallelArguments.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/java.hpp"
+#include "runtime/vm_version.hpp"
+#include "utilities/defaultStream.hpp"
+
+size_t ParallelArguments::conservative_max_heap_alignment() {
+  return CollectorPolicy::compute_heap_alignment();
+}
+
+void ParallelArguments::initialize_flags() {
+  GCArguments::initialize_flags();
+  assert(UseParallelGC || UseParallelOldGC, "Error");
+  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
+  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
+    FLAG_SET_DEFAULT(UseParallelOldGC, true);
+  }
+  FLAG_SET_DEFAULT(UseParallelGC, true);
+
+  // Use the ergonomically determined number of parallel GC worker threads.
+  FLAG_SET_DEFAULT(ParallelGCThreads,
+                   Abstract_VM_Version::parallel_worker_threads());
+  if (ParallelGCThreads == 0) {
+    jio_fprintf(defaultStream::error_stream(),
+        "The Parallel GC can not be combined with -XX:ParallelGCThreads=0\n");
+    vm_exit(1);
+  }
+
+  if (UseAdaptiveSizePolicy) {
+    // We don't want to limit adaptive heap sizing's freedom to adjust the heap
+    // unless the user actually sets these flags.
+    if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
+      FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+    }
+    if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
+      FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+    }
+  }
+
+  // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
+  // SurvivorRatio has been set, reset their default values to SurvivorRatio +
+  // 2.  By doing this we make SurvivorRatio also work for Parallel Scavenger.
+  // See CR 6362902 for details.
+  if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
+    if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
+       FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
+    }
+    if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
+      FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
+    }
+  }
+
+  if (UseParallelOldGC) {
+    // Par compact uses lower default values since they are treated as
+    // minimums.  These are different defaults because of the different
+    // interpretation and are not ergonomically set.
+    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
+      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
+    }
+  }
+}
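
To make the SurvivorRatio arithmetic above concrete (the flag values are
chosen only for illustration): starting the VM with

    java -XX:+UseParallelGC -XX:SurvivorRatio=6 ...

also lowers the defaults of InitialSurvivorRatio and MinSurvivorRatio to
6 + 2 = 8, unless either of them was set explicitly on the command line.
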
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/parallelArguments.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_PARALLEL_PARALLELARGUMENTS_HPP
+#define SHARE_GC_PARALLEL_PARALLELARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class ParallelArguments : public GCArguments {
+public:
+  virtual void initialize_flags();
+  virtual size_t conservative_max_heap_alignment();
+};
+
+#endif // SHARE_GC_PARALLEL_PARALLELARGUMENTS_HPP
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -167,10 +167,8 @@
 
 
 void GenMarkSweep::deallocate_stacks() {
-  if (!UseG1GC) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    gch->release_scratch();
-  }
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  gch->release_scratch();
 
   _preserved_mark_stack.clear(true);
   _preserved_oop_stack.clear(true);
--- a/src/hotspot/share/gc/serial/genMarkSweep.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/serial/genMarkSweep.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -29,7 +29,6 @@
 
 class GenMarkSweep : public MarkSweep {
   friend class VM_MarkSweep;
-  friend class G1MarkSweep;
  public:
   static void invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_softrefs);
 
--- a/src/hotspot/share/gc/serial/markSweep.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/serial/markSweep.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -40,9 +40,6 @@
 #include "oops/typeArrayOop.inline.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1StringDedup.hpp"
-#endif // INCLUDE_ALL_GCS
 
 uint                    MarkSweep::_total_invocations = 0;
 
@@ -65,13 +62,6 @@
 CLDToOopClosure               MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
 inline void MarkSweep::mark_object(oop obj) {
-#if INCLUDE_ALL_GCS
-  if (G1StringDedup::is_enabled()) {
-    // We must enqueue the object before it is marked
-    // as we otherwise can't read the object's age.
-    G1StringDedup::enqueue_from_mark(obj);
-  }
-#endif
   // some marks may contain information we need to preserve so we store them away
   // and overwrite the mark.  We'll restore it at the end of markSweep.
   markOop mark = obj->mark();
@@ -86,8 +76,7 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_closed_archive_object(obj)) {
+    if (!obj->mark()->is_marked()) {
       mark_object(obj);
       _marking_stack.push(obj);
     }
@@ -183,8 +172,7 @@
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (!obj->mark()->is_marked() &&
-        !is_closed_archive_object(obj)) {
+    if (!obj->mark()->is_marked()) {
       mark_object(obj);
       follow_object(obj);
     }
@@ -268,7 +256,7 @@
 
 MarkSweep::IsAliveClosure   MarkSweep::is_alive;
 
-bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked() || is_closed_archive_object(p); }
+bool MarkSweep::IsAliveClosure::do_object_b(oop p) { return p->is_gc_marked(); }
 
 MarkSweep::KeepAliveClosure MarkSweep::keep_alive;
 
--- a/src/hotspot/share/gc/serial/markSweep.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/serial/markSweep.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -133,11 +133,6 @@
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
   static void set_ref_processor(ReferenceProcessor* rp);
 
-  // Archive Object handling
-  static inline bool is_closed_archive_object(oop object);
-  static inline bool is_open_archive_object(oop object);
-  static inline bool is_archive_object(oop object);
-
   static STWGCTimer* gc_timer() { return _gc_timer; }
   static SerialOldTracer* gc_tracer() { return _gc_tracer; }
 
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -30,33 +30,6 @@
 #include "memory/universe.hpp"
 #include "oops/markOop.inline.hpp"
 #include "oops/oop.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1Allocator.inline.hpp"
-#endif // INCLUDE_ALL_GCS
-
-inline bool MarkSweep::is_closed_archive_object(oop object) {
-#if INCLUDE_ALL_GCS
-  return G1ArchiveAllocator::is_closed_archive_object(object);
-#else
-  return false;
-#endif
-}
-
-inline bool MarkSweep::is_open_archive_object(oop object) {
-#if INCLUDE_ALL_GCS
-  return G1ArchiveAllocator::is_open_archive_object(object);
-#else
-  return false;
-#endif
-}
-
-inline bool MarkSweep::is_archive_object(oop object) {
-#if INCLUDE_ALL_GCS
-  return G1ArchiveAllocator::is_archive_object(object);
-#else
-  return false;
-#endif
-}
 
 inline int MarkSweep::adjust_pointers(oop obj) {
   return obj->oop_iterate_size(&MarkSweep::adjust_pointer_closure);
@@ -70,27 +43,16 @@
 
     oop new_obj = oop(obj->mark()->decode_pointer());
 
-    assert(is_archive_object(obj) ||             // no forwarding of archive objects
-           new_obj != NULL ||                         // is forwarding ptr?
+    assert(new_obj != NULL ||                         // is forwarding ptr?
            obj->mark() == markOopDesc::prototype() || // not gc marked?
            (UseBiasedLocking && obj->mark()->has_bias_pattern()),
            // not gc marked?
            "should be forwarded");
 
-#ifndef PRODUCT
-    // open_archive objects are marked by GC. Their mark should
-    // not have forwarding ptr.
-    if (is_open_archive_object(obj)) {
-      assert(new_obj == NULL, "archive heap object has forwarding ptr");
-    }
-#endif
-
     if (new_obj != NULL) {
-      if (!is_closed_archive_object(obj)) {
-        assert(Universe::heap()->is_in_reserved(new_obj),
-              "should be in object space");
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
-      }
+      assert(Universe::heap()->is_in_reserved(new_obj),
+             "should be in object space");
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
     }
   }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/serial/serialArguments.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/serial/serialArguments.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+
+size_t SerialArguments::conservative_max_heap_alignment() {
+  return GenCollectedHeap::conservative_max_heap_alignment();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/serial/serialArguments.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SERIAL_SERIALARGUMENTS_HPP
+#define SHARE_GC_SERIAL_SERIALARGUMENTS_HPP
+
+#include "gc/shared/gcArguments.hpp"
+
+class SerialArguments : public GCArguments {
+public:
+  virtual size_t conservative_max_heap_alignment();
+};
+
+#endif // SHARE_GC_SERIAL_SERIALARGUMENTS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcArguments.hpp"
+#include "gc/serial/serialArguments.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "utilities/macros.hpp"
+
+#if INCLUDE_ALL_GCS
+#include "gc/parallel/parallelArguments.hpp"
+#include "gc/cms/cmsArguments.hpp"
+#include "gc/g1/g1Arguments.hpp"
+#endif
+
+GCArguments* GCArguments::_instance = NULL;
+
+GCArguments* GCArguments::arguments() {
+  assert(is_initialized(), "Heap factory not yet created");
+  return _instance;
+}
+
+bool GCArguments::is_initialized() {
+  return _instance != NULL;
+}
+
+bool GCArguments::gc_selected() {
+#if INCLUDE_ALL_GCS
+  return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
+#else
+  return UseSerialGC;
+#endif // INCLUDE_ALL_GCS
+}
+
+void GCArguments::select_gc() {
+  if (!gc_selected()) {
+    select_gc_ergonomically();
+    if (!gc_selected()) {
+      vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
+    }
+  }
+}
+
+void GCArguments::select_gc_ergonomically() {
+#if INCLUDE_ALL_GCS
+  if (os::is_server_class_machine()) {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
+  } else {
+    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+  }
+#else
+  UNSUPPORTED_OPTION(UseG1GC);
+  UNSUPPORTED_OPTION(UseParallelGC);
+  UNSUPPORTED_OPTION(UseParallelOldGC);
+  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
+  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
+#endif // INCLUDE_ALL_GCS
+}
+
+void GCArguments::initialize_flags() {
+#if INCLUDE_ALL_GCS
+  if (AssumeMP && !UseSerialGC) {
+    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
+      warning("If the number of processors is expected to increase from one, then"
+              " you should configure the number of parallel GC threads appropriately"
+              " using -XX:ParallelGCThreads=N");
+    }
+  }
+  if (MinHeapFreeRatio == 100) {
+    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+  }
+
+  // If class unloading is disabled, also disable concurrent class unloading.
+  if (!ClassUnloading) {
+    FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
+    FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
+  }
+#endif // INCLUDE_ALL_GCS
+}
+
+jint GCArguments::initialize() {
+  assert(!is_initialized(), "GC arguments already initialized");
+
+  select_gc();
+
+#if !INCLUDE_ALL_GCS
+  if (UseParallelGC || UseParallelOldGC) {
+    jio_fprintf(defaultStream::error_stream(), "UseParallelGC not supported in this VM.\n");
+    return JNI_ERR;
+  } else if (UseG1GC) {
+    jio_fprintf(defaultStream::error_stream(), "UseG1GC not supported in this VM.\n");
+    return JNI_ERR;
+  } else if (UseConcMarkSweepGC) {
+    jio_fprintf(defaultStream::error_stream(), "UseConcMarkSweepGC not supported in this VM.\n");
+    return JNI_ERR;
+#else
+  if (UseParallelGC || UseParallelOldGC) {
+    _instance = new ParallelArguments();
+  } else if (UseG1GC) {
+    _instance = new G1Arguments();
+  } else if (UseConcMarkSweepGC) {
+    _instance = new CMSArguments();
+#endif
+  } else if (UseSerialGC) {
+    _instance = new SerialArguments();
+  } else {
+    ShouldNotReachHere();
+  }
+  return JNI_OK;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcArguments.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_GCARGUMENTS_HPP
+#define SHARE_GC_SHARED_GCARGUMENTS_HPP
+
+#include "memory/allocation.hpp"
+
+class GCArguments : public CHeapObj<mtGC> {
+private:
+  static GCArguments* _instance;
+
+  static void select_gc();
+  static void select_gc_ergonomically();
+  static bool gc_selected();
+
+public:
+  static jint initialize();
+  static bool is_initialized();
+  static GCArguments* arguments();
+
+  virtual void initialize_flags();
+
+  virtual size_t conservative_max_heap_alignment() = 0;
+};
+
+#endif // SHARE_GC_SHARED_GCARGUMENTS_HPP
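
The point of the GCArguments hierarchy is that collector-specific argument
processing now sits behind two virtuals. A hypothetical collector would plug
in roughly as follows (the collector, class name and alignment value are
invented for illustration; only the base class and its virtuals come from
this change):

    #include "gc/shared/gcArguments.hpp"

    class FooArguments : public GCArguments {
    public:
      virtual void initialize_flags() {
        // Keep the shared ergonomics (AssumeMP warning, MinHeapFreeRatio
        // clamping, class-unloading coupling), then add collector-specific
        // defaults afterwards.
        GCArguments::initialize_flags();
      }
      virtual size_t conservative_max_heap_alignment() {
        return 2 * 1024 * 1024; // whatever alignment the collector needs
      }
    };

GCArguments::initialize() would additionally need a matching UseFooGC branch
in its dispatch chain, mirroring the Serial/Parallel/CMS/G1 cases above.
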
--- a/src/hotspot/share/gc/shared/gcName.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
   G1New,
   ConcurrentMarkSweep,
   G1Old,
+  G1Full,
   GCNameEndSentinel
 };
 
@@ -53,6 +54,7 @@
       case G1New: return "G1New";
       case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
       case G1Old: return "G1Old";
+      case G1Full: return "G1Full";
       default: ShouldNotReachHere(); return NULL;
     }
   }
--- a/src/hotspot/share/gc/shared/gcTrace.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -289,6 +289,12 @@
                                      double predicted_marking_length,
                                      bool prediction_active);
 };
+
+class G1FullGCTracer : public OldGCTracer {
+ public:
+  G1FullGCTracer() : OldGCTracer(G1Full) {}
+};
+
 #endif
 
 class CMSTracer : public OldGCTracer {
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,18 @@
   assert_empty();
 }
 
+void PreservedMarks::adjust_during_full_gc() {
+  StackIterator<OopAndMarkOop, mtGC> iter(_stack);
+  while (!iter.is_empty()) {
+    OopAndMarkOop* elem = iter.next_addr();
+
+    oop obj = elem->get_oop();
+    if (obj->is_forwarded()) {
+      elem->set_oop(obj->forwardee());
+    }
+  }
+}
+
 void PreservedMarks::restore_and_increment(volatile size_t* const total_size_addr) {
   const size_t stack_size = size();
   restore();
@@ -104,7 +116,6 @@
   }
 };
 
-
 void PreservedMarksSet::reclaim() {
   assert_empty();
 
--- a/src/hotspot/share/gc/shared/preservedMarks.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/preservedMarks.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
 
 class PreservedMarks VALUE_OBJ_CLASS_SPEC {
 private:
-  class OopAndMarkOop {
+  class OopAndMarkOop VALUE_OBJ_CLASS_SPEC {
   private:
     oop _o;
     markOop _m;
@@ -43,23 +43,26 @@
   public:
     OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) { }
 
-    void set_mark() const {
-      _o->set_mark(_m);
-    }
+    oop get_oop() { return _o; }
+    void set_mark() const { _o->set_mark(_m); }
+    void set_oop(oop obj) { _o = obj; }
   };
   typedef Stack<OopAndMarkOop, mtGC> OopAndMarkOopStack;
 
   OopAndMarkOopStack _stack;
 
   inline bool should_preserve_mark(oop obj, markOop m) const;
-  inline void push(oop obj, markOop m);
 
 public:
   size_t size() const { return _stack.size(); }
+  inline void push(oop obj, markOop m);
   inline void push_if_necessary(oop obj, markOop m);
   // Iterate over the stack, restore all preserved marks, and
   // reclaim the memory taken up by the stack segments.
   void restore();
+  // Iterate over the stack and update each preserved entry to point at
+  // the object's new location, as recorded by the forwarding pointer in
+  // its mark word.
+  void adjust_during_full_gc();
 
   void restore_and_increment(volatile size_t* const _total_size_addr);
   inline static void init_forwarded_mark(oop obj);
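
Taken together, push_if_necessary(), adjust_during_full_gc() and restore()
give a sliding full GC a complete lifecycle for displaced mark words. A
sketch of how a collector is expected to drive them (the phase function
names are illustrative only):

    // Marking phase: save any mark word that forwarding will overwrite.
    static void full_gc_preserve(PreservedMarks* pm, oop obj, markOop old_mark) {
      pm->push_if_necessary(obj, old_mark);
    }

    // Adjust phase: re-point each saved entry at the object's new location,
    // following the forwarding pointer stored in its mark word.
    static void full_gc_adjust(PreservedMarks* pm) {
      pm->adjust_during_full_gc();
    }

    // After compaction: write the saved mark words back and free the stack.
    static void full_gc_epilogue(PreservedMarks* pm) {
      pm->restore();
    }
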
--- a/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/specialized_oop_closures.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -110,7 +110,8 @@
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)            \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)           \
-  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(f)            \
+  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1FULL(f)
 #else  // INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f)       \
   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_MS(f)
--- a/src/hotspot/share/gc/shared/taskqueue.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -369,6 +369,7 @@
   typedef typename T::element_type E;
 
   GenericTaskQueueSet(int n);
+  ~GenericTaskQueueSet();
 
   bool steal_best_of_2(uint queue_num, int* seed, E& t);
 
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -42,6 +42,11 @@
   }
 }
 
+template <class T, MEMFLAGS F>
+inline GenericTaskQueueSet<T, F>::~GenericTaskQueueSet() {
+  FREE_C_HEAP_ARRAY(T*, _queues);
+}
+
 template<class E, MEMFLAGS F, unsigned int N>
 inline void GenericTaskQueue<E, F, N>::initialize() {
   _elems = ArrayAllocator<E>::allocate(N, F);
@@ -49,7 +54,6 @@
 
 template<class E, MEMFLAGS F, unsigned int N>
 inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
-  assert(false, "This code is currently never called");
   ArrayAllocator<E>::free(const_cast<E*>(_elems), N);
 }
 
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -99,12 +99,10 @@
   in relation to a safepoint.
 */
 #define SAFEPOINT                                                                 \
-    if ( SafepointSynchronize::is_synchronizing()) {                              \
-        {                                                                         \
-          /* zap freed handles rather than GC'ing them */                         \
-          HandleMarkCleaner __hmc(THREAD);                                        \
-        }                                                                         \
-        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
+    {                                                                             \
+       /* zap freed handles rather than GC'ing them */                            \
+       HandleMarkCleaner __hmc(THREAD);                                           \
+       CALL_VM(SafepointMechanism::block_if_requested(THREAD), handle_exception); \
     }
 
 /*
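
The rewritten SAFEPOINT macro routes the interpreter through
SafepointMechanism, which covers both global safepoints and thread-local
handshake requests. Outside the macro machinery the poll-then-block pattern
looks roughly like this (the helper function is hypothetical; poll() and
block_if_requested() are the calls used elsewhere in this changeset):

    #include "runtime/safepointMechanism.inline.hpp"
    #include "runtime/thread.hpp"

    // Hypothetical helper illustrating the poll-then-block pattern.
    static void poll_for_safepoint_or_handshake(JavaThread* thread) {
      if (SafepointMechanism::poll(thread)) {
        // Blocks for a pending global safepoint and/or executes a handshake
        // operation targeted at this thread.
        SafepointMechanism::block_if_requested(thread);
      }
    }
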
--- a/src/hotspot/share/interpreter/templateInterpreter.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/interpreter/templateInterpreter.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -162,6 +162,7 @@
   static int        distance_from_dispatch_table(TosState state){ return _active_table.distance_from(state); }
   static address*   normal_table(TosState state)                { return _normal_table.table_for(state); }
   static address*   normal_table()                              { return _normal_table.table_for(); }
+  static address*   safept_table(TosState state)                { return _safept_table.table_for(state); }
 
   // Support for invokes
   static address*   invoke_return_entry_table()                 { return _invoke_return_entry; }
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -37,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "utilities/align.hpp"
 
 // frequently used constants
@@ -854,9 +855,10 @@
     }
     last_pc_offset = pc_offset;
 
-    if (SafepointSynchronize::do_call_back()) {
+    JavaThread* thread = JavaThread::current();
+    if (SafepointMechanism::poll(thread)) {
       // this is a hacky way to force a safepoint check but nothing else was jumping out at me.
-      ThreadToNativeFromVM ttnfv(JavaThread::current());
+      ThreadToNativeFromVM ttnfv(thread);
     }
   }
 
--- a/src/hotspot/share/logging/logTag.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/logging/logTag.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -54,6 +54,7 @@
   LOG_TAG(compilation) \
   LOG_TAG(constraints) \
   LOG_TAG(constantpool) \
+  LOG_TAG(container) \
   LOG_TAG(coops) \
   LOG_TAG(cpu) \
   LOG_TAG(cset) \
@@ -67,6 +68,7 @@
   LOG_TAG(fingerprint) \
   LOG_TAG(freelist) \
   LOG_TAG(gc) \
+  LOG_TAG(handshake) \
   LOG_TAG(hashtables) \
   LOG_TAG(heap) \
   LOG_TAG(humongous) \
--- a/src/hotspot/share/memory/universe.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/memory/universe.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -90,7 +90,7 @@
       // G1
       VerifyOption_G1UsePrevMarking = VerifyOption_Default,
       VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,
-      VerifyOption_G1UseMarkWord    = VerifyOption_G1UseNextMarking + 1
+      VerifyOption_G1UseFullMarking = VerifyOption_G1UseNextMarking + 1
 };
 
 class Universe: AllStatic {
--- a/src/hotspot/share/oops/cpCache.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/oops/cpCache.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -440,7 +440,7 @@
 
   Symbol* error = PENDING_EXCEPTION->klass()->name();
   Symbol* message = java_lang_Throwable::detail_message(PENDING_EXCEPTION);
-  assert("message != NULL", "Missing detail message");
+  assert(message != NULL, "Missing detail message");
 
   SystemDictionary::add_resolution_error(cpool, index, error, message);
   set_indy_resolution_failed();
--- a/src/hotspot/share/opto/cfgnode.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/opto/cfgnode.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -918,11 +918,18 @@
         const TypeInt* stride_t = phase->type(stride)->isa_int();
         if (lo != NULL && hi != NULL && stride_t != NULL) { // Dying loops might have TOP here
           assert(stride_t->_hi >= stride_t->_lo, "bad stride type");
-          if (stride_t->_hi < 0) {          // Down-counter loop
-            swap(lo, hi);
-            return TypeInt::make(MIN2(lo->_lo, hi->_lo) , hi->_hi, 3);
-          } else if (stride_t->_lo >= 0) {
-            return TypeInt::make(lo->_lo, MAX2(lo->_hi, hi->_hi), 3);
+          BoolTest::mask bt = l->loopexit()->test_trip();
+          // If the loop exit condition is "not equal", the condition
+          // would not trigger if init > limit (if stride > 0) or if
+          // init < limit if (stride > 0) so we can't deduce bounds
+          // for the iv from the exit condition.
+          if (bt != BoolTest::ne) {
+            if (stride_t->_hi < 0) {          // Down-counter loop
+              swap(lo, hi);
+              return TypeInt::make(MIN2(lo->_lo, hi->_lo) , hi->_hi, 3);
+            } else if (stride_t->_lo >= 0) {
+              return TypeInt::make(lo->_lo, MAX2(lo->_hi, hi->_hi), 3);
+            }
           }
         }
       }
@@ -933,7 +940,7 @@
       // before the special code for counted loop above has a chance
       // to run (that is as long as the type of the backedge's control
       // is top), we might end up with non monotonic types
-      return phase->type(in(LoopNode::EntryControl));
+      return phase->type(in(LoopNode::EntryControl))->filter_speculative(_type);
     }
   }
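
The new bt != BoolTest::ne guard exists because a "!=" exit test says nothing
about the ordering of init and limit. A standalone illustration (ordinary
C++, not HotSpot code; Java's wrapping int arithmetic would let the real loop
run on past INT_MAX):

    #include <cstdio>

    // With init = 10, limit = 0 and stride = +1, the stride > 0 branch above
    // would type the induction phi roughly as [10, max(10, 0)] = [10, 10],
    // yet the variable immediately takes the values 11, 12, ... before it
    // could ever reach 0.
    int main() {
      const int limit = 0;
      int i = 10;                                  // init > limit
      for (int n = 0; n < 5 && i != limit; n++) {  // capped for the demo
        std::printf("i = %d\n", i);
        i += 1;
      }
      return 0;
    }
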
 
--- a/src/hotspot/share/opto/parse1.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/opto/parse1.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -2286,7 +2286,14 @@
 
   // Create a node for the polling address
   if( add_poll_param ) {
-    Node *polladr = ConPNode::make((address)os::get_polling_page());
+    Node *polladr;
+    if (SafepointMechanism::uses_thread_local_poll()) {
+      Node *thread = _gvn.transform(new ThreadLocalNode());
+      Node *polling_page_load_addr = _gvn.transform(basic_plus_adr(top(), thread, in_bytes(Thread::polling_page_offset())));
+      polladr = make_load(control(), polling_page_load_addr, TypeRawPtr::BOTTOM, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
+    } else {
+      polladr = ConPNode::make((address)os::get_polling_page());
+    }
     sfpnt->init_req(TypeFunc::Parms+0, _gvn.transform(polladr));
   }
 
--- a/src/hotspot/share/prims/whitebox.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/prims/whitebox.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -49,6 +49,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/handshake.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/os.hpp"
@@ -75,6 +76,7 @@
 
 #ifdef LINUX
 #include "utilities/elfFile.hpp"
+#include "osContainer_linux.hpp"
 #endif
 
 #define SIZE_T_MAX_VALUE ((size_t) -1)
@@ -1727,6 +1729,40 @@
 #endif
 WB_END
 
+WB_ENTRY(jint, WB_HandshakeWalkStack(JNIEnv* env, jobject wb, jobject thread_handle, jboolean all_threads))
+  class TraceSelfClosure : public ThreadClosure {
+    jint _num_threads_completed;
+
+    void do_thread(Thread* th) {
+      assert(th->is_Java_thread(), "sanity");
+      JavaThread* jt = (JavaThread*)th;
+      ResourceMark rm;
+
+      jt->print_on(tty);
+      jt->print_stack_on(tty);
+      tty->cr();
+      Atomic::inc(&_num_threads_completed);
+    }
+
+  public:
+    TraceSelfClosure() : _num_threads_completed(0) {}
+
+    jint num_threads_completed() const { return _num_threads_completed; }
+  };
+  TraceSelfClosure tsc;
+
+  if (all_threads) {
+    Handshake::execute(&tsc);
+  } else {
+    oop thread_oop = JNIHandles::resolve(thread_handle);
+    if (thread_oop != NULL) {
+      JavaThread* target = java_lang_Thread::thread(thread_oop);
+      Handshake::execute(&tsc, target);
+    }
+  }
+  return tsc.num_threads_completed();
+WB_END
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -1844,6 +1880,16 @@
   return ret;
 WB_END
 
+WB_ENTRY(jboolean, WB_IsContainerized(JNIEnv* env, jobject o))
+  LINUX_ONLY(return OSContainer::is_containerized();)
+  return false;
+WB_END
+
+WB_ENTRY(void, WB_PrintOsInfo(JNIEnv* env, jobject o))
+  os::print_os_info(tty);
+WB_END
+
 #define CC (char*)
 
 static JNINativeMethod methods[] = {
@@ -2038,6 +2084,7 @@
   {CC"areOpenArchiveHeapObjectsMapped",   CC"()Z",    (void*)&WB_AreOpenArchiveHeapObjectsMapped},
   {CC"isCDSIncludedInVmBuild",            CC"()Z",    (void*)&WB_IsCDSIncludedInVmBuild },
   {CC"clearInlineCaches0",  CC"(Z)V",                 (void*)&WB_ClearInlineCaches },
+  {CC"handshakeWalkStack", CC"(Ljava/lang/Thread;Z)I", (void*)&WB_HandshakeWalkStack },
   {CC"addCompilerDirective",    CC"(Ljava/lang/String;)I",
                                                       (void*)&WB_AddCompilerDirective },
   {CC"removeCompilerDirective",   CC"(I)V",             (void*)&WB_RemoveCompilerDirective },
@@ -2051,8 +2098,11 @@
                                                       (void*)&WB_RequestConcurrentGCPhase},
   {CC"checkLibSpecifiesNoexecstack", CC"(Ljava/lang/String;)Z",
                                                       (void*)&WB_CheckLibSpecifiesNoexecstack},
+  {CC"isContainerized",           CC"()Z",            (void*)&WB_IsContainerized },
+  {CC"printOsInfo",               CC"()V",            (void*)&WB_PrintOsInfo },
 };
 
+
 #undef CC
 
 JVM_ENTRY(void, JVM_RegisterWhiteBoxMethods(JNIEnv* env, jclass wbclass))
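
WB_HandshakeWalkStack above doubles as a usage example for the new Handshake
API: a ThreadClosure is executed either on every JavaThread or on one chosen
target, handshaking threads individually instead of stopping the whole world.
Stripped of the whitebox plumbing, the pattern is roughly (the closure is a
hypothetical illustration and the include list is abbreviated; only
Handshake::execute() and ThreadClosure::do_thread() are taken from the code
above):

    #include "runtime/atomic.hpp"
    #include "runtime/handshake.hpp"

    class CountHandshakesClosure : public ThreadClosure {
      jint _count;
    public:
      CountHandshakesClosure() : _count(0) { }
      virtual void do_thread(Thread* thread) {
        // Runs either in the target thread itself or in the VM thread on its
        // behalf, while the target is kept in a safepoint-safe state.
        Atomic::inc(&_count);
      }
      jint count() const { return _count; }
    };

    // All JavaThreads:      CountHandshakesClosure cl; Handshake::execute(&cl);
    // One chosen target:    Handshake::execute(&cl, target_java_thread);
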
--- a/src/hotspot/share/runtime/arguments.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/arguments.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -29,7 +29,7 @@
 #include "classfile/moduleEntry.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
-#include "gc/shared/cardTableRS.hpp"
+#include "gc/shared/gcArguments.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/taskqueue.hpp"
@@ -50,6 +50,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/vm_version.hpp"
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
@@ -60,11 +61,6 @@
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciRuntime.hpp"
 #endif
-#if INCLUDE_ALL_GCS
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // Note: This is a special bug reporting site for the JVM
 #define DEFAULT_VENDOR_URL_BUG "http://bugreport.java.com/bugreport/crash.jsp"
@@ -383,6 +379,8 @@
   { "MinRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "InitialRAMFraction",           JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseMembar",                    JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "FastTLABRefill",               JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "UseCGroupMemoryLimitForHeap",  JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::jdk(11) },
   { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
 
   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
@@ -1505,161 +1503,6 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
-static void disable_adaptive_size_policy(const char* collector_name) {
-  if (UseAdaptiveSizePolicy) {
-    if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
-      warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
-              collector_name);
-    }
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
-}
-
-void Arguments::set_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
-         "control point invariant");
-  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
-    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
-  } else if (ParallelGCThreads == 0) {
-    jio_fprintf(defaultStream::error_stream(),
-        "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
-    vm_exit(1);
-  }
-
-  // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
-  // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
-  // we set them to 1024 and 1024.
-  // See CR 6362902.
-  if (FLAG_IS_DEFAULT(YoungPLABSize)) {
-    FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
-  }
-  if (FLAG_IS_DEFAULT(OldPLABSize)) {
-    FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
-  }
-
-  // When using compressed oops, we use local overflow stacks,
-  // rather than using a global overflow list chained through
-  // the klass word of the object's pre-image.
-  if (UseCompressedOops && !ParGCUseLocalOverflow) {
-    if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
-      warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
-    }
-    FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
-  }
-  assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
-}
-
-// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
-// sparc/solaris for certain applications, but would gain from
-// further optimization and tuning efforts, and would almost
-// certainly gain from analysis of platform and environment.
-void Arguments::set_cms_and_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
-  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  // Turn off AdaptiveSizePolicy by default for cms until it is complete.
-  disable_adaptive_size_policy("UseConcMarkSweepGC");
-
-  set_parnew_gc_flags();
-
-  size_t max_heap = align_down(MaxHeapSize,
-                               CardTableRS::ct_max_alignment_constraint());
-
-  // Now make adjustments for CMS
-  intx   tenuring_default = (intx)6;
-  size_t young_gen_per_worker = CMSYoungGenPerWorker;
-
-  // Preferred young gen size for "short" pauses:
-  // upper bound depends on # of threads and NewRatio.
-  const size_t preferred_max_new_size_unaligned =
-    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
-  size_t preferred_max_new_size =
-    align_up(preferred_max_new_size_unaligned, os::vm_page_size());
-
-  // Unless explicitly requested otherwise, size young gen
-  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
-
-  // If either MaxNewSize or NewRatio is set on the command line,
-  // assume the user is trying to set the size of the young gen.
-  if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
-
-    // Set MaxNewSize to our calculated preferred_max_new_size unless
-    // NewSize was set on the command line and it is larger than
-    // preferred_max_new_size.
-    if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_ERGO(size_t, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
-    } else {
-      FLAG_SET_ERGO(size_t, MaxNewSize, preferred_max_new_size);
-    }
-    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
-
-    // Code along this path potentially sets NewSize and OldSize
-    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size:  " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
-                        min_heap_size(), InitialHeapSize, max_heap);
-    size_t min_new = preferred_max_new_size;
-    if (FLAG_IS_CMDLINE(NewSize)) {
-      min_new = NewSize;
-    }
-    if (max_heap > min_new && min_heap_size() > min_new) {
-      // Unless explicitly requested otherwise, make young gen
-      // at least min_new, and at most preferred_max_new_size.
-      if (FLAG_IS_DEFAULT(NewSize)) {
-        FLAG_SET_ERGO(size_t, NewSize, MAX2(NewSize, min_new));
-        FLAG_SET_ERGO(size_t, NewSize, MIN2(preferred_max_new_size, NewSize));
-        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
-      }
-      // Unless explicitly requested otherwise, size old gen
-      // so it's NewRatio x of NewSize.
-      if (FLAG_IS_DEFAULT(OldSize)) {
-        if (max_heap > NewSize) {
-          FLAG_SET_ERGO(size_t, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
-          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
-        }
-      }
-    }
-  }
-  // Unless explicitly requested otherwise, definitely
-  // promote all objects surviving "tenuring_default" scavenges.
-  if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
-      FLAG_IS_DEFAULT(SurvivorRatio)) {
-    FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
-  }
-  // If we decided above (or user explicitly requested)
-  // `promote all' (via MaxTenuringThreshold := 0),
-  // prefer minuscule survivor spaces so as not to waste
-  // space for (non-existent) survivors
-  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
-    FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
-  }
-
-  // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
-  // but rather the number of free blocks of a given size that are used when
-  // replenishing the local per-worker free list caches.
-  if (FLAG_IS_DEFAULT(OldPLABSize)) {
-    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
-      // OldPLAB sizing manually turned off: Use a larger default setting,
-      // unless it was manually specified. This is because a too-low value
-      // will slow down scavenges.
-      FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
-    } else {
-      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
-    }
-  }
-
-  // If either of the static initialization defaults have changed, note this
-  // modification.
-  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
-    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
-  }
-
-  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
-}
-#endif // INCLUDE_ALL_GCS
-
 void set_object_alignment() {
   // Object alignment.
   assert(is_power_of_2(ObjectAlignmentInBytes), "ObjectAlignmentInBytes must be power of 2");
@@ -1678,11 +1521,6 @@
   if (SurvivorAlignmentInBytes == 0) {
     SurvivorAlignmentInBytes = ObjectAlignmentInBytes;
   }
-
-#if INCLUDE_ALL_GCS
-  // Set CMS global values
-  CompactibleFreeListSpace::set_cms_values();
-#endif // INCLUDE_ALL_GCS
 }
 
 size_t Arguments::max_heap_for_compressed_oops() {
@@ -1758,28 +1596,13 @@
   // the alignments imposed by several sources: any requirements from the heap
   // itself, the collector policy and the maximum page size we may run the VM
   // with.
-  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
-#if INCLUDE_ALL_GCS
-  if (UseParallelGC) {
-    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
-  } else if (UseG1GC) {
-    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
-  }
-#endif // INCLUDE_ALL_GCS
+  size_t heap_alignment = GCArguments::arguments()->conservative_max_heap_alignment();
   _conservative_max_heap_alignment = MAX4(heap_alignment,
                                           (size_t)os::vm_allocation_granularity(),
                                           os::max_page_size(),
                                           CollectorPolicy::compute_heap_alignment());
 }
 
-bool Arguments::gc_selected() {
-#if INCLUDE_ALL_GCS
-  return UseSerialGC || UseParallelGC || UseParallelOldGC || UseConcMarkSweepGC || UseG1GC;
-#else
-  return UseSerialGC;
-#endif // INCLUDE_ALL_GCS
-}
-
 #ifdef TIERED
 bool Arguments::compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) || !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
@@ -1799,31 +1622,6 @@
 }
 #endif //TIERED
 
-void Arguments::select_gc_ergonomically() {
-#if INCLUDE_ALL_GCS
-  if (os::is_server_class_machine()) {
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseG1GC, true);
-  } else {
-    FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-  }
-#else
-  UNSUPPORTED_OPTION(UseG1GC);
-  UNSUPPORTED_OPTION(UseParallelGC);
-  UNSUPPORTED_OPTION(UseParallelOldGC);
-  UNSUPPORTED_OPTION(UseConcMarkSweepGC);
-  FLAG_SET_ERGO_IF_DEFAULT(bool, UseSerialGC, true);
-#endif // INCLUDE_ALL_GCS
-}
-
-void Arguments::select_gc() {
-  if (!gc_selected()) {
-    select_gc_ergonomically();
-    if (!gc_selected()) {
-      vm_exit_during_initialization("Garbage collector not selected (default collector explicitly disabled)", NULL);
-    }
-  }
-}
-
 #if INCLUDE_JVMCI
 void Arguments::set_jvmci_specific_flags() {
   if (UseJVMCICompiler) {
@@ -1857,13 +1655,17 @@
 }
 #endif
 
-void Arguments::set_ergonomics_flags() {
+jint Arguments::set_ergonomics_flags() {
 #ifdef TIERED
   if (!compilation_mode_selected()) {
     select_compilation_mode_ergonomically();
   }
 #endif
-  select_gc();
+
+  jint gc_result = GCArguments::initialize();
+  if (gc_result != JNI_OK) {
+    return gc_result;
+  }
 
 #if COMPILER2_OR_JVMCI
   // Shared spaces work fine with other GCs but causes bytecode rewriting
@@ -1892,145 +1694,12 @@
 #endif // _LP64
 #endif // !ZERO
 
-}
-
-void Arguments::set_parallel_gc_flags() {
-  assert(UseParallelGC || UseParallelOldGC, "Error");
-  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
-  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
-    FLAG_SET_DEFAULT(UseParallelOldGC, true);
-  }
-  FLAG_SET_DEFAULT(UseParallelGC, true);
-
-  // If no heap maximum was requested explicitly, use some reasonable fraction
-  // of the physical memory, up to a maximum of 1GB.
-  FLAG_SET_DEFAULT(ParallelGCThreads,
-                   Abstract_VM_Version::parallel_worker_threads());
-  if (ParallelGCThreads == 0) {
-    jio_fprintf(defaultStream::error_stream(),
-        "The Parallel GC can not be combined with -XX:ParallelGCThreads=0\n");
-    vm_exit(1);
-  }
-
-  if (UseAdaptiveSizePolicy) {
-    // We don't want to limit adaptive heap sizing's freedom to adjust the heap
-    // unless the user actually sets these flags.
-    if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
-      FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
-    }
-    if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
-      FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
-    }
-  }
-
-  // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
-  // SurvivorRatio has been set, reset their default values to SurvivorRatio +
-  // 2.  By doing this we make SurvivorRatio also work for Parallel Scavenger.
-  // See CR 6362902 for details.
-  if (!FLAG_IS_DEFAULT(SurvivorRatio)) {
-    if (FLAG_IS_DEFAULT(InitialSurvivorRatio)) {
-       FLAG_SET_DEFAULT(InitialSurvivorRatio, SurvivorRatio + 2);
-    }
-    if (FLAG_IS_DEFAULT(MinSurvivorRatio)) {
-      FLAG_SET_DEFAULT(MinSurvivorRatio, SurvivorRatio + 2);
-    }
-  }
-
-  if (UseParallelOldGC) {
-    // Par compact uses lower default values since they are treated as
-    // minimums.  These are different defaults because of the different
-    // interpretation and are not ergonomically set.
-    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
-      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
-    }
-  }
-}
-
-void Arguments::set_g1_gc_flags() {
-  assert(UseG1GC, "Error");
-#if defined(COMPILER1) || INCLUDE_JVMCI
-  FastTLABRefill = false;
-#endif
-  FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
-  if (ParallelGCThreads == 0) {
-    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
-    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
-  }
-
-#if INCLUDE_ALL_GCS
-  if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
-    FLAG_SET_ERGO(uint, G1ConcRefinementThreads, ParallelGCThreads);
-  }
-#endif
-
-  // MarkStackSize will be set (if it hasn't been set by the user)
-  // when concurrent marking is initialized.
-  // Its value will be based upon the number of parallel marking threads.
-  // But we do set the maximum mark stack size here.
-  if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
-    FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
-  }
-
-  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
-    // In G1, we want the default GC overhead goal to be higher than
-    // it is for PS, or the heap might be expanded too aggressively.
-    // We set it here to ~8%.
-    FLAG_SET_DEFAULT(GCTimeRatio, 12);
-  }
-
-  // Below, we might need to calculate the pause time interval based on
-  // the pause target. When we do so we are going to give G1 maximum
-  // flexibility and allow it to do pauses when it needs to. So, we'll
-  // arrange that the pause interval to be pause time target + 1 to
-  // ensure that a) the pause time target is maximized with respect to
-  // the pause interval and b) we maintain the invariant that pause
-  // time target < pause interval. If the user does not want this
-  // maximum flexibility, they will have to set the pause interval
-  // explicitly.
-
-  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
-    // The default pause time target in G1 is 200ms
-    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
-  }
-
-  // Then, if the interval parameter was not set, set it according to
-  // the pause time target (this will also deal with the case when the
-  // pause time target is the default value).
-  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
-    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
-  }
-
-  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+  return JNI_OK;
 }
 
 void Arguments::set_gc_specific_flags() {
-#if INCLUDE_ALL_GCS
-  // Set per-collector flags
-  if (UseParallelGC || UseParallelOldGC) {
-    set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) {
-    set_cms_and_parnew_gc_flags();
-  } else if (UseG1GC) {
-    set_g1_gc_flags();
-  }
-  if (AssumeMP && !UseSerialGC) {
-    if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
-      warning("If the number of processors is expected to increase from one, then"
-              " you should configure the number of parallel GC threads appropriately"
-              " using -XX:ParallelGCThreads=N");
-    }
-  }
-  if (MinHeapFreeRatio == 100) {
-    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
-    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
-  }
-
-  // If class unloading is disabled, also disable concurrent class unloading.
-  if (!ClassUnloading) {
-    FLAG_SET_CMDLINE(bool, CMSClassUnloadingEnabled, false);
-    FLAG_SET_CMDLINE(bool, ClassUnloadingWithConcurrentMark, false);
-  }
-#endif // INCLUDE_ALL_GCS
+  // Set GC flags
+  GCArguments::arguments()->initialize_flags();
 }
 
 julong Arguments::limit_by_allocatable_memory(julong limit) {
@@ -2687,6 +2356,14 @@
     return result;
   }
 
+  // We need to ensure processor and memory resources have been properly
+  // configured - which may rely on arguments we just processed - before
+  // doing the final argument processing. Any argument processing that
+  // needs to know about processor and memory resources must occur after
+  // this point.
+
+  os::init_container_support();
+
   // Do final processing now that all arguments have been parsed
   result = finalize_vm_init_args(patch_mod_javabase);
   if (result != JNI_OK) {
@@ -3362,12 +3039,6 @@
       _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo);
     } else if (match_option(option, "abort")) {
       _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo);
-    // -XX:+AggressiveHeap
-    } else if (match_option(option, "-XX:+AggressiveHeap")) {
-      jint result = set_aggressive_heap_flags();
-      if (result != JNI_OK) {
-          return result;
-      }
     // Need to keep consistency of MaxTenuringThreshold and AlwaysTenure/NeverTenure;
     // and the last option wins.
     } else if (match_option(option, "-XX:+NeverTenure")) {
@@ -3649,6 +3320,16 @@
     return JNI_ERR;
   }
 
+  // This must be done after all arguments have been processed
+  // and the container support has been initialized since AggressiveHeap
+  // relies on the amount of total memory available.
+  if (AggressiveHeap) {
+    jint result = set_aggressive_heap_flags();
+    if (result != JNI_OK) {
+      return result;
+    }
+  }
+
   // This must be done after all arguments have been processed.
   // java_compiler() true means set to "NONE" or empty.
   if (java_compiler() && !xdebug_mode()) {
@@ -4476,7 +4157,8 @@
 
 jint Arguments::apply_ergo() {
   // Set flags based on ergonomics.
-  set_ergonomics_flags();
+  jint result = set_ergonomics_flags();
+  if (result != JNI_OK) return result;
 
 #if INCLUDE_JVMCI
   set_jvmci_specific_flags();
@@ -4620,6 +4302,32 @@
   }
 #endif
 
+  bool aot_enabled = UseAOT && AOTLibrary != NULL;
+  bool jvmci_enabled = NOT_JVMCI(false) JVMCI_ONLY(EnableJVMCI || UseJVMCICompiler);
+  bool handshakes_supported = SafepointMechanism::supports_thread_local_poll() && !aot_enabled && !jvmci_enabled && ThreadLocalHandshakes;
+  // ThreadLocalHandshakesConstraintFunc handles the constraints.
+  // Here we try to figure out if a mutually exclusive option has been set that conflicts with a default.
+  if (handshakes_supported) {
+    FLAG_SET_DEFAULT(UseAOT, false); // Clear the AOT flag to make sure it doesn't try to initialize.
+  } else {
+    if (FLAG_IS_DEFAULT(ThreadLocalHandshakes) && ThreadLocalHandshakes) {
+      if (aot_enabled) {
+        // If the user enabled AOT but ThreadLocalHandshakes is at its default, set it to false.
+        log_debug(ergo)("Disabling ThreadLocalHandshakes for UseAOT.");
+        FLAG_SET_DEFAULT(ThreadLocalHandshakes, false);
+      } else if (jvmci_enabled) {
+        // If the user enabled JVMCI but ThreadLocalHandshakes is at its default, set it to false.
+        log_debug(ergo)("Disabling ThreadLocalHandshakes for EnableJVMCI/UseJVMCICompiler.");
+        FLAG_SET_DEFAULT(ThreadLocalHandshakes, false);
+      }
+    }
+  }
+  if (FLAG_IS_DEFAULT(ThreadLocalHandshakes) || !SafepointMechanism::supports_thread_local_poll()) {
+    log_debug(ergo)("ThreadLocalHandshakes %s", ThreadLocalHandshakes ? "enabled." : "disabled.");
+  } else {
+    log_info(ergo)("ThreadLocalHandshakes %s", ThreadLocalHandshakes ? "enabled." : "disabled.");
+  }
+
   return JNI_OK;
 }
 
--- a/src/hotspot/share/runtime/arguments.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/arguments.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -472,8 +472,7 @@
   static void set_conservative_max_heap_alignment();
   static void set_use_compressed_oops();
   static void set_use_compressed_klass_ptrs();
-  static void select_gc();
-  static void set_ergonomics_flags();
+  static jint set_ergonomics_flags();
   static void set_shared_spaces_flags();
   // limits the given memory size by the maximum amount of memory this process is
   // currently allowed to allocate or reserve.
@@ -635,8 +634,6 @@
   static jint adjust_after_os();
 
   static void set_gc_specific_flags();
-  static bool gc_selected(); // whether a gc has been selected
-  static void select_gc_ergonomically();
 #if INCLUDE_JVMCI
   // Check consistency of jvmci vm argument settings.
   static bool check_jvmci_args_consistency();
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "runtime/commandLineFlagConstraintsRuntime.hpp"
 #include "runtime/commandLineFlagRangeList.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/safepointMechanism.hpp"
 #include "runtime/task.hpp"
 #include "utilities/defaultStream.hpp"
 
@@ -130,3 +131,17 @@
     return Flag::SUCCESS;
   }
 }
+
+Flag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose) {
+  if (value) {
+    if (!SafepointMechanism::supports_thread_local_poll()) {
+      CommandLineError::print(verbose, "ThreadLocalHandshakes not yet supported on this platform\n");
+      return Flag::VIOLATES_CONSTRAINT;
+    }
+    if (UseAOT JVMCI_ONLY(|| EnableJVMCI || UseJVMCICompiler)) {
+      CommandLineError::print(verbose, "ThreadLocalHandshakes not yet supported in combination with AOT or JVMCI\n");
+      return Flag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return Flag::SUCCESS;
+}
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,4 +45,7 @@
 
 Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
 
+Flag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose);
+
 #endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */
--- a/src/hotspot/share/runtime/globals.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/globals.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -598,6 +598,13 @@
   develop(bool, CleanChunkPoolAsync, true,                                  \
           "Clean the chunk pool asynchronously")                            \
                                                                             \
+  product_pd(bool, ThreadLocalHandshakes,                                   \
+          "Use thread-local polls instead of global poll for safepoints.")  \
+          constraint(ThreadLocalHandshakesConstraintFunc,AfterErgo)         \
+                                                                            \
+  diagnostic(uint, HandshakeTimeout, 0,                                     \
+          "If nonzero set a timeout in milliseconds for handshakes")        \
+                                                                            \
   experimental(bool, AlwaysSafeConstructors, false,                         \
           "Force safe construction, as if all fields are final.")           \
                                                                             \
@@ -2013,8 +2020,8 @@
   product(bool, ZeroTLAB, false,                                            \
           "Zero out the newly created TLAB")                                \
                                                                             \
-  product(bool, FastTLABRefill, true,                                       \
-          "Use fast TLAB refill code")                                      \
+  product(bool, FastTLABRefill, false,                                      \
+          "(Deprecated) Use fast TLAB refill code")                         \
                                                                             \
   product(bool, TLABStats, true,                                            \
           "Provide more detailed and expensive TLAB statistics.")           \
@@ -2029,6 +2036,9 @@
           "Real memory size (in bytes) used to set maximum heap size")      \
           range(0, 0XFFFFFFFFFFFFFFFF)                                      \
                                                                             \
+  product(bool, AggressiveHeap, false,                                      \
+          "Optimize heap options for long-running memory intensive apps")   \
+                                                                            \
   product(size_t, ErgoHeapSizeLimit, 0,                                     \
           "Maximum ergonomically set heap size (in bytes); zero means use " \
           "MaxRAM * MaxRAMPercentage / 100")                                \
@@ -2036,7 +2046,8 @@
                                                                             \
   experimental(bool, UseCGroupMemoryLimitForHeap, false,                    \
           "Use CGroup memory limit as physical memory limit for heap "      \
-          "sizing")                                                         \
+          "sizing"                                                          \
+          "Deprecated, replaced by container support")                      \
                                                                             \
   product(uintx, MaxRAMFraction, 4,                                         \
           "Maximum fraction (1/n) of real memory used for maximum heap "    \
@@ -2068,6 +2079,9 @@
           "Percentage of real memory used for initial heap size")           \
           range(0.0, 100.0)                                                 \
                                                                             \
+  product(int, ActiveProcessorCount, -1,                                    \
+          "Specify the CPU count the VM should use and report as active")   \
+                                                                            \
   develop(uintx, MaxVirtMemFraction, 2,                                     \
           "Maximum fraction (1/n) of virtual memory used for ergonomically "\
           "determining maximum heap size")                                  \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/handshake.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handshake.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/task.hpp"
+#include "runtime/timerTrace.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/formatBuffer.hpp"
+#include "utilities/preserveException.hpp"
+
+#define ALL_JAVA_THREADS(X) for (JavaThread* X = Threads::first(); X; X = X->next())
+
+class HandshakeOperation: public StackObj {
+public:
+  virtual void do_handshake(JavaThread* thread) = 0;
+  virtual void cancel_handshake(JavaThread* thread) = 0;
+};
+
+class HandshakeThreadsOperation: public HandshakeOperation {
+  Semaphore _done;
+  ThreadClosure* _thread_cl;
+
+public:
+  HandshakeThreadsOperation(ThreadClosure* cl) : _done(0), _thread_cl(cl) {}
+  void do_handshake(JavaThread* thread);
+  void cancel_handshake(JavaThread* thread) { _done.signal(); };
+
+  bool thread_has_completed() { return _done.trywait(); }
+};
+
+class VM_Handshake: public VM_Operation {
+  HandshakeThreadsOperation* const _op;
+  const jlong _handshake_timeout;
+ public:
+  bool evaluate_at_safepoint() const { return false; }
+
+  bool evaluate_concurrently() const { return false; }
+
+ protected:
+
+  VM_Handshake(HandshakeThreadsOperation* op) :
+      _op(op),
+      _handshake_timeout(TimeHelper::millis_to_counter(HandshakeTimeout)) {}
+
+  void set_handshake(JavaThread* target) {
+    target->set_handshake_operation(_op);
+  }
+
+  // This method returns true both for threads that have completed their operation
+  // and for threads that have canceled their operation.
+  // A cancellation can happen if the thread is exiting.
+  bool poll_for_completed_thread() { return _op->thread_has_completed(); }
+
+  bool handshake_has_timed_out(jlong start_time);
+  static void handle_timeout();
+};
+
+bool VM_Handshake::handshake_has_timed_out(jlong start_time) {
+  // Check if handshake operation has timed out
+  if (_handshake_timeout > 0) {
+    return os::elapsed_counter() >= (start_time + _handshake_timeout);
+  }
+  return false;
+}
+
+void VM_Handshake::handle_timeout() {
+  LogStreamHandle(Warning, handshake) log_stream;
+  MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+  ALL_JAVA_THREADS(thr) {
+    if (thr->has_handshake()) {
+      log_stream.print("Thread " PTR_FORMAT " has not cleared its handshake op", p2i(thr));
+      thr->print_thread_state_on(&log_stream);
+    }
+  }
+  log_stream.flush();
+  fatal("Handshake operation timed out");
+}
+
+
+class VM_HandshakeOneThread: public VM_Handshake {
+  JavaThread* _target;
+  bool _thread_alive;
+ public:
+  VM_HandshakeOneThread(HandshakeThreadsOperation* op, JavaThread* target) :
+    VM_Handshake(op), _target(target), _thread_alive(false) {}
+
+  void doit() {
+    TraceTime timer("Performing single-target operation (vmoperation doit)", TRACETIME_LOG(Info, handshake));
+
+    {
+      MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+      if (Threads::includes(_target)) {
+        set_handshake(_target);
+        _thread_alive = true;
+      }
+    }
+
+    if (!_thread_alive) {
+      return;
+    }
+
+    if (!UseMembar) {
+      os::serialize_thread_states();
+    }
+
+    log_trace(handshake)("Thread signaled, begin processing by VMThtread");
+    jlong start_time = os::elapsed_counter();
+    do {
+      if (handshake_has_timed_out(start_time)) {
+        handle_timeout();
+      }
+
+      MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+      _target->handshake_process_by_vmthread();
+
+    } while (!poll_for_completed_thread());
+  }
+
+  VMOp_Type type() const { return VMOp_HandshakeOneThread; }
+
+  bool thread_alive() const { return _thread_alive; }
+};
+
+class VM_HandshakeAllThreads: public VM_Handshake {
+ public:
+  VM_HandshakeAllThreads(HandshakeThreadsOperation* op) : VM_Handshake(op) {}
+
+  void doit() {
+    TraceTime timer("Performing operation (vmoperation doit)", TRACETIME_LOG(Info, handshake));
+
+    int number_of_threads_issued = -1;
+    int number_of_threads_completed = 0;
+    {
+      MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+      number_of_threads_issued = Threads::number_of_threads();
+
+      ALL_JAVA_THREADS(thr) {
+        set_handshake(thr);
+      }
+    }
+
+    if (!UseMembar) {
+      os::serialize_thread_states();
+    }
+
+    log_debug(handshake)("Threads signaled, begin processing blocked threads by VMThtread");
+    const jlong start_time = os::elapsed_counter();
+    do {
+      // Check if handshake operation has timed out
+      if (handshake_has_timed_out(start_time)) {
+        handle_timeout();
+      }
+
+      // Have the VM thread perform the handshake operation for blocked threads.
+      // Observing a blocked state may of course be transient, but the processing is
+      // guarded by semaphores, and we optimistically begin by working on the blocked threads.
+      {
+        MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+        ALL_JAVA_THREADS(thr) {
+          thr->handshake_process_by_vmthread();
+        }
+      }
+
+      while (poll_for_completed_thread()) {
+        number_of_threads_completed++;
+      }
+
+    } while (number_of_threads_issued != number_of_threads_completed);
+  }
+
+  VMOp_Type type() const { return VMOp_HandshakeAllThreads; }
+};
+
+class VM_HandshakeFallbackOperation : public VM_Operation {
+  ThreadClosure* _thread_cl;
+  Thread* _target_thread;
+  bool _all_threads;
+  bool _thread_alive;
+public:
+  VM_HandshakeFallbackOperation(ThreadClosure* cl) :
+      _thread_cl(cl), _target_thread(NULL), _all_threads(true), _thread_alive(true) {}
+  VM_HandshakeFallbackOperation(ThreadClosure* cl, Thread* target) :
+      _thread_cl(cl), _target_thread(target), _all_threads(false), _thread_alive(false) {}
+
+  void doit() {
+    ALL_JAVA_THREADS(t) {
+      if (_all_threads || t == _target_thread) {
+        if (t == _target_thread) {
+          _thread_alive = true;
+        }
+        _thread_cl->do_thread(t);
+      }
+    }
+  }
+
+  VMOp_Type type() const { return VMOp_HandshakeFallback; }
+  bool thread_alive() const { return _thread_alive; }
+};
+
+#undef ALL_JAVA_THREADS
+
+void HandshakeThreadsOperation::do_handshake(JavaThread* thread) {
+  ResourceMark rm;
+  FormatBufferResource message("Operation for thread " PTR_FORMAT ", is_vm_thread: %s",
+                               p2i(thread), BOOL_TO_STR(Thread::current()->is_VM_thread()));
+  TraceTime timer(message, TRACETIME_LOG(Debug, handshake, task));
+  _thread_cl->do_thread(thread);
+
+  // Use the semaphore to inform the VM thread that we have completed the operation
+  _done.signal();
+}
+
+void Handshake::execute(ThreadClosure* thread_cl) {
+  if (ThreadLocalHandshakes) {
+    HandshakeThreadsOperation cto(thread_cl);
+    VM_HandshakeAllThreads handshake(&cto);
+    VMThread::execute(&handshake);
+  } else {
+    VM_HandshakeFallbackOperation op(thread_cl);
+    VMThread::execute(&op);
+  }
+}
+
+bool Handshake::execute(ThreadClosure* thread_cl, JavaThread* target) {
+  if (ThreadLocalHandshakes) {
+    HandshakeThreadsOperation cto(thread_cl);
+    VM_HandshakeOneThread handshake(&cto, target);
+    VMThread::execute(&handshake);
+    return handshake.thread_alive();
+  } else {
+    VM_HandshakeFallbackOperation op(thread_cl, target);
+    VMThread::execute(&op);
+    return op.thread_alive();
+  }
+}
+
+HandshakeState::HandshakeState() : _operation(NULL), _semaphore(1), _vmthread_holds_semaphore(false), _thread_in_process_handshake(false) {}
+
+void HandshakeState::set_operation(JavaThread* target, HandshakeOperation* op) {
+  _operation = op;
+  SafepointMechanism::arm_local_poll(target);
+}
+
+void HandshakeState::clear_handshake(JavaThread* target) {
+  _operation = NULL;
+  SafepointMechanism::disarm_local_poll(target);
+}
+
+void HandshakeState::process_self_inner(JavaThread* thread) {
+  assert(Thread::current() == thread, "should call from thread");
+  CautiouslyPreserveExceptionMark pem(thread);
+  ThreadInVMForHandshake tivm(thread);
+  if (!_semaphore.trywait()) {
+    ThreadBlockInVM tbivm(thread);
+    _semaphore.wait();
+  }
+  if (has_operation()) {
+    HandshakeOperation* op = _operation;
+    clear_handshake(thread);
+    if (op != NULL) {
+      op->do_handshake(thread);
+    }
+  }
+  _semaphore.signal();
+}
+
+void HandshakeState::cancel_inner(JavaThread* thread) {
+  assert(Thread::current() == thread, "should call from thread");
+  assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
+#ifdef DEBUG
+  {
+    MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
+    assert(!Threads::includes(thread), "java thread must not be on threads list");
+  }
+#endif
+  HandshakeOperation* op = _operation;
+  clear_handshake(thread);
+  if (op != NULL) {
+    op->cancel_handshake(thread);
+  }
+}
+
+bool HandshakeState::vmthread_can_process_handshake(JavaThread* target) {
+  return SafepointSynchronize::safepoint_safe(target, target->thread_state());
+}
+
+bool HandshakeState::claim_handshake_for_vmthread() {
+  if (_semaphore.trywait()) {
+    if (has_operation()) {
+      _vmthread_holds_semaphore = true;
+    } else {
+      _semaphore.signal();
+    }
+  }
+  return _vmthread_holds_semaphore;
+}
+
+void HandshakeState::process_by_vmthread(JavaThread* target) {
+  assert(Thread::current()->is_VM_thread(), "should call from vm thread");
+
+  if (!has_operation()) {
+    // JT has already cleared its handshake
+    return;
+  }
+
+  if (!vmthread_can_process_handshake(target)) {
+    // JT is observed in an unsafe state, it must notice the handshake itself
+    return;
+  }
+
+  // If we own the semaphore at this point, and the target can still be observed
+  // in a safe state while we hold it, the thread cannot possibly continue
+  // without getting caught by the semaphore.
+  if (claim_handshake_for_vmthread() && vmthread_can_process_handshake(target)) {
+    guarantee(!_semaphore.trywait(), "we should already own the semaphore");
+
+    _operation->do_handshake(target);
+    clear_handshake(target);
+    _vmthread_holds_semaphore = false;
+    // Release the thread
+    _semaphore.signal();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/handshake.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_HANDSHAKE_HPP
+#define SHARE_VM_RUNTIME_HANDSHAKE_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/semaphore.hpp"
+
+class ThreadClosure;
+class JavaThread;
+
+// A handshake operation is a callback that is executed for each JavaThread
+// while that thread is in a safepoint safe state. The callback is executed
+// either by the thread itself or by the VM thread while keeping the thread
+// in a blocked state. A handshake can be performed with a single
+// JavaThread as well.
+class Handshake : public AllStatic {
+ public:
+  // Execution of handshake operation
+  static void execute(ThreadClosure* thread_cl);
+  static bool execute(ThreadClosure* thread_cl, JavaThread* target);
+};
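+
+// A minimal usage sketch (TraceClosure and target_thread are hypothetical names,
+// not part of this change): a ThreadClosure subclass is passed to Handshake::execute(),
+// which runs do_thread() for each JavaThread while that thread is safepoint safe.
+//
+//   class TraceClosure : public ThreadClosure {
+//    public:
+//     void do_thread(Thread* thread) {
+//       // Inspect or update per-thread state here.
+//     }
+//   };
+//
+//   TraceClosure cl;
+//   Handshake::execute(&cl);                               // handshake all JavaThreads
+//   bool alive = Handshake::execute(&cl, target_thread);   // handshake a single JavaThread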
+
+class HandshakeOperation;
+
+// The HandshakeState keeps track of an ongoing handshake for one JavaThread.
+// The VM thread and the JavaThread are serialized with a semaphore, making sure
+// the operation is only performed by either the VM thread on behalf of the
+// JavaThread or by the JavaThread itself.
+class HandshakeState VALUE_OBJ_CLASS_SPEC {
+  HandshakeOperation* volatile _operation;
+
+  Semaphore _semaphore;
+  bool _vmthread_holds_semaphore;
+  bool _thread_in_process_handshake;
+
+  bool claim_handshake_for_vmthread();
+  bool vmthread_can_process_handshake(JavaThread* target);
+
+  void clear_handshake(JavaThread* thread);
+  void cancel_inner(JavaThread* thread);
+
+  void process_self_inner(JavaThread* thread);
+public:
+  HandshakeState();
+
+  void set_operation(JavaThread* thread, HandshakeOperation* op);
+
+  bool has_operation() const {
+    return _operation != NULL;
+  }
+
+  void cancel(JavaThread* thread) {
+    if (!_thread_in_process_handshake) {
+      FlagSetting fs(_thread_in_process_handshake, true);
+      cancel_inner(thread);
+    }
+  }
+
+  void process_by_self(JavaThread* thread) {
+    if (!_thread_in_process_handshake) {
+      FlagSetting fs(_thread_in_process_handshake, true);
+      process_self_inner(thread);
+    }
+  }
+  void process_by_vmthread(JavaThread* target);
+};
+
+#endif // SHARE_VM_RUNTIME_HANDSHAKE_HPP
--- a/src/hotspot/share/runtime/interfaceSupport.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/interfaceSupport.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -30,7 +30,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
-#include "runtime/safepoint.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -142,9 +142,7 @@
 
     InterfaceSupport::serialize_thread_state(thread);
 
-    if (SafepointSynchronize::do_call_back()) {
-      SafepointSynchronize::block(thread);
-    }
+    SafepointMechanism::block_if_requested(thread);
     thread->set_thread_state(to);
 
     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
@@ -164,9 +162,7 @@
 
     InterfaceSupport::serialize_thread_state_with_handler(thread);
 
-    if (SafepointSynchronize::do_call_back()) {
-      SafepointSynchronize::block(thread);
-    }
+    SafepointMechanism::block_if_requested(thread);
     thread->set_thread_state(to);
 
     CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
@@ -191,7 +187,7 @@
     // We never install asynchronous exceptions when coming (back) in
     // to the runtime from native code because the runtime is not set
     // up to handle exceptions floating around at arbitrary points.
-    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
+    if (SafepointMechanism::poll(thread) || thread->is_suspend_after_native()) {
       JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
 
       // Clear unhandled oops anywhere where we could block, even if we don't.
@@ -207,6 +203,38 @@
    void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
 };
 
+class ThreadInVMForHandshake : public ThreadStateTransition {
+  const JavaThreadState _original_state;
+
+  void transition_back() {
+    // This can be invoked from transition states and must return to the original state properly
+    assert(_thread->thread_state() == _thread_in_vm, "should only call when leaving VM after handshake");
+    _thread->set_thread_state(_thread_in_vm_trans);
+
+    InterfaceSupport::serialize_thread_state(_thread);
+
+    SafepointMechanism::block_if_requested(_thread);
+
+    _thread->set_thread_state(_original_state);
+  }
+
+ public:
+
+  ThreadInVMForHandshake(JavaThread* thread) : ThreadStateTransition(thread),
+      _original_state(thread->thread_state()) {
+
+    if (thread->has_last_Java_frame()) {
+      thread->frame_anchor()->make_walkable(thread);
+    }
+
+    thread->set_thread_state(_thread_in_vm);
+  }
+
+  ~ThreadInVMForHandshake() {
+    transition_back();
+  }
+
+};
 
 class ThreadInVMfromJava : public ThreadStateTransition {
  public:
--- a/src/hotspot/share/runtime/mutex.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/mutex.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -28,6 +28,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/events.hpp"
 #include "utilities/macros.hpp"
@@ -394,7 +395,7 @@
       jint rv = Self->rng[0];
       for (int k = Delay; --k >= 0;) {
         rv = MarsagliaXORV(rv);
-        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
+        if ((flgs & 4) == 0 && SafepointMechanism::poll(Self)) return 0;
       }
       Self->rng[0] = rv;
     } else {
--- a/src/hotspot/share/runtime/objectMonitor.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/objectMonitor.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -35,6 +35,7 @@
 #include "runtime/objectMonitor.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/threadService.hpp"
@@ -1282,7 +1283,7 @@
   OrderAccess::release_store(&_owner, (void*)NULL);
   OrderAccess::fence();                               // ST _owner vs LD in unpark()
 
-  if (SafepointSynchronize::do_call_back()) {
+  if (SafepointMechanism::poll(Self)) {
     TEVENT(unpark before SAFEPOINT);
   }
 
@@ -1936,7 +1937,7 @@
     // This is in keeping with the "no loitering in runtime" rule.
     // We periodically check to see if there's a safepoint pending.
     if ((ctr & 0xFF) == 0) {
-      if (SafepointSynchronize::do_call_back()) {
+      if (SafepointMechanism::poll(Self)) {
         TEVENT(Spin: safepoint);
         goto Abort;           // abrupt spin egress
       }
--- a/src/hotspot/share/runtime/os.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/os.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -142,8 +142,16 @@
   static void  get_summary_os_info(char* buf, size_t buflen);
 
   static void initialize_initial_active_processor_count();
+
+  LINUX_ONLY(static void pd_init_container_support();)
+
  public:
   static void init(void);                      // Called before command line parsing
+
+  static void init_container_support() {       // Called during command line parsing.
+    LINUX_ONLY(pd_init_container_support();)
+  }
+
   static void init_before_ergo(void);          // Called after command line parsing
                                                // before VM ergonomics processing.
   static jint init_2(void);                    // Called after command line parsing
--- a/src/hotspot/share/runtime/safepoint.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/safepoint.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -52,6 +52,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -169,21 +170,32 @@
     int initial_running = 0;
 
     _state            = _synchronizing;
-    OrderAccess::fence();
+
+    if (SafepointMechanism::uses_thread_local_poll()) {
+      // Arming the per thread poll while having _state != _not_synchronized means safepointing
+      log_trace(safepoint)("Setting thread local yield flag for threads");
+      for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
+        // Make sure the threads start polling, it is time to yield.
+        SafepointMechanism::arm_local_poll(cur); // release store, global state -> local state
+      }
+    }
+    OrderAccess::fence(); // storestore|storeload, global state -> local state
 
     // Flush all thread states to memory
     if (!UseMembar) {
       os::serialize_thread_states();
     }
 
-    // Make interpreter safepoint aware
-    Interpreter::notice_safepoints();
+    if (SafepointMechanism::uses_global_page_poll()) {
+      // Make interpreter safepoint aware
+      Interpreter::notice_safepoints();
 
-    if (DeferPollingPageLoopCount < 0) {
-      // Make polling safepoint aware
-      guarantee (PageArmed == 0, "invariant") ;
-      PageArmed = 1 ;
-      os::make_polling_page_unreadable();
+      if (DeferPollingPageLoopCount < 0) {
+        // Make polling safepoint aware
+        guarantee (PageArmed == 0, "invariant") ;
+        PageArmed = 1 ;
+        os::make_polling_page_unreadable();
+      }
     }
 
     // Consider using active_processor_count() ... but that call is expensive.
@@ -293,7 +305,7 @@
         // 9. On windows consider using the return value from SwitchThreadTo()
         //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
 
-        if (int(iterations) == DeferPollingPageLoopCount) {
+        if (SafepointMechanism::uses_global_page_poll() && int(iterations) == DeferPollingPageLoopCount) {
           guarantee (PageArmed == 0, "invariant") ;
           PageArmed = 1 ;
           os::make_polling_page_unreadable();
@@ -444,7 +456,7 @@
   // A pending_exception cannot be installed during a safepoint.  The threads
   // may install an async exception after they come back from a safepoint into
   // pending_exception after they unblock.  But that should happen later.
-  for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+  for (JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
     assert (!(cur->has_pending_exception() &&
               cur->safepoint_state()->is_at_poll_safepoint()),
             "safepoint installed a pending exception");
@@ -452,46 +464,60 @@
 #endif // ASSERT
 
   if (PageArmed) {
+    assert(SafepointMechanism::uses_global_page_poll(), "sanity");
     // Make polling safepoint aware
     os::make_polling_page_readable();
     PageArmed = 0 ;
   }
 
-  // Remove safepoint check from interpreter
-  Interpreter::ignore_safepoints();
+  if (SafepointMechanism::uses_global_page_poll()) {
+    // Remove safepoint check from interpreter
+    Interpreter::ignore_safepoints();
+  }
 
   {
     MutexLocker mu(Safepoint_lock);
 
     assert(_state == _synchronized, "must be synchronized before ending safepoint synchronization");
 
-    // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
-    // when they get restarted.
-    _state = _not_synchronized;
-    OrderAccess::fence();
+    if (SafepointMechanism::uses_thread_local_poll()) {
+      _state = _not_synchronized;
+      OrderAccess::storestore(); // global state -> local state
+      for (JavaThread *current = Threads::first(); current; current = current->next()) {
+        ThreadSafepointState* cur_state = current->safepoint_state();
+        cur_state->restart(); // TSS _running
+        SafepointMechanism::disarm_local_poll(current); // release store, local state -> polling page
+      }
+      log_debug(safepoint)("Leaving safepoint region");
+    } else {
+      // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
+      // when they get restarted.
+      _state = _not_synchronized;
+      OrderAccess::fence();
 
-    log_debug(safepoint)("Leaving safepoint region");
+      log_debug(safepoint)("Leaving safepoint region");
 
-    // Start suspended threads
-    for(JavaThread *current = Threads::first(); current; current = current->next()) {
-      // A problem occurring on Solaris is when attempting to restart threads
-      // the first #cpus - 1 go well, but then the VMThread is preempted when we get
-      // to the next one (since it has been running the longest).  We then have
-      // to wait for a cpu to become available before we can continue restarting
-      // threads.
-      // FIXME: This causes the performance of the VM to degrade when active and with
-      // large numbers of threads.  Apparently this is due to the synchronous nature
-      // of suspending threads.
-      //
-      // TODO-FIXME: the comments above are vestigial and no longer apply.
-      // Furthermore, using solaris' schedctl in this particular context confers no benefit
-      if (VMThreadHintNoPreempt) {
-        os::hint_no_preempt();
+      // Start suspended threads
+      for (JavaThread *current = Threads::first(); current; current = current->next()) {
+        // A problem occurring on Solaris is when attempting to restart threads
+        // the first #cpus - 1 go well, but then the VMThread is preempted when we get
+        // to the next one (since it has been running the longest).  We then have
+        // to wait for a cpu to become available before we can continue restarting
+        // threads.
+        // FIXME: This causes the performance of the VM to degrade when active and with
+        // large numbers of threads.  Apparently this is due to the synchronous nature
+        // of suspending threads.
+        //
+        // TODO-FIXME: the comments above are vestigial and no longer apply.
+        // Furthermore, using solaris' schedctl in this particular context confers no benefit
+        if (VMThreadHintNoPreempt) {
+          os::hint_no_preempt();
+        }
+        ThreadSafepointState* cur_state = current->safepoint_state();
+        assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
+        cur_state->restart();
+        assert(cur_state->is_running(), "safepoint state has not been reset");
       }
-      ThreadSafepointState* cur_state = current->safepoint_state();
-      assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint");
-      cur_state->restart();
-      assert(cur_state->is_running(), "safepoint state has not been reset");
     }
 
     RuntimeService::record_safepoint_end();
@@ -855,7 +881,9 @@
 void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
   assert(thread->is_Java_thread(), "polling reference encountered by VM thread");
   assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
-  assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
+  if (!ThreadLocalHandshakes) {
+    assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization");
+  }
 
   if (ShowSafepointMsgs) {
     tty->print("handle_polling_page_exception: ");
@@ -887,7 +915,7 @@
     tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:");
     ThreadSafepointState *cur_state;
     ResourceMark rm;
-    for(JavaThread *cur_thread = Threads::first(); cur_thread;
+    for (JavaThread *cur_thread = Threads::first(); cur_thread;
         cur_thread = cur_thread->next()) {
       cur_state = cur_thread->safepoint_state();
 
@@ -1053,13 +1081,14 @@
 
 // ---------------------------------------------------------------------------------------------------------------------
 
-// Block the thread at the safepoint poll or poll return.
+// Block the thread at poll or poll return for safepoint/handshake.
 void ThreadSafepointState::handle_polling_page_exception() {
 
   // Check state.  block() will set thread state to thread_in_vm which will
   // cause the safepoint state _type to become _call_back.
-  assert(type() == ThreadSafepointState::_running,
-         "polling page exception on thread not running state");
+  suspend_type t = type();
+  assert(!SafepointMechanism::uses_global_page_poll() || t == ThreadSafepointState::_running,
+         "polling page exception on thread not running state: %u", uint(t));
 
   // Step 1: Find the nmethod from the return address
   if (ShowSafepointMsgs && Verbose) {
@@ -1101,7 +1130,7 @@
     }
 
     // Block the thread
-    SafepointSynchronize::block(thread());
+    SafepointMechanism::block_if_requested(thread());
 
     // restore oop result, if any
     if (return_oop) {
@@ -1117,7 +1146,7 @@
     assert(real_return_addr == caller_fr.pc(), "must match");
 
     // Block the thread
-    SafepointSynchronize::block(thread());
+    SafepointMechanism::block_if_requested(thread());
     set_at_poll_safepoint(false);
 
     // If we have a pending async exception deoptimize the frame
@@ -1398,7 +1427,7 @@
     tty->print_cr("State: %s", (_state == _synchronizing) ? "synchronizing" :
                   "synchronized");
 
-    for(JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
+    for (JavaThread *cur = Threads::first(); cur; cur = cur->next()) {
        cur->safepoint_state()->print();
     }
   }
--- a/src/hotspot/share/runtime/safepoint.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/safepoint.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -160,17 +160,22 @@
   inline static bool is_synchronizing()  { return _state == _synchronizing;  }
   inline static int safepoint_counter()  { return _safepoint_counter; }
 
-  inline static bool do_call_back() {
-    return (_state != _not_synchronized);
-  }
-
   inline static void increment_jni_active_count() {
     assert_locked_or_safepoint(Safepoint_lock);
     _current_jni_active_count++;
   }
 
+private:
+  inline static bool do_call_back() {
+    return (_state != _not_synchronized);
+  }
+
   // Called when a thread voluntarily blocks
   static void   block(JavaThread *thread);
+
+  friend class SafepointMechanism;
+
+public:
   static void   signal_thread_at_safepoint()              { _waiting_to_block--; }
 
   // Exception handling for page polling
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointMechanism.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+SafepointMechanism::PollingType SafepointMechanism::_polling_type = SafepointMechanism::_global_page_poll;
+void* SafepointMechanism::_poll_armed_value;
+void* SafepointMechanism::_poll_disarmed_value;
+
+void SafepointMechanism::default_initialize() {
+  if (ThreadLocalHandshakes) {
+    set_uses_thread_local_poll();
+    const size_t page_size = os::vm_page_size();
+    const size_t allocation_size = 2 * page_size;
+    char* polling_page = os::reserve_memory(allocation_size, NULL, page_size);
+    os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
+
+    char* bad_page  = polling_page;
+    char* good_page = polling_page + page_size;
+
+    os::protect_memory(bad_page, page_size, os::MEM_PROT_NONE);
+    os::protect_memory(good_page, page_size, os::MEM_PROT_READ);
+
+    log_info(os)("SafePoint Polling address, bad (protected) page:" INTPTR_FORMAT ", good (unprotected) page:" INTPTR_FORMAT, p2i(bad_page), p2i(good_page));
+    os::set_polling_page((address)(bad_page));
+
+    intptr_t poll_page_val = reinterpret_cast<intptr_t>(bad_page);
+    _poll_armed_value = reinterpret_cast<void*>(poll_page_val | poll_bit());
+    _poll_disarmed_value = good_page;
+  } else {
+    const size_t page_size = os::vm_page_size();
+    char* polling_page = os::reserve_memory(page_size, NULL, page_size);
+    os::commit_memory_or_exit(polling_page, page_size, false, "Unable to commit Safepoint polling page");
+    os::protect_memory(polling_page, page_size, os::MEM_PROT_READ);
+
+    log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
+    os::set_polling_page((address)(polling_page));
+  }
+}
+
+void SafepointMechanism::initialize_header(JavaThread* thread) {
+  disarm_local_poll(thread);
+}
+
+void SafepointMechanism::initialize_serialize_page() {
+  if (!UseMembar) {
+    const size_t page_size = os::vm_page_size();
+    char* serialize_page = os::reserve_memory(page_size, NULL, page_size);
+    os::commit_memory_or_exit(serialize_page, page_size, false, "Unable to commit memory serialization page");
+    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(serialize_page));
+    os::set_memory_serialize_page((address)(serialize_page));
+  }
+}
+
+void SafepointMechanism::initialize() {
+  pd_initialize();
+  initialize_serialize_page();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointMechanism.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_HPP
+#define SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/sizes.hpp"
+
+// This is the abstracted interface for the safepoint implementation
+class SafepointMechanism : public AllStatic {
+  enum PollingType {
+    _global_page_poll,
+    _thread_local_poll
+  };
+  static PollingType _polling_type;
+  static void* _poll_armed_value;
+  static void* _poll_disarmed_value;
+  static void set_uses_thread_local_poll()            { _polling_type     = _thread_local_poll; }
+
+  static void* poll_armed_value()                     { return _poll_armed_value; }
+  static void* poll_disarmed_value()                  { return _poll_disarmed_value; }
+
+  static inline bool local_poll_armed(JavaThread* thread);
+
+  static inline bool local_poll(Thread* thread);
+  static inline bool global_poll();
+
+  static inline void block_if_requested_local_poll(JavaThread *thread);
+
+  static void default_initialize();
+  static void initialize_serialize_page();
+
+  static void pd_initialize() NOT_AIX({ default_initialize(); });
+
+  // By adding 8 to the base address of the protected polling page we can differentiate
+  // between the armed and disarmed value by masking out this bit.
+  const static intptr_t _poll_bit = 8;
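+  // A sketch of the resulting values, assuming the pages set up by
+  // default_initialize() in safepointMechanism.cpp (addresses are illustrative):
+  //   _poll_armed_value    = (intptr_t)bad_page  | _poll_bit   (protected page, bit set)
+  //   _poll_disarmed_value = good_page                         (readable page, bit clear)
+  //   local_poll_armed()   returns (polling_page & _poll_bit) != 0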
+public:
+  static intptr_t poll_bit() { return _poll_bit; }
+
+  static bool uses_global_page_poll() { return _polling_type == _global_page_poll; }
+  static bool uses_thread_local_poll() { return _polling_type == _thread_local_poll; }
+
+  static bool supports_thread_local_poll() {
+#ifdef THREAD_LOCAL_POLL
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  // Call this method to see if this thread has a pending poll and whether appropriate action should be taken
+  static inline bool poll(Thread* thread);
+
+  // Blocks a thread until safepoint is completed
+  static inline void block_if_requested(JavaThread* thread);
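+  // Typical call sites in this change (see interfaceSupport.hpp and mutex.cpp):
+  //   SafepointMechanism::block_if_requested(thread);    // after a thread state transition
+  //   if (SafepointMechanism::poll(Self)) { ... }        // in spin loops, to detect a pending poll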
+
+  static inline void arm_local_poll(JavaThread* thread);
+  static inline void disarm_local_poll(JavaThread* thread);
+
+  // Setup the selected safepoint mechanism
+  static void initialize();
+  static void initialize_header(JavaThread* thread);
+};
+
+#endif // SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/safepointMechanism.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_INLINE_HPP
+#define SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_INLINE_HPP
+
+#include "runtime/safepointMechanism.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+
+bool SafepointMechanism::local_poll_armed(JavaThread* thread) {
+  const intptr_t poll_word = reinterpret_cast<intptr_t>(thread->get_polling_page());
+  return mask_bits_are_true(poll_word, poll_bit());
+}
+
+bool SafepointMechanism::global_poll() {
+  return SafepointSynchronize::do_call_back();
+}
+
+bool SafepointMechanism::local_poll(Thread* thread) {
+  if (thread->is_Java_thread()) {
+    return local_poll_armed((JavaThread*)thread);
+  } else {
+    // If the poll is on a non-java thread we can only check the global state.
+    return global_poll();
+  }
+}
+
+bool SafepointMechanism::poll(Thread* thread) {
+  if (uses_thread_local_poll()) {
+    return local_poll(thread);
+  } else {
+    return global_poll();
+  }
+}
+
+void SafepointMechanism::block_if_requested_local_poll(JavaThread *thread) {
+  bool armed = local_poll_armed(thread); // load acquire, polling page -> op / global state
+  if (armed) {
+    // We could be armed for either a handshake operation or a safepoint
+    if (thread->has_handshake()) {
+      thread->handshake_process_by_self();
+    } else {
+      if (global_poll()) {
+        SafepointSynchronize::block(thread);
+      }
+    }
+  }
+}
+
+void SafepointMechanism::block_if_requested(JavaThread *thread) {
+  if (uses_thread_local_poll()) {
+    block_if_requested_local_poll(thread);
+  } else {
+    // If we don't have a per-thread poll, this could be a handshake or a safepoint
+    if (global_poll()) {
+      SafepointSynchronize::block(thread);
+    }
+  }
+}
+
+void SafepointMechanism::arm_local_poll(JavaThread* thread) {
+  thread->set_polling_page(poll_armed_value());
+}
+
+void SafepointMechanism::disarm_local_poll(JavaThread* thread) {
+  thread->set_polling_page(poll_disarmed_value());
+}
+
+#endif // SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_INLINE_HPP
--- a/src/hotspot/share/runtime/thread.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/thread.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -65,6 +65,7 @@
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/handshake.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
@@ -77,6 +78,7 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/statSampler.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -1494,6 +1496,10 @@
   _popframe_preserved_args_size = 0;
   _frames_to_pop_failed_realloc = 0;
 
+  if (SafepointMechanism::uses_thread_local_poll()) {
+    SafepointMechanism::initialize_header(this);
+  }
+
   pd_initialize();
 }
 
@@ -1910,6 +1916,11 @@
 
   // Remove from list of active threads list, and notify VM thread if we are the last non-daemon thread
   Threads::remove(this);
+
+  // If someone set a handshake on us just as we entered the exit path, we simply cancel it.
+  if (ThreadLocalHandshakes) {
+    cancel_handshake();
+  }
 }
 
 #if INCLUDE_ALL_GCS
@@ -2372,11 +2383,7 @@
     InterfaceSupport::serialize_thread_state_with_handler(thread);
   }
 
-  if (SafepointSynchronize::do_call_back()) {
-    // If we are safepointing, then block the caller which may not be
-    // the same as the target thread (see above).
-    SafepointSynchronize::block(curJT);
-  }
+  SafepointMechanism::block_if_requested(curJT);
 
   if (thread->is_deopt_suspend()) {
     thread->clear_deopt_suspend();
@@ -3521,6 +3528,7 @@
   LogConfiguration::initialize(create_vm_timer.begin_time());
 
   // Parse arguments
+  // Note: this internally calls os::init_container_support()
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
 
@@ -3551,6 +3559,8 @@
   // Timing (must come after argument parsing)
   TraceTime timer("Create VM", TRACETIME_LOG(Info, startuptime));
 
+  SafepointMechanism::initialize();
+
   // Initialize the os module after parsing the args
   jint os_init_2_result = os::init_2();
   if (os_init_2_result != JNI_OK) return os_init_2_result;
--- a/src/hotspot/share/runtime/thread.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/thread.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -31,6 +31,7 @@
 #include "oops/oop.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/frame.hpp"
+#include "runtime/handshake.hpp"
 #include "runtime/javaFrameAnchor.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -271,6 +272,8 @@
   friend class PauseNoSafepointVerifier;
   friend class GCLocker;
 
+  volatile void* _polling_page;                 // Thread local polling page
+
   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
                                                 // the Java heap
@@ -549,6 +552,8 @@
   uintptr_t        _self_raw_id;      // used by get_thread (mutable)
   int              _lgrp_id;
 
+  volatile void** polling_page_addr() { return &_polling_page; }
+
  public:
   // Stack overflow support
   address stack_base() const           { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
@@ -617,6 +622,8 @@
   static ByteSize stack_base_offset()            { return byte_offset_of(Thread, _stack_base); }
   static ByteSize stack_size_offset()            { return byte_offset_of(Thread, _stack_size); }
 
+  static ByteSize polling_page_offset()          { return byte_offset_of(Thread, _polling_page); }
+
 #define TLAB_FIELD_OFFSET(name) \
   static ByteSize tlab_##name##_offset()         { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); }
 
@@ -1135,6 +1142,33 @@
   bool do_not_unlock_if_synchronized()             { return _do_not_unlock_if_synchronized; }
   void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }
 
+  inline void set_polling_page(void* poll_value);
+  inline volatile void* get_polling_page();
+
+ private:
+  // Support for thread handshake operations
+  HandshakeState _handshake;
+ public:
+  void set_handshake_operation(HandshakeOperation* op) {
+    _handshake.set_operation(this, op);
+  }
+
+  bool has_handshake() const {
+    return _handshake.has_operation();
+  }
+
+  void cancel_handshake() {
+    _handshake.cancel(this);
+  }
+
+  void handshake_process_by_self() {
+    _handshake.process_by_self(this);
+  }
+
+  void handshake_process_by_vmthread() {
+    _handshake.process_by_vmthread(this);
+  }
+
   // Suspend/resume support for JavaThread
  private:
   inline void set_ext_suspended();
--- a/src/hotspot/share/runtime/thread.inline.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/thread.inline.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -163,4 +163,16 @@
   return _stack_guard_state == stack_guard_enabled;
 }
 
+// The release makes sure this store is done after storing the handshake
+// operation or global state
+inline void JavaThread::set_polling_page(void* poll_value) {
+  OrderAccess::release_store(polling_page_addr(), poll_value);
+}
+
+// The acquire makes sure the read of the polling page is done before
+// reading the handshake operation or the global state
+inline volatile void* JavaThread::get_polling_page() {
+  return OrderAccess::load_acquire(polling_page_addr());
+}
+
 #endif // SHARE_VM_RUNTIME_THREAD_INLINE_HPP
--- a/src/hotspot/share/runtime/timer.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/timer.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,11 @@
   return counter_to_seconds(counter) * 1000.0;
 }
 
+jlong TimeHelper::millis_to_counter(jlong millis) {
+  jlong freq = os::elapsed_frequency() / MILLIUNITS;
+  return millis * freq;
+}
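+
+// Usage sketch (hypothetical caller; os::elapsed_counter() is the matching
+// tick source for this conversion):
+//   jlong deadline = os::elapsed_counter() + TimeHelper::millis_to_counter(timeout_ms);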
+
 elapsedTimer::elapsedTimer(jlong time, jlong timeUnitsPerSecond) {
   _active = false;
   jlong osTimeUnitsPerSecond = os::elapsed_frequency();
--- a/src/hotspot/share/runtime/timer.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/timer.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,7 @@
  public:
   static double counter_to_seconds(jlong counter);
   static double counter_to_millis(jlong counter);
+  static jlong millis_to_counter(jlong millis);
 };
 
 #endif // SHARE_VM_RUNTIME_TIMER_HPP
--- a/src/hotspot/share/runtime/vm_operations.hpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/runtime/vm_operations.hpp	Fri Nov 17 02:50:51 2017 +0100
@@ -69,6 +69,9 @@
   template(G1CollectFull)                         \
   template(G1CollectForAllocation)                \
   template(G1IncCollectionPause)                  \
+  template(HandshakeOneThread)                    \
+  template(HandshakeAllThreads)                   \
+  template(HandshakeFallback)                     \
   template(DestroyAllocationContext)              \
   template(EnableBiasedLocking)                   \
   template(RevokeBias)                            \
--- a/src/hotspot/share/utilities/debug.cpp	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/hotspot/share/utilities/debug.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -631,6 +631,27 @@
   VMError::print_native_stack(tty, fr, t, buf, sizeof(buf));
 }
 
+//
+// This version of pns() will not work when called from the debugger, but is
+// useful when called from within hotspot code. The advantages over pns()
+// are that no arguments need to be passed in, and that it works on Windows/x64.
+//
+// WARNING: Only intended for use when debugging. Do not leave calls to
+// pns2() in committed source (product or debug).
+//
+extern "C" void pns2() { // print native stack
+  Command c("pns2");
+  static char buf[O_BUFLEN];
+  if (os::platform_print_native_stack(tty, NULL, buf, sizeof(buf))) {
+    // We have printed the native stack in platform-specific code,
+    // so nothing else to do in this case.
+  } else {
+    Thread* t = Thread::current_or_null();
+    frame fr = os::current_frame();
+    VMError::print_native_stack(tty, fr, t, buf, sizeof(buf));
+  }
+}
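+
+// Example (debugging only): temporarily add a call such as
+//   pns2();   // prints the native stack of the current thread to tty
+// at the point of interest, then remove it before committing.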
+
 #endif // !PRODUCT
 
 //////////////////////////////////////////////////////////////////////////////
--- a/src/java.management/share/classes/javax/management/MBeanOperationInfo.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/java.management/share/classes/javax/management/MBeanOperationInfo.java	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -146,6 +146,9 @@
      * @param descriptor The descriptor for the operation.  This may be null
      * which is equivalent to an empty descriptor.
      *
+     * @throws IllegalArgumentException if {@code impact} is not one of
+     * {@linkplain #ACTION}, {@linkplain #ACTION_INFO}, {@linkplain #INFO} or {@linkplain #UNKNOWN}.
+     *
      * @since 1.6
      */
     public MBeanOperationInfo(String name,
@@ -157,6 +160,12 @@
 
         super(name, description, descriptor);
 
+        if (impact < INFO || impact > UNKNOWN) {
+            throw new IllegalArgumentException("Argument impact can only be "
+                    + "one of ACTION, ACTION_INFO, "
+                    + "INFO, or UNKNOWN; given value is: " + impact);
+        }
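+        // For example, constructing an operation with an undefined impact value,
+        // e.g. new MBeanOperationInfo("op", "desc", null, "void", 42, null),
+        // now fails with an IllegalArgumentException instead of being accepted.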
+
         if (signature == null || signature.length == 0)
             signature = MBeanParameterInfo.NO_PARAMS;
         else
@@ -259,8 +268,7 @@
         case ACTION: impactString = "action"; break;
         case ACTION_INFO: impactString = "action/info"; break;
         case INFO: impactString = "info"; break;
-        case UNKNOWN: impactString = "unknown"; break;
-        default: impactString = "(" + getImpact() + ")";
+        default: impactString = "unknown";
         }
         return getClass().getName() + "[" +
             "description=" + getDescription() + ", " +
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
   G1New ("G1New"),
   ConcurrentMarkSweep ("ConcurrentMarkSweep"),
   G1Old ("G1Old"),
+  G1Full ("G1Full"),
   GCNameEndSentinel ("GCNameEndSentinel");
 
   private final String value;
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaFieldImpl.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaFieldImpl.java	Fri Nov 17 02:50:51 2017 +0100
@@ -76,7 +76,7 @@
 
     @Override
     public int hashCode() {
-        return offset ^ modifiers;
+        return holder.hashCode() ^ offset;
     }
 
     @Override
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/g1/test_heapRegion.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1BlockOffsetTable.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "unittest.hpp"
+
+class VerifyAndCountMarkClosure : public StackObj {
+  int _count;
+  G1CMBitMap* _bm;
+
+  void ensure_marked(HeapWord* addr) {
+    ASSERT_TRUE(_bm->is_marked(addr));
+  }
+
+public:
+  VerifyAndCountMarkClosure(G1CMBitMap* bm) : _count(0), _bm(bm) { }
+
+  virtual size_t apply(oop object) {
+    _count++;
+    ensure_marked((HeapWord*) object);
+    // Must return positive size to advance the iteration.
+    return MinObjAlignment;
+  }
+
+  void reset() {
+    _count = 0;
+  }
+
+  int count() {
+    return _count;
+  }
+};
+
+#define MARK_OFFSET_1 ( 17 * MinObjAlignment)
+#define MARK_OFFSET_2 ( 99 * MinObjAlignment)
+#define MARK_OFFSET_3 (337 * MinObjAlignment)
+
+TEST_OTHER_VM(HeapRegion, apply_to_marked_objects) {
+  if (!UseG1GC) {
+    return;
+  }
+
+  G1CollectedHeap* heap = G1CollectedHeap::heap();
+
+  // Using region 0 for testing.
+  HeapRegion* region = heap->heap_region_containing(heap->bottom_addr_for_region(0));
+
+  // Mark some "oops" in the bitmap.
+  G1CMBitMap* bitmap = heap->concurrent_mark()->next_mark_bitmap();
+  bitmap->mark(region->bottom());
+  bitmap->mark(region->bottom() + MARK_OFFSET_1);
+  bitmap->mark(region->bottom() + MARK_OFFSET_2);
+  bitmap->mark(region->bottom() + MARK_OFFSET_3);
+  bitmap->mark(region->end());
+
+  VerifyAndCountMarkClosure cl(bitmap);
+
+  // When top is equal to bottom the closure should not be
+  // applied to any object because apply_to_marked_objects
+  // will stop at HeapRegion::scan_limit which is equal to top.
+  region->set_top(region->bottom());
+  region->apply_to_marked_objects(bitmap, &cl);
+  EXPECT_EQ(0, cl.count());
+  cl.reset();
+
+  // Set top to offset_1 and expect to find only 1 entry (bottom)
+  region->set_top(region->bottom() + MARK_OFFSET_1);
+  region->apply_to_marked_objects(bitmap, &cl);
+  EXPECT_EQ(1, cl.count());
+  cl.reset();
+
+  // Set top to (offset_2 + 1) and expect to find only 3
+  // entries (bottom, offset_1 and offset_2)
+  region->set_top(region->bottom() + MARK_OFFSET_2 + MinObjAlignment);
+  region->apply_to_marked_objects(bitmap, &cl);
+  EXPECT_EQ(3, cl.count());
+  cl.reset();
+
+  // Still expect same 3 entries when top is (offset_3 - 1)
+  region->set_top(region->bottom() + MARK_OFFSET_3 - MinObjAlignment);
+  region->apply_to_marked_objects(bitmap, &cl);
+  EXPECT_EQ(3, cl.count());
+  cl.reset();
+
+  // Setting top to end should render 4 entries.
+  region->set_top(region->end());
+  region->apply_to_marked_objects(bitmap, &cl);
+  EXPECT_EQ(4, cl.count());
+  cl.reset();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/gc/shared/test_preservedMarks.cpp	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/preservedMarks.inline.hpp"
+#include "unittest.hpp"
+
+class ScopedDisabledBiasedLocking {
+  bool _orig;
+public:
+  ScopedDisabledBiasedLocking() : _orig(UseBiasedLocking) { UseBiasedLocking = false; }
+  ~ScopedDisabledBiasedLocking() { UseBiasedLocking = _orig; }
+};
+
+// Class to create a "fake" oop with a mark that will
+// return true for calls to must_be_preserved().
+class FakeOop {
+  oopDesc _oop;
+
+public:
+  FakeOop() : _oop() { _oop.set_mark(originalMark()); }
+
+  oop get_oop() { return &_oop; }
+  markOop mark() { return _oop.mark(); }
+  void set_mark(markOop m) { _oop.set_mark(m); }
+  void forward_to(oop obj) {
+    markOop m = markOopDesc::encode_pointer_as_mark(obj);
+    _oop.set_mark(m);
+  }
+
+  static markOop originalMark() { return markOop(markOopDesc::lock_mask_in_place); }
+  static markOop changedMark()  { return markOop(0x4711); }
+};
+
+TEST_VM(PreservedMarks, iterate_and_restore) {
+  // Need to disable biased locking to easily
+  // create oops that "must_be_preserved".
+  ScopedDisabledBiasedLocking dbl;
+
+  PreservedMarks pm;
+  FakeOop o1;
+  FakeOop o2;
+  FakeOop o3;
+  FakeOop o4;
+
+  // Make sure initial marks are correct.
+  ASSERT_EQ(o1.mark(), FakeOop::originalMark());
+  ASSERT_EQ(o2.mark(), FakeOop::originalMark());
+  ASSERT_EQ(o3.mark(), FakeOop::originalMark());
+  ASSERT_EQ(o4.mark(), FakeOop::originalMark());
+
+  // Change the marks and verify change.
+  o1.set_mark(FakeOop::changedMark());
+  o2.set_mark(FakeOop::changedMark());
+  ASSERT_EQ(o1.mark(), FakeOop::changedMark());
+  ASSERT_EQ(o2.mark(), FakeOop::changedMark());
+
+  // Push o1 and o2 to have their marks preserved.
+  pm.push(o1.get_oop(), o1.mark());
+  pm.push(o2.get_oop(), o2.mark());
+
+  // Fake a move from o1->o3 and o2->o4.
+  o1.forward_to(o3.get_oop());
+  o2.forward_to(o4.get_oop());
+  ASSERT_EQ(o1.get_oop()->forwardee(), o3.get_oop());
+  ASSERT_EQ(o2.get_oop()->forwardee(), o4.get_oop());
+  // Adjust will update the PreservedMarks stack to
+  // make sure the mark is updated at the new location.
+  pm.adjust_during_full_gc();
+
+  // Restore all preserved and verify that the changed
+  // mark is now present at o3 and o4.
+  pm.restore();
+  ASSERT_EQ(o3.mark(), FakeOop::changedMark());
+  ASSERT_EQ(o4.mark(), FakeOop::changedMark());
+}
--- a/test/hotspot/jtreg/ProblemList.txt	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/hotspot/jtreg/ProblemList.txt	Fri Nov 17 02:50:51 2017 +0100
@@ -88,8 +88,8 @@
 serviceability/sa/TestInstanceKlassSize.java 8184042 macosx-all
 serviceability/sa/TestInstanceKlassSizeForInterface.java 8184042 macosx-all
 serviceability/sa/TestPrintMdo.java 8184042 macosx-all
+serviceability/sa/TestRevPtrsForInvokeDynamic.java 8191270 generic-all
 serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java 8184042 macosx-all
-serviceability/dcmd/jvmti/LoadAgentDcmdTest.java 8186540 windows-all
 #############################################################################
 
 # :hotspot_misc
--- a/test/hotspot/jtreg/TEST.groups	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/hotspot/jtreg/TEST.groups	Fri Nov 17 02:50:51 2017 +0100
@@ -24,6 +24,10 @@
 hotspot_all = \
   /
 
+hotspot_all_no_apps = \
+  / \
+  -applications
+
 hotspot_compiler = \
   compiler
 
@@ -33,6 +37,9 @@
 hotspot_runtime = \
   runtime
 
+hotspot_handshake = \
+  runtime/handshake
+
 hotspot_serviceability = \
   serviceability
 
@@ -180,6 +187,7 @@
  -runtime/Thread/CancellableThreadTest.java \
  -runtime/Thread/TestThreadDumpMonitorContention.java \
  -runtime/Unsafe/RangeCheck.java \
+ -runtime/containers/ \
   sanity/ \
   testlibrary_tests/TestMutuallyExclusivePlatformPredicates.java
 
@@ -190,7 +198,8 @@
 
 hotspot_tier1_serviceability = \
   serviceability/dcmd/compiler \
-  serviceability/logging
+  serviceability/logging \
+  serviceability/sa
 
 hotspot_tier1 = \
   :hotspot_tier1_common \
@@ -208,6 +217,7 @@
   serviceability/ \
  -runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java \
  -runtime/Thread/TestThreadDumpMonitorContention.java \
+ -runtime/containers/ \
  -:hotspot_tier1_runtime \
  -:hotspot_tier1_serviceability \
  -:hotspot_tier2_runtime_platform_agnostic
@@ -219,6 +229,7 @@
 hotspot_tier3_runtime = \
   runtime/ \
   serviceability/ \
+ -runtime/containers/ \
  -:hotspot_tier1_runtime \
  -:hotspot_tier1_serviceability \
  -:hotspot_tier2_runtime_platform_agnostic \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/TestCountedLoopBadIVRange.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8190375
+ * @summary Bad range for IV phi when exit condition is a not equal test
+ * @run main/othervm -XX:-TieredCompilation TestCountedLoopBadIVRange
+ *
+ */
+
+
+public class TestCountedLoopBadIVRange {
+
+    static int test1(int[] arr) {
+        int j = 0;
+        int res = 0;
+        for (int i = 0; i < 2; i++) {
+            // When entered with j == 10, the exit condition (j != 5) never
+            // succeeds, so the range of values for j can't be computed
+            // from the exit condition
+            for (; j != 5; j++) {
+                if (j >= 20) {
+                    break;
+                }
+                res += arr[j];
+            }
+            j = 10;
+        }
+        return res;
+    }
+
+    static int test2(int[] arr) {
+        int j = 10;
+        int res = 0;
+        for (int i = 0; i < 2; i++) {
+            // Same as above but loop variable is decreasing
+            for (; j != 5; j--) {
+                if (j < 0) {
+                    break;
+                }
+                res += arr[j];
+            }
+            j = 1;
+        }
+        return res;
+    }
+
+    public static void main(String[] args) {
+        int[] arr = new int[20];
+        for (int i = 0; i < arr.length; i++) {
+            arr[i] = i;
+        }
+        for (int i = 0; i < 20_000; i++) {
+            int res = test1(arr);
+            if (res != 155) {
+                throw new RuntimeException("Incorrect result " + res);
+            }
+            res = test2(arr);
+            if (res != 41) {
+                throw new RuntimeException("Incorrect result " + res);
+            }
+        }
+    }
+}
--- a/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/hotspot/jtreg/gc/arguments/TestMaxMinHeapFreeRatioFlags.java	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -156,8 +156,9 @@
             if (args.length != 3) {
                 throw new IllegalArgumentException("Expected 3 args: <minRatio> <maxRatio> <shrinkHeapInSteps>");
             }
-            if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.PSOld) {
-                System.out.println("Test is not applicable to parallel GC");
+            if (GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.PSOld ||
+                GCTypes.OldGCType.getOldGCType() == GCTypes.OldGCType.G1) {
+                System.out.println("Test is not applicable to parallel full GCs");
                 return;
             }
 
--- a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java	Fri Nov 17 02:50:51 2017 +0100
@@ -88,9 +88,23 @@
         output.shouldMatch(match);
     }
 
+    // Deprecated experimental command line options need to be preceded on the
+    // command line by -XX:+UnlockExperimentalVMOptions.
+    static void testDeprecatedExperimental(String option, String value)  throws Throwable {
+        String XXoption = CommandLineOptionTest.prepareFlag(option, value);
+        ProcessBuilder processBuilder = ProcessTools.createJavaProcessBuilder(
+            CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS, XXoption, "-version");
+        OutputAnalyzer output = new OutputAnalyzer(processBuilder.start());
+        // check for option deprecation message:
+        output.shouldHaveExitValue(0);
+        String match = getDeprecationString(option);
+        output.shouldMatch(match);
+    }
+
     public static void main(String[] args) throws Throwable {
         testDeprecated(DEPRECATED_OPTIONS);  // Make sure that each deprecated option is mentioned in the output.
         testDeprecatedDiagnostic("UnsyncloadClass", "false");
         testDeprecatedDiagnostic("IgnoreUnverifiableClassesDuringDump", "false");
+        testDeprecatedExperimental("UseCGroupMemoryLimitForHeap", "true");
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/AttemptOOM.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+public class AttemptOOM {
+    private static MyObj[] data;
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Entering AttemptOOM main");
+
+        // each MyObj will allocate a 1024-byte array
+        int sizeInMb = Integer.parseInt(args[0]);
+        data = new MyObj[sizeInMb*1024];
+
+        System.out.println("data.length = " + data.length);
+
+        for (int i=0; i < data.length; i++) {
+            data[i] = new MyObj(1024);
+        }
+
+        System.out.println("AttemptOOM allocation successful");
+    }
+
+    private static class MyObj {
+        private byte[] myData;
+        MyObj(int size) {
+            myData = new byte[size];
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/CPUSetsReader.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.io.FileReader;
+import java.util.ArrayList;
+import java.util.Optional;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import jdk.test.lib.Asserts;
+
+
+// A simple CPU sets reader and parser
+public class CPUSetsReader {
+    public static String PROC_SELF_STATUS_PATH = "/proc/self/status";
+
+    // Test the parser
+    public static void test() {
+        assertParse("0-7", "0,1,2,3,4,5,6,7");
+        assertParse("1,3,6", "1,3,6");
+        assertParse("0,2-4,6,10-11", "0,2,3,4,6,10,11");
+        assertParse("0", "0");
+    }
+
+
+    private static void assertParse(String cpuSet, String expectedResult) {
+        Asserts.assertEquals(listToString(parseCpuSet(cpuSet)), expectedResult);
+    }
+
+
+    public static String readFromProcStatus(String setType) {
+        String path = PROC_SELF_STATUS_PATH;
+        Optional<String> o = Optional.empty();
+
+        System.out.println("readFromProcStatus() entering for: " + setType);
+
+        try (Stream<String> stream = Files.lines(Paths.get(path))) {
+            o = stream
+                .filter(line -> line.contains(setType))
+                .findFirst();
+        } catch (IOException e) {
+            return null;
+        }
+
+        if (!o.isPresent()) {
+            return null;    // entry not found
+        }
+
+        String[] parts = o.get().replaceAll("\\s","").split(":");
+
+        // Should be 2 parts, before and after ":"
+        Asserts.assertEquals(parts.length, 2);
+
+        String result = parts[1];
+        System.out.println("readFromProcStatus() returning: " + result);
+        return result;
+    }
+
+
+    public static List<Integer> parseCpuSet(String value) {
+        ArrayList<Integer> result = new ArrayList<Integer>();
+
+        try {
+            String[] commaSeparated = value.split(",");
+
+            for (String item : commaSeparated) {
+                if (item.contains("-")) {
+                    addRange(result, item);
+                } else {
+                    result.add(Integer.parseInt(item));
+                }
+            }
+        } catch (Exception e) {
+            System.err.println("Exception in getMaxCpuSets(): " + e);
+            return null;
+        }
+
+        return result;
+    }
+
+
+    private static void addRange(ArrayList<Integer> list, String s) {
+        String[] range = s.split("-");
+        if ( range.length != 2 ) {
+            throw new RuntimeException("Range should only contain two items, but contains "
+                                       + range.length + " items");
+        }
+
+        int min = Integer.parseInt(range[0]);
+        int max = Integer.parseInt(range[1]);
+
+        if (min >= max) {
+            String msg = String.format("min is greater or equals to max, min = %d, max = %d",
+                                       min, max);
+            throw new RuntimeException(msg);
+        }
+
+        for (int i = min; i <= max; i++) {
+            list.add(i);
+        }
+    }
+
+
+    // Convert list of integers to string with comma-separated values
+    public static String listToString(List<Integer> list) {
+        return listToString(list, Integer.MAX_VALUE);
+    }
+
+    // Convert list of integers to a string with comma-separated values;
+    // include up to maxCount.
+    public static String listToString(List<Integer> list, int maxCount) {
+        return list.stream()
+            .limit(maxCount)
+            .map(Object::toString)
+            .collect(Collectors.joining(","));
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/CheckContainerized.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+public class CheckContainerized {
+    public static String OUTSIDE_OF_CONTAINER =
+        "CheckContainerized: Running outside of a container";
+    public static String INSIDE_A_CONTAINER =
+        "CheckContainerized: Running inside a container";
+
+    public static void main(String[] args) {
+        System.out.println("CheckContainerized: Entering");
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        if (wb.isContainerized()) {
+            System.out.println(INSIDE_A_CONTAINER);
+
+        } else {
+            System.out.println(OUTSIDE_OF_CONTAINER);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/Common.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * Methods and definitions common to the docker tests contained in this directory
+ */
+
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import jdk.test.lib.containers.docker.DockerRunOptions;
+import jdk.test.lib.containers.docker.DockerTestUtils;
+import jdk.test.lib.Utils;
+import jdk.test.lib.process.OutputAnalyzer;
+
+
+public class Common {
+    public static final String imageNameAndTag = "jdk-internal:test";
+
+    public static String imageName(String suffix) {
+        return imageNameAndTag + "-" + suffix;
+    }
+
+
+    public static void prepareWhiteBox() throws Exception {
+        Files.copy(Paths.get(ClassFileInstaller.getJarPath("whitebox.jar")),
+                   Paths.get(Utils.TEST_CLASSES, "whitebox.jar"));
+    }
+
+
+    // create simple commonly used options
+    public static DockerRunOptions newOpts(String imageNameAndTag) {
+        return new DockerRunOptions(imageNameAndTag, "/jdk/bin/java", "-version")
+            .addJavaOpts("-Xlog:os+container=trace");
+    }
+
+
+    // create commonly used options with class to be launched inside container
+    public static DockerRunOptions newOpts(String imageNameAndTag, String testClass) {
+        DockerRunOptions opts =
+            new DockerRunOptions(imageNameAndTag, "/jdk/bin/java", testClass);
+        opts.addDockerOpts("--volume", Utils.TEST_CLASSES + ":/test-classes/");
+        opts.addJavaOpts("-Xlog:os+container=trace", "-cp", "/test-classes/");
+        return opts;
+    }
+
+
+    public static DockerRunOptions addWhiteBoxOpts(DockerRunOptions opts) {
+        opts.addJavaOpts("-Xbootclasspath/a:/test-classes/whitebox.jar",
+                         "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI");
+        return opts;
+    }
+
+
+    // most common type of run and checks
+    public static OutputAnalyzer run(DockerRunOptions opts) throws Exception {
+        return DockerTestUtils.dockerRunJava(opts)
+            .shouldHaveExitValue(0).shouldContain("Initializing Container Support");
+    }
+
+
+    // log beginning of a test case
+    public static void logNewTestCase(String msg) {
+        System.out.println("========== NEW TEST CASE:      " + msg);
+    }
+
+}
--- a/test/hotspot/jtreg/runtime/containers/docker/DockerBasicTest.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/hotspot/jtreg/runtime/containers/docker/DockerBasicTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -25,7 +25,7 @@
 /*
  * @test
  * @summary Basic (sanity) test for JDK-under-test inside a docker image.
- * @requires (docker.support)
+ * @requires docker.support
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
@@ -62,8 +62,10 @@
 
 
     private static void testJavaVersion() throws Exception {
-        DockerTestUtils.dockerRunJava(
-            new DockerRunOptions(imageNameAndTag, "/jdk/bin/java", "-version"))
+        DockerRunOptions opts =
+            new DockerRunOptions(imageNameAndTag, "/jdk/bin/java", "-version");
+
+        DockerTestUtils.dockerRunJava(opts)
             .shouldHaveExitValue(0)
             .shouldContain(Platform.vmName);
     }
@@ -79,5 +81,4 @@
             .shouldHaveExitValue(0)
             .shouldContain("Hello Docker");
     }
-
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/PrintContainerInfo.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.hotspot.WhiteBox;
+
+public class PrintContainerInfo {
+
+    public static void main(String[] args) {
+        System.out.println("PrintContainerInfo: Entering");
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        wb.printOsInfo();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/TEST.properties	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,1 @@
+exclusiveAccess.dirs=.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/TestCPUAwareness.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @summary Test JVM's CPU resource awareness when running inside docker container
+ * @requires docker.support
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *          jdk.jartool/sun.tools.jar
+ * @build Common
+ * @run driver TestCPUAwareness
+ */
+import jdk.test.lib.containers.docker.DockerRunOptions;
+import jdk.test.lib.containers.docker.DockerTestUtils;
+
+
+public class TestCPUAwareness {
+    private static final String imageName = Common.imageName("cpu");
+
+    public static void main(String[] args) throws Exception {
+        if (!DockerTestUtils.canTestDocker()) {
+            return;
+        }
+
+        int availableCPUs = Runtime.getRuntime().availableProcessors();
+        System.out.println("Test Environment: detected availableCPUs = " + availableCPUs);
+        DockerTestUtils.buildJdkDockerImage(imageName, "Dockerfile-BasicTest", "jdk-docker");
+
+        try {
+            // cpuset, quota, period, shares, expected Active Processor Count
+            testAPCCombo("0", 200*1000, 100*1000,   4*1024, 1);
+            testAPCCombo("0,1", 200*1000, 100*1000, 4*1024, 2);
+            testAPCCombo("0,1", 200*1000, 100*1000, 1*1024, 2);
+
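+            // The shares-based expectations below assume the 1024-per-CPU scaling
+            // used by docker (values below 1024 still map to at least one CPU).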
+            testCpuShares(256, 1);
+            testCpuShares(2048, 2);
+            testCpuShares(4096, 4);
+
+            // leave one CPU for system and tools, otherwise this test may be unstable
+            int maxNrOfAvailableCpus =  availableCPUs - 1;
+            for (int i=1; i < maxNrOfAvailableCpus; i = i * 2) {
+                testCpus(i, i);
+            }
+
+            // If ActiveProcessorCount is set, the VM should use it, regardless of other
+            // container settings, host settings or available CPUs on the host.
+            testActiveProcessorCount(1, 1);
+            testActiveProcessorCount(2, 2);
+
+            testCpuQuotaAndPeriod(50*1000, 100*1000);
+            testCpuQuotaAndPeriod(100*1000, 100*1000);
+            testCpuQuotaAndPeriod(150*1000, 100*1000);
+
+        } finally {
+            DockerTestUtils.removeDockerImage(imageName);
+        }
+    }
+
+
+    private static void testActiveProcessorCount(int valueToSet, int expectedValue) throws Exception {
+        Common.logNewTestCase("Test ActiveProcessorCount: valueToSet = " + valueToSet);
+
+        DockerRunOptions opts = Common.newOpts(imageName)
+            .addJavaOpts("-XX:ActiveProcessorCount=" + valueToSet, "-Xlog:os=trace");
+        Common.run(opts)
+            .shouldMatch("active processor count set by user.*" + expectedValue);
+    }
+
+
+    private static void testCpus(int valueToSet, int expectedTraceValue) throws Exception {
+        Common.logNewTestCase("test cpus: " + valueToSet);
+        DockerRunOptions opts = Common.newOpts(imageName)
+            .addDockerOpts("--cpus", "" + valueToSet);
+        Common.run(opts)
+            .shouldMatch("active_processor_count.*" + expectedTraceValue);
+    }
+
+
+    private static void testCpuQuotaAndPeriod(int quota, int period)
+        throws Exception {
+        Common.logNewTestCase("test cpu quota and period: ");
+        System.out.println("quota = " + quota);
+        System.out.println("period = " + period);
+
+        int expectedAPC = (int) Math.ceil((float) quota / (float) period);
+        System.out.println("expectedAPC = " + expectedAPC);
+
+        DockerRunOptions opts = Common.newOpts(imageName)
+            .addDockerOpts("--cpu-period=" + period)
+            .addDockerOpts("--cpu-quota=" + quota);
+
+        Common.run(opts)
+            .shouldMatch("CPU Period is.*" + period)
+            .shouldMatch("CPU Quota is.*" + quota)
+            .shouldMatch("active_processor_count.*" + expectedAPC);
+    }
+
+
+    // Test correctness of the automatically selected active processor count
+    private static void testAPCCombo(String cpuset, int quota, int period, int shares,
+                                         int expectedAPC) throws Exception {
+        Common.logNewTestCase("test APC Combo");
+        System.out.println("cpuset = " + cpuset);
+        System.out.println("quota = " + quota);
+        System.out.println("period = " + period);
+        System.out.println("shares = " + period);
+        System.out.println("expectedAPC = " + expectedAPC);
+
+        DockerRunOptions opts = Common.newOpts(imageName)
+            .addDockerOpts("--cpuset-cpus", "" + cpuset)
+            .addDockerOpts("--cpu-period=" + period)
+            .addDockerOpts("--cpu-quota=" + quota)
+            .addDockerOpts("--cpu-shares=" + shares);
+        Common.run(opts)
+            .shouldMatch("active_processor_count.*" + expectedAPC);
+    }
+
+
+    private static void testCpuShares(int shares, int expectedAPC) throws Exception {
+        Common.logNewTestCase("test cpu shares, shares = " + shares);
+        DockerRunOptions opts = Common.newOpts(imageName)
+            .addDockerOpts("--cpu-shares=" + shares);
+        Common.run(opts)
+            .shouldMatch("CPU Shares is.*" + shares)
+            .shouldMatch("active_processor_count.*" + expectedAPC);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/TestCPUSets.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @summary Test JVM's awareness of cpu sets (cpus and mems)
+ * @requires docker.support
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *          jdk.jartool/sun.tools.jar
+ * @build Common AttemptOOM CPUSetsReader sun.hotspot.WhiteBox PrintContainerInfo
+ * @run driver ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run driver TestCPUSets
+ */
+import java.util.List;
+import jdk.test.lib.containers.docker.DockerRunOptions;
+import jdk.test.lib.containers.docker.DockerTestUtils;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.Platform;
+import jdk.test.lib.Utils;
+import jdk.test.lib.process.OutputAnalyzer;
+
+
+public class TestCPUSets {
+    private static final String imageName = Common.imageName("cpusets");
+
+    public static void main(String[] args) throws Exception {
+        if (!DockerTestUtils.canTestDocker()) {
+            return;
+        }
+
+        Common.prepareWhiteBox();
+        DockerTestUtils.buildJdkDockerImage(imageName, "Dockerfile-BasicTest", "jdk-docker");
+
+        try {
+            // Sanity test the cpu sets reader and parser
+            CPUSetsReader.test();
+            testTheSet("Cpus_allowed_list");
+            testTheSet("Mems_allowed_list");
+        } finally {
+            DockerTestUtils.removeDockerImage(imageName);
+        }
+    }
+
+
+    private static void testTheSet(String setType) throws Exception {
+        String cpuSetStr = CPUSetsReader.readFromProcStatus(setType);
+
+        if (cpuSetStr == null) {
+            System.out.printf("The %s test is skipped %n", setType);
+        } else {
+            List<Integer> cpuSet = CPUSetsReader.parseCpuSet(cpuSetStr);
+
+            // Test subset of one, full subset, and half of the subset
+            testCpuSet(CPUSetsReader.listToString(cpuSet, 1));
+            if (cpuSet.size() > 1) {
+                testCpuSet(CPUSetsReader.listToString(cpuSet));
+            }
+            if (cpuSet.size() > 2) {
+                testCpuSet(CPUSetsReader.listToString(cpuSet, cpuSet.size()/2 ));
+            }
+        }
+    }
+
+
+    private static DockerRunOptions commonOpts() {
+        DockerRunOptions opts = new DockerRunOptions(imageName, "/jdk/bin/java",
+                                                     "PrintContainerInfo");
+        opts.addDockerOpts("--volume", Utils.TEST_CLASSES + ":/test-classes/");
+        opts.addJavaOpts("-Xlog:os+container=trace", "-cp", "/test-classes/");
+        Common.addWhiteBoxOpts(opts);
+        return opts;
+    }
+
+
+    private static void checkResult(List<String> lines, String lineMarker, String value) {
+        boolean lineMarkerFound = false;
+
+        for (String line : lines) {
+            if (line.contains(lineMarker)) {
+                lineMarkerFound = true;
+                String[] parts = line.split(":");
+                System.out.println("DEBUG: line = " + line);
+                System.out.println("DEBUG: parts.length = " + parts.length);
+
+                Asserts.assertEquals(parts.length, 2);
+                String set = parts[1].replaceAll("\\s","");
+                String actual = CPUSetsReader.listToString(CPUSetsReader.parseCpuSet(set));
+                Asserts.assertEquals(actual, value);
+                break;
+            }
+        }
+        Asserts.assertTrue(lineMarkerFound);
+    }
+
+
+    private static void testCpuSet(String value) throws Exception {
+        Common.logNewTestCase("cpusets.cpus, value = " + value);
+
+        DockerRunOptions opts = commonOpts();
+        opts.addDockerOpts("--cpuset-cpus=" + value);
+
+        List<String> lines = Common.run(opts).asLines();
+        checkResult(lines, "cpuset.cpus is:", value);
+    }
+
+    private static void testMemSet(String value) throws Exception {
+        Common.logNewTestCase("cpusets.mems, value = " + value);
+
+        DockerRunOptions opts = commonOpts();
+        opts.addDockerOpts("--cpuset-mems=" + value);
+
+        List<String> lines = Common.run(opts).asLines();
+        checkResult(lines, "cpuset.mems is:", value);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/TestMemoryAwareness.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @summary Test JVM's memory resource awareness when running inside docker container
+ * @requires docker.support
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *          jdk.jartool/sun.tools.jar
+ * @build Common AttemptOOM sun.hotspot.WhiteBox PrintContainerInfo
+ * @run driver ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run driver TestMemoryAwareness
+ */
+import jdk.test.lib.containers.docker.DockerRunOptions;
+import jdk.test.lib.containers.docker.DockerTestUtils;
+
+
+public class TestMemoryAwareness {
+    private static final String imageName = Common.imageName("memory");
+
+    public static void main(String[] args) throws Exception {
+        if (!DockerTestUtils.canTestDocker()) {
+            return;
+        }
+
+        Common.prepareWhiteBox();
+        DockerTestUtils.buildJdkDockerImage(imageName, "Dockerfile-BasicTest", "jdk-docker");
+
+        try {
+            testMemoryLimit("100m", "104857600");
+            testMemoryLimit("500m", "524288000");
+            testMemoryLimit("1g", "1073741824");
+            testMemoryLimit("4g", "4294967296");
+
+            testMemorySoftLimit("500m", "524288000");
+            testMemorySoftLimit("1g", "1073741824");
+
+            // Allocate 10 MB more than the container memory limit to be sure to cause OOM
+            testOOM("256m", 256 + 10);
+
+        } finally {
+            DockerTestUtils.removeDockerImage(imageName);
+        }
+    }
+
+
+    private static void testMemoryLimit(String valueToSet, String expectedTraceValue)
+            throws Exception {
+
+        Common.logNewTestCase("memory limit: " + valueToSet);
+
+        DockerRunOptions opts = Common.newOpts(imageName)
+            .addDockerOpts("--memory", valueToSet);
+
+        Common.run(opts)
+            .shouldMatch("Memory Limit is:.*" + expectedTraceValue);
+    }
+
+
+    private static void testMemorySoftLimit(String valueToSet, String expectedTraceValue)
+            throws Exception {
+        Common.logNewTestCase("memory soft limit: " + valueToSet);
+
+        DockerRunOptions opts = Common.newOpts(imageName, "PrintContainerInfo");
+        Common.addWhiteBoxOpts(opts);
+        opts.addDockerOpts("--memory-reservation=" + valueToSet);
+
+        Common.run(opts)
+            .shouldMatch("Memory Soft Limit.*" + expectedTraceValue);
+    }
+
+
+    // provoke OOM inside the container and see how the VM reacts
+    private static void testOOM(String dockerMemLimit, int sizeToAllocInMb) throws Exception {
+        Common.logNewTestCase("OOM");
+
+        DockerRunOptions opts = Common.newOpts(imageName, "AttemptOOM")
+            .addDockerOpts("--memory", dockerMemLimit, "--memory-swap", dockerMemLimit);
+        opts.classParams.add("" + sizeToAllocInMb);
+
+        DockerTestUtils.dockerRunJava(opts)
+            .shouldHaveExitValue(1)
+            .shouldContain("Entering AttemptOOM main")
+            .shouldNotContain("AttemptOOM allocation successful")
+            .shouldContain("java.lang.OutOfMemoryError");
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/docker/TestMisc.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test
+ * @summary Test miscellaneous functionality related to JVM running in docker container
+ * @requires docker.support
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ *          jdk.jartool/sun.tools.jar
+ * @build Common CheckContainerized sun.hotspot.WhiteBox PrintContainerInfo
+ * @run driver ClassFileInstaller -jar whitebox.jar sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run driver TestMisc
+ */
+import jdk.test.lib.containers.docker.DockerRunOptions;
+import jdk.test.lib.containers.docker.DockerTestUtils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+
+public class TestMisc {
+    private static final String imageName = Common.imageName("misc");
+
+    public static void main(String[] args) throws Exception {
+        if (!DockerTestUtils.canTestDocker()) {
+            return;
+        }
+
+        Common.prepareWhiteBox();
+        DockerTestUtils.buildJdkDockerImage(imageName, "Dockerfile-BasicTest", "jdk-docker");
+
+        try {
+            testMinusContainerSupport();
+            testIsContainerized();
+            testPrintContainerInfo();
+        } finally {
+            DockerTestUtils.removeDockerImage(imageName);
+        }
+    }
+
+
+    private static void testMinusContainerSupport() throws Exception {
+        Common.logNewTestCase("Test related flags: '-UseContainerSupport'");
+        DockerRunOptions opts = new DockerRunOptions(imageName, "/jdk/bin/java", "-version");
+        opts.addJavaOpts("-XX:-UseContainerSupport", "-Xlog:os+container=trace");
+
+        Common.run(opts)
+            .shouldContain("Container Support not enabled");
+    }
+
+
+    private static void testIsContainerized() throws Exception {
+        Common.logNewTestCase("Test is_containerized() inside a docker container");
+
+        DockerRunOptions opts = Common.newOpts(imageName, "CheckContainerized");
+        Common.addWhiteBoxOpts(opts);
+
+        Common.run(opts)
+            .shouldContain(CheckContainerized.INSIDE_A_CONTAINER);
+    }
+
+
+    private static void testPrintContainerInfo() throws Exception {
+        Common.logNewTestCase("Test print_container_info()");
+
+        DockerRunOptions opts = Common.newOpts(imageName, "PrintContainerInfo");
+        Common.addWhiteBoxOpts(opts);
+
+        checkContainerInfo(Common.run(opts));
+    }
+
+
+    private static void checkContainerInfo(OutputAnalyzer out) throws Exception {
+        String[] expectedToContain = new String[] {
+            "cpuset.cpus",
+            "cpuset.mems",
+            "CPU Shares",
+            "CPU Quota",
+            "CPU Period",
+            "OSContainer::active_processor_count",
+            "Memory Limit",
+            "Memory Soft Limit",
+            "Memory Usage",
+            "Maximum Memory Usage",
+            "memory_max_usage_in_bytes"
+        };
+
+        for (String s : expectedToContain) {
+            out.shouldContain(s);
+        }
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/handshake/HandshakeTransitionTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.io.File;
+import java.nio.file.Paths;
+import java.time.Duration;
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+
+/*
+ * @test HandshakeTransitionTest
+ * @summary This does a sanity test of the poll in the native wrapper.
+ * @requires vm.debug
+ * @library /testlibrary /test/lib
+ * @build HandshakeTransitionTest
+ * @run main/native HandshakeTransitionTest
+ */
+
+public class HandshakeTransitionTest {
+
+    public static native void someTime(int ms);
+
+    public static void main(String[] args) throws Exception {
+        String lib = System.getProperty("test.nativepath");
+        ProcessBuilder pb =
+            ProcessTools.createJavaProcessBuilder(
+                    true,
+                    "-Djava.library.path=" + lib,
+                    "-XX:+SafepointALot",
+                    "-XX:GuaranteedSafepointInterval=20",
+                    "-Xlog:ergo*",
+                    "-XX:ParallelGCThreads=1",
+                    "-XX:ConcGCThreads=1",
+                    "-XX:CICompilerCount=2",
+                    "HandshakeTransitionTest$Test");
+
+
+        OutputAnalyzer output = ProcessTools.executeProcess(pb);
+        output.reportDiagnosticSummary();
+        output.shouldHaveExitValue(0);
+        output.stdoutShouldContain("JOINED");
+    }
+
+    static class Test implements Runnable {
+        final static int testLoops = 2000;
+        final static int testSleep = 1; //ms
+
+        public static void main(String[] args) throws Exception {
+            System.loadLibrary("HandshakeTransitionTest");
+            Test test = new Test();
+            Thread[] threads = new Thread[64];
+            for (int i = 0; i < threads.length; i++) {
+                threads[i] = new Thread(test);
+                threads[i].start();
+            }
+            for (Thread t : threads) {
+                t.join();
+            }
+            System.out.println("JOINED");
+        }
+
+        @Override
+        public void run() {
+            try {
+                for (int i = 0; i < testLoops; i++) {
+                    someTime(testSleep);
+                }
+            } catch (Exception e) {
+                System.out.println(e.getMessage());
+                System.exit(1);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/handshake/HandshakeWalkExitTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test HandshakeWalkExitTest
+ * @summary This test tries to stress the handshakes with new and exiting threads
+ * @library /testlibrary /test/lib
+ * @build HandshakeWalkExitTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI HandshakeWalkExitTest
+ */
+
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+public class HandshakeWalkExitTest  implements Runnable {
+
+    @Override
+    public void run() {
+    }
+
+    static volatile boolean exit_now = false;
+    static Thread[] threads;
+
+    public static void main(String... args) throws Exception {
+        int testRuns = 100;
+        int testThreads = 500;
+
+        HandshakeWalkExitTest test = new HandshakeWalkExitTest();
+
+        threads = new Thread[64];
+
+        Runnable hser = new Runnable() {
+            public void run() {
+                WhiteBox wb = WhiteBox.getWhiteBox();
+                while (!exit_now) {
+                    wb.handshakeWalkStack(null, true);
+                    try { Thread.sleep(1); } catch (Exception e) {}
+                }
+            }
+        };
+        Thread hst = new Thread(hser);
+        hst.start();
+        for (int k = 0; k < testRuns; k++) {
+            Thread[] threads = new Thread[testThreads];
+            for (int i = 0; i < threads.length; i++) {
+                threads[i] = new Thread(test);
+                threads[i].start();
+            }
+        }
+        exit_now = true;
+        hst.join();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/handshake/HandshakeWalkStackFallbackTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test HandshakeWalkStackFallbackTest
+ * @summary This tests the global safepoint fallback path for handshakes
+ * @library /testlibrary /test/lib
+ * @build HandshakeWalkStackTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:-ThreadLocalHandshakes HandshakeWalkStackTest
+ */
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/handshake/HandshakeWalkStackTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test HandshakeWalkStackTest
+ * @library /testlibrary /test/lib
+ * @build HandshakeWalkStackTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI HandshakeWalkStackTest
+ */
+
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+public class HandshakeWalkStackTest {
+
+    public static void main(String... args) throws Exception {
+        int iterations = 3;
+        if (args.length > 0) {
+            iterations = Integer.parseInt(args[0]);
+        }
+        test(iterations);
+    }
+
+    private static void test(int iterations) throws Exception {
+        Thread loop_thread  = new Thread(() -> run_loop(create_list()));
+        Thread alloc_thread = new Thread(() -> run_alloc());
+        Thread wait_thread  = new Thread(() -> run_wait(new Object() {}));
+        loop_thread.start();
+        alloc_thread.start();
+        wait_thread.start();
+
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        int walked = 0;
+        for (int i = 0; i < iterations; i++) {
+            System.out.println("Iteration " + i);
+            System.out.flush();
+            Thread.sleep(200);
+            walked = wb.handshakeWalkStack(loop_thread, false);
+            Asserts.assertEQ(walked, 1, "Must have walked one thread stack");
+            Thread.sleep(200);
+            walked = wb.handshakeWalkStack(alloc_thread, false);
+            Asserts.assertEQ(walked, 1, "Must have walked one thread stack");
+            Thread.sleep(200);
+            walked = wb.handshakeWalkStack(wait_thread, false);
+            Asserts.assertEQ(walked, 1, "Must have walked one thread stack");
+            Thread.sleep(200);
+            walked = wb.handshakeWalkStack(Thread.currentThread(), false);
+            Asserts.assertEQ(walked, 1, "Must have walked one thread stack");
+        }
+        Thread.sleep(200);
+        walked = wb.handshakeWalkStack(null, true);
+        Asserts.assertGT(walked, 4, "Must have walked more than four thread stacks");
+    }
+
+    static class List {
+        List next;
+
+        List(List next) {
+            this.next = next;
+        }
+    }
+
+    public static List create_list() {
+        List head = new List(null);
+        List elem = new List(head);
+        List elem2 = new List(elem);
+        List elem3 = new List(elem2);
+        List elem4 = new List(elem3);
+        head.next = elem4;
+
+        return head;
+    }
+
+    public static void run_loop(List loop) {
+        while (loop.next != null) {
+            loop = loop.next;
+        }
+    }
+
+    public static byte[] array;
+
+    public static void run_alloc() {
+        while (true) {
+            // Write to public static to ensure the byte array escapes.
+            array = new byte[4096];
+        }
+    }
+
+    public static void run_wait(Object lock) {
+        synchronized (lock) {
+            try {
+                lock.wait();
+            } catch (InterruptedException ie) {}
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/handshake/libHandshakeTransitionTest.c	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <jni.h>
+
+#ifdef WINDOWS
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+
+JNIEXPORT void JNICALL Java_HandshakeTransitionTest_someTime
+  (JNIEnv *env, jclass jc, jint ms)
+{
+#ifdef WINDOWS
+  Sleep(ms);
+#else
+  usleep(ms*1000);
+#endif
+}
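
The exported symbol above follows the standard JNI mangling Java_<class>_<method>, which is how it resolves against the native declaration added in HandshakeTransitionTest.java earlier in this changeset. A condensed view of the pairing, for illustration only (not an additional file in the patch):

    // Java side (HandshakeTransitionTest.java in this changeset):
    public class HandshakeTransitionTest {
        // Resolved at runtime against Java_HandshakeTransitionTest_someTime in
        // libHandshakeTransitionTest, which the Test subclass loads via
        // System.loadLibrary("HandshakeTransitionTest").
        public static native void someTime(int ms);
    }
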
--- a/test/hotspot/jtreg/serviceability/dcmd/jvmti/LoadAgentDcmdTest.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/hotspot/jtreg/serviceability/dcmd/jvmti/LoadAgentDcmdTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
  *          jdk.internal.jvmstat/sun.jvmstat.monitor
  * @build SimpleJvmtiAgent
  * @run main ClassFileInstaller SimpleJvmtiAgent
- * @run testng LoadAgentDcmdTest
+ * @run testng/othervm LoadAgentDcmdTest
  */
 public class LoadAgentDcmdTest {
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbFlags.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+import jdk.test.lib.Utils;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb flags command
+ * @library /test/lib
+ * @run main/othervm ClhsdbFlags
+ */
+
+public class ClhsdbFlags {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbFlags test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            List<String> vmArgs = new ArrayList<String>();
+            vmArgs.add("-XX:+UnlockExperimentalVMOptions");
+            vmArgs.add("-XX:+UseJVMCICompiler");
+            vmArgs.add("-XX:-MaxFDLimit");
+            vmArgs.addAll(Utils.getVmOptions());
+            theApp = LingeredApp.startApp(vmArgs);
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of(
+                    "flags", "flags -nd",
+                    "flags UseJVMCICompiler", "flags MaxFDLimit",
+                    "flags MaxJavaStackTraceDepth");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("flags", List.of(
+                    "UseJVMCICompiler = true",
+                    "MaxFDLimit = false",
+                    "MaxJavaStackTraceDepth = 1024",
+                    "UseCompressedClassPointers", "VerifyMergedCPBytecodes",
+                    "ConcGCThreads", "UseThreadPriorities",
+                    "UseInterpreter", "StartFlightRecording",
+                    "ShowHiddenFrames", "UseAppCDS"));
+            expStrMap.put("flags -nd", List.of(
+                    "UseJVMCICompiler = true",
+                    "MaxFDLimit = false",
+                    "UseCompressedClassPointers",
+                    "ConcGCThreads"));
+            expStrMap.put("flags UseJVMCICompiler", List.of(
+                    "UseJVMCICompiler = true"));
+            expStrMap.put("flags MaxFDLimit", List.of(
+                    "MaxFDLimit = false"));
+            expStrMap.put("flags MaxJavaStackTraceDepth", List.of(
+                    "MaxJavaStackTraceDepth = 1024"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbJstack.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb Jstack command
+ * @library /test/lib
+ * @run main/othervm ClhsdbJstack
+ */
+
+public class ClhsdbJstack {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbJstack test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of("jstack -v");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("jstack -v", List.of(
+                    "No deadlocks found",
+                    "Common-Cleaner",
+                    "Signal Dispatcher",
+                    "java.lang.ref.Finalizer$FinalizerThread.run",
+                    "java.lang.ref.Reference",
+                    "Method*",
+                    "LingeredApp.main"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLauncher.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+import jdk.test.lib.JDKToolLauncher;
+import jdk.test.lib.process.OutputAnalyzer;
+
+/**
+ * This is a framework to run 'jhsdb clhsdb' commands.
+ * See open/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java for
+ * an example of how to write a test.
+ */
+
+public class ClhsdbLauncher {
+
+    private Process toolProcess;
+
+    public ClhsdbLauncher() {
+        toolProcess = null;
+    }
+
+    /**
+     *
+     * Launches 'jhsdb clhsdb' and attaches to the Lingered App process.
+     * @param lingeredAppPid  - pid of the Lingered App or one of its sub-classes.
+     */
+    private void attach(long lingeredAppPid)
+        throws IOException {
+
+        System.out.println("Starting clhsdb against " + lingeredAppPid);
+        JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jhsdb");
+        launcher.addToolArg("clhsdb");
+        launcher.addToolArg("--pid=" + Long.toString(lingeredAppPid));
+
+        ProcessBuilder processBuilder = new ProcessBuilder(launcher.getCommand());
+        processBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
+
+        toolProcess = processBuilder.start();
+    }
+
+    /**
+     *
+     * Runs 'jhsdb clhsdb' commands and checks for expected and unexpected strings.
+     * @param commands  - clhsdb commands to execute.
+     * @param expectedStrMap - Map of expected strings per command which need to
+     *                         be checked in the output of the command.
+     * @param unExpectedStrMap - Map of unexpected strings per command which should
+     *                           not be present in the output of the command.
+     * @return Output of the commands as a String.
+     */
+    private String runCmd(List<String> commands,
+                          Map<String, List<String>> expectedStrMap,
+                          Map<String, List<String>> unExpectedStrMap)
+        throws IOException, InterruptedException {
+        String output;
+
+        if (commands == null) {
+            throw new RuntimeException("CLHSDB command must be provided\n");
+        }
+
+        try (OutputStream out = toolProcess.getOutputStream()) {
+            for (String cmd : commands) {
+                out.write((cmd + "\n").getBytes());
+            }
+            out.write("quit\n".getBytes());
+            out.flush();
+        }
+
+        OutputAnalyzer oa = new OutputAnalyzer(toolProcess);
+        try {
+            toolProcess.waitFor();
+        } catch (InterruptedException ie) {
+            toolProcess.destroyForcibly();
+            throw new Error("Problem awaiting the child process: " + ie);
+        }
+
+        oa.shouldHaveExitValue(0);
+        output = oa.getOutput();
+        System.out.println(output);
+
+        String[] parts = output.split("hsdb>");
+        for (String cmd : commands) {
+            int index = commands.indexOf(cmd) + 1;
+            OutputAnalyzer out = new OutputAnalyzer(parts[index]);
+
+            if (expectedStrMap != null) {
+                List<String> expectedStr = expectedStrMap.get(cmd);
+                if (expectedStr != null) {
+                    for (String exp : expectedStr) {
+                        out.shouldContain(exp);
+                    }
+                }
+            }
+
+            if (unExpectedStrMap != null) {
+                List<String> unExpectedStr = unExpectedStrMap.get(cmd);
+                if (unExpectedStr != null) {
+                    for (String unExp : unExpectedStr) {
+                        out.shouldNotContain(unExp);
+                    }
+                }
+            }
+        }
+        return output;
+    }
+
+    /**
+     *
+     * Launches 'jhsdb clhsdb', attaches to the Lingered App, executes the
+     * commands, and checks for expected and unexpected strings.
+     * @param lingeredAppPid  - pid of the Lingered App or one of its sub-classes.
+     * @param commands  - clhsdb commands to execute.
+     * @param expectedStrMap - Map of expected strings per command which need to
+     *                         be checked in the output of the command.
+     * @param unExpectedStrMap - Map of unexpected strings per command which should
+     *                           not be present in the output of the command.
+     * @return Output of the commands as a String.
+     */
+    public String run(long lingeredAppPid,
+                      List<String> commands,
+                      Map<String, List<String>> expectedStrMap,
+                      Map<String, List<String>> unExpectedStrMap)
+        throws IOException, InterruptedException {
+
+        if (!Platform.shouldSAAttach()) {
+            // Silently skip the test if we don't have enough permissions to attach
+            System.out.println("SA attach not expected to work - test skipped.");
+            return null;
+        }
+
+        attach(lingeredAppPid);
+        return runCmd(commands, expectedStrMap, unExpectedStrMap);
+    }
+}
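
To make the intended use of the ClhsdbLauncher framework above concrete, here is a minimal usage sketch. It is hypothetical: the class name ClhsdbUsageSketch is not part of this changeset, and the jtreg tags (@test, @library /test/lib) that a real test needs are omitted. The command and expected string are borrowed from ClhsdbFlags earlier in this changeset, so the sketch relies only on behavior those tests already verify.

    import java.util.List;
    import java.util.Map;

    import jdk.test.lib.apps.LingeredApp;

    public class ClhsdbUsageSketch {
        public static void main(String[] args) throws Exception {
            LingeredApp theApp = null;
            try {
                ClhsdbLauncher launcher = new ClhsdbLauncher();
                theApp = LingeredApp.startApp();

                // Each entry is one command typed at the 'jhsdb clhsdb' prompt.
                List<String> cmds = List.of("flags MaxJavaStackTraceDepth");

                // Strings that must appear in the output of the given command.
                Map<String, List<String>> expected = Map.of(
                        "flags MaxJavaStackTraceDepth",
                        List.of("MaxJavaStackTraceDepth"));

                // run() attaches to the pid, feeds the commands, and checks the
                // expected (and, if given, unexpected) strings per command.
                launcher.run(theApp.getPid(), cmds, expected, null);
            } finally {
                LingeredApp.stopApp(theApp);
            }
        }
    }

The per-command matching works because runCmd() splits the clhsdb output on the "hsdb>" prompt and pairs each piece with the command at the same position in the input list.
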
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbLongConstant.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb longConstant command
+ * @library /test/lib
+ * @run main/othervm ClhsdbLongConstant
+ */
+
+public class ClhsdbLongConstant {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbLongConstant test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of(
+                    "longConstant",
+                    "longConstant markOopDesc::locked_value",
+                    "longConstant markOopDesc::lock_bits",
+                    "longConstant jtreg::test 6",
+                    "longConstant jtreg::test");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("longConstant", List.of(
+                    "longConstant markOopDesc::locked_value",
+                    "longConstant markOopDesc::lock_bits",
+                    "InvocationCounter::count_increment",
+                    "markOopDesc::epoch_mask_in_place"));
+            expStrMap.put("longConstant markOopDesc::locked_value", List.of(
+                    "longConstant markOopDesc::locked_value"));
+            expStrMap.put("longConstant markOopDesc::lock_bits", List.of(
+                    "longConstant markOopDesc::lock_bits"));
+            expStrMap.put("longConstant jtreg::test", List.of(
+                    "longConstant jtreg::test 6"));
+
+            Map<String, List<String>> unExpStrMap = new HashMap<>();
+            unExpStrMap.put("longConstant jtreg::test", List.of(
+                    "Error: java.lang.RuntimeException: No long constant named"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, unExpStrMap);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbPmap.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb pmap command
+ * @library /test/lib
+ * @requires os.family != "mac"
+ * @run main/othervm ClhsdbPmap
+ */
+
+public class ClhsdbPmap {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbPmap test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of("pmap");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("pmap", List.of(
+                    "jvm", "java", "net", "nio",
+                    "jimage", "zip", "verify"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbPrintStatics.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb printstatics command
+ * @library /test/lib
+ * @run main/othervm ClhsdbPrintStatics
+ */
+
+public class ClhsdbPrintStatics {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbPrintStatics test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of(
+                    "printstatics", "printstatics SystemDictionary",
+                    "printstatics Threads", "printstatics Universe",
+                    "printstatics JvmtiExport");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("printstatics", List.of(
+                    "All known static fields",
+                    "Abstract_VM_Version::_vm_major_version",
+                    "ClassLoaderDataGraph::_head", "SymbolTable::_the_table",
+                    "JNIHandles::_weak_global_handles", "PerfMemory::_top",
+                    "_jfr_checkpoints", "ObjectSynchronizer::gBlockList",
+                    "java_lang_Class::_oop_size_offset",
+                    "CodeCache::_scavenge_root_nmethods"));
+            expStrMap.put("printstatics SystemDictionary", List.of(
+                    "Static fields of SystemDictionary",
+                    "SystemDictionary::Class_klass_knum",
+                    "SystemDictionary::ClassLoader_klass_knum",
+                    "SystemDictionary::Object_klass_knum"));
+            expStrMap.put("printstatics Threads", List.of(
+                    "Static fields of Threads",
+                    "_number_of_threads", "_number_of_non_daemon_threads",
+                    "JavaThread* Threads"));
+            expStrMap.put("printstatics Universe", List.of(
+                    "Static fields of Universe",
+                    "uintptr_t Universe::_verify_oop_mask",
+                    "intptr_t Universe::_non_oop_bits",
+                    "bool Universe::_fully_initialized",
+                    "Universe::_doubleArrayKlassObj"));
+            expStrMap.put("printstatics JvmtiExport", List.of(
+                    "Static fields of JvmtiExport",
+                    "bool JvmtiExport::_can_access_local_variables",
+                    "bool JvmtiExport::_can_hotswap_or_post_breakpoint",
+                    "bool JvmtiExport::_can_post_on_exceptions"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbPstack.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb pstack command
+ * @library /test/lib
+ * @requires os.family != "mac"
+ * @run main/othervm ClhsdbPstack
+ */
+
+public class ClhsdbPstack {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbPstack test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of("pstack -v");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("pstack -v", List.of(
+                    "No deadlocks found", "Common-Cleaner",
+                    "Signal Dispatcher", "CompilerThread",
+                    "Sweeper thread", "Service Thread",
+                    "Reference Handler", "Finalizer", "main"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbSymbol.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb symboldump command
+ * @library /test/lib
+ * @run main/othervm ClhsdbSymbol
+ */
+
+public class ClhsdbSymbol {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbSymbol test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of("symboldump");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("symboldump", List.of(
+                    "java/lang/String", "java/util/HashMap", "UsageTracker",
+                    "Ljava/io/InputStream", "LambdaMetafactory", "PerfCounter",
+                    "isAnonymousClass", "JVMTI_THREAD_STATE_TERMINATED", "jdi",
+                    "checkGetClassLoaderPermission", "lockCreationTime",
+                    "storedAppOutput", "storedAppOutput", "getProcess",
+                    "LingeredApp"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbWhere.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.Platform;
+
+/*
+ * @test
+ * @bug 8190198
+ * @summary Test clhsdb where command
+ * @library /test/lib
+ * @run main/othervm ClhsdbWhere
+ */
+
+public class ClhsdbWhere {
+
+    public static void main(String[] args) throws Exception {
+        System.out.println("Starting ClhsdbWhere test");
+
+        LingeredApp theApp = null;
+        try {
+            ClhsdbLauncher test = new ClhsdbLauncher();
+            theApp = LingeredApp.startApp();
+            System.out.println("Started LingeredApp with pid " + theApp.getPid());
+
+            List<String> cmds = List.of("where -a");
+
+            Map<String, List<String>> expStrMap = new HashMap<>();
+            expStrMap.put("where -a", List.of(
+                    "Java Stack Trace for Service Thread",
+                    "Java Stack Trace for Common-Cleaner",
+                    "Java Stack Trace for Sweeper thread",
+                    "CompilerThread",
+                    "Java Stack Trace for Finalizer",
+                    "java.lang.ref.Reference",
+                    "private static void processPendingReferences",
+                    "private static native void waitForReferencePendingList",
+                    "Java Stack Trace for main",
+                    "public static native void sleep"));
+
+            test.run(theApp.getPid(), cmds, expStrMap, null);
+        } catch (Exception ex) {
+            throw new RuntimeException("Test ERROR " + ex, ex);
+        } finally {
+            LingeredApp.stopApp(theApp);
+        }
+
+        System.out.println("Test PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.io.IOException;
+import java.util.stream.Collectors;
+import java.io.OutputStream;
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.JDKToolLauncher;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Utils;
+
+/*
+ * @test
+ * @summary Test the 'intConstant' command of jhsdb clhsdb.
+ * @bug 8190307
+ * @library /test/lib
+ * @build jdk.test.lib.apps.*
+ * @run main/othervm TestIntConstant
+ */
+
+public class TestIntConstant {
+
+    private static void testClhsdbForIntConstant(
+                        long lingeredAppPid,
+                        String commandString,
+                        String[] expectedOutputStrings) throws Exception {
+
+        Process p;
+        JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jhsdb");
+        launcher.addToolArg("clhsdb");
+        launcher.addToolArg("--pid");
+        launcher.addToolArg(Long.toString(lingeredAppPid));
+
+        ProcessBuilder pb = new ProcessBuilder();
+        pb.command(launcher.getCommand());
+        pb.redirectError(ProcessBuilder.Redirect.INHERIT);
+        System.out.println(
+            pb.command().stream().collect(Collectors.joining(" ")));
+
+        try {
+            p = pb.start();
+        } catch (Exception attachE) {
+            throw new Error("Couldn't start jhsdb or attach to LingeredApp : " + attachE);
+        }
+
+        // Issue the 'intConstant' inputs at the clhsdb prompt.
+        OutputStream input = p.getOutputStream();
+        try {
+            input.write((commandString + "\n").getBytes());
+            input.write("quit\n".getBytes());
+            input.flush();
+        } catch (IOException ioe) {
+            throw new Error("Problem issuing the intConstant command: " +
+                            commandString + ioe);
+        }
+
+        OutputAnalyzer output = new OutputAnalyzer(p);
+
+        System.out.println("Awaiting process completion");
+        try {
+            p.waitFor();
+        } catch (InterruptedException ie) {
+            p.destroyForcibly();
+            throw new Error("Problem awaiting the child process: " + ie);
+        }
+
+        output.shouldHaveExitValue(0);
+        System.out.println(output.getOutput());
+        for (String expectedOutputString: expectedOutputStrings) {
+            output.shouldContain(expectedOutputString);
+        }
+    }
+
+    public static void testIntConstant() throws Exception {
+        LingeredApp app = null;
+
+        try {
+            List<String> vmArgs = new ArrayList<String>();
+            vmArgs.addAll(Utils.getVmOptions());
+
+            app = LingeredApp.startApp(vmArgs);
+            System.out.println ("Started LingeredApp with pid " + app.getPid());
+
+            // Strings to check for in the output of 'intConstant'. The
+            // 'intConstant' command prints out entries from
+            // 'gHotSpotVMIntConstants', a table of integer constants whose
+            // names and values are derived from enums and #define preprocessor
+            // macros in hotspot.
+            String[] defaultOutputStrings =
+                {"CollectedHeap::G1CollectedHeap 2",
+                 "RUNNABLE 2",
+                 "Deoptimization::Reason_class_check 4",
+                 "InstanceKlass::_misc_is_anonymous 32",
+                 "Generation::ParNew 1",
+                 "_thread_uninitialized 0"};
+            String[] tempConstantString = {"intConstant _temp_constant 45"};
+            testClhsdbForIntConstant(app.getPid(), "intConstant", defaultOutputStrings);
+            testClhsdbForIntConstant(
+                app.getPid(),
+                "intConstant _temp_constant 45\nintConstant _temp_constant",
+                tempConstantString);
+        } finally {
+            LingeredApp.stopApp(app);
+        }
+    }
+
+    public static void main (String... args) throws Exception {
+
+        if (!Platform.shouldSAAttach()) {
+            System.out.println(
+               "SA attach not expected to work - test skipped.");
+            return;
+        }
+
+        try {
+            testIntConstant();
+        } catch (Exception e) {
+            throw new Error("Test failed with " + e);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/TestType.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.io.IOException;
+import java.util.stream.Collectors;
+import java.io.OutputStream;
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.JDKToolLauncher;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Utils;
+
+/*
+ * @test
+ * @summary Test the 'type' command of jhsdb clhsdb.
+ * @bug 8190307
+ * @library /test/lib
+ * @build jdk.test.lib.apps.*
+ * @run main/othervm TestType
+ */
+
+public class TestType {
+
+    private static void testClhsdbForType(
+                        long lingeredAppPid,
+                        String commandString,
+                        String[] expectedOutputStrings) throws Exception {
+
+        Process p;
+        JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jhsdb");
+        launcher.addToolArg("clhsdb");
+        launcher.addToolArg("--pid");
+        launcher.addToolArg(Long.toString(lingeredAppPid));
+
+        ProcessBuilder pb = new ProcessBuilder();
+        pb.command(launcher.getCommand());
+        System.out.println(
+            pb.command().stream().collect(Collectors.joining(" ")));
+
+        try {
+            p = pb.start();
+        } catch (Exception attachE) {
+            throw new Error("Couldn't start jhsdb or attach to LingeredApp : " + attachE);
+        }
+
+        // Issue the 'type' commands at the clhsdb prompt.
+        OutputStream input = p.getOutputStream();
+        try {
+            input.write((commandString + "\n").getBytes());
+            input.write("quit\n".getBytes());
+            input.flush();
+        } catch (IOException ioe) {
+            throw new Error("Problem issuing the 'type' command ", ioe);
+        }
+
+        OutputAnalyzer output = new OutputAnalyzer(p);
+
+        try {
+            p.waitFor();
+        } catch (InterruptedException ie) {
+            p.destroyForcibly();
+            throw new Error("Problem awaiting the child process: " + ie);
+        }
+
+        output.shouldHaveExitValue(0);
+        System.out.println(output.getOutput());
+
+        for (String expectedOutputString: expectedOutputStrings) {
+            output.shouldContain(expectedOutputString);
+        }
+    }
+
+    public static void main (String... args) throws Exception {
+        LingeredApp app = null;
+
+        if (!Platform.shouldSAAttach()) {
+            System.out.println(
+               "SA attach not expected to work - test skipped.");
+            return;
+        }
+
+        try {
+            List<String> vmArgs = new ArrayList<String>();
+            vmArgs.addAll(Utils.getVmOptions());
+            // Strings to check for in the output of 'type'. The 'type'
+            // command prints out entries from 'gHotSpotVMTypes', a table
+            // containing the hotspot types, their supertypes, sizes, etc.,
+            // that are of interest to the SA.
+            String[] defaultOutputStrings =
+                {"type G1CollectedHeap CollectedHeap",
+                 "type ConstantPoolCache MetaspaceObj",
+                 "type ConstantPool Metadata",
+                 "type CompilerThread JavaThread",
+                 "type CardGeneration Generation",
+                 "type ArrayKlass Klass",
+                 "type InstanceKlass Klass"};
+            // String to check for in the output of "type InstanceKlass"
+            String[] instanceKlassOutputString = {"type InstanceKlass Klass"};
+
+            app = LingeredApp.startApp(vmArgs);
+            System.out.println ("Started LingeredApp with pid " + app.getPid());
+            testClhsdbForType(app.getPid(), "type", defaultOutputStrings);
+            testClhsdbForType(app.getPid(),
+                              "type InstanceKlass",
+                              instanceKlassOutputString);
+        } finally {
+            LingeredApp.stopApp(app);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/TestUniverse.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+import java.io.IOException;
+import java.util.stream.Collectors;
+import java.io.OutputStream;
+import jdk.test.lib.apps.LingeredApp;
+import jdk.test.lib.JDKToolLauncher;
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.OutputAnalyzer;
+
+/*
+ * @test
+ * @summary Test the 'universe' command of jhsdb clhsdb.
+ * @bug 8190307
+ * @library /test/lib
+ * @build jdk.test.lib.apps.*
+ * @run main/othervm TestUniverse
+ */
+
+public class TestUniverse {
+
+    private static void testClhsdbForUniverse(long lingeredAppPid,
+                                              String gc) throws Exception {
+
+        Process p;
+        JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jhsdb");
+        launcher.addToolArg("clhsdb");
+        launcher.addToolArg("--pid");
+        launcher.addToolArg(Long.toString(lingeredAppPid));
+
+        ProcessBuilder pb = new ProcessBuilder();
+        pb.command(launcher.getCommand());
+        System.out.println(
+            pb.command().stream().collect(Collectors.joining(" ")));
+
+        try {
+            p = pb.start();
+        } catch (Exception attachE) {
+            throw new Error("Couldn't start jhsdb or attach to LingeredApp : " + attachE);
+        }
+
+        // Issue the 'universe' command at the clhsdb prompt.
+        OutputStream input = p.getOutputStream();
+        try {
+            input.write("universe\n".getBytes());
+            input.write("quit\n".getBytes());
+            input.flush();
+        } catch (IOException ioe) {
+            throw new Error("Problem issuing the 'universe' command ", ioe);
+        }
+
+        OutputAnalyzer output = new OutputAnalyzer(p);
+
+        try {
+            p.waitFor();
+        } catch (InterruptedException ie) {
+            p.destroyForcibly();
+            throw new Error("Problem awaiting the child process: " + ie, ie);
+        }
+
+        output.shouldHaveExitValue(0);
+        System.out.println(output.getOutput());
+
+        output.shouldContain("Heap Parameters");
+        if (gc.contains("G1GC")) {
+            output.shouldContain("garbage-first heap");
+        }
+        if (gc.contains("UseConcMarkSweepGC")) {
+            output.shouldContain("Gen 1: concurrent mark-sweep generation");
+        }
+        if (gc.contains("UseSerialGC")) {
+            output.shouldContain("Gen 1:   old");
+        }
+        if (gc.contains("UseParallelGC")) {
+            output.shouldContain("ParallelScavengeHeap");
+            output.shouldContain("PSYoungGen");
+            output.shouldContain("eden");
+        }
+
+    }
+
+    public static void test(String gc) throws Exception {
+        LingeredApp app = null;
+        try {
+            List<String> vmArgs = new ArrayList<String>();
+            vmArgs.add(gc);
+            app = LingeredApp.startApp(vmArgs);
+            System.out.println ("Started LingeredApp with the GC option " + gc +
+                                " and pid " + app.getPid());
+            testClhsdbForUniverse(app.getPid(), gc);
+        } finally {
+            LingeredApp.stopApp(app);
+        }
+    }
+
+
+    public static void main(String... args) throws Exception {
+
+        if (!Platform.shouldSAAttach()) {
+            System.out.println(
+               "SA attach not expected to work - test skipped.");
+            return;
+        }
+
+        try {
+            test("-XX:+UseG1GC");
+            test("-XX:+UseParallelGC");
+            test("-XX:+UseSerialGC");
+            test("-XX:+UseConcMarkSweepGC");
+        } catch (Exception e) {
+            throw new Error("Test failed with " + e);
+        }
+    }
+}
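
The per-collector assertions in testClhsdbForUniverse key off substrings of the GC flag passed to the LingeredApp. The same expectations could be written as a lookup table instead of an if-chain; a sketch, with the class and field names purely illustrative:

    import java.util.Map;

    class UniverseExpectations {
        // Expected fragments of 'universe' output per GC flag, as checked by
        // TestUniverse above (illustrative table only).
        static final Map<String, String[]> EXPECTED = Map.of(
            "-XX:+UseG1GC",            new String[] {"garbage-first heap"},
            "-XX:+UseConcMarkSweepGC", new String[] {"Gen 1: concurrent mark-sweep generation"},
            "-XX:+UseSerialGC",        new String[] {"Gen 1:   old"},
            "-XX:+UseParallelGC",      new String[] {"ParallelScavengeHeap", "PSYoungGen", "eden"});
    }
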
--- a/test/jdk/javax/management/MBeanInfo/MBeanInfoHashCodeNPETest.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/jdk/javax/management/MBeanInfo/MBeanInfoHashCodeNPETest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,10 +97,6 @@
         test(mbeanOperationInfo, "type");
 
         mbeanOperationInfo = new MBeanOperationInfo(
-                "name", "description", new MBeanParameterInfo[]{}, "type", -1, new DescriptorSupport());
-        test(mbeanOperationInfo, "native impact");
-
-        mbeanOperationInfo = new MBeanOperationInfo(
                 "name", "description", new MBeanParameterInfo[]{}, "type", 1, null);
         test(mbeanOperationInfo, "Descriptor");
 
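
For context, the case deleted above constructed an MBeanOperationInfo with impact -1 ("native impact"); under 8024352 (see the new MBeanOperationInfoImpactRangeTest below) an out-of-range impact is now expected to be rejected at construction time, which is why the case is removed rather than updated. A stand-alone sketch of that behaviour, with a hypothetical class name:

    import javax.management.MBeanOperationInfo;
    import javax.management.MBeanParameterInfo;

    public class ImpactMinusOneSketch {
        public static void main(String[] args) {
            try {
                // Same shape as the removed call, but with a null descriptor.
                new MBeanOperationInfo("name", "description",
                        new MBeanParameterInfo[]{}, "type", -1, null);
                throw new RuntimeException("expected IllegalArgumentException for impact -1");
            } catch (IllegalArgumentException expected) {
                System.out.println("impact -1 rejected as expected");
            }
        }
    }
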
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/javax/management/mxbean/MBeanOperationInfoImpactRangeTest.java	Fri Nov 17 02:50:51 2017 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8024352
+ * @modules java.management
+ * @run main MBeanOperationInfoImpactRangeTest
+ * @summary Check that MBeanOperationInfo throws an IllegalArgumentException when the impact
+ * value is not among INFO, ACTION, ACTION_INFO, UNKNOWN
+ */
+import javax.management.MBeanOperationInfo;
+
+public class MBeanOperationInfoImpactRangeTest {
+
+    private void checkInRange(int impact) {
+        int impactValue;
+
+        System.out.println("checking that no exception is thrown when a "
+                + "value in range is passed, impact value is :" + impact );
+        MBeanOperationInfo mbi = new MBeanOperationInfo("IRC", "impact Range"
+                + " check", null, null, impact);
+        impactValue = mbi.getImpact();
+        if(impactValue != impact)
+            throw new RuntimeException("unexpected impact value :" + impactValue);
+        System.out.println("given value is :" + impactValue);
+        System.out.println("Success no exception thrown");
+        System.out.println(mbi.toString());
+
+    }
+
+    private void checkOutOfRange(int impact) {
+        int impactValue;
+
+        try {
+            System.out.println("checking that exception is thrown when a value"
+                    + " out of range is passed, impact value is :" + impact);
+            MBeanOperationInfo mbi = new MBeanOperationInfo("IRC", "impact Range"
+                    + " check", null, null, impact);
+            impactValue = mbi.getImpact();
+            System.out.println("IllegalArgumentException not thrown"
+                    + " when a value out of range is passed ,"
+                    + " given value is :" + impactValue);
+            throw new RuntimeException("Test failed !!");
+            // throwing RuntimeException for notifying the unusual behaviour
+        } catch (IllegalArgumentException e) {
+            System.out.println("IllegalArgumentException thrown as expected, "
+                    + "illegal value given as impact :" + impact);
+            System.out.println("success");
+        }
+
+    }
+
+    public static void main(String[] args) {
+
+        // The valid impact values are INFO=0, ACTION=1, ACTION_INFO=2, UNKNOWN=3.
+        /* MBeanOperationInfo should throw an IllegalArgumentException when the
+           impact value is given out of range. */
+        MBeanOperationInfoImpactRangeTest impactRangeTest = new MBeanOperationInfoImpactRangeTest();
+
+        impactRangeTest.checkInRange(MBeanOperationInfo.INFO);
+        impactRangeTest.checkInRange(MBeanOperationInfo.ACTION);
+        impactRangeTest.checkInRange(MBeanOperationInfo.ACTION_INFO);
+        impactRangeTest.checkInRange(MBeanOperationInfo.UNKNOWN);
+        impactRangeTest.checkOutOfRange(-1);
+        impactRangeTest.checkOutOfRange(4);
+
+        System.out.println("Test Passed");
+
+
+    }
+}
\ No newline at end of file
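
The valid impact codes the test exercises are the four MBeanOperationInfo constants (INFO=0, ACTION=1, ACTION_INFO=2, UNKNOWN=3). A small sketch of mapping a code back to its symbolic name, handy when reading the test's output; the helper class is hypothetical and not part of this changeset:

    import javax.management.MBeanOperationInfo;

    class ImpactNames {
        // Map an impact code to its symbolic name; values the constructor
        // rejects (anything outside 0..3) are reported as "(invalid)".
        static String name(int impact) {
            switch (impact) {
                case MBeanOperationInfo.INFO:        return "INFO";
                case MBeanOperationInfo.ACTION:      return "ACTION";
                case MBeanOperationInfo.ACTION_INFO: return "ACTION_INFO";
                case MBeanOperationInfo.UNKNOWN:     return "UNKNOWN";
                default:                             return "(invalid)";
            }
        }
    }
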
--- a/test/lib/sun/hotspot/WhiteBox.java	Thu Nov 16 11:07:44 2017 -0800
+++ b/test/lib/sun/hotspot/WhiteBox.java	Fri Nov 17 02:50:51 2017 +0100
@@ -531,6 +531,14 @@
   public native int addCompilerDirective(String compDirect);
   public native void removeCompilerDirective(int count);
 
+  // Handshakes
+  public native int handshakeWalkStack(Thread t, boolean all_threads);
+
   // Returns true on linux if library has the noexecstack flag set.
   public native boolean checkLibSpecifiesNoexecstack(String libfilename);
+
+  // Container testing
+  public native boolean isContainerized();
+  public native void printOsInfo();
+
 }
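
The new handshakeWalkStack entry point exposes the thread-local handshake machinery to jtreg tests. A minimal sketch of calling it through WhiteBox, assuming the usual WhiteBox bootstrap flags (-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI); the class name and the interpretation of the returned count are illustrative only, and this is not the handshake test added elsewhere in this changeset:

    import sun.hotspot.WhiteBox;

    public class HandshakeWalkSketch {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();

            // Handshake only the current thread, then all JavaThreads; the
            // returned count's exact meaning is defined by the native hook.
            int one = wb.handshakeWalkStack(Thread.currentThread(), false);
            System.out.println("handshakeWalkStack(current, false) -> " + one);

            int all = wb.handshakeWalkStack(Thread.currentThread(), true);
            System.out.println("handshakeWalkStack(current, true) -> " + all);
        }
    }
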