Merge
author lana
Thu, 25 Feb 2016 11:27:59 -0800
changeset 36107 41753e6731a1
parent 36055 3f4df1866c42 (current diff)
parent 36106 f32f91cbe89d (diff)
child 36108 960f586e00a5
Merge
--- a/hotspot/.hgignore	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/.hgignore	Thu Feb 25 11:27:59 2016 -0800
@@ -10,7 +10,6 @@
 .igv.log
 ^.hgtip
 .DS_Store
-\.class$
 ^\.mx.jvmci/env
 ^\.mx.jvmci/.*\.pyc
 ^\.mx.jvmci/eclipse-launches/.*
--- a/hotspot/make/bsd/makefiles/arm.make	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/make/bsd/makefiles/arm.make	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,4 @@
 
 Obj_Files += bsd_arm.o
 
-ifneq ($(EXT_LIBS_PATH),)
-  LIBS += $(EXT_LIBS_PATH)/sflt_glibc.a 
-endif
-
 CFLAGS += -DVM_LITTLE_ENDIAN
--- a/hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/aarch64/vm/c2_globals_aarch64.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -54,6 +54,7 @@
 define_pd_global(intx, InteriorEntryAlignment,       16);
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
 define_pd_global(intx, LoopUnrollLimit,              60);
+define_pd_global(intx, LoopPercentProfileLimit,      10);
 // InitialCodeCacheSize derived from specjbb2000 run.
 define_pd_global(intx, InitialCodeCacheSize,         2496*K); // Integral multiple of CodeCacheExpansionSize
 define_pd_global(intx, CodeCacheExpansionSize,       64*K);
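The new LoopPercentProfileLimit flag is a platform-dependent C2 tunable (10 here and on PPC/SPARC, 30 on x86; see the c2_globals_x86.hpp hunk below). As a rough sketch of how a percent-style threshold like this is consulted, with hypothetical names rather than the actual C2 code:

    // Sketch only: a percent threshold gating whether profile data is
    // trusted. LoopPercentProfileLimit and both counters are stand-ins.
    #include <cstdint>

    const int64_t LoopPercentProfileLimit = 10;  // platform default, e.g. AArch64

    bool profile_reliable(int64_t loop_profile_count, int64_t invocation_count) {
      if (invocation_count == 0) return false;
      // "at least LoopPercentProfileLimit percent", without floating point
      return loop_profile_count * 100 >= LoopPercentProfileLimit * invocation_count;
    }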
--- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,7 +32,6 @@
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
 
-define_pd_global(bool, ConvertSleepToYield,      true);
 define_pd_global(bool, ShareVtableStubs,         true);
 define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
--- a/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/aarch64/vm/jvmciCodeInstaller_aarch64.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,39 +30,151 @@
 #include "vmreg_aarch64.inline.hpp"
 
 jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS) {
-  Unimplemented();
-  return 0;
+  if (inst->is_call() || inst->is_jump() || inst->is_blr()) {
+    return pc_offset + NativeCall::instruction_size;
+  } else if (inst->is_general_jump()) {
+    return pc_offset + NativeGeneralJump::instruction_size;
+  } else {
+    JVMCI_ERROR_0("unsupported type of instruction for call site");
+  }
 }
 
 void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS) {
-  Unimplemented();
+  address pc = _instructions->start() + pc_offset;
+  Handle obj = HotSpotObjectConstantImpl::object(constant);
+  jobject value = JNIHandles::make_local(obj());
+  if (HotSpotObjectConstantImpl::compressed(constant)) {
+    int oop_index = _oop_recorder->find_index(value);
+    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+    _instructions->relocate(pc, rspec, 1);
+    Unimplemented();
+  } else {
+    NativeMovConstReg* move = nativeMovConstReg_at(pc);
+    move->set_data((intptr_t) value);
+    int oop_index = _oop_recorder->find_index(value);
+    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+    _instructions->relocate(pc, rspec);
+  }
 }
 
 void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
-  Unimplemented();
+  address pc = _instructions->start() + pc_offset;
+  if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
+    narrowKlass narrowOop = record_narrow_metadata_reference(constant, CHECK);
+    TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
+    Unimplemented();
+  } else {
+    NativeMovConstReg* move = nativeMovConstReg_at(pc);
+    Metadata* reference = record_metadata_reference(constant, CHECK);
+    move->set_data((intptr_t) reference);
+    TRACE_jvmci_3("relocating (metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(reference));
+  }
 }
 
-void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
-  Unimplemented();
+void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
+  address pc = _instructions->start() + pc_offset;
+  NativeInstruction* inst = nativeInstruction_at(pc);
+  if (inst->is_adr_aligned()) {
+    address dest = _constants->start() + data_offset;
+    _instructions->relocate(pc, section_word_Relocation::spec((address) dest, CodeBuffer::SECT_CONSTS));
+    TRACE_jvmci_3("relocating at " PTR_FORMAT " (+%d) with destination at %d", p2i(pc), pc_offset, data_offset);
+  } else {
+    JVMCI_ERROR("unknown load or move instruction at " PTR_FORMAT, p2i(pc));
+  }
 }
 
 void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
-  Unimplemented();
+  address pc = (address) inst;
+  if (inst->is_call()) {
+    NativeCall* call = nativeCall_at(pc);
+    call->set_destination((address) foreign_call_destination);
+    _instructions->relocate(call->instruction_address(), runtime_call_Relocation::spec());
+  } else if (inst->is_jump()) {
+    NativeJump* jump = nativeJump_at(pc);
+    jump->set_jump_destination((address) foreign_call_destination);
+    _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
+  } else if (inst->is_general_jump()) {
+    NativeGeneralJump* jump = nativeGeneralJump_at(pc);
+    jump->set_jump_destination((address) foreign_call_destination);
+    _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
+  } else {
+    JVMCI_ERROR("unknown call or jump instruction at " PTR_FORMAT, p2i(pc));
+  }
+  TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
 }
 
 void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
-  Unimplemented();
+#ifdef ASSERT
+  Method* method = NULL;
+  // we need to check, this might also be an unresolved method
+  if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
+    method = getMethodFromHotSpotMethod(hotspot_method());
+  }
+#endif
+  switch (_next_call_type) {
+    case INLINE_INVOKE:
+      break;
+    case INVOKEVIRTUAL:
+    case INVOKEINTERFACE: {
+      assert(method == NULL || !method->is_static(), "cannot call static method with invokeinterface");
+      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
+      call->set_destination(SharedRuntime::get_resolve_virtual_call_stub());
+      _instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc));
+      break;
+    }
+    case INVOKESTATIC: {
+      assert(method == NULL || method->is_static(), "cannot call non-static method with invokestatic");
+      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
+      call->set_destination(SharedRuntime::get_resolve_static_call_stub());
+      _instructions->relocate(call->instruction_address(), relocInfo::static_call_type);
+      break;
+    }
+    case INVOKESPECIAL: {
+      assert(method == NULL || !method->is_static(), "cannot call static method with invokespecial");
+      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
+      call->set_destination(SharedRuntime::get_resolve_opt_virtual_call_stub());
+      _instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type);
+      break;
+    }
+    default:
+      JVMCI_ERROR("invalid _next_call_type value");
+      break;
+  }
 }
 
 void CodeInstaller::pd_relocate_poll(address pc, jint mark, TRAPS) {
-  Unimplemented();
+  switch (mark) {
+    case POLL_NEAR:
+      JVMCI_ERROR("unimplemented");
+      break;
+    case POLL_FAR:
+      _instructions->relocate(pc, relocInfo::poll_type);
+      break;
+    case POLL_RETURN_NEAR:
+      JVMCI_ERROR("unimplemented");
+      break;
+    case POLL_RETURN_FAR:
+      _instructions->relocate(pc, relocInfo::poll_return_type);
+      break;
+    default:
+      JVMCI_ERROR("invalid mark value");
+      break;
+  }
 }
 
 // convert JVMCI register indices (as used in oop maps) to HotSpot registers
 VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
-  return NULL;
+  if (jvmci_reg < RegisterImpl::number_of_registers) {
+    return as_Register(jvmci_reg)->as_VMReg();
+  } else {
+    jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
+    if (floatRegisterNumber < FloatRegisterImpl::number_of_registers) {
+      return as_FloatRegister(floatRegisterNumber)->as_VMReg();
+    }
+    JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
+  }
 }
 
 bool CodeInstaller::is_general_purpose_reg(VMReg hotspotRegister) {
-  return false;
+  return !hotspotRegister->is_FloatRegister();
 }
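The get_hotspot_reg implementation above fixes the JVMCI register numbering convention: indices below RegisterImpl::number_of_registers name general-purpose registers, and the remainder name float registers, in order. A standalone sketch of the same mapping, assuming 32 registers of each kind (the AArch64 counts) and a stand-in Reg type in place of VMReg:

    enum class RegKind { GPR, FPR, INVALID };
    struct Reg { RegKind kind; int index; };

    const int n_gpr = 32;  // stands in for RegisterImpl::number_of_registers
    const int n_fpr = 32;  // stands in for FloatRegisterImpl::number_of_registers

    // JVMCI indices [0, n_gpr) are general registers; [n_gpr, n_gpr + n_fpr)
    // are float registers; anything else is rejected (JVMCI_ERROR_NULL above).
    Reg map_jvmci_reg(int jvmci_reg) {
      if (jvmci_reg < n_gpr) {
        return { RegKind::GPR, jvmci_reg };
      }
      int fpr = jvmci_reg - n_gpr;
      if (fpr < n_fpr) {
        return { RegKind::FPR, fpr };
      }
      return { RegKind::INVALID, -1 };
    }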
--- a/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -136,7 +136,7 @@
     MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
     ICache::invalidate_range(instruction_address(), instruction_size);
   }
-};
+}
 
 void NativeMovConstReg::print() {
   tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
@@ -208,6 +208,32 @@
 
 //-------------------------------------------------------------------
 
+address NativeGeneralJump::jump_destination() const {
+  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
+  address dest = (address) move->data();
+
+  // We use jump to self as the unresolved address which the inline
+  // cache code (and relocs) know about
+
+  // return -1 if jump to self
+  dest = (dest == (address) this) ? (address) -1 : dest;
+  return dest;
+}
+
+void NativeGeneralJump::set_jump_destination(address dest) {
+  NativeMovConstReg* move = nativeMovConstReg_at(instruction_address());
+
+  // We use jump to self as the unresolved address which the inline
+  // cache code (and relocs) know about
+  if (dest == (address) -1) {
+    dest = instruction_address();
+  }
+
+  move->set_data((uintptr_t) dest);
+}
+
+//-------------------------------------------------------------------
+
 bool NativeInstruction::is_safepoint_poll() {
   // a safepoint_poll is implemented in two steps as either
   //
@@ -249,6 +275,22 @@
           Instruction_aarch64::extract(insn, 4, 0) == 0b11111);
 }
 
+bool NativeInstruction::is_general_jump() {
+  if (is_movz()) {
+    NativeInstruction* inst1 = nativeInstruction_at(addr_at(instruction_size * 1));
+    if (inst1->is_movk()) {
+      NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
+      if (inst2->is_movk()) {
+        NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
+        if (inst3->is_blr()) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
 bool NativeInstruction::is_movz() {
   return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b10100101;
 }
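is_general_jump recognizes the four-instruction materialize-and-branch sequence movz, movk, movk, blr by decoding fixed opcode fields. A self-contained sketch over a raw word stream; the movz field value is the one checked in is_movz above, the blr mask is the one in nativeInst_aarch64.hpp, and the movk field value (0b11100101) is assumed by analogy with movz:

    #include <cstdint>

    static uint32_t bits(uint32_t insn, int hi, int lo) {
      return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    static bool is_movz(uint32_t insn) { return bits(insn, 30, 23) == 0b10100101; }
    static bool is_movk(uint32_t insn) { return bits(insn, 30, 23) == 0b11100101; }
    static bool is_blr(uint32_t insn)  { return (insn & 0xfffffc1f) == 0xd63f0000; }

    // A "general jump": build the target address with movz+movk+movk, then blr.
    bool is_general_jump(const uint32_t insn[4]) {
      return is_movz(insn[0]) && is_movk(insn[1]) &&
             is_movk(insn[2]) && is_blr(insn[3]);
    }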
--- a/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -54,11 +54,22 @@
   friend class Relocation;
   friend bool is_NativeCallTrampolineStub_at(address);
  public:
-  enum { instruction_size = 4 };
+  enum {
+    instruction_size = 4
+  };
+
+  juint encoding() const {
+    return uint_at(0);
+  }
+
+  bool is_blr()                      const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
+  bool is_adr_aligned()              const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
+
   inline bool is_nop();
   inline bool is_illegal();
   inline bool is_return();
   bool is_jump();
+  bool is_general_jump();
   inline bool is_jump_or_nop();
   inline bool is_cond_jump();
   bool is_safepoint_poll();
@@ -341,11 +352,15 @@
 // An interface for accessing/manipulating native leal instruction of form:
 //        leal reg, [reg + offset]
 
-class NativeLoadAddress: public NativeMovRegMem {
-  static const bool has_rex = true;
-  static const int rex_size = 1;
+class NativeLoadAddress: public NativeInstruction {
+  enum AArch64_specific_constants {
+    instruction_size            =    4,
+    instruction_offset          =    0,
+    data_offset                 =    0,
+    next_instruction_offset     =    4
+  };
+
  public:
-
   void verify();
   void print ();
 
@@ -398,6 +413,10 @@
     data_offset                 =    0,
     next_instruction_offset     =    4 * 4
   };
+
+  address jump_destination() const;
+  void set_jump_destination(address dest);
+
   static void insert_unconditional(address code_pos, address entry);
   static void replace_mt_safe(address instr_addr, address code_buffer);
   static void verify();
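The is_adr_aligned mask deserves a note: in the A64 encoding of ADR, bit 31 is the op bit (0 for ADR, 1 for ADRP), bits 30:29 are immlo (the low two bits of the PC-relative offset), and bits 28:24 are the fixed pattern 0b10000. Requiring the whole top byte to equal 0x10 therefore pins op to 0 and immlo to 00, i.e. an ADR whose target offset is a multiple of 4, which is what the "label is aligned to 4 bytes" comment means. Spelled out field by field:

    #include <cstdint>

    // Equivalent to (insn & 0xff000000) == 0x10000000, decomposed per field.
    bool is_adr_to_aligned_label(uint32_t insn) {
      bool is_adr  = (insn >> 31) == 0;                 // op == 0: ADR, not ADRP
      bool immlo_0 = ((insn >> 29) & 0x3) == 0;         // low two offset bits zero
      bool fixed   = ((insn >> 24) & 0x1f) == 0b10000;  // ADR/ADRP fixed field
      return is_adr && immlo_0 && fixed;
    }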
--- a/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/ppc/vm/c2_globals_ppc.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -54,6 +54,7 @@
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
 define_pd_global(intx, LoopUnrollLimit,              60);
+define_pd_global(intx, LoopPercentProfileLimit,      10);
 
 // Peephole and CISC spilling both break the graph, and so make the
 // scheduler sick.
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,7 +32,6 @@
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
 
-define_pd_global(bool, ConvertSleepToYield,   true);
 define_pd_global(bool, ShareVtableStubs,      false); // Improves performance markedly for mtrt and compress.
 define_pd_global(bool, NeedsDeoptSuspend,     false); // Only register window machines need this.
 
--- a/hotspot/src/cpu/ppc/vm/jvmciCodeInstaller_ppc.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/ppc/vm/jvmciCodeInstaller_ppc.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
   Unimplemented();
 }
 
-void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
+void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
   Unimplemented();
 }
 
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -52,6 +52,7 @@
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
 define_pd_global(intx, LoopUnrollLimit,              60); // Design center runs on 1.3.1
+define_pd_global(intx, LoopPercentProfileLimit,      10);
 define_pd_global(intx, MinJumpTableSize,             5);
 
 // Peephole and CISC spilling both break the graph, and so makes the
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@
 // the load of the dispatch address and hence the jmp would still go to the location
 // according to the prior table. So, we let the thread continue and let it block by itself.
 define_pd_global(bool, DontYieldALot,               true);  // yield no more than 100 times per second
-define_pd_global(bool, ConvertSleepToYield,         false); // do not convert sleep(0) to yield. Helps GUI
 define_pd_global(bool, ShareVtableStubs,            false); // improves performance markedly for mtrt and compress
 define_pd_global(bool, NeedsDeoptSuspend,           true); // register window machines need this
 
--- a/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@
   }
 }
 
-void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
+void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
   address pc = _instructions->start() + pc_offset;
   NativeInstruction* inst = nativeInstruction_at(pc);
   NativeInstruction* inst1 = nativeInstruction_at(pc + 4);
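Across the platform files, pd_patch_DataSectionReference gains a TRAPS parameter so the installer can report bad input through the JVMCI error macros rather than aborting in Unimplemented(). TRAPS is the usual HotSpot idiom (utilities/exceptions.hpp): it appends a Thread* THREAD parameter through which a pending exception is posted and later checked by the caller. A simplified sketch of the shape of that protocol, with plain C++ stand-ins for the real macros:

    // Stand-ins: "THREAD->pending_exception" plays the role of the posted
    // exception; the explicit check after the call plays the role of CHECK.
    struct Thread { const char* pending_exception = nullptr; };

    void pd_patch(int pc_offset, int data_offset, Thread* THREAD) {
      if (data_offset < 0) {
        THREAD->pending_exception = "unknown data section reference";  // ~JVMCI_ERROR
        return;
      }
      // ... perform the relocation ...
    }

    void install(Thread* THREAD) {
      pd_patch(0, -1, THREAD);
      if (THREAD->pending_exception != nullptr) return;  // ~CHECK
    }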
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2015,23 +2015,33 @@
   int vep_offset = ((intptr_t)__ pc()) - start;
 
 #ifdef COMPILER1
-  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
-    // Object.hashCode can pull the hashCode from the header word
-    // instead of doing a full VM transition once it's been computed.
-    // Since hashCode is usually polymorphic at call sites we can't do
-    // this optimization at the call site without a lot of work.
+  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
+    // Object.hashCode, System.identityHashCode can pull the hashCode from the
+    // header word instead of doing a full VM transition once it's been computed.
+    // Since hashCode is usually polymorphic at call sites we can't do this
+    // optimization at the call site without a lot of work.
     Label slowCase;
-    Register receiver             = O0;
+    Label done;
+    Register obj_reg              = O0;
     Register result               = O0;
     Register header               = G3_scratch;
     Register hash                 = G3_scratch; // overwrite header value with hash value
     Register mask                 = G1;         // to get hash field from header
 
+    // Unlike Object.hashCode, System.identityHashCode is a static method and
+    // gets the object as an argument instead of as the receiver.
+    if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
+      assert(method->is_static(), "method should be static");
+      // return 0 for null reference input
+      __ br_null(obj_reg, false, Assembler::pn, done);
+      __ delayed()->mov(obj_reg, hash);
+    }
+
     // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
     // We depend on hash_mask being at most 32 bits and avoid the use of
     // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
     // vm: see markOop.hpp.
-    __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
+    __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
     __ sethi(markOopDesc::hash_mask, mask);
     __ btst(markOopDesc::unlocked_value, header);
     __ br(Assembler::zero, false, Assembler::pn, slowCase);
@@ -2054,6 +2064,7 @@
     __ delayed()->nop();
 
     // leaf return.
+    __ bind(done);
     __ retl();
     __ delayed()->mov(hash, result);
     __ bind(slowCase);
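Two details of the SPARC change are easy to miss. First, System.identityHashCode is static, so the object arrives in O0 as an ordinary argument rather than as the receiver; hence the rename from receiver to obj_reg. Second, the br_null/delayed() pair exploits the SPARC branch delay slot: the mov in the slot executes before the branch takes effect, so on the null path the all-zero obj_reg value lands in hash and the code at done returns 0. A plain C++ sketch of the fast path being implemented, with illustrative stand-ins for the markOopDesc constants:

    #include <cstdint>

    const uintptr_t unlocked_value = 0x1;         // stand-in for markOopDesc::unlocked_value
    const int       hash_shift     = 8;           // stand-in for markOopDesc::hash_shift
    const uintptr_t hash_mask      = 0x7fffffff;  // stand-in for markOopDesc::hash_mask

    // Returns true with *hash set on the fast path; false means "slowCase",
    // i.e. fall back to the full VM transition.
    bool identity_hash_fast_path(const uintptr_t* obj, uintptr_t* hash) {
      if (obj == nullptr) { *hash = 0; return true; }   // br_null + delay-slot mov
      uintptr_t header = obj[0];                        // ld_ptr of the mark word
      if ((header & unlocked_value) == 0) return false; // locked: take the slow path
      uintptr_t h = (header >> hash_shift) & hash_mask;
      if (h == 0) return false;                         // hash not computed yet
      *hash = h;
      return true;
    }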
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2361,7 +2361,7 @@
 void Assembler::movdqu(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2398,7 +2398,7 @@
 void Assembler::vmovdqu(XMMRegister dst, Address src) {
   assert(UseAVX > 0, "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2486,7 +2486,7 @@
 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2515,7 +2515,7 @@
 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2640,7 +2640,7 @@
 
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x10);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2649,7 +2649,7 @@
 void Assembler::movsd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x10);
@@ -2668,7 +2668,7 @@
 
 void Assembler::movss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x10);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2677,7 +2677,7 @@
 void Assembler::movss(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x10);
@@ -2782,7 +2782,7 @@
 void Assembler::mulsd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -2791,7 +2791,7 @@
 
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2800,7 +2800,7 @@
 void Assembler::mulss(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -2809,7 +2809,7 @@
 
 void Assembler::mulss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3993,7 +3993,7 @@
 
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4002,7 +4002,7 @@
 void Assembler::sqrtsd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
@@ -4011,7 +4011,7 @@
 
 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4024,7 +4024,7 @@
 void Assembler::sqrtss(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
@@ -4078,7 +4078,7 @@
 
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4087,7 +4087,7 @@
 void Assembler::subsd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -4096,7 +4096,7 @@
 
 void Assembler::subss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4105,7 +4105,7 @@
 void Assembler::subss(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -4293,7 +4293,7 @@
 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4303,7 +4303,7 @@
 
 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
@@ -4313,7 +4313,7 @@
 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -4323,7 +4323,7 @@
 
 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
@@ -4333,7 +4333,7 @@
 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4343,7 +4343,7 @@
 
 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
@@ -4353,7 +4353,7 @@
 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -4363,7 +4363,7 @@
 
 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
@@ -4373,7 +4373,7 @@
 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4383,7 +4383,7 @@
 
 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -4393,7 +4393,7 @@
 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -4403,7 +4403,7 @@
 
 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -4413,7 +4413,7 @@
 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
@@ -4423,7 +4423,7 @@
 
 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -4433,7 +4433,7 @@
 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   vex_prefix(src, nds_enc, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -4443,7 +4443,7 @@
 
 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ false);
   int nds_enc = nds->is_valid() ? nds->encoding() : 0;
   int encode = vex_prefix_and_encode(dst->encoding(), nds_enc, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -5901,7 +5901,7 @@
 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL
 void Assembler::evpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x78);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5911,7 +5911,7 @@
   assert(VM_Version::supports_evex(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
   // swap src<->dst for encoding
   vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -5922,7 +5922,7 @@
 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL
 void Assembler::evpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x79);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5932,7 +5932,7 @@
   assert(VM_Version::supports_evex(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
   // swap src<->dst for encoding
   vex_prefix(src, dst->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -6027,7 +6027,7 @@
 // duplicate 1-byte integer data from src into 16||32|64 locations in dest : requires AVX512BW and AVX512VL
 void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x7A);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6036,7 +6036,7 @@
 // duplicate 2-byte integer data from src into 8|16||32 locations in dest : requires AVX512BW and AVX512VL
 void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x7B);
   emit_int8((unsigned char)(0xC0 | encode));
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2044,11 +2044,11 @@
 class InstructionAttr {
 public:
   InstructionAttr(
-    int vector_len,
-    bool rex_vex_w,
-    bool legacy_mode,
-    bool no_reg_mask,
-    bool uses_vl)
+    int vector_len,     // The vector length to apply in encoding, for both AVX and EVEX
+    bool rex_vex_w,     // Operand width: false for 32-bit or narrower data, true for 64-bit or specially defined widths
+    bool legacy_mode,   // If true, the instruction is encoded as AVX or earlier; if false, an EVEX encoding may be chosen
+    bool no_reg_mask,   // If true, k0 is used when an EVEX encoding is chosen; if false, k1 is used under the same condition
+    bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
     :
       _avx_vector_len(vector_len),
       _rex_vex_w(rex_vex_w),
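With the parameters now documented, the call-site style in assembler_x86.cpp above reads directly against the constructor: each argument carries a comment naming its parameter. A self-contained miniature of the same shape, restating the documented meanings (the AVX_128bit value here is a stand-in for the real enum):

    struct InstructionAttr {
      int  vector_len;    // AVX/EVEX vector length
      bool rex_vex_w;     // true for 64-bit or specially defined operand widths
      bool legacy_mode;   // true: encode as AVX or earlier; false: EVEX possible
      bool no_reg_mask;   // true: k0 when EVEX is chosen; false: k1
      bool uses_vl;       // EVEX legacy constraints may depend on vector length
    };

    int main() {
      const int AVX_128bit = 0;  // stand-in for the real enum value
      InstructionAttr attributes{AVX_128bit,
                                 /* rex_vex_w   */ false,
                                 /* legacy_mode */ false,
                                 /* no_reg_mask */ false,  // k1 under EVEX
                                 /* uses_vl     */ false};
      (void)attributes;
      return 0;
    }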
--- a/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -46,6 +46,7 @@
 define_pd_global(intx, ConditionalMoveLimit,         3);
 define_pd_global(intx, FreqInlineSize,               325);
 define_pd_global(intx, MinJumpTableSize,             10);
+define_pd_global(intx, LoopPercentProfileLimit,      30);
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE,                  13);
 define_pd_global(intx, FLOATPRESSURE,                14);
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -31,7 +31,6 @@
 // Sets the default values for platform dependent flags used by the runtime system.
 // (see globals.hpp)
 
-define_pd_global(bool, ConvertSleepToYield,      true);
 define_pd_global(bool, ShareVtableStubs,         true);
 define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
--- a/hotspot/src/cpu/x86/vm/jvmciCodeInstaller_x86.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/jvmciCodeInstaller_x86.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,7 @@
   }
 }
 
-void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset) {
+void CodeInstaller::pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS) {
   address pc = _instructions->start() + pc_offset;
 
   address operand = Assembler::locate_operand(pc, Assembler::disp32_operand);
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -7840,7 +7840,9 @@
   Label COMPARE_WIDE_VECTORS_LOOP_FAILED;  // used only _LP64 && AVX3
   int stride, stride2, adr_stride, adr_stride1, adr_stride2;
   int stride2x2 = 0x40;
-  Address::ScaleFactor scale, scale1, scale2;
+  Address::ScaleFactor scale = Address::no_scale;
+  Address::ScaleFactor scale1 = Address::no_scale;
+  Address::ScaleFactor scale2 = Address::no_scale;
 
   if (ae != StrIntrinsicNode::LL) {
     stride2x2 = 0x20;
@@ -7894,9 +7896,9 @@
       stride = 8;
     }
   } else {
-    scale = Address::no_scale;  // not used
     scale1 = Address::times_1;
     scale2 = Address::times_2;
+    // scale not used
     stride = 8;
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "vmreg_x86.inline.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif //COMPILER1
+
+#define __ masm->
+
+#ifdef COMPILER1
+// ---------------------------------------------------------------------------
+// Object.hashCode, System.identityHashCode can pull the hashCode from the
+// header word instead of doing a full VM transition once it's been computed.
+// Since hashCode is usually polymorphic at call sites we can't do this
+// optimization at the call site without a lot of work.
+void SharedRuntime::inline_check_hashcode_from_object_header(MacroAssembler* masm,
+                                 methodHandle method,
+                                 Register obj_reg,
+                                 Register result) {
+  Label slowCase;
+
+  // Unlike Object.hashCode, System.identityHashCode is a static method and
+  // gets the object as an argument instead of as the receiver.
+  if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
+    Label Continue;
+    // return 0 for null reference input
+    __ cmpptr(obj_reg, (int32_t)NULL_WORD);
+    __ jcc(Assembler::notEqual, Continue);
+    __ xorptr(result, result);
+    __ ret(0);
+    __ bind(Continue);
+  }
+
+  __ movptr(result, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+
+  // check if locked
+  __ testptr(result, markOopDesc::unlocked_value);
+  __ jcc(Assembler::zero, slowCase);
+
+  if (UseBiasedLocking) {
+    // Check if biased and fall through to runtime if so
+    __ testptr(result, markOopDesc::biased_lock_bit_in_place);
+    __ jcc(Assembler::notZero, slowCase);
+  }
+
+  // get hash
+#ifdef _LP64
+  // Read the header and build a mask to get its hash field.
+  // Depend on hash_mask being at most 32 bits and avoid the use of hash_mask_in_place
+  // because it could be larger than 32 bits in a 64-bit vm. See markOop.hpp.
+  __ shrptr(result, markOopDesc::hash_shift);
+  __ andptr(result, markOopDesc::hash_mask);
+#else
+  __ andptr(result, markOopDesc::hash_mask_in_place);
+#endif //_LP64
+
+  // test if hashCode exists
+  __ jcc(Assembler::zero, slowCase);
+#ifndef _LP64
+  __ shrptr(result, markOopDesc::hash_shift);
+#endif
+  __ ret(0);
+  __ bind(slowCase);
+}
+#endif //COMPILER1
+
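The #ifdef _LP64 split in the new helper mirrors the comment: on 64-bit, hash_mask_in_place (the mask shifted into position) can exceed 32 bits, so the code shifts first and then masks with the narrower hash_mask; on 32-bit it masks in place, tests for zero while the field is still in place, and shifts last. Both orderings extract the same field, as a small sketch with illustrative constants shows:

    #include <cassert>
    #include <cstdint>

    const int      hash_shift = 8;                                // stand-in
    const uint64_t hash_mask  = 0x7fffffffull;                    // fits in 32 bits
    const uint64_t hash_mask_in_place = hash_mask << hash_shift;  // may not

    uint64_t hash_shift_then_mask(uint64_t mark) {  // the _LP64 path
      return (mark >> hash_shift) & hash_mask;
    }

    uint64_t hash_mask_then_shift(uint64_t mark) {  // the 32-bit path
      return (mark & hash_mask_in_place) >> hash_shift;
    }

    int main() {
      uint64_t mark = 0x0000001234567801ull;  // unlocked mark word with a hash
      assert(hash_shift_then_mask(mark) == hash_mask_then_shift(mark));
      return 0;
    }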
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1754,34 +1754,10 @@
   int vep_offset = ((intptr_t)__ pc()) - start;
 
 #ifdef COMPILER1
-  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
-    // Object.hashCode can pull the hashCode from the header word
-    // instead of doing a full VM transition once it's been computed.
-    // Since hashCode is usually polymorphic at call sites we can't do
-    // this optimization at the call site without a lot of work.
-    Label slowCase;
-    Register receiver = rcx;
-    Register result = rax;
-    __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));
-
-    // check if locked
-    __ testptr(result, markOopDesc::unlocked_value);
-    __ jcc (Assembler::zero, slowCase);
-
-    if (UseBiasedLocking) {
-      // Check if biased and fall through to runtime if so
-      __ testptr(result, markOopDesc::biased_lock_bit_in_place);
-      __ jcc (Assembler::notZero, slowCase);
-    }
-
-    // get hash
-    __ andptr(result, markOopDesc::hash_mask_in_place);
-    // test if hashCode exists
-    __ jcc  (Assembler::zero, slowCase);
-    __ shrptr(result, markOopDesc::hash_shift);
-    __ ret(0);
-    __ bind (slowCase);
-  }
+  // For Object.hashCode and System.identityHashCode, try to pull the hashCode from the object header if available.
+  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
+    inline_check_hashcode_from_object_header(masm, method, rcx /*obj_reg*/, rax /*result*/);
+  }
 #endif // COMPILER1
 
   // The instruction at the verified entry point must be 5 bytes or longer
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2058,6 +2058,13 @@
 
   int vep_offset = ((intptr_t)__ pc()) - start;
 
+#ifdef COMPILER1
+  // For Object.hashCode and System.identityHashCode, try to pull the hashCode from the object header if available.
+  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
+    inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
+  }
+#endif // COMPILER1
+
   // The instruction at the verified entry point must be 5 bytes or longer
   // because it can be patched on the fly by make_non_entrant. The stack bang
   // instruction fits that requirement.
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,7 +32,6 @@
 // Set the default values for platform dependent flags used by the
 // runtime system.  See globals.hpp for details of what they do.
 
-define_pd_global(bool,  ConvertSleepToYield,  true);
 define_pd_global(bool,  ShareVtableStubs,     true);
 define_pd_global(bool,  NeedsDeoptSuspend,    false);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/G1HeapRegionType.java	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.shared;
+
+// These definitions should be kept in sync with the definitions in the HotSpot code.
+
+public enum G1HeapRegionType {
+  Free ("Free"),
+  Eden ("Eden"),
+  Survivor ("Survivor"),
+  StartsHumongous ("Starts Humongous"),
+  ContinuesHumongous ("Continues Humongous"),
+  Old ("Old"),
+  Archive ("Archive"),
+  G1HeapRegionTypeEndSentinel ("G1HeapRegionTypeEndSentinel");
+
+  private final String value;
+
+  G1HeapRegionType(String val) {
+    this.value = val;
+  }
+  public String value() {
+    return value;
+  }
+}
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java	Thu Feb 25 11:27:59 2016 -0800
@@ -70,56 +70,64 @@
     public static final Register r28 = new Register(28, 28, "r28", CPU);
     public static final Register r29 = new Register(29, 29, "r29", CPU);
     public static final Register r30 = new Register(30, 30, "r30", CPU);
+
+    /*
+     * r31 is not a general-purpose register, but represents either the stack pointer or the
+     * zero/discard register depending on the instruction. So we represent those two uses as two
+     * different registers. The register numbers are kept in sync with register_aarch64.hpp and have
+     * to be sequential, hence we also need a general r31 register here, which is never used.
+     */
     public static final Register r31 = new Register(31, 31, "r31", CPU);
+    public static final Register zr = new Register(32, 31, "zr", CPU);
+    public static final Register sp = new Register(33, 31, "sp", CPU);
 
     public static final Register lr = r30;
-    public static final Register zr = r31;
-    public static final Register sp = r31;
 
     // @formatter:off
     public static final Register[] cpuRegisters = {
         r0,  r1,  r2,  r3,  r4,  r5,  r6,  r7,
         r8,  r9,  r10, r11, r12, r13, r14, r15,
         r16, r17, r18, r19, r20, r21, r22, r23,
-        r24, r25, r26, r27, r28, r29, r30, r31
+        r24, r25, r26, r27, r28, r29, r30, r31,
+        zr,  sp
     };
     // @formatter:on
 
     public static final RegisterCategory SIMD = new RegisterCategory("SIMD");
 
     // Simd registers
-    public static final Register v0 = new Register(32, 0, "v0", SIMD);
-    public static final Register v1 = new Register(33, 1, "v1", SIMD);
-    public static final Register v2 = new Register(34, 2, "v2", SIMD);
-    public static final Register v3 = new Register(35, 3, "v3", SIMD);
-    public static final Register v4 = new Register(36, 4, "v4", SIMD);
-    public static final Register v5 = new Register(37, 5, "v5", SIMD);
-    public static final Register v6 = new Register(38, 6, "v6", SIMD);
-    public static final Register v7 = new Register(39, 7, "v7", SIMD);
-    public static final Register v8 = new Register(40, 8, "v8", SIMD);
-    public static final Register v9 = new Register(41, 9, "v9", SIMD);
-    public static final Register v10 = new Register(42, 10, "v10", SIMD);
-    public static final Register v11 = new Register(43, 11, "v11", SIMD);
-    public static final Register v12 = new Register(44, 12, "v12", SIMD);
-    public static final Register v13 = new Register(45, 13, "v13", SIMD);
-    public static final Register v14 = new Register(46, 14, "v14", SIMD);
-    public static final Register v15 = new Register(47, 15, "v15", SIMD);
-    public static final Register v16 = new Register(48, 16, "v16", SIMD);
-    public static final Register v17 = new Register(49, 17, "v17", SIMD);
-    public static final Register v18 = new Register(50, 18, "v18", SIMD);
-    public static final Register v19 = new Register(51, 19, "v19", SIMD);
-    public static final Register v20 = new Register(52, 20, "v20", SIMD);
-    public static final Register v21 = new Register(53, 21, "v21", SIMD);
-    public static final Register v22 = new Register(54, 22, "v22", SIMD);
-    public static final Register v23 = new Register(55, 23, "v23", SIMD);
-    public static final Register v24 = new Register(56, 24, "v24", SIMD);
-    public static final Register v25 = new Register(57, 25, "v25", SIMD);
-    public static final Register v26 = new Register(58, 26, "v26", SIMD);
-    public static final Register v27 = new Register(59, 27, "v27", SIMD);
-    public static final Register v28 = new Register(60, 28, "v28", SIMD);
-    public static final Register v29 = new Register(61, 29, "v29", SIMD);
-    public static final Register v30 = new Register(62, 30, "v30", SIMD);
-    public static final Register v31 = new Register(63, 31, "v31", SIMD);
+    public static final Register v0 = new Register(34, 0, "v0", SIMD);
+    public static final Register v1 = new Register(35, 1, "v1", SIMD);
+    public static final Register v2 = new Register(36, 2, "v2", SIMD);
+    public static final Register v3 = new Register(37, 3, "v3", SIMD);
+    public static final Register v4 = new Register(38, 4, "v4", SIMD);
+    public static final Register v5 = new Register(39, 5, "v5", SIMD);
+    public static final Register v6 = new Register(40, 6, "v6", SIMD);
+    public static final Register v7 = new Register(41, 7, "v7", SIMD);
+    public static final Register v8 = new Register(42, 8, "v8", SIMD);
+    public static final Register v9 = new Register(43, 9, "v9", SIMD);
+    public static final Register v10 = new Register(44, 10, "v10", SIMD);
+    public static final Register v11 = new Register(45, 11, "v11", SIMD);
+    public static final Register v12 = new Register(46, 12, "v12", SIMD);
+    public static final Register v13 = new Register(47, 13, "v13", SIMD);
+    public static final Register v14 = new Register(48, 14, "v14", SIMD);
+    public static final Register v15 = new Register(49, 15, "v15", SIMD);
+    public static final Register v16 = new Register(50, 16, "v16", SIMD);
+    public static final Register v17 = new Register(51, 17, "v17", SIMD);
+    public static final Register v18 = new Register(52, 18, "v18", SIMD);
+    public static final Register v19 = new Register(53, 19, "v19", SIMD);
+    public static final Register v20 = new Register(54, 20, "v20", SIMD);
+    public static final Register v21 = new Register(55, 21, "v21", SIMD);
+    public static final Register v22 = new Register(56, 22, "v22", SIMD);
+    public static final Register v23 = new Register(57, 23, "v23", SIMD);
+    public static final Register v24 = new Register(58, 24, "v24", SIMD);
+    public static final Register v25 = new Register(59, 25, "v25", SIMD);
+    public static final Register v26 = new Register(60, 26, "v26", SIMD);
+    public static final Register v27 = new Register(61, 27, "v27", SIMD);
+    public static final Register v28 = new Register(62, 28, "v28", SIMD);
+    public static final Register v29 = new Register(63, 29, "v29", SIMD);
+    public static final Register v30 = new Register(64, 30, "v30", SIMD);
+    public static final Register v31 = new Register(65, 31, "v31", SIMD);
 
     // @formatter:off
     public static final Register[] simdRegisters = {
@@ -136,6 +144,7 @@
         r8,  r9,  r10, r11, r12, r13, r14, r15,
         r16, r17, r18, r19, r20, r21, r22, r23,
         r24, r25, r26, r27, r28, r29, r30, r31,
+        zr,  sp,
 
         v0,  v1,  v2,  v3,  v4,  v5,  v6,  v7,
         v8,  v9,  v10, v11, v12, v13, v14, v15,
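
The comment above is the heart of this change: one hardware encoding (31) now backs three logical registers, so the allocator can tell the stack pointer and the zero register apart while the encoder still emits the same bits. An illustrative model with a placeholder struct rather than the real jdk.vm.ci Register class:

    // Sketch only: Reg is a stand-in for jdk.vm.ci.code.Register.
    struct Reg {
      int         number;    // unique logical id, kept sequential
      int         encoding;  // bits emitted into the instruction
      const char* name;
    };

    const Reg r31 = {31, 31, "r31"};  // filler so numbers stay sequential; never used
    const Reg zr  = {32, 31, "zr"};   // reads as zero, writes are discarded
    const Reg sp  = {33, 31, "sp"};   // stack pointer
    // An encoder emits `encoding` (always 31); the allocator and the calling
    // convention reason about the distinct `number` values instead.
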
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotRegisterConfig.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotRegisterConfig.java	Thu Feb 25 11:27:59 2016 -0800
@@ -31,6 +31,7 @@
 import static jdk.vm.ci.aarch64.AArch64.r28;
 import static jdk.vm.ci.aarch64.AArch64.r29;
 import static jdk.vm.ci.aarch64.AArch64.r3;
+import static jdk.vm.ci.aarch64.AArch64.r31;
 import static jdk.vm.ci.aarch64.AArch64.r4;
 import static jdk.vm.ci.aarch64.AArch64.r5;
 import static jdk.vm.ci.aarch64.AArch64.r6;
@@ -45,11 +46,13 @@
 import static jdk.vm.ci.aarch64.AArch64.v5;
 import static jdk.vm.ci.aarch64.AArch64.v6;
 import static jdk.vm.ci.aarch64.AArch64.v7;
+import static jdk.vm.ci.aarch64.AArch64.zr;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import jdk.vm.ci.aarch64.AArch64;
@@ -130,16 +133,20 @@
     public static final Register threadRegister = r28;
     public static final Register fp = r29;
 
+    private static final Register[] reservedRegisters = {threadRegister, fp, lr, r31, zr, sp};
+
     private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
         Register[] allRegisters = arch.getAvailableValueRegisters();
-        Register[] registers = new Register[allRegisters.length - (reserveForHeapBase ? 5 : 4)];
+        Register[] registers = new Register[allRegisters.length - reservedRegisters.length - (reserveForHeapBase ? 1 : 0)];
+        List<Register> reservedRegistersList = Arrays.asList(reservedRegisters);
 
         int idx = 0;
         for (Register reg : allRegisters) {
-            if (reg.equals(threadRegister) || reg.equals(fp) || reg.equals(lr) || reg.equals(sp)) {
-                // skip thread register, frame pointer, link register and stack pointer
+            if (reservedRegistersList.contains(reg)) {
+                // skip reserved registers
                 continue;
             }
+            assert !(reg.equals(threadRegister) || reg.equals(fp) || reg.equals(lr) || reg.equals(r31) || reg.equals(zr) || reg.equals(sp));
             if (reserveForHeapBase && reg.equals(heapBaseRegister)) {
                 // skip heap base register
                 continue;
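
The refactoring here (repeated below for AMD64 and SPARC) replaces a hand-maintained array size and an equally hand-maintained skip test with a single reservedRegisters list that drives both, so the two can no longer drift apart. A sketch of the pattern, with plain register numbers standing in for Register objects:

    #include <algorithm>
    #include <vector>

    // Derive the allocatable set from one declared reserved list plus an
    // optional heap base register, instead of hard-coding counts.
    std::vector<int> init_allocatable(const std::vector<int>& all,
                                      const std::vector<int>& reserved,
                                      int heap_base, bool reserve_heap_base) {
      std::vector<int> out;
      out.reserve(all.size());
      for (int r : all) {
        if (std::find(reserved.begin(), reserved.end(), r) != reserved.end()) {
          continue;  // skip reserved registers
        }
        if (reserve_heap_base && r == heap_base) {
          continue;  // skip the heap base register
        }
        out.push_back(r);
      }
      return out;
    }
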
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.amd64/src/jdk/vm/ci/hotspot/amd64/AMD64HotSpotRegisterConfig.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.amd64/src/jdk/vm/ci/hotspot/amd64/AMD64HotSpotRegisterConfig.java	Thu Feb 25 11:27:59 2016 -0800
@@ -45,6 +45,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 
 import jdk.vm.ci.code.Architecture;
@@ -119,14 +120,17 @@
      */
     private final boolean needsNativeStackHomeSpace;
 
+    private static final Register[] reservedRegisters = {rsp, r15};
+
     private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
         Register[] allRegisters = arch.getAvailableValueRegisters();
-        Register[] registers = new Register[allRegisters.length - (reserveForHeapBase ? 3 : 2)];
+        Register[] registers = new Register[allRegisters.length - reservedRegisters.length - (reserveForHeapBase ? 1 : 0)];
+        List<Register> reservedRegistersList = Arrays.asList(reservedRegisters);
 
         int idx = 0;
         for (Register reg : allRegisters) {
-            if (reg.equals(rsp) || reg.equals(r15)) {
-                // skip stack pointer and thread register
+            if (reservedRegistersList.contains(reg)) {
+                // skip reserved registers
                 continue;
             }
             if (reserveForHeapBase && reg.equals(r12)) {
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotRegisterConfig.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot.sparc/src/jdk/vm/ci/hotspot/sparc/SPARCHotSpotRegisterConfig.java	Thu Feb 25 11:27:59 2016 -0800
@@ -68,6 +68,7 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 
 import jdk.vm.ci.code.Architecture;
 import jdk.vm.ci.code.CallingConvention;
@@ -140,14 +141,17 @@
                     i0, i1, i2, i3, i4, i5, i6, i7};
     // @formatter:on
 
+    private static final Register[] reservedRegisters = {sp, g0, g2};
+
     private static Register[] initAllocatable(Architecture arch, boolean reserveForHeapBase) {
         Register[] allRegisters = arch.getAvailableValueRegisters();
-        Register[] registers = new Register[allRegisters.length - (reserveForHeapBase ? 4 : 3)];
+        Register[] registers = new Register[allRegisters.length - reservedRegisters.length - (reserveForHeapBase ? 1 : 0)];
+        List<Register> reservedRegistersList = Arrays.asList(reservedRegisters);
 
         int idx = 0;
         for (Register reg : allRegisters) {
-            if (reg.equals(sp) || reg.equals(g2) || reg.equals(g0)) {
-                // skip g0, stack pointer and thread register
+            if (reservedRegistersList.contains(reg)) {
+                // skip reserved registers
                 continue;
             }
             if (reserveForHeapBase && reg.equals(g6)) {
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantReflectionProvider.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantReflectionProvider.java	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 import static jdk.vm.ci.hotspot.HotSpotJVMCIRuntimeProvider.getArrayIndexScale;
 
 import java.lang.reflect.Array;
+import java.util.Objects;
 
 import jdk.vm.ci.common.JVMCIError;
 import jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.Option;
@@ -70,13 +71,13 @@
         } else if (x instanceof HotSpotObjectConstantImpl) {
             return y instanceof HotSpotObjectConstantImpl && ((HotSpotObjectConstantImpl) x).object() == ((HotSpotObjectConstantImpl) y).object();
         } else {
-            return x.equals(y);
+            return Objects.equals(x, y);
         }
     }
 
     @Override
     public Integer readArrayLength(JavaConstant array) {
-        if (array.getJavaKind() != JavaKind.Object || array.isNull()) {
+        if (array == null || array.getJavaKind() != JavaKind.Object || array.isNull()) {
             return null;
         }
 
@@ -133,12 +134,12 @@
 
     @Override
     public JavaConstant readArrayElement(JavaConstant array, int index) {
-        if (array.getJavaKind() != JavaKind.Object || array.isNull()) {
+        if (array == null || array.getJavaKind() != JavaKind.Object || array.isNull()) {
             return null;
         }
         Object a = ((HotSpotObjectConstantImpl) array).object();
 
-        if (index < 0 || index >= Array.getLength(a)) {
+        if (!a.getClass().isArray() || index < 0 || index >= Array.getLength(a)) {
             return null;
         }
 
@@ -184,7 +185,7 @@
 
     @Override
     public JavaConstant boxPrimitive(JavaConstant source) {
-        if (!source.getJavaKind().isPrimitive() || !isBoxCached(source)) {
+        if (source == null || !source.getJavaKind().isPrimitive() || !isBoxCached(source)) {
             return null;
         }
         return HotSpotObjectConstantImpl.forObject(source.asBoxedPrimitive());
@@ -192,7 +193,7 @@
 
     @Override
     public JavaConstant unboxPrimitive(JavaConstant source) {
-        if (!source.getJavaKind().isObject()) {
+        if (source == null || !source.getJavaKind().isObject()) {
             return null;
         }
         if (source.isNull()) {
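
The changes in this provider all harden against null inputs: constantEquals routes through Objects.equals, and the read/box/unbox methods return null early instead of throwing. The null-safe equality it relies on, sketched as a generic helper over pointers (illustrative, not a HotSpot or JVMCI API):

    // Two nulls compare equal, exactly one null compares unequal, and only
    // then is the value comparison consulted; calling x->equals(y) directly
    // would dereference a null x.
    template <typename T>
    bool null_safe_equals(const T* x, const T* y) {
      if (x == y) return true;  // both null, or the same object
      if (x == nullptr || y == nullptr) return false;
      return *x == *y;
    }
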
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethod.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethod.java	Thu Feb 25 11:27:59 2016 -0800
@@ -57,6 +57,13 @@
     boolean isDontInline();
 
     /**
+     * Returns true if this method has a {@code ReservedStackAccess} annotation.
+     *
+     * @return true if the ReservedStackAccess annotation is present, false otherwise
+     */
+    boolean hasReservedStackAccess();
+
+    /**
      * Manually adds a DontInline annotation to this method.
      */
     void setNotInlineable();
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java	Thu Feb 25 11:27:59 2016 -0800
@@ -309,6 +309,15 @@
     }
 
     /**
+     * Returns true if this method has a {@code ReservedStackAccess} annotation.
+     *
+     * @return true if the ReservedStackAccess annotation is present, false otherwise
+     */
+    public boolean hasReservedStackAccess() {
+        return (getFlags() & config().methodFlagsReservedStackAccess) != 0;
+    }
+
+    /**
      * Manually adds a DontInline annotation to this method.
      */
     public void setNotInlineable() {
--- a/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java	Thu Feb 25 11:27:59 2016 -0800
@@ -850,6 +850,7 @@
     @HotSpotVMFlag(name = "DontCompileHugeMethods") @Stable public boolean dontCompileHugeMethods;
     @HotSpotVMFlag(name = "HugeMethodLimit") @Stable public int hugeMethodLimit;
     @HotSpotVMFlag(name = "PrintInlining") @Stable public boolean printInlining;
+    @HotSpotVMFlag(name = "Inline") @Stable public boolean inline;
     @HotSpotVMFlag(name = "JVMCIUseFastLocking") @Stable public boolean useFastLocking;
     @HotSpotVMFlag(name = "ForceUnreachable") @Stable public boolean forceUnreachable;
     @HotSpotVMFlag(name = "CodeCacheSegmentSize") @Stable public int codeSegmentSize;
@@ -974,6 +975,7 @@
     @HotSpotVMFlag(name = "BlockZeroingLowLimit", archs = {"sparc"}) @Stable public int blockZeroingLowLimit;
 
     @HotSpotVMFlag(name = "StackShadowPages") @Stable public int stackShadowPages;
+    @HotSpotVMFlag(name = "StackReservedPages") @Stable public int stackReservedPages;
     @HotSpotVMFlag(name = "UseStackBanging") @Stable public boolean useStackBanging;
     @HotSpotVMConstant(name = "STACK_BIAS") @Stable public int stackBias;
     @HotSpotVMField(name = "CompilerToVM::Data::vm_page_size", type = "int", get = HotSpotVMField.Type.VALUE) @Stable public int vmPageSize;
@@ -1092,6 +1094,7 @@
     @HotSpotVMField(name = "JavaThread::_satb_mark_queue", type = "SATBMarkQueue", get = HotSpotVMField.Type.OFFSET) @Stable public int javaThreadSatbMarkQueueOffset;
     @HotSpotVMField(name = "JavaThread::_vm_result", type = "oop", get = HotSpotVMField.Type.OFFSET) @Stable public int threadObjectResultOffset;
     @HotSpotVMField(name = "JavaThread::_jvmci_counters", type = "jlong*", get = HotSpotVMField.Type.OFFSET) @Stable public int jvmciCountersThreadOffset;
+    @HotSpotVMField(name = "JavaThread::_reserved_stack_activation", type = "address", get = HotSpotVMField.Type.OFFSET) @Stable public int javaThreadReservedStackActivationOffset;
 
     /**
      * An invalid value for {@link #rtldDefault}.
@@ -1235,6 +1238,7 @@
     @HotSpotVMConstant(name = "Method::_force_inline") @Stable public int methodFlagsForceInline;
     @HotSpotVMConstant(name = "Method::_dont_inline") @Stable public int methodFlagsDontInline;
     @HotSpotVMConstant(name = "Method::_hidden") @Stable public int methodFlagsHidden;
+    @HotSpotVMConstant(name = "Method::_reserved_stack_access") @Stable public int methodFlagsReservedStackAccess;
     @HotSpotVMConstant(name = "Method::nonvirtual_vtable_index") @Stable public int nonvirtualVtableIndex;
     @HotSpotVMConstant(name = "Method::invalid_vtable_index") @Stable public int invalidVtableIndex;
 
@@ -1491,6 +1495,8 @@
     @HotSpotVMField(name = "StubRoutines::_updateBytesCRC32", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long updateBytesCRC32Stub;
     @HotSpotVMField(name = "StubRoutines::_crc_table_adr", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long crcTableAddress;
 
+    @HotSpotVMField(name = "StubRoutines::_throw_delayed_StackOverflowError_entry", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long throwDelayedStackOverflowErrorEntry;
+
     @HotSpotVMField(name = "StubRoutines::_jbyte_arraycopy", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long jbyteArraycopy;
     @HotSpotVMField(name = "StubRoutines::_jshort_arraycopy", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long jshortArraycopy;
     @HotSpotVMField(name = "StubRoutines::_jint_arraycopy", type = "address", get = HotSpotVMField.Type.VALUE) @Stable public long jintArraycopy;
@@ -1548,6 +1554,7 @@
     @HotSpotVMAddress(name = "SharedRuntime::register_finalizer") @Stable public long registerFinalizerAddress;
     @HotSpotVMAddress(name = "SharedRuntime::exception_handler_for_return_address") @Stable public long exceptionHandlerForReturnAddressAddress;
     @HotSpotVMAddress(name = "SharedRuntime::OSR_migration_end") @Stable public long osrMigrationEndAddress;
+    @HotSpotVMAddress(name = "SharedRuntime::enable_stack_reserved_zone") @Stable public long enableStackReservedZoneAddress;
 
     @HotSpotVMAddress(name = "os::javaTimeMillis") @Stable public long javaTimeMillisAddress;
     @HotSpotVMAddress(name = "os::javaTimeNanos") @Stable public long javaTimeNanosAddress;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/jdk.vm.ci/share/classes/jdk.vm.ci.services/.checkstyle_checks.xml	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,213 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE module PUBLIC "-//Puppy Crawl//DTD Check Configuration 1.3//EN" "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+
+<!--
+    Checkstyle-Configuration: Checks
+    Description: none
+-->
+<module name="Checker">
+  <property name="severity" value="error"/>
+  <module name="TreeWalker">
+    <property name="tabWidth" value="4"/>
+    <module name="FileContentsHolder"/>
+    <module name="JavadocStyle">
+      <property name="checkHtml" value="false"/>
+    </module>
+    <module name="LocalFinalVariableName"/>
+    <module name="LocalVariableName"/>
+    <module name="MemberName">
+      <property name="format" value="^(([a-z][a-zA-Z0-9]*$)|(_[A-Z][a-zA-Z0-9]*_[a-z][a-zA-Z0-9]*$))"/>
+    </module>
+    <module name="MethodName"/>
+    <module name="PackageName"/>
+    <module name="ParameterName"/>
+    <module name="TypeName">
+      <property name="format" value="^[A-Z][_a-zA-Z0-9]*$"/>
+    </module>
+    <module name="RedundantImport"/>
+    <module name="LineLength">
+      <property name="max" value="250"/>
+    </module>
+    <module name="MethodParamPad"/>
+    <module name="NoWhitespaceAfter">
+      <property name="tokens" value="ARRAY_INIT,BNOT,DEC,DOT,INC,LNOT,UNARY_MINUS,UNARY_PLUS"/>
+    </module>
+    <module name="AvoidStarImport">
+       <property name="allowClassImports" value="false"/>
+       <property name="allowStaticMemberImports" value="false"/>
+    </module>
+    <module name="NoWhitespaceBefore">
+      <property name="tokens" value="SEMI,DOT,POST_DEC,POST_INC"/>
+    </module>
+    <module name="ParenPad"/>
+    <module name="TypecastParenPad">
+      <property name="tokens" value="RPAREN,TYPECAST"/>
+    </module>
+    <module name="WhitespaceAfter"/>
+    <module name="WhitespaceAround">
+      <property name="tokens" value="ASSIGN,BAND,BAND_ASSIGN,BOR,BOR_ASSIGN,BSR,BSR_ASSIGN,BXOR,BXOR_ASSIGN,COLON,DIV,DIV_ASSIGN,EQUAL,GE,GT,LAND,LE,LITERAL_ASSERT,LITERAL_CATCH,LITERAL_DO,LITERAL_ELSE,LITERAL_FINALLY,LITERAL_FOR,LITERAL_IF,LITERAL_RETURN,LITERAL_SYNCHRONIZED,LITERAL_TRY,LITERAL_WHILE,LOR,LT,MINUS,MINUS_ASSIGN,MOD,MOD_ASSIGN,NOT_EQUAL,PLUS,PLUS_ASSIGN,QUESTION,SL,SLIST,SL_ASSIGN,SR,SR_ASSIGN,STAR,STAR_ASSIGN,LITERAL_ASSERT,TYPE_EXTENSION_AND"/>
+    </module>
+    <module name="RedundantModifier"/>
+    <module name="AvoidNestedBlocks">
+      <property name="allowInSwitchCase" value="true"/>
+    </module>
+    <module name="EmptyBlock">
+      <property name="option" value="text"/>
+      <property name="tokens" value="LITERAL_DO,LITERAL_ELSE,LITERAL_FINALLY,LITERAL_IF,LITERAL_TRY,LITERAL_WHILE,STATIC_INIT"/>
+    </module>
+    <module name="LeftCurly"/>
+    <module name="NeedBraces"/>
+    <module name="RightCurly"/>
+    <module name="EmptyStatement"/>
+    <module name="HiddenField">
+      <property name="severity" value="ignore"/>
+      <property name="ignoreConstructorParameter" value="true"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="FinalClass"/>
+    <module name="HideUtilityClassConstructor">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="ArrayTypeStyle"/>
+    <module name="UpperEll"/>
+    <module name="FallThrough"/>
+    <module name="FinalLocalVariable">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="MultipleVariableDeclarations"/>
+    <module name="StringLiteralEquality">
+      <property name="severity" value="error"/>
+    </module>
+    <module name="SuperFinalize"/>
+    <module name="UnnecessaryParentheses">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="Indentation">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="StaticVariableName">
+      <property name="format" value="^[A-Za-z][a-zA-Z0-9]*$"/>
+    </module>
+    <module name="EmptyForInitializerPad"/>
+    <module name="EmptyForIteratorPad"/>
+    <module name="ModifierOrder"/>
+    <module name="DefaultComesLast"/>
+    <module name="InnerAssignment">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="ModifiedControlVariable"/>
+    <module name="MutableException">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="ParameterAssignment">
+      <property name="severity" value="ignore"/>
+      <metadata name="net.sf.eclipsecs.core.lastEnabledSeverity" value="inherit"/>
+    </module>
+    <module name="RegexpSinglelineJava">
+      <metadata name="net.sf.eclipsecs.core.comment" value="Illegal trailing whitespace(s) at the end of the line."/>
+      <property name="format" value="\s$"/>
+      <property name="message" value="Illegal trailing whitespace(s) at the end of the line."/>
+      <property name="ignoreComments" value="true"/>
+      <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Checks for trailing spaces at the end of a line"/>
+    </module>
+    <module name="RegexpSinglelineJava">
+      <metadata name="net.sf.eclipsecs.core.comment" value="illegal space before a comma"/>
+      <property name="format" value=" ,"/>
+      <property name="message" value="illegal space before a comma"/>
+      <property name="ignoreComments" value="true"/>
+      <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Checks for whitespace before a comma."/>
+      <metadata name="com.atlassw.tools.eclipse.checkstyle.customMessage" value="Illegal whitespace before a comma."/>
+    </module>
+    <module name="RegexpSinglelineJava">
+      <property name="format" value="[^\x00-\x7F]"/>
+      <property name="message" value="Only use ASCII characters."/>
+    </module>
+    <module name="RegexpSinglelineJava">
+      <property name="format" value="new (Hashtable|Vector|Stack|StringBuffer)[^\w]"/>
+      <property name="message" value="Don't use old synchronized collection classes"/>
+    </module>
+  </module>
+  <module name="RegexpHeader">
+    <property name="header" value="/\*\n \* Copyright \(c\) (20[0-9][0-9], )?20[0-9][0-9], Oracle and/or its affiliates. All rights reserved.\n \* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.\n \*\n \* This code is free software; you can redistribute it and/or modify it\n \* under the terms of the GNU General Public License version 2 only, as\n \* published by the Free Software Foundation.\n \*\n \* This code is distributed in the hope that it will be useful, but WITHOUT\n \* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n \* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License\n \* version 2 for more details \(a copy is included in the LICENSE file that\n \* accompanied this code\).\n \*\n \* You should have received a copy of the GNU General Public License version\n \* 2 along with this work; if not, write to the Free Software Foundation,\n \* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.\n \*\n \* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA\n \* or visit www.oracle.com if you need additional information or have any\n \* questions.\n \*/\n"/>
+    <property name="fileExtensions" value="java"/>
+  </module>
+  <module name="FileTabCharacter">
+    <property name="severity" value="error"/>
+    <property name="fileExtensions" value="java"/>
+  </module>
+  <module name="NewlineAtEndOfFile">
+    <property name="lineSeparator" value="lf"/>
+  </module>
+  <module name="Translation"/>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="Checkstyle: stop constant name check"/>
+    <property name="onCommentFormat" value="Checkstyle: resume constant name check"/>
+    <property name="checkFormat" value="ConstantNameCheck"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Allow non-conforming constant names"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="Checkstyle: stop method name check"/>
+    <property name="onCommentFormat" value="Checkstyle: resume method name check"/>
+    <property name="checkFormat" value="MethodName"/>
+    <property name="checkC" value="false"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable method name checks"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CheckStyle: stop parameter assignment check"/>
+    <property name="onCommentFormat" value="CheckStyle: resume parameter assignment check"/>
+    <property name="checkFormat" value="ParameterAssignment"/>
+    <property name="checkC" value="false"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable Parameter Assignment"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="Checkstyle: stop final variable check"/>
+    <property name="onCommentFormat" value="Checkstyle: resume final variable check"/>
+    <property name="checkFormat" value="FinalLocalVariable"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable final variable checks"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="Checkstyle: stop"/>
+    <property name="onCommentFormat" value="Checkstyle: resume"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable all checks"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CheckStyle: stop inner assignment check"/>
+    <property name="onCommentFormat" value="CheckStyle: resume inner assignment check"/>
+    <property name="checkFormat" value="InnerAssignment"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable inner assignment checks"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="Checkstyle: stop field name check"/>
+    <property name="onCommentFormat" value="Checkstyle: resume field name check"/>
+    <property name="checkFormat" value="MemberName"/>
+    <property name="checkC" value="false"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable field name checks"/>
+  </module>
+  <module name="RegexpMultiline">
+    <metadata name="net.sf.eclipsecs.core.comment" value="illegal Windows line ending"/>
+    <property name="format" value="\r\n"/>
+    <property name="message" value="illegal Windows line ending"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CheckStyle: stop header check"/>
+    <property name="onCommentFormat" value="CheckStyle: resume header check"/>
+    <property name="checkFormat" value=".*Header"/>
+    <metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Disable header checks"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CheckStyle: stop line length check"/>
+    <property name="onCommentFormat" value="CheckStyle: resume line length check"/>
+    <property name="checkFormat" value="LineLength"/>
+  </module>
+  <module name="SuppressionCommentFilter">
+    <property name="offCommentFormat" value="CheckStyle: start generated"/>
+    <property name="onCommentFormat" value="CheckStyle: stop generated"/>
+    <property name="checkFormat" value=".*Name|.*LineLength|.*Header"/>
+  </module>
+</module>
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4042,61 +4042,6 @@
 //        could have been signaled after a wait started
 //    1 : signaled - thread is running or ready
 //
-// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
-// hang indefinitely.  For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
-// For specifics regarding the bug see GLIBC BUGID 261237 :
-//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
-// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
-// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
-// is used.  (The simple C test-case provided in the GLIBC bug report manifests the
-// hang).  The JVM is vulernable via sleep(), Object.wait(timo), LockSupport.parkNanos()
-// and monitorenter when we're using 1-0 locking.  All those operations may result in
-// calls to pthread_cond_timedwait().  Using LD_ASSUME_KERNEL to use an older version
-// of libpthread avoids the problem, but isn't practical.
-//
-// Possible remedies:
-//
-// 1.   Establish a minimum relative wait time.  50 to 100 msecs seems to work.
-//      This is palliative and probabilistic, however.  If the thread is preempted
-//      between the call to compute_abstime() and pthread_cond_timedwait(), more
-//      than the minimum period may have passed, and the abstime may be stale (in the
-//      past) resultin in a hang.   Using this technique reduces the odds of a hang
-//      but the JVM is still vulnerable, particularly on heavily loaded systems.
-//
-// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
-//      of the usual flag-condvar-mutex idiom.  The write side of the pipe is set
-//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
-//      reduces to poll()+read().  This works well, but consumes 2 FDs per extant
-//      thread.
-//
-// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
-//      that manages timeouts.  We'd emulate pthread_cond_timedwait() by enqueuing
-//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
-//      This also works well.  In fact it avoids kernel-level scalability impediments
-//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
-//      timers in a graceful fashion.
-//
-// 4.   When the abstime value is in the past it appears that control returns
-//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
-//      Subsequent timedwait/wait calls may hang indefinitely.  Given that, we
-//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
-//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
-//      It may be possible to avoid reinitialization by checking the return
-//      value from pthread_cond_timedwait().  In addition to reinitializing the
-//      condvar we must establish the invariant that cond_signal() is only called
-//      within critical sections protected by the adjunct mutex.  This prevents
-//      cond_signal() from "seeing" a condvar that's in the midst of being
-//      reinitialized or that is corrupt.  Sadly, this invariant obviates the
-//      desirable signal-after-unlock optimization that avoids futile context switching.
-//
-//      I'm also concerned that some versions of NTPL might allocate an auxilliary
-//      structure when a condvar is used or initialized.  cond_destroy()  would
-//      release the helper structure.  Our reinitialize-after-timedwait fix
-//      put excessive stress on malloc/free and locks protecting the c-heap.
-//
-// We currently use (4).  See the WorkAroundNTPLTimedWaitHang flag.
-// It may be possible to refine (4) by checking the kernel and NTPL verisons
-// and only enabling the work-around for vulnerable environments.
 
 // utility to compute the abstime argument to timedwait:
 // millis is the relative timeout time
@@ -4208,10 +4153,6 @@
 
   while (_Event < 0) {
     status = pthread_cond_timedwait(_cond, _mutex, &abst);
-    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy(_cond);
-      pthread_cond_init(_cond, NULL);
-    }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIMEDOUT,
                   status, "cond_timedwait");
@@ -4255,10 +4196,6 @@
   assert_status(status == 0, status, "mutex_lock");
   int AnyWaiters = _nParked;
   assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
-  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
-    AnyWaiters = 0;
-    pthread_cond_signal(_cond);
-  }
   status = pthread_mutex_unlock(_mutex);
   assert_status(status == 0, status, "mutex_unlock");
   if (AnyWaiters != 0) {
@@ -4391,7 +4328,7 @@
   if (_counter > 0)  { // no wait needed
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
-    assert(status == 0, "invariant");
+    assert_status(status == 0, status, "invariant");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other and Java-level accesses.
     OrderAccess::fence();
@@ -4414,10 +4351,6 @@
     status = pthread_cond_wait(_cond, _mutex);
   } else {
     status = pthread_cond_timedwait(_cond, _mutex, &absTime);
-    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy(_cond);
-      pthread_cond_init(_cond, NULL);
-    }
   }
   assert_status(status == 0 || status == EINTR ||
                 status == ETIMEDOUT,
@@ -4442,24 +4375,14 @@
 
 void Parker::unpark() {
   int status = pthread_mutex_lock(_mutex);
-  assert(status == 0, "invariant");
+  assert_status(status == 0, status, "invariant");
   const int s = _counter;
   _counter = 1;
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "invariant");
   if (s < 1) {
-    if (WorkAroundNPTLTimedWaitHang) {
-      status = pthread_cond_signal(_cond);
-      assert(status == 0, "invariant");
-      status = pthread_mutex_unlock(_mutex);
-      assert(status == 0, "invariant");
-    } else {
-      status = pthread_mutex_unlock(_mutex);
-      assert(status == 0, "invariant");
-      status = pthread_cond_signal(_cond);
-      assert(status == 0, "invariant");
-    }
-  } else {
-    pthread_mutex_unlock(_mutex);
-    assert(status == 0, "invariant");
+    status = pthread_cond_signal(_cond);
+    assert_status(status == 0, status, "invariant");
   }
 }
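
With the NPTL workaround gone, unpark collapses to a single shape: publish the permit while holding the mutex, unlock, then signal outside the critical section, so a woken thread does not immediately block on a still-held mutex. A standalone sketch under those assumptions, with illustrative names rather than the real Parker fields:

    #include <pthread.h>

    struct ParkerSketch {
      pthread_mutex_t mutex;
      pthread_cond_t  cond;
      int             counter;  // 0 = no permit, 1 = permit available
    };

    void unpark(ParkerSketch* p) {
      pthread_mutex_lock(&p->mutex);
      int s = p->counter;
      p->counter = 1;                   // publish the permit under the lock
      pthread_mutex_unlock(&p->mutex);
      if (s < 1) {                      // a thread may be parked
        pthread_cond_signal(&p->cond);  // signal after unlock
      }
    }
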
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -4771,6 +4771,25 @@
   }
 }
 
+// Older glibc versions don't have this macro (which expands to
+// an optimized bit-counting function), so we have to roll our own.
+#ifndef CPU_COUNT
+
+static int _cpu_count(const cpu_set_t* cpus) {
+  int count = 0;
+  // only look up to the number of configured processors
+  for (int i = 0; i < os::processor_count(); i++) {
+    if (CPU_ISSET(i, cpus)) {
+      count++;
+    }
+  }
+  return count;
+}
+
+#define CPU_COUNT(cpus) _cpu_count(cpus)
+
+#endif // CPU_COUNT
+
 // Get the current number of available processors for this process.
 // This value can change at any time during a process's lifetime.
 // sched_getaffinity gives an accurate answer as it accounts for cpusets.
@@ -4786,6 +4805,9 @@
   int configured_cpus = processor_count();  // upper bound on available cpus
   int cpu_count = 0;
 
+// old build platforms may not support dynamic cpu sets
+#ifdef CPU_ALLOC
+
   // To enable easy testing of the dynamic path on different platforms we
   // introduce a diagnostic flag: UseCpuAllocPath
   if (configured_cpus >= CPU_SETSIZE || UseCpuAllocPath) {
@@ -4814,10 +4836,18 @@
     log_trace(os)("active_processor_count: using static path - configured processors: %d",
                   configured_cpus);
   }
+#else // CPU_ALLOC
+// these stubs won't be executed
+#define CPU_COUNT_S(size, cpus) -1
+#define CPU_FREE(cpus)
+
+  log_trace(os)("active_processor_count: only static path available - configured processors: %d",
+                configured_cpus);
+#endif // CPU_ALLOC
 
   // pid 0 means the current thread - which we have to assume represents the process
   if (sched_getaffinity(0, cpus_size, cpus_p) == 0) {
-    if (cpus_p != &cpus) {
+    if (cpus_p != &cpus) { // can only be true when CPU_ALLOC is used
       cpu_count = CPU_COUNT_S(cpus_size, cpus_p);
     }
     else {
@@ -4831,7 +4861,7 @@
             "which may exceed available processors", strerror(errno), cpu_count);
   }
 
-  if (cpus_p != &cpus) {
+  if (cpus_p != &cpus) { // can only be true when CPU_ALLOC is used
     CPU_FREE(cpus_p);
   }
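
For context, the static path this hunk falls back to is just a fixed-size affinity query. A minimal usage sketch of sched_getaffinity with CPU_COUNT (the glibc macro, or the fallback defined above), outside of any HotSpot plumbing:

    #define _GNU_SOURCE
    #include <sched.h>

    // Count the CPUs the current process may run on; pid 0 means "this process".
    int available_cpus() {
      cpu_set_t cpus;
      CPU_ZERO(&cpus);
      if (sched_getaffinity(0, sizeof(cpus), &cpus) == 0) {
        return CPU_COUNT(&cpus);
      }
      return -1;  // query failed; the caller falls back to the configured count
    }
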
 
@@ -5349,61 +5379,6 @@
 //        could have been signaled after a wait started
 //    1 : signaled - thread is running or ready
 //
-// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
-// hang indefinitely.  For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
-// For specifics regarding the bug see GLIBC BUGID 261237 :
-//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
-// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
-// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
-// is used.  (The simple C test-case provided in the GLIBC bug report manifests the
-// hang).  The JVM is vulernable via sleep(), Object.wait(timo), LockSupport.parkNanos()
-// and monitorenter when we're using 1-0 locking.  All those operations may result in
-// calls to pthread_cond_timedwait().  Using LD_ASSUME_KERNEL to use an older version
-// of libpthread avoids the problem, but isn't practical.
-//
-// Possible remedies:
-//
-// 1.   Establish a minimum relative wait time.  50 to 100 msecs seems to work.
-//      This is palliative and probabilistic, however.  If the thread is preempted
-//      between the call to compute_abstime() and pthread_cond_timedwait(), more
-//      than the minimum period may have passed, and the abstime may be stale (in the
-//      past) resultin in a hang.   Using this technique reduces the odds of a hang
-//      but the JVM is still vulnerable, particularly on heavily loaded systems.
-//
-// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
-//      of the usual flag-condvar-mutex idiom.  The write side of the pipe is set
-//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
-//      reduces to poll()+read().  This works well, but consumes 2 FDs per extant
-//      thread.
-//
-// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
-//      that manages timeouts.  We'd emulate pthread_cond_timedwait() by enqueuing
-//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
-//      This also works well.  In fact it avoids kernel-level scalability impediments
-//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
-//      timers in a graceful fashion.
-//
-// 4.   When the abstime value is in the past it appears that control returns
-//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
-//      Subsequent timedwait/wait calls may hang indefinitely.  Given that, we
-//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
-//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
-//      It may be possible to avoid reinitialization by checking the return
-//      value from pthread_cond_timedwait().  In addition to reinitializing the
-//      condvar we must establish the invariant that cond_signal() is only called
-//      within critical sections protected by the adjunct mutex.  This prevents
-//      cond_signal() from "seeing" a condvar that's in the midst of being
-//      reinitialized or that is corrupt.  Sadly, this invariant obviates the
-//      desirable signal-after-unlock optimization that avoids futile context switching.
-//
-//      I'm also concerned that some versions of NTPL might allocate an auxilliary
-//      structure when a condvar is used or initialized.  cond_destroy()  would
-//      release the helper structure.  Our reinitialize-after-timedwait fix
-//      put excessive stress on malloc/free and locks protecting the c-heap.
-//
-// We currently use (4).  See the WorkAroundNTPLTimedWaitHang flag.
-// It may be possible to refine (4) by checking the kernel and NTPL verisons
-// and only enabling the work-around for vulnerable environments.
 
 // utility to compute the abstime argument to timedwait:
 // millis is the relative timeout time
@@ -5529,10 +5504,6 @@
 
   while (_Event < 0) {
     status = pthread_cond_timedwait(_cond, _mutex, &abst);
-    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy(_cond);
-      pthread_cond_init(_cond, os::Linux::condAttr());
-    }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
                   status, "cond_timedwait");
@@ -5576,10 +5547,6 @@
   assert_status(status == 0, status, "mutex_lock");
   int AnyWaiters = _nParked;
   assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
-  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
-    AnyWaiters = 0;
-    pthread_cond_signal(_cond);
-  }
   status = pthread_mutex_unlock(_mutex);
   assert_status(status == 0, status, "mutex_unlock");
   if (AnyWaiters != 0) {
@@ -5731,7 +5698,7 @@
   if (_counter > 0)  { // no wait needed
     _counter = 0;
     status = pthread_mutex_unlock(_mutex);
-    assert(status == 0, "invariant");
+    assert_status(status == 0, status, "invariant");
     // Paranoia to ensure our locked and lock-free paths interact
     // correctly with each other and Java-level accesses.
     OrderAccess::fence();
@@ -5757,10 +5724,6 @@
   } else {
     _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
     status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
-    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy(&_cond[_cur_index]);
-      pthread_cond_init(&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
-    }
   }
   _cur_index = -1;
   assert_status(status == 0 || status == EINTR ||
@@ -5786,33 +5749,17 @@
 
 void Parker::unpark() {
   int status = pthread_mutex_lock(_mutex);
-  assert(status == 0, "invariant");
+  assert_status(status == 0, status, "invariant");
   const int s = _counter;
   _counter = 1;
-  if (s < 1) {
-    // thread might be parked
-    if (_cur_index != -1) {
-      // thread is definitely parked
-      if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal(&_cond[_cur_index]);
-        assert(status == 0, "invariant");
-        status = pthread_mutex_unlock(_mutex);
-        assert(status == 0, "invariant");
-      } else {
-        // must capture correct index before unlocking
-        int index = _cur_index;
-        status = pthread_mutex_unlock(_mutex);
-        assert(status == 0, "invariant");
-        status = pthread_cond_signal(&_cond[index]);
-        assert(status == 0, "invariant");
-      }
-    } else {
-      pthread_mutex_unlock(_mutex);
-      assert(status == 0, "invariant");
-    }
-  } else {
-    pthread_mutex_unlock(_mutex);
-    assert(status == 0, "invariant");
+  // must capture correct index before unlocking
+  int index = _cur_index;
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "invariant");
+  if (s < 1 && index != -1) {
+    // thread is definitely parked
+    status = pthread_cond_signal(&_cond[index]);
+    assert_status(status == 0, status, "invariant");
   }
 }
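
The Linux parker differs from the BSD one in a single respect: there are two condvars (relative vs. absolute timeouts) and _cur_index records which one the parked thread waits on, so the index must be captured while the mutex is still held; after the unlock the parked thread may wake and reset it. A sketch of just that rule, names illustrative:

    #include <pthread.h>

    struct TwoCondParkerSketch {
      pthread_mutex_t mutex;
      pthread_cond_t  cond[2];   // [0] relative timeout, [1] absolute timeout
      int             cur_index; // -1 = not parked, else an index into cond[]
      int             counter;   // 0 = no permit, 1 = permit available
    };

    void unpark(TwoCondParkerSketch* p) {
      pthread_mutex_lock(&p->mutex);
      int s = p->counter;
      p->counter = 1;
      int index = p->cur_index;    // must capture before unlocking
      pthread_mutex_unlock(&p->mutex);
      if (s < 1 && index != -1) {  // thread is definitely parked
        pthread_cond_signal(&p->cond[index]);
      }
    }
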
 
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -246,7 +246,6 @@
 bool PICL::open_library() {
   _dl_handle = dlopen("libpicl.so.1", RTLD_LAZY);
   if (_dl_handle == NULL) {
-    warning("PICL (libpicl.so.1) is missing. Performance will not be optimal.");
     return false;
   }
   if (!bind_library_functions()) {
--- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -730,7 +730,7 @@
     }
   } else if (rt == objectNull &&
            (l->as_NewInstance() || l->as_NewArray() ||
-             (UseNewCode && l->as_Local() && l->as_Local()->is_receiver()))) {
+             (l->as_Local() && l->as_Local()->is_receiver()))) {
     if (x->cond() == Instruction::eql) {
       BlockBegin* sux = x->fsux();
       set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
--- a/hotspot/src/share/vm/ci/ciField.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -211,6 +211,12 @@
   // so there is no hacking of finals going on with them.
   if (holder->is_anonymous())
     return true;
+  // Trust final fields in all boxed classes
+  if (holder->is_box_klass())
+    return true;
+  // Trust final fields in String
+  if (holder->name() == ciSymbol::java_lang_String())
+    return true;
   // Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
   // more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
   if (holder->name() == ciSymbol::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -291,6 +291,9 @@
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = new (size) MethodHandlesAdapterBlob(size);
+    if (blob == NULL) {
+      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
+    }
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
--- a/hotspot/src/share/vm/code/nmethod.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -33,6 +33,7 @@
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
 #include "compiler/compilerDirectives.hpp"
+#include "compiler/directivesParser.hpp"
 #include "compiler/disassembler.hpp"
 #include "interpreter/bytecode.hpp"
 #include "oops/methodData.hpp"
@@ -965,6 +966,12 @@
   }
 }
 
+void nmethod::maybe_print_nmethod(DirectiveSet* directive) {
+  bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
+  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
+    print_nmethod(printnmethods);
+  }
+}
 
 void nmethod::print_nmethod(bool printmethod) {
   ttyLocker ttyl;  // keep the following output all in one block
--- a/hotspot/src/share/vm/code/nmethod.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -29,6 +29,8 @@
 #include "code/pcDesc.hpp"
 #include "oops/metadata.hpp"
 
+class DirectiveSet;
+
 // This class is used internally by nmethods, to cache
 // exception/pc/handler information.
 
@@ -714,6 +716,8 @@
   void print_nul_chk_table()                      PRODUCT_RETURN;
   void print_recorded_oops()                      PRODUCT_RETURN;
   void print_recorded_metadata()                  PRODUCT_RETURN;
+
+  void maybe_print_nmethod(DirectiveSet* directive);
   void print_nmethod(bool print_code);
 
   // need to re-define this from CodeBlob else the overload hides it
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1919,12 +1919,9 @@
 
   collect_statistics(thread, time, task);
 
-  bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption;
-  if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
-    nmethod* nm = task->code();
-    if (nm != NULL) {
-      nm->print_nmethod(printnmethods);
-    }
+  nmethod* nm = task->code();
+  if (nm != NULL) {
+    nm->maybe_print_nmethod(directive);
   }
   DirectivesStack::release(directive);
 
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2206,13 +2206,13 @@
     }
     if (res == 0) {
       LogHandle(gc, verify) log;
-      log.info("Livelock: no rank reduction!");
-      log.info(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
-               " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
+      log.error("Livelock: no rank reduction!");
+      log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
+                " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
       ResourceMark rm;
-      _sp->print_on(log.info_stream());
+      _sp->print_on(log.error_stream());
       guarantee(false, "Verification failed.");
     }
     _last_addr = addr;
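These verification messages are promoted from info to error so that a failing run still produces diagnostics under the default unified-logging configuration, which by default emits warning/error output but suppresses info for most tags. A minimal model of why the level matters (a simplified stand-in for LogHandle(gc, verify), assuming that default threshold):

#include <cstdarg>
#include <cstdio>

// Simplified logging model: at the assumed default threshold, error-level
// output is printed and info-level output is dropped, which is why the
// verification failures above move to log.error()/error_stream().
enum Level { LevelInfo = 0, LevelError = 1 };
static const Level g_threshold = LevelError;

static void log_at(Level lvl, const char* fmt, ...) {
  if (lvl < g_threshold) return;  // info suppressed by default
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
  fputc('\n', stderr);
}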
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2224,8 +2224,8 @@
     if (!_marks->isMarked(addr)) {
       LogHandle(gc, verify) log;
       ResourceMark rm;
-      oop(addr)->print_on(log.info_stream());
-      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
+      oop(addr)->print_on(log.error_stream());
+      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
       _failed = true;
     }
     return true;
@@ -2350,9 +2350,9 @@
   verification_mark_bm()->iterate(&vcl);
   if (vcl.failed()) {
     LogHandle(gc, verify) log;
-    log.info("Verification failed");
+    log.error("Failed marking verification after remark");
     ResourceMark rm;
-    gch->print_on(log.info_stream());
+    gch->print_on(log.error_stream());
     fatal("CMS: failed marking verification after remark");
   }
 }
@@ -2923,7 +2923,7 @@
 
   CMSTokenSyncWithLocks ts(true, bitMapLock());
   GCTraceCPUTime tcpu;
-  CMSPhaseAccounting pa(this, "Concrurrent Mark");
+  CMSPhaseAccounting pa(this, "Concurrent Mark");
   bool res = markFromRootsWork();
   if (res) {
     _collectorState = Precleaning;
@@ -5880,8 +5880,8 @@
     if (!_cms_bm->isMarked(addr)) {
       LogHandle(gc, verify) log;
       ResourceMark rm;
-      oop(addr)->print_on(log.info_stream());
-      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
+      oop(addr)->print_on(log.error_stream());
+      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
       fatal("... aborting");
     }
   }
@@ -6661,8 +6661,8 @@
     if (!_cms_bm->isMarked(addr)) {
       LogHandle(gc, verify) log;
       ResourceMark rm;
-      oop(addr)->print_on(log.info_stream());
-      log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
+      oop(addr)->print_on(log.error_stream());
+      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
       fatal("... aborting");
     }
 
--- a/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/classLoaderData.hpp"
 #include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
@@ -123,6 +124,7 @@
     // wait until started is set.
     sleepBeforeNextCycle();
     if (_should_terminate) {
+      _cm->root_regions()->cancel_scan();
       break;
     }
 
@@ -132,6 +134,11 @@
       HandleMark   hm;
       double cycle_start = os::elapsedVTime();
 
+      {
+        GCConcPhaseTimer(_cm, "Concurrent Clearing of Claimed Marks");
+        ClassLoaderDataGraph::clear_claimed_marks();
+      }
+
       // We have to ensure that we finish scanning the root regions
       // before the next GC takes place. To ensure this we have to
       // make sure that we do not join the STS until the root regions
@@ -140,7 +147,7 @@
      // without the root regions having been scanned, which would be a
       // correctness issue.
 
-      if (!cm()->has_aborted()) {
+      {
         GCConcPhaseTimer(_cm, "Concurrent Root Region Scanning");
         _cm->scanRootRegions();
       }
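The net effect of this hunk pair is a reordered cycle: claimed marks are now cleared at the top of every marking cycle (moved out of scanRootRegions(); see the g1ConcurrentMark.cpp hunk below), and root region scanning is entered unconditionally, since aborting before it completes is no longer supported. A condensed skeleton, with stubs standing in for the real ClassLoaderDataGraph and G1ConcurrentMark calls:

#include <cstdio>

// Skeleton of the reworked concurrent cycle (illustrative stubs only).
static bool g_aborted = false;
static void clear_claimed_marks() { puts("clear claimed marks"); }
static void scan_root_regions()   { puts("scan root regions"); }
static void mark_from_roots()     { puts("mark from roots"); }

static void run_concurrent_cycle() {
  clear_claimed_marks();  // moved ahead of root region scanning
  scan_root_regions();    // unconditional: no has_aborted() guard anymore
  if (!g_aborted) {       // later phases may still observe an abort
    mark_from_roots();
  }
}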
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1290,8 +1290,7 @@
       ref_processor_cm()->verify_no_references_recorded();
 
       // Abandon current iterations of concurrent marking and concurrent
-      // refinement, if any are in progress. We have to do this before
-      // wait_until_scan_finished() below.
+      // refinement, if any are in progress.
       concurrent_mark()->abort();
 
       // Make sure we'll choose a new allocation region afterwards.
@@ -2148,8 +2147,8 @@
   virtual bool doHeapRegion(HeapRegion* hr) {
     unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
     if (_gc_time_stamp != region_gc_time_stamp) {
-      log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
-                           region_gc_time_stamp, _gc_time_stamp);
+      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
+                            region_gc_time_stamp, _gc_time_stamp);
       _failures = true;
     }
     return false;
@@ -2848,7 +2847,7 @@
     (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
 
   VirtualSpaceSummary heap_summary = create_heap_space_summary();
-  return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes);
+  return G1HeapSummary(heap_summary, used(), eden_used_bytes, eden_capacity_bytes, survivor_used_bytes, num_regions());
 }
 
 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
@@ -5186,8 +5185,8 @@
   NoYoungRegionsClosure() : _success(true) { }
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_young()) {
-      log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
-                           p2i(r->bottom()), p2i(r->end()));
+      log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
+                            p2i(r->bottom()), p2i(r->end()));
       _success = false;
     }
     return false;
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -293,29 +293,83 @@
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
 
-void G1CollectorPolicy::initialize_flags() {
-  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
-    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
+G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
+
+// There are three command line options related to the young gen size:
+// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
+// just a short form for NewSize==MaxNewSize). G1 will use its internal
+// heuristics to calculate the actual young gen size, so these options
+// basically only limit the range within which G1 can pick a young gen
+// size. Also, these are general options taking byte sizes. G1 will
+// internally work with a number of regions instead. So, some rounding
+// will occur.
+//
+// If nothing related to the young gen size is set on the command
+// line we should allow the young gen to be between G1NewSizePercent
+// and G1MaxNewSizePercent of the heap size. This means that every time
+// the heap size changes, the limits for the young gen size will be
+// recalculated.
+//
+// If only -XX:NewSize is set we should use the specified value as the
+// minimum size for young gen. Still using G1MaxNewSizePercent of the
+// heap as maximum.
+//
+// If only -XX:MaxNewSize is set we should use the specified value as the
+// maximum size for young gen. Still using G1NewSizePercent of the heap
+// as minimum.
+//
+// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
+// No updates when the heap size changes. There is a special case when
+// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
+// different heuristic for calculating the collection set when we do mixed
+// collection.
+//
+// If only -XX:NewRatio is set we should use the specified ratio of the heap
+// as both min and max. This will be interpreted as "fixed" just like the
+// NewSize==MaxNewSize case above. But we will update the min and max
+// every time the heap size changes.
+//
+// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
+// combined with either NewSize or MaxNewSize. (A warning message is printed.)
+class G1YoungGenSizer : public CHeapObj<mtGC> {
+private:
+  enum SizerKind {
+    SizerDefaults,
+    SizerNewSizeOnly,
+    SizerMaxNewSizeOnly,
+    SizerMaxAndNewSize,
+    SizerNewRatio
+  };
+  SizerKind _sizer_kind;
+  uint _min_desired_young_length;
+  uint _max_desired_young_length;
+  bool _adaptive_size;
+  uint calculate_default_min_length(uint new_number_of_heap_regions);
+  uint calculate_default_max_length(uint new_number_of_heap_regions);
+
+  // Update the given values for minimum and maximum young gen length in regions
+  // given the number of heap regions depending on the kind of sizing algorithm.
+  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
+
+public:
+  G1YoungGenSizer();
+  // Calculate the maximum length of the young gen given the number of regions
+  // depending on the sizing algorithm.
+  uint max_young_length(uint number_of_heap_regions);
+
+  void heap_size_changed(uint new_number_of_heap_regions);
+  uint min_desired_young_length() {
+    return _min_desired_young_length;
+  }
+  uint max_desired_young_length() {
+    return _max_desired_young_length;
   }
 
-  if (SurvivorRatio < 1) {
-    vm_exit_during_initialization("Invalid survivor ratio specified");
+  bool adaptive_young_list_length() const {
+    return _adaptive_size;
   }
-  CollectorPolicy::initialize_flags();
-  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
-}
+};
 
-void G1CollectorPolicy::post_heap_initialize() {
-  uintx max_regions = G1CollectedHeap::heap()->max_regions();
-  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
-  if (max_young_size != MaxNewSize) {
-    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
-  }
-
-  _ihop_control = create_ihop_control();
-}
-
-G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
 
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
         _min_desired_young_length(0), _max_desired_young_length(0) {
@@ -412,6 +466,29 @@
           &_max_desired_young_length);
 }
 
+void G1CollectorPolicy::post_heap_initialize() {
+  uintx max_regions = G1CollectedHeap::heap()->max_regions();
+  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
+  if (max_young_size != MaxNewSize) {
+    FLAG_SET_ERGO(size_t, MaxNewSize, max_young_size);
+  }
+
+  _ihop_control = create_ihop_control();
+}
+
+void G1CollectorPolicy::initialize_flags() {
+  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
+    FLAG_SET_ERGO(size_t, G1HeapRegionSize, HeapRegion::GrainBytes);
+  }
+
+  if (SurvivorRatio < 1) {
+    vm_exit_during_initialization("Invalid survivor ratio specified");
+  }
+  CollectorPolicy::initialize_flags();
+  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
+}
+
+
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
@@ -758,7 +835,7 @@
        curr = curr->get_next_young_region()) {
     SurvRateGroup* group = curr->surv_rate_group();
     if (group == NULL && !curr->is_survivor()) {
-      log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
+      log_error(gc, verify)("## %s: encountered NULL surv_rate_group", name);
       ret = false;
     }
 
@@ -766,12 +843,12 @@
       int age = curr->age_in_surv_rate_group();
 
       if (age < 0) {
-        log_info(gc, verify)("## %s: encountered negative age", name);
+        log_error(gc, verify)("## %s: encountered negative age", name);
         ret = false;
       }
 
       if (age <= prev_age) {
-        log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
+        log_error(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
         ret = false;
       }
       prev_age = age;
@@ -1601,6 +1678,10 @@
   return young_list_length < young_list_max_length;
 }
 
+bool G1CollectorPolicy::adaptive_young_list_length() const {
+  return _young_gen_sizer->adaptive_young_list_length();
+}
+
 void G1CollectorPolicy::update_max_gc_locker_expansion() {
   uint expansion_region_num = 0;
   if (GCLockerEdenExpansionPercent > 0) {
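As a worked example of the defaults case described in the comment block above: with none of NewSize/MaxNewSize/NewRatio on the command line, the young gen floats between G1NewSizePercent and G1MaxNewSizePercent of the heap, measured in regions. A standalone sketch, assuming the documented default percentages of 5 and 60:

#include <cstdio>

// Defaults-case young gen bounds, in regions. The percentages (5 and 60)
// are the documented defaults for G1NewSizePercent / G1MaxNewSizePercent;
// the clamp to 1 mirrors "always at least one region".
static unsigned default_min_length(unsigned heap_regions) {
  unsigned len = heap_regions * 5 / 100;
  return len > 1 ? len : 1;
}
static unsigned default_max_length(unsigned heap_regions) {
  unsigned len = heap_regions * 60 / 100;
  return len > 1 ? len : 1;
}

int main() {
  unsigned regions = 2048;  // e.g. an 8 GB heap with 4 MB regions
  printf("young gen: %u..%u regions\n",
         default_min_length(regions),   // 102
         default_max_length(regions));  // 1228
  return 0;
}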
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -43,6 +43,7 @@
 class HeapRegion;
 class CollectionSetChooser;
 class G1IHOPControl;
+class G1YoungGenSizer;
 
 // TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses
 // (the latter may contain non-young regions - i.e. regions that are
@@ -90,81 +91,6 @@
   void print() const;
 };
 
-// There are three command line options related to the young gen size:
-// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
-// just a short form for NewSize==MaxNewSize). G1 will use its internal
-// heuristics to calculate the actual young gen size, so these options
-// basically only limit the range within which G1 can pick a young gen
-// size. Also, these are general options taking byte sizes. G1 will
-// internally work with a number of regions instead. So, some rounding
-// will occur.
-//
-// If nothing related to the the young gen size is set on the command
-// line we should allow the young gen to be between G1NewSizePercent
-// and G1MaxNewSizePercent of the heap size. This means that every time
-// the heap size changes, the limits for the young gen size will be
-// recalculated.
-//
-// If only -XX:NewSize is set we should use the specified value as the
-// minimum size for young gen. Still using G1MaxNewSizePercent of the
-// heap as maximum.
-//
-// If only -XX:MaxNewSize is set we should use the specified value as the
-// maximum size for young gen. Still using G1NewSizePercent of the heap
-// as minimum.
-//
-// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
-// No updates when the heap size changes. There is a special case when
-// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
-// different heuristic for calculating the collection set when we do mixed
-// collection.
-//
-// If only -XX:NewRatio is set we should use the specified ratio of the heap
-// as both min and max. This will be interpreted as "fixed" just like the
-// NewSize==MaxNewSize case above. But we will update the min and max
-// every time the heap size changes.
-//
-// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
-// combined with either NewSize or MaxNewSize. (A warning message is printed.)
-class G1YoungGenSizer : public CHeapObj<mtGC> {
-private:
-  enum SizerKind {
-    SizerDefaults,
-    SizerNewSizeOnly,
-    SizerMaxNewSizeOnly,
-    SizerMaxAndNewSize,
-    SizerNewRatio
-  };
-  SizerKind _sizer_kind;
-  uint _min_desired_young_length;
-  uint _max_desired_young_length;
-  bool _adaptive_size;
-  uint calculate_default_min_length(uint new_number_of_heap_regions);
-  uint calculate_default_max_length(uint new_number_of_heap_regions);
-
-  // Update the given values for minimum and maximum young gen length in regions
-  // given the number of heap regions depending on the kind of sizing algorithm.
-  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
-
-public:
-  G1YoungGenSizer();
-  // Calculate the maximum length of the young gen given the number of regions
-  // depending on the sizing algorithm.
-  uint max_young_length(uint number_of_heap_regions);
-
-  void heap_size_changed(uint new_number_of_heap_regions);
-  uint min_desired_young_length() {
-    return _min_desired_young_length;
-  }
-  uint max_desired_young_length() {
-    return _max_desired_young_length;
-  }
-
-  bool adaptive_young_list_length() const {
-    return _adaptive_size;
-  }
-};
-
 class G1CollectorPolicy: public CollectorPolicy {
  private:
   G1IHOPControl* _ihop_control;
@@ -784,9 +710,7 @@
     return _young_list_max_length;
   }
 
-  bool adaptive_young_list_length() const {
-    return _young_gen_sizer->adaptive_young_list_length();
-  }
+  bool adaptive_young_list_length() const;
 
   virtual bool should_process_references() const {
     return true;
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -372,6 +372,16 @@
   return res;
 }
 
+void G1CMRootRegions::notify_scan_done() {
+  MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
+  _scan_in_progress = false;
+  RootRegionScan_lock->notify_all();
+}
+
+void G1CMRootRegions::cancel_scan() {
+  notify_scan_done();
+}
+
 void G1CMRootRegions::scan_finished() {
   assert(scan_in_progress(), "pre-condition");
 
@@ -381,11 +391,7 @@
   }
   _next_survivor = NULL;
 
-  {
-    MutexLockerEx x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
-    _scan_in_progress = false;
-    RootRegionScan_lock->notify_all();
-  }
+  notify_scan_done();
 }
 
 bool G1CMRootRegions::wait_until_scan_finished() {
@@ -978,13 +984,11 @@
 };
 
 void G1ConcurrentMark::scanRootRegions() {
-  // Start of concurrent marking.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
   // scan_in_progress() will have been set to true only if there was
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
+    assert(!has_aborted(), "Aborting before root region scanning is finished is not supported.");
     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");
 
     _parallel_marking_threads = calc_parallel_marking_threads();
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -229,6 +229,8 @@
   volatile bool        _should_abort;
   HeapRegion* volatile _next_survivor;
 
+  void notify_scan_done();
+
 public:
   G1CMRootRegions();
   // We actually do most of the initialization in this method.
@@ -248,6 +250,8 @@
   // all have been claimed.
   HeapRegion* claim_next();
 
+  void cancel_scan();
+
   // Flag that we're done with root region scanning and notify anyone
   // who's waiting on it. If aborted is false, assume that all regions
   // have been claimed.
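Both scan_finished() and the new cancel_scan() now funnel through notify_scan_done(), which performs the monitor handshake: clear the flag and wake all waiters under one lock. A condensed standalone model of that handshake (std:: primitives stand in for RootRegionScan_lock):

#include <condition_variable>
#include <mutex>

// Condensed model of the scan-done handshake factored into
// notify_scan_done(); cancel_scan() is just the termination-path caller.
class RootScanState {
  std::mutex              _lock;
  std::condition_variable _cv;
  bool                    _scan_in_progress = true;
 public:
  void notify_scan_done() {
    std::lock_guard<std::mutex> g(_lock);
    _scan_in_progress = false;
    _cv.notify_all();
  }
  void cancel_scan() { notify_scan_done(); }
  void wait_until_scan_finished() {
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [this] { return !_scan_in_progress; });
  }
};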
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1HeapRegionTraceType.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1HEAPREGIONTRACETYPE_HPP
+#define SHARE_VM_GC_G1_G1HEAPREGIONTRACETYPE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class G1HeapRegionTraceType : AllStatic {
+ public:
+  enum Type {
+    Free,
+    Eden,
+    Survivor,
+    StartsHumongous,
+    ContinuesHumongous,
+    Old,
+    Archive,
+    G1HeapRegionTypeEndSentinel
+  };
+
+  static const char* to_string(G1HeapRegionTraceType::Type type) {
+    switch (type) {
+      case Free:               return "Free";
+      case Eden:               return "Eden";
+      case Survivor:           return "Survivor";
+      case StartsHumongous:    return "Starts Humongous";
+      case ContinuesHumongous: return "Continues Humongous";
+      case Old:                return "Old";
+      case Archive:            return "Archive";
+      default: ShouldNotReachHere(); return NULL;
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_G1_G1HEAPREGIONTRACETYPE_HPP
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -63,10 +63,10 @@
         LogHandle(gc, verify) log;
         log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
         if (_vo == VerifyOption_G1UseMarkWord) {
-          log.info("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
+          log.error("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
         }
         ResourceMark rm;
-        obj->print_on(log.info_stream());
+        obj->print_on(log.error_stream());
         _failures = true;
       }
     }
@@ -111,10 +111,10 @@
       // Verify that the strong code root list for this region
       // contains the nmethod
       if (!hrrs->strong_code_roots_list_contains(_nm)) {
-        log_info(gc, verify)("Code root location " PTR_FORMAT " "
-                             "from nmethod " PTR_FORMAT " not in strong "
-                             "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
-                             p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
+        log_error(gc, verify)("Code root location " PTR_FORMAT " "
+                              "from nmethod " PTR_FORMAT " not in strong "
+                              "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
+                              p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
         _failures = true;
       }
     }
@@ -292,8 +292,8 @@
         r->object_iterate(&not_dead_yet_cl);
         if (_vo != VerifyOption_G1UseNextMarking) {
           if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
-            log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
-                                 p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
+            log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
+                                  p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
             _failures = true;
           }
         } else {
@@ -402,13 +402,13 @@
   }
 
   if (failures) {
-    log_info(gc, verify)("Heap after failed verification:");
+    log_error(gc, verify)("Heap after failed verification:");
     // It helps to have the per-region information in the output to
     // help us track down what went wrong. This is why we call
     // print_extended_on() instead of print_on().
     LogHandle(gc, verify) log;
     ResourceMark rm;
-    _g1h->print_extended_on(log.info_stream());
+    _g1h->print_extended_on(log.error_stream());
   }
   guarantee(!failures, "there should not have been any failures");
 }
@@ -597,8 +597,8 @@
             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
   if (result < end) {
-    log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
-    log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
+    log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
+    log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
     return false;
   }
   return true;
@@ -623,8 +623,8 @@
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
-    log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
-    log_info(gc, verify)("#### Caller: %s", caller);
+    log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
+    log_error(gc, verify)("#### Caller: %s", caller);
     return false;
   }
   return true;
@@ -676,41 +676,41 @@
     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
     if (hr->is_humongous()) {
       if (hr->in_collection_set()) {
-        log_info(gc, verify)("## humongous region %u in CSet", i);
+        log_error(gc, verify)("## humongous region %u in CSet", i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
-        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
+        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
-        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
+        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
     } else {
       if (cset_state.is_humongous()) {
-        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
+        log_error(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->in_collection_set() != cset_state.is_in_cset()) {
-        log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+        log_error(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                              hr->in_collection_set(), cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
         if (hr->is_young() != (cset_state.is_young())) {
-          log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+          log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                hr->is_young(), cset_state.value(), i);
           _failures = true;
           return true;
         }
         if (hr->is_old() != (cset_state.is_old())) {
-          log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+          log_error(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                hr->is_old(), cset_state.value(), i);
           _failures = true;
           return true;
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -26,11 +26,13 @@
 #include "code/nmethod.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionBounds.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/heapRegionTracer.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/liveRange.hpp"
 #include "gc/shared/space.inline.hpp"
@@ -212,10 +214,41 @@
   _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
 }
 
+void HeapRegion::set_free() {
+  report_region_type_change(G1HeapRegionTraceType::Free);
+  _type.set_free();
+}
+
+void HeapRegion::set_eden() {
+  report_region_type_change(G1HeapRegionTraceType::Eden);
+  _type.set_eden();
+}
+
+void HeapRegion::set_eden_pre_gc() {
+  report_region_type_change(G1HeapRegionTraceType::Eden);
+  _type.set_eden_pre_gc();
+}
+
+void HeapRegion::set_survivor() {
+  report_region_type_change(G1HeapRegionTraceType::Survivor);
+  _type.set_survivor();
+}
+
+void HeapRegion::set_old() {
+  report_region_type_change(G1HeapRegionTraceType::Old);
+  _type.set_old();
+}
+
+void HeapRegion::set_archive() {
+  report_region_type_change(G1HeapRegionTraceType::Archive);
+  _type.set_archive();
+}
+
 void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
   assert(!is_humongous(), "sanity / pre-condition");
   assert(top() == bottom(), "should be empty");
 
+  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
   _type.set_starts_humongous();
   _humongous_start_region = this;
 
@@ -227,6 +260,7 @@
   assert(top() == bottom(), "should be empty");
   assert(first_hr->is_starts_humongous(), "pre-condition");
 
+  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
   _type.set_continues_humongous();
   _humongous_start_region = first_hr;
 }
@@ -272,6 +306,15 @@
   record_timestamp();
 }
 
+void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
+  HeapRegionTracer::send_region_type_change(_hrm_index,
+                                            get_trace_type(),
+                                            to,
+                                            (uintptr_t)bottom(),
+                                            used(),
+                                            (uint)allocation_context());
+}
+
 CompactibleSpace* HeapRegion::next_compaction_space() const {
   return G1CollectedHeap::heap()->next_compaction_region(this);
 }
@@ -479,7 +522,7 @@
        // Object is in the region. Check that it's less than top
         if (_hr->top() <= (HeapWord*)obj) {
           // Object is above top
-          log_info(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
+          log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
                                p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
           _failures = true;
           return;
@@ -513,19 +556,19 @@
     if (nm != NULL) {
      // Verify that the nmethod is live
       if (!nm->is_alive()) {
-        log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
-                             p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
+        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
+                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
         _failures = true;
       } else {
         VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
         nm->oops_do(&oop_cl);
         if (!oop_cl.has_oops_in_region()) {
-          log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
-                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
+          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
+                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
           _failures = true;
         } else if (oop_cl.failures()) {
-          log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
-                               p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
+          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
+                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
           _failures = true;
         }
       }
@@ -558,8 +601,8 @@
   // on its strong code root list
   if (is_empty()) {
     if (strong_code_roots_length > 0) {
-      log_info(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
-                           p2i(bottom()), p2i(end()), strong_code_roots_length);
+      log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
+                            p2i(bottom()), p2i(end()), strong_code_roots_length);
       *failures = true;
     }
     return;
@@ -567,8 +610,8 @@
 
   if (is_continues_humongous()) {
     if (strong_code_roots_length > 0) {
-      log_info(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
-                           HR_FORMAT_PARAMS(this), strong_code_roots_length);
+      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
+                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
       *failures = true;
     }
     return;
@@ -661,26 +704,26 @@
           Mutex::_no_safepoint_check_flag);
 
         if (!_failures) {
-          log.info("----------");
+          log.error("----------");
         }
         ResourceMark rm;
         if (!_g1h->is_in_closed_subset(obj)) {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-          log.info("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
+          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
             p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
-          print_object(log.info_stream(), _containing_obj);
-          log.info("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
+          print_object(log.error_stream(), _containing_obj);
+          log.error("points to obj " PTR_FORMAT " not in the heap", p2i(obj));
         } else {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
-          log.info("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
+          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
             p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
-          print_object(log.info_stream(), _containing_obj);
-          log.info("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
+          print_object(log.error_stream(), _containing_obj);
+          log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
             p2i(obj), p2i(to->bottom()), p2i(to->end()));
-          print_object(log.info_stream(), obj);
+          print_object(log.error_stream(), obj);
         }
-        log.info("----------");
+        log.error("----------");
         _failures = true;
         failed = true;
         _n_failures++;
@@ -730,17 +773,17 @@
             Mutex::_no_safepoint_check_flag);
 
           if (!_failures) {
-            log.info("----------");
+            log.error("----------");
           }
-          log.info("Missing rem set entry:");
-          log.info("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
+          log.error("Missing rem set entry:");
+          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
             p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
           ResourceMark rm;
-          _containing_obj->print_on(log.info_stream());
-          log.info("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
-          obj->print_on(log.info_stream());
-          log.info("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
-          log.info("----------");
+          _containing_obj->print_on(log.error_stream());
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT, p2i(obj), HR_FORMAT_PARAMS(to));
+          obj->print_on(log.error_stream());
+          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
+          log.error("----------");
           _failures = true;
           if (!failed) _n_failures++;
         }
@@ -774,13 +817,13 @@
                                    (vo == VerifyOption_G1UsePrevMarking &&
                                    ClassLoaderDataGraph::unload_list_contains(klass));
         if (!is_metaspace_object) {
-          log_info(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
-                               "not metadata", p2i(klass), p2i(obj));
+          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
+                                "not metadata", p2i(klass), p2i(obj));
           *failures = true;
           return;
         } else if (!klass->is_klass()) {
-          log_info(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
-                               "not a klass", p2i(klass), p2i(obj));
+          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
+                                "not a klass", p2i(klass), p2i(obj));
           *failures = true;
           return;
         } else {
@@ -811,7 +854,7 @@
           }
         }
       } else {
-        log_info(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
+        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
         *failures = true;
         return;
       }
@@ -827,13 +870,15 @@
   if (is_region_humongous) {
     oop obj = oop(this->humongous_start_region()->bottom());
     if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
-      log_info(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
+      log_error(gc, verify)("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
+      *failures = true;
+      return;
     }
   }
 
   if (!is_region_humongous && p != top()) {
-    log_info(gc, verify)("end of last object " PTR_FORMAT " "
-                         "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
+    log_error(gc, verify)("end of last object " PTR_FORMAT " "
+                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
     *failures = true;
     return;
   }
@@ -847,9 +892,9 @@
     HeapWord* addr_1 = p;
     HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
     if (b_start_1 != p) {
-      log_info(gc, verify)("BOT look up for top: " PTR_FORMAT " "
-                           " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                           p2i(addr_1), p2i(b_start_1), p2i(p));
+      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
+                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                            p2i(addr_1), p2i(b_start_1), p2i(p));
       *failures = true;
       return;
     }
@@ -859,9 +904,9 @@
     if (addr_2 < the_end) {
       HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
       if (b_start_2 != p) {
-        log_info(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
-                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                             p2i(addr_2), p2i(b_start_2), p2i(p));
+        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
+                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                              p2i(addr_2), p2i(b_start_2), p2i(p));
         *failures = true;
         return;
       }
@@ -873,9 +918,9 @@
     if (addr_3 < the_end) {
       HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
       if (b_start_3 != p) {
-        log_info(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
-                             " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                             p2i(addr_3), p2i(b_start_3), p2i(p));
+        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
+                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                              p2i(addr_3), p2i(b_start_3), p2i(p));
         *failures = true;
         return;
       }
@@ -885,9 +930,9 @@
     HeapWord* addr_4 = the_end - 1;
     HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
     if (b_start_4 != p) {
-      log_info(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
-                           " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
-                           p2i(addr_4), p2i(b_start_4), p2i(p));
+      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
+                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
+                            p2i(addr_4), p2i(b_start_4), p2i(p));
       *failures = true;
       return;
     }
@@ -924,7 +969,7 @@
           return;
         }
       } else {
-        log_info(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
+        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
         *failures = true;
         return;
       }
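All the setters rewritten in this file follow one pattern: report the transition while the old type is still current, then flip the type, so the tracer can read the old value as "from". A standalone model of that ordering (a plain enum and printf stand in for HeapRegionType and HeapRegionTracer):

#include <cstdio>

// Model of the report-then-set ordering used by the new setters.
enum Kind { Free = 0, Eden = 1, Old = 2 };

struct Region {
  Kind kind;
  Region() : kind(Free) {}
  void report_change(Kind to) {
    printf("region type change: %d -> %d\n", (int)kind, (int)to);
  }
  void set_old() {
    report_change(Old);  // "from" is still the current type here
    kind = Old;
  }
};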
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -27,6 +27,8 @@
 
 #include "gc/g1/g1AllocationContext.hpp"
 #include "gc/g1/g1BlockOffsetTable.hpp"
+#include "gc/g1/g1HeapRegionTraceType.hpp"
+#include "gc/g1/heapRegionTracer.hpp"
 #include "gc/g1/heapRegionType.hpp"
 #include "gc/g1/survRateGroup.hpp"
 #include "gc/shared/ageTable.hpp"
@@ -243,6 +245,8 @@
     return HeapRegion::block_size(addr); // Avoid virtual call
   }
 
+  void report_region_type_change(G1HeapRegionTraceType::Type to);
+
  protected:
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
@@ -427,6 +431,7 @@
 
   const char* get_type_str() const { return _type.get_str(); }
   const char* get_short_type_str() const { return _type.get_short_str(); }
+  G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }
 
   bool is_free() const { return _type.is_free(); }
 
@@ -637,15 +642,15 @@
     }
   }
 
-  void set_free() { _type.set_free(); }
+  void set_free();
 
-  void set_eden()        { _type.set_eden();        }
-  void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
-  void set_survivor()    { _type.set_survivor();    }
+  void set_eden();
+  void set_eden_pre_gc();
+  void set_survivor();
 
-  void set_old() { _type.set_old(); }
+  void set_old();
 
-  void set_archive() { _type.set_archive(); }
+  void set_archive();
 
   // Determine if an object has been allocated since the last
   // mark performed by the collector. This returns true iff the object
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegionTracer.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/heapRegionTracer.hpp"
+#include "trace/tracing.hpp"
+
+void HeapRegionTracer::send_region_type_change(uint index,
+                                               G1HeapRegionTraceType::Type from,
+                                               G1HeapRegionTraceType::Type to,
+                                               uintptr_t start,
+                                               size_t used,
+                                               uint allocationContext) {
+  EventG1HeapRegionTypeChange e;
+  if (e.should_commit()) {
+    e.set_index(index);
+    e.set_from(from);
+    e.set_to(to);
+    e.set_start(start);
+    e.set_used(used);
+    e.set_allocContext(allocationContext);
+    e.commit();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/heapRegionTracer.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP
+#define SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP
+
+#include "gc/g1/g1HeapRegionTraceType.hpp"
+#include "memory/allocation.hpp"
+
+class HeapRegionTracer : AllStatic {
+  public:
+    static void send_region_type_change(uint index,
+                                        G1HeapRegionTraceType::Type from,
+                                        G1HeapRegionTraceType::Type to,
+                                        uintptr_t start,
+                                        size_t used,
+                                        uint allocationContext);
+};
+
+#endif // SHARE_VM_GC_G1_HEAPREGIONTRACER_HPP
--- a/hotspot/src/share/vm/gc/g1/heapRegionType.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionType.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "gc/g1/heapRegionType.hpp"
 
 bool HeapRegionType::is_valid(Tag tag) {
@@ -70,3 +71,19 @@
   // keep some compilers happy
   return NULL;
 }
+
+G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() {
+  hrt_assert_is_valid(_tag);
+  switch (_tag) {
+    case FreeTag:               return G1HeapRegionTraceType::Free;
+    case EdenTag:               return G1HeapRegionTraceType::Eden;
+    case SurvTag:               return G1HeapRegionTraceType::Survivor;
+    case StartsHumongousTag:    return G1HeapRegionTraceType::StartsHumongous;
+    case ContinuesHumongousTag: return G1HeapRegionTraceType::ContinuesHumongous;
+    case OldTag:                return G1HeapRegionTraceType::Old;
+    case ArchiveTag:            return G1HeapRegionTraceType::Archive;
+  }
+  ShouldNotReachHere();
+  // keep some compilers happy
+  return G1HeapRegionTraceType::Free;
+}
--- a/hotspot/src/share/vm/gc/g1/heapRegionType.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/heapRegionType.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP
 #define SHARE_VM_GC_G1_HEAPREGIONTYPE_HPP
 
+#include "gc/g1/g1HeapRegionTraceType.hpp"
 #include "memory/allocation.hpp"
 
 #define hrt_assert_is_valid(tag) \
@@ -141,6 +142,7 @@
 
   const char* get_str() const;
   const char* get_short_str() const;
+  G1HeapRegionTraceType::Type get_trace_type();
 
   HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
 };
--- a/hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/satbMarkQueue.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -221,13 +221,13 @@
 
 #ifdef ASSERT
 void SATBMarkQueueSet::dump_active_states(bool expected_active) {
-  log_info(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
-  log_info(gc, verify)("Actual SATB active states:");
-  log_info(gc, verify)("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
+  log_error(gc, verify)("Expected SATB active state: %s", expected_active ? "ACTIVE" : "INACTIVE");
+  log_error(gc, verify)("Actual SATB active states:");
+  log_error(gc, verify)("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
   for (JavaThread* t = Threads::first(); t; t = t->next()) {
-    log_info(gc, verify)("  Thread \"%s\" queue: %s", t->name(), t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
+    log_error(gc, verify)("  Thread \"%s\" queue: %s", t->name(), t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
   }
-  log_info(gc, verify)("  Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
+  log_error(gc, verify)("  Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
 }
 
 void SATBMarkQueueSet::verify_active_states(bool expected_active) {
--- a/hotspot/src/share/vm/gc/g1/youngList.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/g1/youngList.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -99,10 +99,10 @@
   HeapRegion* last = NULL;
   while (curr != NULL) {
     if (!curr->is_young()) {
-      log_info(gc, verify)("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
-                           "incorrectly tagged (y: %d, surv: %d)",
-                           p2i(curr->bottom()), p2i(curr->end()),
-                           curr->is_young(), curr->is_survivor());
+      log_error(gc, verify)("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
+                            "incorrectly tagged (y: %d, surv: %d)",
+                            p2i(curr->bottom()), p2i(curr->end()),
+                            curr->is_young(), curr->is_survivor());
       ret = false;
     }
     ++length;
@@ -112,8 +112,8 @@
   ret = ret && (length == _length);
 
   if (!ret) {
-    log_info(gc, verify)("### YOUNG LIST seems not well formed!");
-    log_info(gc, verify)("###   list has %u entries, _length is %u", length, _length);
+    log_error(gc, verify)("### YOUNG LIST seems not well formed!");
+    log_error(gc, verify)("###   list has %u entries, _length is %u", length, _length);
   }
 
   return ret;
@@ -123,19 +123,19 @@
   bool ret = true;
 
   if (_length != 0) {
-    log_info(gc, verify)("### YOUNG LIST should have 0 length, not %u", _length);
+    log_error(gc, verify)("### YOUNG LIST should have 0 length, not %u", _length);
     ret = false;
   }
   if (check_sample && _last_sampled_rs_lengths != 0) {
-    log_info(gc, verify)("### YOUNG LIST has non-zero last sampled RS lengths");
+    log_error(gc, verify)("### YOUNG LIST has non-zero last sampled RS lengths");
     ret = false;
   }
   if (_head != NULL) {
-    log_info(gc, verify)("### YOUNG LIST does not have a NULL head");
+    log_error(gc, verify)("### YOUNG LIST does not have a NULL head");
     ret = false;
   }
   if (!ret) {
-    log_info(gc, verify)("### YOUNG LIST does not seem empty");
+    log_error(gc, verify)("### YOUNG LIST does not seem empty");
   }
 
   return ret;
--- a/hotspot/src/share/vm/gc/shared/ageTable.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/shared/ageTable.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/ageTable.inline.hpp"
+#include "gc/shared/ageTableTracer.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
@@ -100,17 +101,19 @@
   log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold " UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
                      desired_survivor_size*oopSize, (uintx) result, MaxTenuringThreshold);
 
-  if (log_is_enabled(Trace, gc, age) || UsePerfData) {
+  if (log_is_enabled(Trace, gc, age) || UsePerfData || AgeTableTracer::is_tenuring_distribution_event_enabled()) {
     size_t total = 0;
     uint age = 1;
     while (age < table_size) {
-      total += sizes[age];
-      if (sizes[age] > 0) {
+      size_t wordSize = sizes[age];
+      total += wordSize;
+      if (wordSize > 0) {
         log_trace(gc, age)("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total",
-                            age, sizes[age]*oopSize, total*oopSize);
+                            age, wordSize*oopSize, total*oopSize);
       }
+      AgeTableTracer::send_tenuring_distribution_event(age, wordSize*oopSize);
       if (UsePerfData) {
-        _perf_sizes[age]->set_value(sizes[age]*oopSize);
+        _perf_sizes[age]->set_value(wordSize*oopSize);
       }
       age++;
     }
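The reworked loop reads each bucket once into wordSize and now also feeds the tracing backend, which wants the full distribution rather than only the non-empty buckets that get logged. A condensed standalone model (UsePerfData counters omitted; stubs stand in for the tracer and logger):

#include <cstdio>

// Condensed model of the distribution loop after the change: one read per
// bucket, logging only non-empty buckets, event sent for every age.
static bool g_trace_logging = false;
static bool g_event_enabled = true;   // AgeTableTracer-style gate
static void send_event(unsigned age, size_t bytes) { (void)age; (void)bytes; }

static void report_distribution(const size_t* sizes, unsigned table_size,
                                size_t oop_size) {
  if (!g_trace_logging && !g_event_enabled) return;
  size_t total = 0;
  for (unsigned age = 1; age < table_size; age++) {
    size_t word_size = sizes[age];          // read once, as in the patch
    total += word_size;
    if (g_trace_logging && word_size > 0) {
      printf("- age %3u: %10zu bytes, %10zu total\n",
             age, word_size * oop_size, total * oop_size);
    }
    send_event(age, word_size * oop_size);  // sent even for empty buckets
  }
}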
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/shared/ageTableTracer.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/ageTableTracer.hpp"
+#include "gc/shared/gcId.hpp"
+#include "trace/tracing.hpp"
+
+void AgeTableTracer::send_tenuring_distribution_event(uint age, size_t size) {
+  EventTenuringDistribution e;
+  if (e.should_commit()) {
+    e.set_gcId(GCId::current());
+    e.set_age(age);
+    e.set_size(size);
+    e.commit();
+  }
+}
+
+bool AgeTableTracer::is_tenuring_distribution_event_enabled() {
+  return EventTenuringDistribution::is_enabled();
+}
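
Note: the tracer above follows the usual HotSpot event idiom -- construct the
event, test should_commit(), fill fields, commit. The companion query
is_tenuring_distribution_event_enabled() exists so callers can skip computing
per-age data when nothing consumes it, which is what the ageTable.cpp hunk
above uses it for. A minimal caller-side sketch (illustrative only, not part
of this patch):

    // Emit per-age sizes only when some consumer -- trace logging, perf
    // counters, or the tenuring-distribution event -- is actually enabled.
    if (log_is_enabled(Trace, gc, age) || UsePerfData ||
        AgeTableTracer::is_tenuring_distribution_event_enabled()) {
      for (uint age = 1; age < table_size; age++) {
        AgeTableTracer::send_tenuring_distribution_event(age, sizes[age] * oopSize);
      }
    }
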
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/shared/ageTableTracer.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_AGETABLETRACER_HPP
+#define SHARE_VM_GC_SHARED_AGETABLETRACER_HPP
+
+#include "memory/allocation.hpp"
+
+class AgeTableTracer : AllStatic {
+  public:
+    static void send_tenuring_distribution_event(uint age, size_t size);
+    static bool is_tenuring_distribution_event_enabled();
+};
+
+#endif // SHARE_VM_GC_SHARED_AGETABLETRACER_HPP
--- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 void ConcurrentGCThread::wait_for_universe_init() {
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
   while (!is_init_completed() && !_should_terminate) {
-    CGC_lock->wait(Mutex::_no_safepoint_check_flag, 200);
+    CGC_lock->wait(Mutex::_no_safepoint_check_flag, 1);
   }
 }
 
--- a/hotspot/src/share/vm/gc/shared/concurrentGCThread.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/shared/concurrentGCThread.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
   friend class VMStructs;
 
 protected:
-  bool _should_terminate;
+  bool volatile _should_terminate;
   bool _has_terminated;
 
  // Create and start the thread (setting its priority high).
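
Note: _should_terminate is written by the thread requesting shutdown and
polled by the concurrent GC thread itself, so 'volatile' is needed to keep
the compiler from caching the flag in a register across iterations of a
polling loop; together with the shorter 1 ms wait above, it bounds how long
a termination request can go unnoticed. A reduced sketch of the reader side
(illustrative, mirroring wait_for_universe_init()):

    // Without 'volatile', the load of _should_terminate could legally be
    // hoisted out of the loop, spinning on a stale value indefinitely.
    while (!is_init_completed() && !_should_terminate) {
      CGC_lock->wait(Mutex::_no_safepoint_check_flag, 1);  // re-check every 1 ms
    }
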
--- a/hotspot/src/share/vm/gc/shared/gcHeapSummary.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/shared/gcHeapSummary.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -131,12 +131,14 @@
   size_t  _edenUsed;
   size_t  _edenCapacity;
   size_t  _survivorUsed;
+  uint    _numberOfRegions;
  public:
-   G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed) :
-       GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed) { }
+   G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed, uint numberOfRegions) :
+      GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed), _numberOfRegions(numberOfRegions) { }
    const size_t edenUsed() const { return _edenUsed; }
    const size_t edenCapacity() const { return _edenCapacity; }
    const size_t survivorUsed() const { return _survivorUsed; }
+   const uint   numberOfRegions() const { return _numberOfRegions; }
 
    virtual void accept(GCHeapSummaryVisitor* visitor) const {
      visitor->visit(this);
--- a/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/shared/gcTraceSend.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -359,6 +359,7 @@
       e.set_edenUsedSize(g1_heap_summary->edenUsed());
       e.set_edenTotalSize(g1_heap_summary->edenCapacity());
       e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
+      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
       e.commit();
     }
   }
--- a/hotspot/src/share/vm/gc/shared/memset_with_concurrent_readers.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/gc/shared/memset_with_concurrent_readers.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -34,7 +34,7 @@
 #if INCLUDE_ALL_GCS
 
 // Unit test
-#ifdef ASSERT
+#ifndef PRODUCT
 
 static unsigned line_byte(const char* line, size_t i) {
   return unsigned(line[i]) & 0xFF;
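
Note: ASSERT and PRODUCT guard different sets of HotSpot build flavors.
ASSERT is defined only in debug builds, while PRODUCT is defined only in
product builds, so switching the unit test from '#ifdef ASSERT' to
'#ifndef PRODUCT' also compiles it into the optimized (non-debug,
non-product) flavor. Schematically:

    #ifdef ASSERT     // debug builds only (asserts compiled in)
      // ...
    #endif
    #ifndef PRODUCT   // debug and optimized builds; excluded only from product
      // ...
    #endif
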
--- a/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1091,7 +1091,7 @@
   } else if (reference->is_a(site_DataSectionReference::klass())) {
     int data_offset = site_DataSectionReference::offset(reference);
     if (0 <= data_offset && data_offset < _constants_size) {
-      pd_patch_DataSectionReference(pc_offset, data_offset);
+      pd_patch_DataSectionReference(pc_offset, data_offset, CHECK);
     } else {
       JVMCI_ERROR("data offset 0x%X points outside data section (size 0x%X)", data_offset, _constants_size);
     }
--- a/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/jvmci/jvmciCodeInstaller.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -156,7 +156,7 @@
   jint pd_next_offset(NativeInstruction* inst, jint pc_offset, Handle method, TRAPS);
   void pd_patch_OopConstant(int pc_offset, Handle constant, TRAPS);
   void pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS);
-  void pd_patch_DataSectionReference(int pc_offset, int data_offset);
+  void pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS);
   void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS);
   void pd_relocate_JavaMethod(Handle method, jint pc_offset, TRAPS);
   void pd_relocate_poll(address pc, jint mark, TRAPS);
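
Note: adding TRAPS to pd_patch_DataSectionReference lets the platform code
report an error as a pending exception instead of hitting a hard stop, and
the CHECK at the call site in jvmciCodeInstaller.cpp returns immediately if
an exception is pending. A rough sketch of the idiom (the macros live in
utilities/exceptions.hpp):

    // TRAPS appends a 'Thread* THREAD' parameter to the declaration;
    // CHECK expands approximately to
    //   THREAD); if (HAS_PENDING_EXCEPTION) return;
    // so a callee can set a pending exception and the caller unwinds cleanly.
    void pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS);
    ...
    pd_patch_DataSectionReference(pc_offset, data_offset, CHECK);
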
--- a/hotspot/src/share/vm/jvmci/jvmciEnv.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/jvmci/jvmciEnv.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -591,6 +591,13 @@
   // JVMTI -- compiled method notification (must be done outside lock)
   if (nm != NULL) {
     nm->post_compiled_method_load_event();
+
+    if (env == NULL) {
+      // This compile didn't come through the CompileBroker, so perform the printing here
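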
+      DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, compiler);
+      nm->maybe_print_nmethod(directive);
+      DirectivesStack::release(directive);
+    }
   }
 
   return result;
--- a/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/jvmci/vmStructs_jvmci.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -151,6 +151,7 @@
   nonstatic_field(JavaThread,                  _pending_failed_speculation,                   oop)                                   \
   nonstatic_field(JavaThread,                  _pending_transfer_to_interpreter,              bool)                                  \
   nonstatic_field(JavaThread,                  _jvmci_counters,                               jlong*)                                \
+  nonstatic_field(JavaThread,                  _reserved_stack_activation,                    address)                               \
                                                                                                                                      \
   static_field(java_lang_Class,                _klass_offset,                                 int)                                   \
   static_field(java_lang_Class,                _array_klass_offset,                           int)                                   \
@@ -210,6 +211,8 @@
                                                                                                                                      \
   static_field(StubRoutines,                _verify_oop_count,                                jint)                                  \
                                                                                                                                      \
+  static_field(StubRoutines,                _throw_delayed_StackOverflowError_entry,          address)                               \
+                                                                                                                                     \
   static_field(StubRoutines,                _jbyte_arraycopy,                                 address)                               \
   static_field(StubRoutines,                _jshort_arraycopy,                                address)                               \
   static_field(StubRoutines,                _jint_arraycopy,                                  address)                               \
@@ -471,6 +474,7 @@
   declare_constant(Method::_force_inline)                                 \
   declare_constant(Method::_dont_inline)                                  \
   declare_constant(Method::_hidden)                                       \
+  declare_constant(Method::_reserved_stack_access)                        \
                                                                           \
   declare_constant(Method::nonvirtual_vtable_index)                       \
   declare_constant(Method::invalid_vtable_index)                          \
@@ -517,6 +521,7 @@
   declare_function(SharedRuntime::register_finalizer)                     \
   declare_function(SharedRuntime::exception_handler_for_return_address)   \
   declare_function(SharedRuntime::OSR_migration_end)                      \
+  declare_function(SharedRuntime::enable_stack_reserved_zone)             \
   declare_function(SharedRuntime::dsin)                                   \
   declare_function(SharedRuntime::dcos)                                   \
   declare_function(SharedRuntime::dtan)                                   \
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -182,6 +182,10 @@
           "Unroll loop bodies with node count less than this")              \
           range(0, max_jint / 4)                                            \
                                                                             \
+  product_pd(intx, LoopPercentProfileLimit,                                 \
+             "Unroll loop bodies with % node count of profile limit")       \
+             range(10, 100)                                                 \
+                                                                            \
   product(intx,  LoopMaxUnroll, 16,                                         \
           "Maximum number of unrolls for main loop")                        \
           range(0, max_jint)                                                \
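
Note: this flag feeds the unroll-progress check in loopTransform.cpp below,
where the previous hard-coded factor of 10.0 becomes (100 / LoopPercentProfileLimit).
Working through the arithmetic (values for illustration; the default comes
from each platform's product_pd definition):

    // LoopPercentProfileLimit = 10 (a platform default):
    //   (future_unroll_ct - 1) * (100 / 10) > profile_trip_cnt
    //   -> identical to the old (future_unroll_ct - 1) * 10.0 cutoff
    // LoopPercentProfileLimit = 100 (the maximum):
    //   (future_unroll_ct - 1) * 1 > profile_trip_cnt
    //   -> unrolling may proceed until it covers the full profiled trip count
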
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -825,10 +825,12 @@
         input_not_const = false;
         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
         ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
-        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
         const int vtable_index = Method::invalid_vtable_index;
-        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
-        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+        CallGenerator* cg = C->call_generator(target, vtable_index,
+                                              false /* call_does_dispatch */,
+                                              jvms,
+                                              true /* allow_inline */,
+                                              PROB_ALWAYS);
         return cg;
       } else {
         const char* msg = "receiver not constant";
@@ -899,13 +901,15 @@
           target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                             target, receiver_type, is_virtual,
                                             call_does_dispatch, vtable_index, // out-parameters
-                                            /*check_access=*/false);
+                                            false /* check_access */);
           // We lack profiling at this call but type speculation may
           // provide us with a type
           speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
         }
-        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, /*allow_inline=*/true, PROB_ALWAYS, speculative_receiver_type, true, true);
-        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
+        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
+                                              true /* allow_inline */,
+                                              PROB_ALWAYS,
+                                              speculative_receiver_type);
         return cg;
       } else {
         const char* msg = "member_name not constant";
--- a/hotspot/src/share/vm/opto/castnode.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/castnode.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -37,7 +37,6 @@
 Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
   Node* dom = dominating_cast(phase);
   if (dom != NULL) {
-    assert(_carry_dependency, "only for casts that carry a dependency");
     return dom;
   }
   if (_carry_dependency) {
@@ -110,18 +109,22 @@
 }
 
 TypeNode* ConstraintCastNode::dominating_cast(PhaseTransform *phase) const {
-  if (!carry_dependency()) {
-    return NULL;
-  }
   Node* val = in(1);
   Node* ctl = in(0);
   int opc = Opcode();
   if (ctl == NULL) {
     return NULL;
   }
+  // Range check CastIIs may all end up under a single range check, and
+  // in that case only the narrower CastII would be kept by the code
+  // below, which would be incorrect.
+  if (is_CastII() && as_CastII()->has_range_check()) {
+    return NULL;
+  }
   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
     Node* u = val->fast_out(i);
     if (u != this &&
+        u->outcnt() > 0 &&
         u->Opcode() == opc &&
         u->in(0) != NULL &&
         u->bottom_type()->higher_equal(type())) {
@@ -300,7 +303,6 @@
 Node* CheckCastPPNode::Identity(PhaseGVN* phase) {
   Node* dom = dominating_cast(phase);
   if (dom != NULL) {
-    assert(_carry_dependency, "only for casts that carry a dependency");
     return dom;
   }
   if (_carry_dependency) {
--- a/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/loopTransform.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -666,7 +666,8 @@
     if (future_unroll_ct > LoopMaxUnroll) return false;
   } else {
     // obey user constraints on vector mapped loops with additional unrolling applied
-    if ((future_unroll_ct / cl->slp_max_unroll()) > LoopMaxUnroll) return false;
+    int unroll_constraint = (cl->slp_max_unroll()) ? cl->slp_max_unroll() : 1;
+    if ((future_unroll_ct / unroll_constraint) > LoopMaxUnroll) return false;
   }
 
   // Check for initial stride being a small enough constant
@@ -689,7 +690,7 @@
   //   Progress defined as current size less than 20% larger than previous size.
   if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
       future_unroll_ct > LoopUnrollMin &&
-      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
+      (future_unroll_ct - 1) * (100 / LoopPercentProfileLimit) > cl->profile_trip_cnt() &&
       1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
     return false;
   }
@@ -1260,6 +1261,146 @@
   loop->record_for_igvn();
 }
 
+//------------------------------insert_vector_post_loop------------------------
+// Insert a copy of the atomic unrolled vectorized main loop as a post loop;
+// unroll_policy has already informed us that more unrolling is about to happen
+// to the main loop.  The resultant post loop will serve as a vectorized drain loop.
+void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new) {
+  if (!loop->_head->is_CountedLoop()) return;
+
+  CountedLoopNode *cl = loop->_head->as_CountedLoop();
+
+  // only process vectorized main loops
+  if (!cl->is_vectorized_loop() || !cl->is_main_loop()) return;
+
+  int slp_max_unroll_factor = cl->slp_max_unroll();
+  int cur_unroll = cl->unrolled_count();
+
+  if (slp_max_unroll_factor == 0) return;
+
+  // only process atomic unroll vector loops (not super unrolled after vectorization)
+  if (cur_unroll != slp_max_unroll_factor) return;
+
+  // we only ever process this one time
+  if (cl->has_atomic_post_loop()) return;
+
+#ifndef PRODUCT
+  if (TraceLoopOpts) {
+    tty->print("PostVector  ");
+    loop->dump_head();
+  }
+#endif
+  C->set_major_progress();
+
+  // Find common pieces of the loop being guarded with pre & post loops
+  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
+  CountedLoopEndNode *main_end = main_head->loopexit();
+  guarantee(main_end != NULL, "no loop exit node");
+  // diagnostic to show loop end is not properly formed
+  assert(main_end->outcnt() == 2, "1 true, 1 false path only");
+  uint dd_main_head = dom_depth(main_head);
+  uint max = main_head->outcnt();
+
+  // mark this loop as processed
+  main_head->mark_has_atomic_post_loop();
+
+  Node *pre_header = main_head->in(LoopNode::EntryControl);
+  Node *init = main_head->init_trip();
+  Node *incr = main_end->incr();
+  Node *limit = main_end->limit();
+  Node *stride = main_end->stride();
+  Node *cmp = main_end->cmp_node();
+  BoolTest::mask b_test = main_end->test_trip();
+
+  //------------------------------
+  // Step A: Create a new post-Loop.
+  Node* main_exit = main_end->proj_out(false);
+  assert(main_exit->Opcode() == Op_IfFalse, "");
+  int dd_main_exit = dom_depth(main_exit);
+
+  // Step A1: Clone the loop body of main.  The clone becomes the vector post-loop.
+  // The main loop pre-header illegally has 2 control users (old & new loops).
+  clone_loop(loop, old_new, dd_main_exit);
+  assert(old_new[main_end->_idx]->Opcode() == Op_CountedLoopEnd, "");
+  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
+  post_head->set_normal_loop();
+  post_head->set_post_loop(main_head);
+
+  // Reduce the post-loop trip count.
+  CountedLoopEndNode* post_end = old_new[main_end->_idx]->as_CountedLoopEnd();
+  post_end->_prob = PROB_FAIR;
+
+  // Build the main-loop normal exit.
+  IfFalseNode *new_main_exit = new IfFalseNode(main_end);
+  _igvn.register_new_node_with_optimizer(new_main_exit);
+  set_idom(new_main_exit, main_end, dd_main_exit);
+  set_loop(new_main_exit, loop->_parent);
+
+  // Step A2: Build a zero-trip guard for the vector post-loop.  After leaving the
+  // main-loop, the vector post-loop may not execute at all.  We 'opaque' the incr
+  // (the vectorized main-loop trip-counter exit value) because we will be changing
+  // the exit value (via additional unrolling) so we cannot constant-fold away the zero
+  // trip guard until all unrolling is done.
+  Node *zer_opaq = new Opaque1Node(C, incr);
+  Node *zer_cmp = new CmpINode(zer_opaq, limit);
+  Node *zer_bol = new BoolNode(zer_cmp, b_test);
+  register_new_node(zer_opaq, new_main_exit);
+  register_new_node(zer_cmp, new_main_exit);
+  register_new_node(zer_bol, new_main_exit);
+
+  // Build the IfNode
+  IfNode *zer_iff = new IfNode(new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN);
+  _igvn.register_new_node_with_optimizer(zer_iff);
+  set_idom(zer_iff, new_main_exit, dd_main_exit);
+  set_loop(zer_iff, loop->_parent);
+
+  // Plug in the false-path, taken if we need to skip vector post-loop
+  _igvn.replace_input_of(main_exit, 0, zer_iff);
+  set_idom(main_exit, zer_iff, dd_main_exit);
+  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
+  // Make the true-path, must enter the vector post loop
+  Node *zer_taken = new IfTrueNode(zer_iff);
+  _igvn.register_new_node_with_optimizer(zer_taken);
+  set_idom(zer_taken, zer_iff, dd_main_exit);
+  set_loop(zer_taken, loop->_parent);
+  // Plug in the true path
+  _igvn.hash_delete(post_head);
+  post_head->set_req(LoopNode::EntryControl, zer_taken);
+  set_idom(post_head, zer_taken, dd_main_exit);
+
+  Arena *a = Thread::current()->resource_area();
+  VectorSet visited(a);
+  Node_Stack clones(a, main_head->back_control()->outcnt());
+  // Step A3: Make the fall-in values to the vector post-loop come from the
+  // fall-out values of the main-loop.
+  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
+    Node* main_phi = main_head->fast_out(i);
+    if (main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0) {
+      Node *cur_phi = old_new[main_phi->_idx];
+      Node *fallnew = clone_up_backedge_goo(main_head->back_control(),
+                                            post_head->init_control(),
+                                            main_phi->in(LoopNode::LoopBackControl),
+                                            visited, clones);
+      _igvn.hash_delete(cur_phi);
+      cur_phi->set_req(LoopNode::EntryControl, fallnew);
+    }
+  }
+
+  // CastII for the new post loop:
+  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
+  assert(inserted, "no castII inserted");
+
+  // It's difficult to be precise about the trip-counts
+  // for post loops.  They are usually very short,
+  // so guess that unit vector trips is a reasonable value.
+  post_head->set_profile_trip_cnt((float)slp_max_unroll_factor);
+
+  // Now force out all loop-invariant dominating tests.  The optimizer
+  // finds some, but we _know_ they are all useless.
+  peeled_dom_test_elim(loop, old_new);
+  loop->record_for_igvn();
+}
+
 //------------------------------is_invariant-----------------------------
 // Return true if n is invariant
 bool IdealLoopTree::is_invariant(Node* n) const {
@@ -2608,6 +2749,9 @@
     // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
     // peeling.
     if (should_unroll && !should_peel) {
+      if (SuperWordLoopUnrollAnalysis) {
+        phase->insert_vector_post_loop(this, old_new);
+      }
       phase->do_unroll(this, old_new, true);
     }
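
Note: insert_vector_post_loop() clones the vectorized main loop into a "drain"
loop that runs at the atomic (single) vector width, so iterations left over
after the main loop is super-unrolled further can still execute vectorized
before falling into the scalar post loop. An illustrative source-level shape
(a sketch with assumed names, not C2 IR):

    int i = init;
    for (; i + unroll * VL <= limit; i += unroll * VL) { /* vector main  */ }
    for (; i + VL          <= limit; i += VL)          { /* vector drain */ }
    for (; i               <  limit; i += 1)           { /* scalar post  */ }
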
 
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -67,7 +67,9 @@
          HasReductions=128,
          WasSlpAnalyzed=256,
          PassedSlpAnalysis=512,
-         DoUnrollOnly=1024 };
+         DoUnrollOnly=1024,
+         VectorizedLoop=2048,
+         HasAtomicPostLoop=4096 };
   char _unswitch_count;
   enum { _unswitch_max=3 };
 
@@ -86,6 +88,8 @@
   void mark_was_slp() { _loop_flags |= WasSlpAnalyzed; }
   void mark_passed_slp() { _loop_flags |= PassedSlpAnalysis; }
   void mark_do_unroll_only() { _loop_flags |= DoUnrollOnly; }
+  void mark_loop_vectorized() { _loop_flags |= VectorizedLoop; }
+  void mark_has_atomic_post_loop() { _loop_flags |= HasAtomicPostLoop; }
 
   int unswitch_max() { return _unswitch_max; }
   int unswitch_count() { return _unswitch_count; }
@@ -221,6 +225,8 @@
   int has_passed_slp   () const { return (_loop_flags&PassedSlpAnalysis) == PassedSlpAnalysis; }
   int do_unroll_only      () const { return (_loop_flags&DoUnrollOnly) == DoUnrollOnly; }
   int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
+  int is_vectorized_loop    () const { return (_loop_flags & VectorizedLoop) == VectorizedLoop; }
+  int has_atomic_post_loop  () const { return (_loop_flags & HasAtomicPostLoop) == HasAtomicPostLoop; }
   void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }
 
   int main_idx() const { return _main_idx; }
@@ -893,6 +899,8 @@
   // Add pre and post loops around the given loop.  These loops are used
   // during RCE, unrolling and aligning loops.
   void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
+  // Add a vector post loop between a vector main loop and the current post loop
+  void insert_vector_post_loop(IdealLoopTree *loop, Node_List &old_new);
   // If Node n lives in the back_ctrl block, we clone a private version of n
   // in preheader_ctrl block and return that, otherwise return n.
   Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );
@@ -1105,6 +1113,8 @@
   Node *place_near_use( Node *useblock ) const;
   Node* try_move_store_before_loop(Node* n, Node *n_ctrl);
   void try_move_store_after_loop(Node* n);
+  bool identical_backtoback_ifs(Node *n);
+  bool can_split_if(Node *n_ctrl);
 
   bool _created_loop_node;
 public:
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -47,6 +47,14 @@
     return NULL;
   }
 
+  // Splitting range check CastIIs through a loop induction Phi can
+  // cause new Phis to be created that are left unrelated to the loop
+  // induction Phi and prevent optimizations (vectorization).
+  if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
+      region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
+    return NULL;
+  }
+
   int wins = 0;
   assert(!n->is_CFG(), "");
   assert(region->is_Region(), "");
@@ -1020,108 +1028,193 @@
 }
 
 
+bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
+  if (!n->is_If()) {
+    return false;
+  }
+  if (!n->in(0)->is_Region()) {
+    return false;
+  }
+  Node* region = n->in(0);
+  Node* dom = idom(region);
+  if (!dom->is_If() || dom->in(1) != n->in(1)) {
+    return false;
+  }
+  IfNode* dom_if = dom->as_If();
+  Node* proj_true = dom_if->proj_out(1);
+  Node* proj_false = dom_if->proj_out(0);
+
+  for (uint i = 1; i < region->req(); i++) {
+    if (is_dominator(proj_true, region->in(i))) {
+      continue;
+    }
+    if (is_dominator(proj_false, region->in(i))) {
+      continue;
+    }
+    return false;
+  }
+
+  return true;
+}
+
+bool PhaseIdealLoop::can_split_if(Node *n_ctrl) {
+  if (C->live_nodes() > 35000) {
+    return false; // Method too big
+  }
+
+  // Do not do 'split-if' if irreducible loops are present.
+  if (_has_irreducible_loops) {
+    return false;
+  }
+
+  if (merge_point_too_heavy(C, n_ctrl)) {
+    return false;
+  }
+
+  // Do not do 'split-if' if some paths are dead.  First do dead code
+  // elimination and then see if it's still profitable.
+  for (uint i = 1; i < n_ctrl->req(); i++) {
+    if (n_ctrl->in(i) == C->top()) {
+      return false;
+    }
+  }
+
+  // If trying to do a 'Split-If' at the loop head, it is only
+  // profitable if the cmp folds up on BOTH paths.  Otherwise we
+  // risk peeling a loop forever.
+
+  // CNC - Disabled for now.  Requires careful handling of loop
+  // body selection for the cloned code.  Also, make sure we check
+  // for any input path not being in the same loop as n_ctrl.  For
+  // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
+  // because the alternative loop entry points won't be converted
+  // into LoopNodes.
+  IdealLoopTree *n_loop = get_loop(n_ctrl);
+  for (uint j = 1; j < n_ctrl->req(); j++) {
+    if (get_loop(n_ctrl->in(j)) != n_loop) {
+      return false;
+    }
+  }
+
+  // Check for safety of the merge point.
+  if (!merge_point_safe(n_ctrl)) {
+    return false;
+  }
+
+  return true;
+}
+
 //------------------------------split_if_with_blocks_post----------------------
 // Do the real work in a non-recursive function.  CFG hackery wants to be
 // in the post-order, so it can dirty the I-DOM info and not use the dirtied
 // info.
-void PhaseIdealLoop::split_if_with_blocks_post( Node *n ) {
+void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
 
   // Cloning Cmp through Phi's involves the split-if transform.
   // FastLock is not used by an If
-  if( n->is_Cmp() && !n->is_FastLock() ) {
-    if( C->live_nodes() > 35000 ) return; // Method too big
-
-    // Do not do 'split-if' if irreducible loops are present.
-    if( _has_irreducible_loops )
-      return;
-
+  if (n->is_Cmp() && !n->is_FastLock()) {
     Node *n_ctrl = get_ctrl(n);
     // Determine if the Node has inputs from some local Phi.
     // Returns the block to clone thru.
-    Node *n_blk = has_local_phi_input( n );
-    if( n_blk != n_ctrl ) return;
+    Node *n_blk = has_local_phi_input(n);
+    if (n_blk != n_ctrl) {
+      return;
+    }
 
-    if( merge_point_too_heavy(C, n_ctrl) )
+    if (!can_split_if(n_ctrl)) {
       return;
+    }
 
-    if( n->outcnt() != 1 ) return; // Multiple bool's from 1 compare?
+    if (n->outcnt() != 1) {
+      return; // Multiple bools from 1 compare?
+    }
     Node *bol = n->unique_out();
-    assert( bol->is_Bool(), "expect a bool here" );
-    if( bol->outcnt() != 1 ) return;// Multiple branches from 1 compare?
+    assert(bol->is_Bool(), "expect a bool here");
+    if (bol->outcnt() != 1) {
+      return; // Multiple branches from 1 compare?
+    }
     Node *iff = bol->unique_out();
 
     // Check some safety conditions
-    if( iff->is_If() ) {        // Classic split-if?
-      if( iff->in(0) != n_ctrl ) return; // Compare must be in same blk as if
+    if (iff->is_If()) {        // Classic split-if?
+      if (iff->in(0) != n_ctrl) {
+        return; // Compare must be in same blk as if
+      }
     } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
       // Can't split CMove with different control edge.
-      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) return;
-      if( get_ctrl(iff->in(2)) == n_ctrl ||
-          get_ctrl(iff->in(3)) == n_ctrl )
+      if (iff->in(0) != NULL && iff->in(0) != n_ctrl) {
+        return;
+      }
+      if (get_ctrl(iff->in(2)) == n_ctrl ||
+          get_ctrl(iff->in(3)) == n_ctrl) {
         return;                 // Inputs not yet split-up
-      if ( get_loop(n_ctrl) != get_loop(get_ctrl(iff)) ) {
+      }
+      if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
         return;                 // Loop-invar test gates loop-varying CMOVE
       }
     } else {
       return;  // some other kind of node, such as an Allocate
     }
 
-    // Do not do 'split-if' if some paths are dead.  First do dead code
-    // elimination and then see if its still profitable.
-    for( uint i = 1; i < n_ctrl->req(); i++ )
-      if( n_ctrl->in(i) == C->top() )
-        return;
-
     // When is split-if profitable?  Every 'win' on means some control flow
     // goes dead, so it's almost always a win.
     int policy = 0;
-    // If trying to do a 'Split-If' at the loop head, it is only
-    // profitable if the cmp folds up on BOTH paths.  Otherwise we
-    // risk peeling a loop forever.
-
-    // CNC - Disabled for now.  Requires careful handling of loop
-    // body selection for the cloned code.  Also, make sure we check
-    // for any input path not being in the same loop as n_ctrl.  For
-    // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
-    // because the alternative loop entry points won't be converted
-    // into LoopNodes.
-    IdealLoopTree *n_loop = get_loop(n_ctrl);
-    for( uint j = 1; j < n_ctrl->req(); j++ )
-      if( get_loop(n_ctrl->in(j)) != n_loop )
-        return;
-
-    // Check for safety of the merge point.
-    if( !merge_point_safe(n_ctrl) ) {
+    // Split compare 'n' through the merge point if it is profitable
+    Node *phi = split_thru_phi(n, n_ctrl, policy);
+    if (!phi) {
       return;
     }
 
-    // Split compare 'n' through the merge point if it is profitable
-    Node *phi = split_thru_phi( n, n_ctrl, policy );
-    if( !phi ) return;
-
     // Found a Phi to split thru!
     // Replace 'n' with the new phi
-    _igvn.replace_node( n, phi );
+    _igvn.replace_node(n, phi);
 
     // Now split the bool up thru the phi
-    Node *bolphi = split_thru_phi( bol, n_ctrl, -1 );
+    Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
     guarantee(bolphi != NULL, "null boolean phi node");
 
-    _igvn.replace_node( bol, bolphi );
-    assert( iff->in(1) == bolphi, "" );
+    _igvn.replace_node(bol, bolphi);
+    assert(iff->in(1) == bolphi, "");
 
-    if( bolphi->Value(&_igvn)->singleton() )
+    if (bolphi->Value(&_igvn)->singleton()) {
       return;
+    }
 
     // Conditional-move?  Must split up now
-    if( !iff->is_If() ) {
-      Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 );
-      _igvn.replace_node( iff, cmovphi );
+    if (!iff->is_If()) {
+      Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
+      _igvn.replace_node(iff, cmovphi);
       return;
     }
 
     // Now split the IF
-    do_split_if( iff );
+    do_split_if(iff);
+    return;
+  }
+
+  // Two identical ifs back to back can be merged
+  if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
+    Node *n_ctrl = n->in(0);
+    PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
+    IfNode* dom_if = idom(n_ctrl)->as_If();
+    Node* proj_true = dom_if->proj_out(1);
+    Node* proj_false = dom_if->proj_out(0);
+    Node* con_true = _igvn.makecon(TypeInt::ONE);
+    Node* con_false = _igvn.makecon(TypeInt::ZERO);
+
+    for (uint i = 1; i < n_ctrl->req(); i++) {
+      if (is_dominator(proj_true, n_ctrl->in(i))) {
+        bolphi->init_req(i, con_true);
+      } else {
+        assert(is_dominator(proj_false, n_ctrl->in(i)), "bad if");
+        bolphi->init_req(i, con_false);
+      }
+    }
+    register_new_node(bolphi, n_ctrl);
+    _igvn.replace_input_of(n, 1, bolphi);
+
+    // Now split the IF
+    do_split_if(n);
     return;
   }
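
Note: identical_backtoback_ifs() recognizes a merge region whose inputs all
come through the projections of a dominating If that tests the same Bool as
the If hanging off the region. The transform replaces the second test's
condition with a Phi of the constants 1/0 selected per region path, after
which do_split_if() removes the redundant test. At the source level the shape
is roughly (illustrative only):

    if (cond) { a(); } else { b(); }  // dominating If
    if (cond) { c(); } else { d(); }  // back-to-back If on the same condition
    // After the transform, the second If's condition is a Phi that is 1 on
    // region inputs reached via the true projection and 0 via the false one,
    // so splitting the If through the merge folds the second test away.
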
 
--- a/hotspot/src/share/vm/opto/memnode.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1709,38 +1709,10 @@
             // unsafe field access may not have a constant offset
             C->has_unsafe_access(),
             "Field accesses must be precise" );
-    // For oop loads, we expect the _type to be precise
-    if (klass == env->String_klass() &&
-        adr->is_AddP() && off != Type::OffsetBot) {
-      // For constant Strings treat the final fields as compile time constants.
-      // While we can list what field types java.lang.String has, it is more
-      // future-proof to handle all possible field types, anticipating future
-      // changes and experiments in String code.
-      Node* base = adr->in(AddPNode::Base);
-      const TypeOopPtr* t = phase->type(base)->isa_oopptr();
-      if (t != NULL && t->singleton()) {
-        ciField* field = env->String_klass()->get_field_by_offset(off, false);
-        if (field != NULL && field->is_final()) {
-          ciObject* string = t->const_oop();
-          ciConstant constant = string->as_instance()->field_value(field);
-          // Type::make_from_constant does not handle narrow oops, so handle it here.
-          // Everything else can use the factory method.
-          if ((constant.basic_type() == T_ARRAY || constant.basic_type() == T_OBJECT)
-                  && adr->bottom_type()->is_ptr_to_narrowoop()) {
-            return TypeNarrowOop::make_from_constant(constant.as_object(), true);
-          } else {
-            return Type::make_from_constant(constant, true);
-          }
-        }
-      }
-    }
+    // For oop loads, we expect the _type to be precise.
     // Optimizations for constant objects
     ciObject* const_oop = tinst->const_oop();
     if (const_oop != NULL) {
-      // For constant Boxed value treat the target field as a compile time constant.
-      if (tinst->is_ptr_to_boxed_value()) {
-        return tinst->get_const_boxed_value();
-      } else
       // For constant CallSites treat the target field as a compile time constant.
       if (const_oop->is_call_site()) {
         ciCallSite* call_site = const_oop->as_call_site();
--- a/hotspot/src/share/vm/opto/stringopts.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1552,8 +1552,7 @@
 
   if (str->is_Con()) {
     // Constant source string
-    const TypeOopPtr* t = kit.gvn().type(src_array)->isa_oopptr();
-    ciTypeArray* src_array_type = t->const_oop()->as_type_array();
+    ciTypeArray* src_array_type = get_constant_value(kit, str);
 
     // Check encoding of constant string
     bool src_is_byte = (get_constant_coder(kit, str) == java_lang_String::CODER_LATIN1);
@@ -1673,9 +1672,15 @@
 
 int PhaseStringOpts::get_constant_length(GraphKit& kit, Node* str) {
   assert(str->is_Con(), "String must be constant");
-  Node* src_array = kit.load_String_value(kit.control(), str);
-  const TypeOopPtr* t = kit.gvn().type(src_array)->isa_oopptr();
-  return t->const_oop()->as_type_array()->length();
+  return get_constant_value(kit, str)->length();
+}
+
+ciTypeArray* PhaseStringOpts::get_constant_value(GraphKit& kit, Node* str) {
+  assert(str->is_Con(), "String must be constant");
+  const TypeOopPtr* str_type = kit.gvn().type(str)->isa_oopptr();
+  ciInstance* str_instance = str_type->const_oop()->as_instance();
+  ciObject* src_array = str_instance->field_value_by_offset(java_lang_String::value_offset_in_bytes()).as_object();
+  return src_array->as_type_array();
 }
 
 void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
--- a/hotspot/src/share/vm/opto/stringopts.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/stringopts.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -97,6 +97,9 @@
   // Returns the length of a constant string
   int get_constant_length(GraphKit& kit, Node* str);
 
+  // Returns the value array of a constant string
+  ciTypeArray* get_constant_value(GraphKit& kit, Node* str);
+
   // Clean up any leftover nodes
   void record_dead_node(Node* node);
   void remove_dead_nodes();
--- a/hotspot/src/share/vm/opto/superword.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/opto/superword.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -2253,6 +2253,9 @@
           C->set_major_progress();
         }
         cl->mark_do_unroll_only();
+        if (do_reserve_copy()) {
+          cl->mark_loop_vectorized();
+        }
       }
     }
   }
--- a/hotspot/src/share/vm/prims/methodComparator.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/prims/methodComparator.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,9 +34,6 @@
 BytecodeStream *MethodComparator::_s_new;
 ConstantPool* MethodComparator::_old_cp;
 ConstantPool* MethodComparator::_new_cp;
-BciMap *MethodComparator::_bci_map;
-bool MethodComparator::_switchable_test;
-GrowableArray<int> *MethodComparator::_fwd_jmps;
 
 bool MethodComparator::methods_EMCP(Method* old_method, Method* new_method) {
   if (old_method->code_size() != new_method->code_size())
@@ -55,7 +52,6 @@
   BytecodeStream s_new(new_method);
   _s_old = &s_old;
   _s_new = &s_new;
-  _switchable_test = false;
   Bytecodes::Code c_old, c_new;
 
   while ((c_old = s_old.next()) >= 0) {
@@ -68,64 +64,6 @@
   return true;
 }
 
-
-bool MethodComparator::methods_switchable(Method* old_method, Method* new_method,
-                                          BciMap &bci_map) {
-  if (old_method->code_size() > new_method->code_size())
-    // Something has definitely been deleted in the new method, compared to the old one.
-    return false;
-
-  if (! check_stack_and_locals_size(old_method, new_method))
-    return false;
-
-  _old_cp = old_method->constants();
-  _new_cp = new_method->constants();
-  BytecodeStream s_old(old_method);
-  BytecodeStream s_new(new_method);
-  _s_old = &s_old;
-  _s_new = &s_new;
-  _bci_map = &bci_map;
-  _switchable_test = true;
-  GrowableArray<int> fwd_jmps(16);
-  _fwd_jmps = &fwd_jmps;
-  Bytecodes::Code c_old, c_new;
-
-  while ((c_old = s_old.next()) >= 0) {
-    if ((c_new = s_new.next()) < 0)
-      return false;
-    if (! (c_old == c_new && args_same(c_old, c_new))) {
-      int old_bci = s_old.bci();
-      int new_st_bci = s_new.bci();
-      bool found_match = false;
-      do {
-        c_new = s_new.next();
-        if (c_new == c_old && args_same(c_old, c_new)) {
-          found_match = true;
-          break;
-        }
-      } while (c_new >= 0);
-      if (! found_match)
-        return false;
-      int new_end_bci = s_new.bci();
-      bci_map.store_fragment_location(old_bci, new_st_bci, new_end_bci);
-    }
-  }
-
-  // Now we can test all forward jumps
-  for (int i = 0; i < fwd_jmps.length() / 2; i++) {
-    if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) {
-      RC_TRACE(0x00800000,
-        ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d",
-        fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)),
-        fwd_jmps.at(i*2+1)));
-      return false;
-    }
-  }
-
-  return true;
-}
-
-
 bool MethodComparator::args_same(Bytecodes::Code c_old, Bytecodes::Code c_new) {
   // BytecodeStream returns the correct standard Java bytecodes for various "fast"
  // bytecode versions, so we don't have to bother about them here.
@@ -275,22 +213,8 @@
   case Bytecodes::_jsr       : {
     int old_ofs = _s_old->bytecode().get_offset_s2(c_old);
     int new_ofs = _s_new->bytecode().get_offset_s2(c_new);
-    if (_switchable_test) {
-      int old_dest = _s_old->bci() + old_ofs;
-      int new_dest = _s_new->bci() + new_ofs;
-      if (old_ofs < 0 && new_ofs < 0) {
-        if (! _bci_map->old_and_new_locations_same(old_dest, new_dest))
-          return false;
-      } else if (old_ofs > 0 && new_ofs > 0) {
-        _fwd_jmps->append(old_dest);
-        _fwd_jmps->append(new_dest);
-      } else {
-        return false;
-      }
-    } else {
-      if (old_ofs != new_ofs)
-        return false;
-    }
+    if (old_ofs != new_ofs)
+      return false;
     break;
   }
 
@@ -312,73 +236,19 @@
   case Bytecodes::_jsr_w  : {
     int old_ofs = _s_old->bytecode().get_offset_s4(c_old);
     int new_ofs = _s_new->bytecode().get_offset_s4(c_new);
-    if (_switchable_test) {
-      int old_dest = _s_old->bci() + old_ofs;
-      int new_dest = _s_new->bci() + new_ofs;
-      if (old_ofs < 0 && new_ofs < 0) {
-        if (! _bci_map->old_and_new_locations_same(old_dest, new_dest))
-          return false;
-      } else if (old_ofs > 0 && new_ofs > 0) {
-        _fwd_jmps->append(old_dest);
-        _fwd_jmps->append(new_dest);
-      } else {
-        return false;
-      }
-    } else {
-      if (old_ofs != new_ofs)
-        return false;
-    }
+    if (old_ofs != new_ofs)
+      return false;
     break;
   }
 
   case Bytecodes::_lookupswitch : // fall through
   case Bytecodes::_tableswitch  : {
-    if (_switchable_test) {
-      address aligned_bcp_old = (address) round_to((intptr_t)_s_old->bcp() + 1, jintSize);
-      address aligned_bcp_new = (address) round_to((intptr_t)_s_new->bcp() + 1, jintSize);
-      int default_old = (int) Bytes::get_Java_u4(aligned_bcp_old);
-      int default_new = (int) Bytes::get_Java_u4(aligned_bcp_new);
-      _fwd_jmps->append(_s_old->bci() + default_old);
-      _fwd_jmps->append(_s_new->bci() + default_new);
-      if (c_old == Bytecodes::_lookupswitch) {
-        int npairs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + jintSize);
-        int npairs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + jintSize);
-        if (npairs_old != npairs_new)
-          return false;
-        for (int i = 0; i < npairs_old; i++) {
-          int match_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (2+2*i)*jintSize);
-          int match_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (2+2*i)*jintSize);
-          if (match_old != match_new)
-            return false;
-          int ofs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (2+2*i+1)*jintSize);
-          int ofs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (2+2*i+1)*jintSize);
-          _fwd_jmps->append(_s_old->bci() + ofs_old);
-          _fwd_jmps->append(_s_new->bci() + ofs_new);
-        }
-      } else if (c_old == Bytecodes::_tableswitch) {
-        int lo_old = (int) Bytes::get_Java_u4(aligned_bcp_old + jintSize);
-        int lo_new = (int) Bytes::get_Java_u4(aligned_bcp_new + jintSize);
-        if (lo_old != lo_new)
-          return false;
-        int hi_old = (int) Bytes::get_Java_u4(aligned_bcp_old + 2*jintSize);
-        int hi_new = (int) Bytes::get_Java_u4(aligned_bcp_new + 2*jintSize);
-        if (hi_old != hi_new)
-          return false;
-        for (int i = 0; i < hi_old - lo_old + 1; i++) {
-          int ofs_old = (int) Bytes::get_Java_u4(aligned_bcp_old + (3+i)*jintSize);
-          int ofs_new = (int) Bytes::get_Java_u4(aligned_bcp_new + (3+i)*jintSize);
-          _fwd_jmps->append(_s_old->bci() + ofs_old);
-          _fwd_jmps->append(_s_new->bci() + ofs_new);
-        }
-      }
-    } else { // !_switchable_test, can use fast rough compare
-      int len_old = _s_old->instruction_size();
-      int len_new = _s_new->instruction_size();
-      if (len_old != len_new)
-        return false;
-      if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
-        return false;
-    }
+    int len_old = _s_old->instruction_size();
+    int len_new = _s_new->instruction_size();
+    if (len_old != len_new)
+      return false;
+    if (memcmp(_s_old->bcp(), _s_new->bcp(), len_old) != 0)
+      return false;
     break;
   }
   }
--- a/hotspot/src/share/vm/prims/methodComparator.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/prims/methodComparator.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,6 @@
 #include "oops/constantPool.hpp"
 #include "oops/method.hpp"
 
-class BciMap;
-
 // methodComparator provides an interface for determining if methods of
 // different versions of classes are equivalent or switchable
 
@@ -39,9 +37,6 @@
   static BytecodeStream *_s_old, *_s_new;
   static ConstantPool* _old_cp;
   static ConstantPool* _new_cp;
-  static BciMap *_bci_map;
-  static bool _switchable_test;
-  static GrowableArray<int> *_fwd_jmps;
 
   static bool args_same(Bytecodes::Code c_old, Bytecodes::Code c_new);
   static bool pool_constants_same(int cpi_old, int cpi_new);
@@ -55,79 +50,6 @@
   // these indices eventually point to the same constants for both method versions.
   static bool methods_EMCP(Method* old_method, Method* new_method);
 
-  static bool methods_switchable(Method* old_method, Method* new_method, BciMap &bci_map);
-};
-
-
-// ByteCode Index Map. For two versions of the same method, where the new version may contain
-// fragments not found in the old version, provides a mapping from an index of a bytecode in
-// the old method to the index of the same bytecode in the new method.
-
-class BciMap {
- private:
-  int *_old_bci, *_new_st_bci, *_new_end_bci;
-  int _cur_size, _cur_pos;
-  int _pos;
-
- public:
-  BciMap() {
-    _cur_size = 50;
-    _old_bci = (int*) malloc(sizeof(int) * _cur_size);
-    _new_st_bci = (int*) malloc(sizeof(int) * _cur_size);
-    _new_end_bci = (int*) malloc(sizeof(int) * _cur_size);
-    _cur_pos = 0;
-  }
-
-  ~BciMap() {
-    free(_old_bci);
-    free(_new_st_bci);
-    free(_new_end_bci);
-  }
-
-  // Store the position of an added fragment, e.g.
-  //
-  //                              |<- old_bci
-  // -----------------------------------------
-  // Old method   |invokevirtual 5|aload 1|...
-  // -----------------------------------------
-  //
-  //                                 |<- new_st_bci          |<- new_end_bci
-  // --------------------------------------------------------------------
-  // New method       |invokevirual 5|aload 2|invokevirtual 6|aload 1|...
-  // --------------------------------------------------------------------
-  //                                 ^^^^^^^^^^^^^^^^^^^^^^^^
-  //                                    Added fragment
-
-  void store_fragment_location(int old_bci, int new_st_bci, int new_end_bci) {
-    if (_cur_pos == _cur_size) {
-      _cur_size += 10;
-      _old_bci = (int*) realloc(_old_bci, sizeof(int) * _cur_size);
-      _new_st_bci = (int*) realloc(_new_st_bci, sizeof(int) * _cur_size);
-      _new_end_bci = (int*) realloc(_new_end_bci, sizeof(int) * _cur_size);
-    }
-    _old_bci[_cur_pos] = old_bci;
-    _new_st_bci[_cur_pos] = new_st_bci;
-    _new_end_bci[_cur_pos] = new_end_bci;
-    _cur_pos++;
-  }
-
-  int new_bci_for_old(int old_bci) {
-    if (_cur_pos == 0 || old_bci < _old_bci[0]) return old_bci;
-    _pos = 1;
-    while (_pos < _cur_pos && old_bci >= _old_bci[_pos])
-      _pos++;
-    return _new_end_bci[_pos-1] + (old_bci - _old_bci[_pos-1]);
-  }
-
-  // Test if two indexes - one in the old method and another in the new one - correspond
-  // to the same bytecode
-  bool old_and_new_locations_same(int old_dest_bci, int new_dest_bci) {
-    if (new_bci_for_old(old_dest_bci) == new_dest_bci)
-      return true;
-    else if (_old_bci[_pos-1] == old_dest_bci)
-      return (new_dest_bci == _new_st_bci[_pos-1]);
-    else return false;
-  }
 };
 
 #endif // SHARE_VM_PRIMS_METHODCOMPARATOR_HPP
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -63,30 +63,21 @@
 bool MethodHandles::_enabled = false; // set true after successful native linkage
 MethodHandlesAdapterBlob* MethodHandles::_adapter_code = NULL;
 
-
 /**
  * Generates method handle adapters. Returns 'false' if memory allocation
  * failed and true otherwise.
  */
-bool MethodHandles::generate_adapters() {
-  if (SystemDictionary::MethodHandle_klass() == NULL) {
-    return true;
-  }
-
+void MethodHandles::generate_adapters() {
+  assert(SystemDictionary::MethodHandle_klass() != NULL, "should be present");
   assert(_adapter_code == NULL, "generate only once");
 
   ResourceMark rm;
   TraceTime timer("MethodHandles adapters generation", TraceStartupTime);
   _adapter_code = MethodHandlesAdapterBlob::create(adapter_code_size);
-  if (_adapter_code == NULL) {
-     return false;
-  }
-
   CodeBuffer code(_adapter_code);
   MethodHandlesAdapterGenerator g(&code);
   g.generate();
   code.log_section_sizes("MethodHandlesAdapterBlob");
-  return true;
 }
 
 //------------------------------------------------------------------------------
@@ -1436,53 +1427,31 @@
 };
 
 /**
- * Helper method to register native methods.
- */
-static bool register_natives(JNIEnv* env, jclass clazz, const JNINativeMethod* methods, jint nMethods) {
-  int status = env->RegisterNatives(clazz, methods, nMethods);
-  if (status != JNI_OK || env->ExceptionOccurred()) {
-    warning("JSR 292 method handle code is mismatched to this JVM.  Disabling support.");
-    env->ExceptionClear();
-    return false;
-  }
-  return true;
-}
-
-/**
  * This one function is exported, used by NativeLookup.
  */
 JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) {
   assert(!MethodHandles::enabled(), "must not be enabled");
-  bool enable_MH = true;
+  assert(SystemDictionary::MethodHandle_klass() != NULL, "should be present");
 
-  jclass MH_class = NULL;
-  if (SystemDictionary::MethodHandle_klass() == NULL) {
-    enable_MH = false;
-  } else {
-    oop mirror = SystemDictionary::MethodHandle_klass()->java_mirror();
-    MH_class = (jclass) JNIHandles::make_local(env, mirror);
-  }
+  oop mirror = SystemDictionary::MethodHandle_klass()->java_mirror();
+  jclass MH_class = (jclass) JNIHandles::make_local(env, mirror);
 
-  if (enable_MH) {
+  {
     ThreadToNativeFromVM ttnfv(thread);
 
-    if (enable_MH) {
-      enable_MH = register_natives(env, MHN_class, MHN_methods, sizeof(MHN_methods)/sizeof(JNINativeMethod));
-    }
-    if (enable_MH) {
-      enable_MH = register_natives(env, MH_class, MH_methods, sizeof(MH_methods)/sizeof(JNINativeMethod));
-    }
+    int status = env->RegisterNatives(MHN_class, MHN_methods, sizeof(MHN_methods)/sizeof(JNINativeMethod));
+    guarantee(status == JNI_OK && !env->ExceptionOccurred(),
+              "register java.lang.invoke.MethodHandleNatives natives");
+
+    status = env->RegisterNatives(MH_class, MH_methods, sizeof(MH_methods)/sizeof(JNINativeMethod));
+    guarantee(status == JNI_OK && !env->ExceptionOccurred(),
+              "register java.lang.invoke.MethodHandle natives");
   }
 
   if (TraceInvokeDynamic) {
     tty->print_cr("MethodHandle support loaded (using LambdaForms)");
   }
 
-  if (enable_MH) {
-    if (MethodHandles::generate_adapters() == false) {
-      THROW_MSG(vmSymbols::java_lang_VirtualMachineError(), "Out of space in CodeCache for method handle adapters");
-    }
-    MethodHandles::set_enabled(true);
-  }
+  MethodHandles::set_enabled(true);
 }
 JVM_END
--- a/hotspot/src/share/vm/prims/methodHandles.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -81,7 +81,7 @@
   static void flush_dependent_nmethods(Handle call_site, Handle target);
 
   // Generate MethodHandles adapters.
-  static bool generate_adapters();
+  static void generate_adapters();
 
   // Called from MethodHandlesAdapterGenerator.
   static address generate_method_handle_interpreter_entry(MacroAssembler* _masm, vmIntrinsics::ID iid);
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -660,6 +660,36 @@
   Copy::conjoint_memory_atomic(src, dst, sz);
 UNSAFE_END
 
+// This function is a leaf because, if the source and destination are both in native
+// memory, the copy may potentially be very large and we don't want to disable GC
+// if we can avoid it. If either the source or the destination (or both) is on the
+// heap, the function enters the VM using JVM_ENTRY_FROM_LEAF.
+JVM_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size, jlong elemSize)) {
+  UnsafeWrapper("Unsafe_CopySwapMemory0");
+
+  size_t sz = (size_t)size;
+  size_t esz = (size_t)elemSize;
+
+  if (srcObj == NULL && dstObj == NULL) {
+    // Both src & dst are in native memory
+    address src = (address)srcOffset;
+    address dst = (address)dstOffset;
+
+    Copy::conjoint_swap(src, dst, sz, esz);
+  } else {
+    // At least one of src/dst is on the heap; transition to the VM to access raw pointers
+
+    JVM_ENTRY_FROM_LEAF(env, void, Unsafe_CopySwapMemory0) {
+      oop srcp = JNIHandles::resolve(srcObj);
+      oop dstp = JNIHandles::resolve(dstObj);
+
+      address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
+      address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);
+
+      Copy::conjoint_swap(src, dst, sz, esz);
+    } JVM_END
+  }
+} JVM_END
 
 ////// Random queries
 
@@ -1363,6 +1393,7 @@
     {CC "getLoadAverage",     CC "([DI)I",               FN_PTR(Unsafe_Loadavg)},
 
     {CC "copyMemory",         CC "(" OBJ "J" OBJ "JJ)V", FN_PTR(Unsafe_CopyMemory)},
+    {CC "copySwapMemory0",    CC "(" OBJ "J" OBJ "JJJ)V", FN_PTR(Unsafe_CopySwapMemory0)},
     {CC "setMemory",          CC "(" OBJ "JJB)V",        FN_PTR(Unsafe_SetMemory)},
 
     {CC "defineAnonymousClass", CC "(" DAC_Args ")" CLS, FN_PTR(Unsafe_DefineAnonymousClass)},
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -333,6 +333,8 @@
   // --- Non-alias flags - sorted by obsolete_in then expired_in:
   { "MaxGCMinorPauseMillis",        JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseParNewGC",                  JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::jdk(10) },
+  { "ConvertSleepToYield",          JDK_Version::jdk(9), JDK_Version::jdk(10),     JDK_Version::jdk(11) },
+  { "ConvertYieldToSleep",          JDK_Version::jdk(9), JDK_Version::jdk(10),     JDK_Version::jdk(11) },
 
   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction",        JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1239,9 +1239,8 @@
   product_pd(bool, DontYieldALot,                                           \
           "Throw away obvious excess yield calls")                          \
                                                                             \
-  product_pd(bool, ConvertSleepToYield,                                     \
-          "Convert sleep(0) to thread yield "                               \
-          "(may be off for Solaris to improve GUI)")                        \
+  product(bool, ConvertSleepToYield, true,                                  \
+          "Convert sleep(0) to thread yield ")                              \
                                                                             \
   product(bool, ConvertYieldToSleep, false,                                 \
           "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \
@@ -1279,10 +1278,6 @@
   experimental(intx, hashCode, 5,                                           \
                "(Unstable) select hashCode generation algorithm")           \
                                                                             \
-  experimental(intx, WorkAroundNPTLTimedWaitHang, 0,                        \
-               "(Unstable, Linux-specific) "                                \
-               "avoid NPTL-FUTEX hang pthread_cond_timedwait")              \
-                                                                            \
   product(bool, FilterSpuriousWakeups, true,                                \
           "When true prevents OS-level spurious, or premature, wakeups "    \
           "from Object.wait (Ignored for Windows)")                         \
@@ -2012,11 +2007,15 @@
           range(min_intx, 100)                                              \
                                                                             \
   product(uintx, InitiatingHeapOccupancyPercent, 45,                        \
-          "Percentage of the (entire) heap occupancy to start a "           \
-          "concurrent GC cycle. It is used by GCs that trigger a "          \
-          "concurrent GC cycle based on the occupancy of the entire heap, " \
-          "not just one of the generations (e.g., G1). A value of 0 "       \
-          "denotes 'do constant GC cycles'.")                               \
+          "The percent occupancy (IHOP) of the current old generation "     \
+          "capacity above which a concurrent mark cycle will be initiated " \
+          "Its value may change over time if adaptive IHOP is enabled, "    \
+          "otherwise the value remains constant. "                          \
+          "In the latter case a value of 0 will result as frequent as "     \
+          "possible concurrent marking cycles. A value of 100 disables "    \
+          "concurrent marking. "                                            \
+          "Fragmentation waste in the old generation is not considered "    \
+          "free space in this calculation. (G1 collector only)")            \
           range(0, 100)                                                     \
                                                                             \
   manageable(intx, CMSTriggerInterval, -1,                                  \
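
A quick worked reading of the new IHOP description, with illustrative numbers (not measured values): with a fixed, non-adaptive IHOP of 45 and a 10 GB old generation, concurrent marking starts once old-generation occupancy exceeds 4608 MB.

#include <cstdio>

int main() {
  // Illustrative numbers only.
  const double old_gen_capacity_mb = 10240.0;  // 10 GB old generation
  const int ihop_percent = 45;                 // default InitiatingHeapOccupancyPercent

  double threshold_mb = old_gen_capacity_mb * ihop_percent / 100.0;
  std::printf("Concurrent mark starts above %.0f MB occupied\n", threshold_mb);
  return 0;  // prints 4608 MB
}
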
--- a/hotspot/src/share/vm/runtime/init.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/init.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,7 @@
   }
   javaClasses_init();   // must happen after vtable initialization
   stubRoutines_init2(); // note: StubRoutines need 2-phase init
+  MethodHandles::generate_adapters();
   CodeCacheExtensions::complete_step(CodeCacheExtensionsSteps::StubRoutines2);
 
 #if INCLUDE_NMT
@@ -181,8 +182,7 @@
   }
 }
 
-
-static bool _init_completed = false;
+static volatile bool _init_completed = false;
 
 bool is_init_completed() {
   return _init_completed;
--- a/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -417,6 +417,14 @@
   os::verify_stack_alignment();                                      \
   /* begin of body */
 
+#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)         \
+  TRACE_CALL(result_type, header)                                    \
+  debug_only(ResetNoHandleMark __rnhm;)                              \
+  HandleMarkCleaner __hm(thread);                                    \
+  Thread* THREAD = thread;                                           \
+  os::verify_stack_alignment();                                      \
+  /* begin of body */
+
 
 // ENTRY routines may lock, GC and throw exceptions
 
@@ -584,6 +592,14 @@
     VM_LEAF_BASE(result_type, header)
 
 
+#define JVM_ENTRY_FROM_LEAF(env, result_type, header)                \
+  { {                                                                \
+    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+    ThreadInVMfromNative __tiv(thread);                              \
+    debug_only(VMNativeEntryWrapper __vew;)                          \
+    VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)
+
+
 #define JVM_END } }
 
 #endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
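
The new JVM_ENTRY_FROM_LEAF opens two braces so that the existing JVM_END (which expands to "} }") can close it, exactly as it closes an ordinary entry. A toy reduction of the macro shapes, with every real wrapper and thread-state transition elided, shows how the braces pair up in a function like Unsafe_CopySwapMemory0:

// Toy stand-ins for the real macros; all bookkeeping is elided.
#define TOY_LEAF(decl)            void decl { {
#define TOY_ENTRY_FROM_LEAF()     { {
#define TOY_END                   } }

TOY_LEAF(demo(bool on_heap))
  if (!on_heap) {
    // fast leaf path: no VM transition
  } else {
    TOY_ENTRY_FROM_LEAF()
      // VM path: safe to touch raw heap pointers here
    TOY_END
  }
TOY_END
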
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -359,6 +359,11 @@
   static address clean_opt_virtual_call_entry();
   static address clean_static_call_entry();
 
+#if defined(X86) && defined(COMPILER1)
+  // For Object.hashCode and System.identityHashCode, try to pull the hashCode from the object header if available.
+  static void inline_check_hashcode_from_object_header(MacroAssembler* masm, methodHandle method, Register obj_reg, Register result);
+#endif // X86 && COMPILER1
+
  public:
 
   // Read the array of BasicTypes from a Java signature, and compute where
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -37,7 +37,7 @@
 
 StubCodeDesc* StubCodeDesc::_list = NULL;
 int           StubCodeDesc::_count = 0;
-
+bool          StubCodeDesc::_frozen = false;
 
 StubCodeDesc* StubCodeDesc::desc_for(address pc) {
   StubCodeDesc* p = _list;
@@ -46,20 +46,23 @@
   return p;
 }
 
-
 StubCodeDesc* StubCodeDesc::desc_for_index(int index) {
   StubCodeDesc* p = _list;
   while (p != NULL && p->index() != index) p = p->_next;
   return p;
 }
 
-
 const char* StubCodeDesc::name_for(address pc) {
   StubCodeDesc* p = desc_for(pc);
   return p == NULL ? NULL : p->name();
 }
 
 
+void StubCodeDesc::freeze() {
+  assert(!_frozen, "repeated freeze operation");
+  _frozen = true;
+}
+
 void StubCodeDesc::print_on(outputStream* st) const {
   st->print("%s", group());
   st->print("::");
@@ -110,12 +113,10 @@
   }
 }
 
-
 void StubCodeGenerator::stub_prolog(StubCodeDesc* cdesc) {
   // default implementation - do nothing
 }
 
-
 void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) {
   // default implementation - record the cdesc
   if (_first_stub == NULL)  _first_stub = cdesc;
--- a/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/stubCodeGenerator.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -28,7 +28,7 @@
 #include "asm/assembler.hpp"
 #include "memory/allocation.hpp"
 
-// All the basic framework for stubcode generation/debugging/printing.
+// All the basic framework for stub code generation/debugging/printing.
 
 
 // A StubCodeDesc describes a piece of generated code (usually stubs).
@@ -37,9 +37,10 @@
 // this may have to change if searching becomes too slow.
 
 class StubCodeDesc: public CHeapObj<mtCode> {
- protected:
+ private:
   static StubCodeDesc* _list;                  // the list of all descriptors
   static int           _count;                 // length of list
+  static bool          _frozen;                // determines whether _list modifications are allowed
 
   StubCodeDesc*        _next;                  // the next element in the linked list
   const char*          _group;                 // the group to which the stub code belongs
@@ -68,6 +69,7 @@
   static const char*   name_for(address pc);     // returns the name of the code containing pc or NULL
 
   StubCodeDesc(const char* group, const char* name, address begin, address end = NULL) {
+    assert(!_frozen, "no modifications allowed");
     assert(name != NULL, "no name specified");
     _next           = _list;
     _group          = group;
@@ -78,6 +80,8 @@
     _list           = this;
   };
 
+  static void freeze();
+
   const char* group() const                      { return _group; }
   const char* name() const                       { return _name; }
   int         index() const                      { return _index; }
@@ -117,7 +121,7 @@
 // later via an address pointing into it.
 
 class StubCodeMark: public StackObj {
- protected:
+ private:
   StubCodeGenerator* _cgen;
   StubCodeDesc*      _cdesc;
 
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -3600,6 +3600,9 @@
     vm_exit_during_initialization("Failed to initialize tracing backend");
   }
 
+  // No more stub generation allowed after this point.
+  StubCodeDesc::freeze();
+
   // Set flag that basic initialization has completed. Used by exceptions and various
   // debug stuff, that does not work until all basic classes have been initialized.
   set_init_completed();
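
The freeze() call above seals stub bookkeeping once startup is complete; a generic sketch of the same seal-after-init pattern (the Registry class below is illustrative, not StubCodeDesc itself):

#include <cassert>

// A registry that becomes immutable once initialization is complete.
class Registry {
  static bool _frozen;
 public:
  static void add_entry(/* ... */) {
    assert(!_frozen && "no modifications allowed after freeze");
    // link the new entry into the list ...
  }
  static void freeze() {
    assert(!_frozen && "repeated freeze operation");
    _frozen = true;
  }
};

bool Registry::_frozen = false;

int main() {
  Registry::add_entry();    // OK during startup
  Registry::freeze();       // called once initialization is done
  // Registry::add_entry(); // would assert in a debug build
  return 0;
}
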
--- a/hotspot/src/share/vm/trace/trace.xml	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/trace/trace.xml	Thu Feb 25 11:27:59 2016 -0800
@@ -283,6 +283,7 @@
       <value type="BYTES64" field="edenUsedSize" label="Eden Used Size" />
       <value type="BYTES64" field="edenTotalSize" label="Eden Total Size" />
       <value type="BYTES64" field="survivorUsedSize" label="Survivor Used Size" />
+      <value type="UINT" field="numberOfRegions" label="Number of Regions" />
     </event>
 
     <event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
@@ -478,6 +479,23 @@
       <value type="BYTES64" field="size" label="Allocation Size" />
     </event>
 
+    <event id="TenuringDistribution" path="vm/gc/detailed/tenuring_distribution" label="Tenuring Distribution"
+           is_instant="true">
+      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UINT" field="age" label="Age" />
+      <value type="BYTES64" field="size" label="Size" />
+    </event>
+
+    <event id="G1HeapRegionTypeChange" path="vm/gc/detailed/g1_heap_region_type_change" label="G1 Heap Region Type Change"
+           description="Information about a G1 heap region type change." is_instant="true">
+      <value type="UINT" field="index" label="Index" />
+      <value type="G1HEAPREGIONTYPE" field="from" label="From Type" />
+      <value type="G1HEAPREGIONTYPE" field="to" label="To Type" />
+      <value type="ADDRESS" field="start" label="Start" />
+      <value type="BYTES64" field="used" label="Used" />
+      <value type="UINT" field="allocContext" label="Allocation Context" />
+    </event>
+
     <!-- Compiler events -->
 
     <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
--- a/hotspot/src/share/vm/trace/tracetypes.xml	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/trace/tracetypes.xml	Thu Feb 25 11:27:59 2016 -0800
@@ -126,6 +126,11 @@
       <value type="UTF8" field="when" label="when" />
     </content_type>
 
+    <content_type id="G1HeapRegionType" hr_name="G1 Heap Region Type"
+                  type="U1" jvm_type="G1HEAPREGIONTYPE">
+      <value type="UTF8" field="type" label="type" />
+    </content_type>
+
     <content_type id="G1YCType" hr_name="G1 YC Type"
                   type="U1" jvm_type="G1YCTYPE">
       <value type="UTF8" field="type" label="type" />
@@ -345,6 +350,10 @@
     <primary_type symbol="GCWHEN" datatype="U1" contenttype="GCWHEN"
                   type="u1" sizeop="sizeof(u1)" />
 
+    <!-- G1HEAPREGIONTYPE -->
+    <primary_type symbol="G1HEAPREGIONTYPE" datatype="U1" contenttype="G1HEAPREGIONTYPE"
+                  type="u1" sizeop="sizeof(u1)" />
+
     <!-- G1YCType -->
     <primary_type symbol="G1YCTYPE" datatype="U1" contenttype="G1YCTYPE"
                   type="u1" sizeop="sizeof(u1)" />
--- a/hotspot/src/share/vm/utilities/copy.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/utilities/copy.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,175 @@
   }
 }
 
+class CopySwap : AllStatic {
+public:
+  /**
+   * Copy and byte swap elements
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   * @param elem_size size of the elements to copy-swap
+   */
+  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
+    assert(src != NULL, "address must not be NULL");
+    assert(dst != NULL, "address must not be NULL");
+    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
+           "incorrect element size: " SIZE_FORMAT, elem_size);
+    assert(is_size_aligned(byte_count, elem_size),
+           "byte_count " SIZE_FORMAT " must be multiple of element size " SIZE_FORMAT, byte_count, elem_size);
+
+    address src_end = src + byte_count;
+
+    if (dst <= src || dst >= src_end) {
+      do_conjoint_swap<RIGHT>(src, dst, byte_count, elem_size);
+    } else {
+      do_conjoint_swap<LEFT>(src, dst, byte_count, elem_size);
+    }
+  }
+
+private:
+  /**
+   * Byte swap a 16-bit value
+   */
+  static uint16_t byte_swap(uint16_t x) {
+    return (x << 8) | (x >> 8);
+  }
+
+  /**
+   * Byte swap a 32-bit value
+   */
+  static uint32_t byte_swap(uint32_t x) {
+    uint16_t lo = (uint16_t)x;
+    uint16_t hi = (uint16_t)(x >> 16);
+
+    return ((uint32_t)byte_swap(lo) << 16) | (uint32_t)byte_swap(hi);
+  }
+
+  /**
+   * Byte swap a 64-bit value
+   */
+  static uint64_t byte_swap(uint64_t x) {
+    uint32_t lo = (uint32_t)x;
+    uint32_t hi = (uint32_t)(x >> 32);
+
+    return ((uint64_t)byte_swap(lo) << 32) | (uint64_t)byte_swap(hi);
+  }
+
+  enum CopyDirection {
+    RIGHT, // lower -> higher address
+    LEFT   // higher -> lower address
+  };
+
+  /**
+   * Copy and byte swap elements
+   *
+   * <T> - type of element to copy
+   * <D> - copy direction
+   * <is_src_aligned> - true if src argument is aligned to element size
+   * <is_dst_aligned> - true if dst argument is aligned to element size
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   */
+  template <typename T, CopyDirection D, bool is_src_aligned, bool is_dst_aligned>
+  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
+    address cur_src, cur_dst;
+
+    switch (D) {
+    case RIGHT:
+      cur_src = src;
+      cur_dst = dst;
+      break;
+    case LEFT:
+      cur_src = src + byte_count - sizeof(T);
+      cur_dst = dst + byte_count - sizeof(T);
+      break;
+    }
+
+    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
+      T tmp;
+
+      if (is_src_aligned) {
+        tmp = *(T*)cur_src;
+      } else {
+        memcpy(&tmp, cur_src, sizeof(T));
+      }
+
+      tmp = byte_swap(tmp);
+
+      if (is_dst_aligned) {
+        *(T*)cur_dst = tmp;
+      } else {
+        memcpy(cur_dst, &tmp, sizeof(T));
+      }
+
+      switch (D) {
+      case RIGHT:
+        cur_src += sizeof(T);
+        cur_dst += sizeof(T);
+        break;
+      case LEFT:
+        cur_src -= sizeof(T);
+        cur_dst -= sizeof(T);
+        break;
+      }
+    }
+  }
+
+  /**
+   * Copy and byte swap elements
+   *
+   * <T> - type of element to copy
+   * <direction> - copy direction
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   */
+  template <typename T, CopyDirection direction>
+  static void do_conjoint_swap(address src, address dst, size_t byte_count) {
+    if (is_ptr_aligned(src, sizeof(T))) {
+      if (is_ptr_aligned(dst, sizeof(T))) {
+        do_conjoint_swap<T,direction,true,true>(src, dst, byte_count);
+      } else {
+        do_conjoint_swap<T,direction,true,false>(src, dst, byte_count);
+      }
+    } else {
+      if (is_ptr_aligned(dst, sizeof(T))) {
+        do_conjoint_swap<T,direction,false,true>(src, dst, byte_count);
+      } else {
+        do_conjoint_swap<T,direction,false,false>(src, dst, byte_count);
+      }
+    }
+  }
+
+
+  /**
+   * Copy and byte swap elements
+   *
+   * <D> - copy direction
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   * @param elem_size size of the elements to copy-swap
+   */
+  template <CopyDirection D>
+  static void do_conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
+    switch (elem_size) {
+    case 2: do_conjoint_swap<uint16_t,D>(src, dst, byte_count); break;
+    case 4: do_conjoint_swap<uint32_t,D>(src, dst, byte_count); break;
+    case 8: do_conjoint_swap<uint64_t,D>(src, dst, byte_count); break;
+    default: guarantee(false, "do_conjoint_swap: invalid elem_size " SIZE_FORMAT, elem_size);
+    }
+  }
+};
+
+void Copy::conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size) {
+  CopySwap::conjoint_swap(src, dst, byte_count, elem_size);
+}
 
 // Fill bytes; larger units are filled atomically if everything is aligned.
 void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
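
A hedged standalone sketch of the two pieces of logic above: the half-swap composition used by byte_swap (so 0x11223344 becomes 0x44332211 as a 32-bit element), and the memmove-style overlap test that picks the copy direction in conjoint_swap. Names below are local stand-ins, not the VM's:

#include <cassert>
#include <cstdint>
#include <cstddef>

// Same composition trick as CopySwap::byte_swap in the patch:
// swap the halves, then swap the bytes within each half.
static uint16_t bswap16(uint16_t x) { return (uint16_t)((x << 8) | (x >> 8)); }
static uint32_t bswap32(uint32_t x) {
  return ((uint32_t)bswap16((uint16_t)x) << 16) | bswap16((uint16_t)(x >> 16));
}

int main() {
  assert(bswap32(0x11223344u) == 0x44332211u);

  // Overlap check from conjoint_swap: copy forward unless dst starts
  // inside [src, src + byte_count), in which case copy backward.
  uint32_t buf[4] = {0x11223344u, 0x55667788u, 0, 0};
  unsigned char* src = (unsigned char*)&buf[0];
  unsigned char* dst = (unsigned char*)&buf[1];   // overlaps the source tail
  size_t byte_count = 2 * sizeof(uint32_t);
  bool copy_forward = (dst <= src) || (dst >= src + byte_count);
  assert(!copy_forward);  // overlapping shift right => walk from the end
  return 0;
}
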
--- a/hotspot/src/share/vm/utilities/copy.hpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/utilities/copy.hpp	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -227,6 +227,16 @@
     }
   }
 
+  /**
+   * Copy and *unconditionally* byte swap elements
+   *
+   * @param src address of source
+   * @param dst address of destination
+   * @param byte_count number of bytes to copy
+   * @param elem_size size of the elements to copy-swap
+   */
+  static void conjoint_swap(address src, address dst, size_t byte_count, size_t elem_size);
+
   // Fill methods
 
   // Fill word-aligned words, not atomic on each word
--- a/hotspot/src/share/vm/utilities/quickSort.cpp	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/src/share/vm/utilities/quickSort.cpp	Thu Feb 25 11:27:59 2016 -0800
@@ -44,6 +44,31 @@
   }
   return 1;
 }
+
+static void print_array(const char* prefix, int* array, int length) {
+  tty->print("%s:", prefix);
+  for (int i = 0; i < length; i++) {
+    tty->print(" %d", array[i]);
+  }
+  tty->cr();
+}
+
+static bool compare_arrays(int* actual, int* expected, int length) {
+  for (int i = 0; i < length; i++) {
+    if (actual[i] != expected[i]) {
+      print_array("Sorted array  ", actual, length);
+      print_array("Expected array", expected, length);
+      return false;
+    }
+  }
+  return true;
+}
+
+template <class C>
+static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false) {
+  QuickSort::sort<int, C>(arrayToSort, length, comparator, idempotent);
+  return compare_arrays(arrayToSort, expectedResult, length);
+}
 #endif // ASSERT
 
 static int test_even_odd_comparator(int a, int b) {
@@ -72,31 +97,6 @@
   }
 }
 
-static void print_array(const char* prefix, int* array, int length) {
-  tty->print("%s:", prefix);
-  for (int i = 0; i < length; i++) {
-    tty->print(" %d", array[i]);
-  }
-  tty->cr();
-}
-
-static bool compare_arrays(int* actual, int* expected, int length) {
-  for (int i = 0; i < length; i++) {
-    if (actual[i] != expected[i]) {
-      print_array("Sorted array  ", actual, length);
-      print_array("Expected array", expected, length);
-      return false;
-    }
-  }
-  return true;
-}
-
-template <class C>
-static bool sort_and_compare(int* arrayToSort, int* expectedResult, int length, C comparator, bool idempotent = false) {
-  QuickSort::sort<int, C>(arrayToSort, length, comparator, idempotent);
-  return compare_arrays(arrayToSort, expectedResult, length);
-}
-
 void QuickSort_test() {
   {
     int* test_array = NULL;
--- a/hotspot/test/compiler/intrinsics/string/TestStringIntrinsics2.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/intrinsics/string/TestStringIntrinsics2.java	Thu Feb 25 11:27:59 2016 -0800
@@ -33,9 +33,10 @@
  *
  * @run main/othervm
  *        -Xbootclasspath/a:.
+ *        -Xmixed
  *        -XX:+UnlockDiagnosticVMOptions
  *        -XX:+WhiteBoxAPI
- *        -XX:MaxInlineSize=100
+ *        -XX:MaxInlineSize=70
  *        -XX:MinInliningThreshold=0
  *        TestStringIntrinsics2
  */
--- a/hotspot/test/compiler/loopopts/superword/ProdRed_Double.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/ProdRed_Double.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar product reduction optimizations : float test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 ProdRed_Double
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 ProdRed_Double
--- a/hotspot/test/compiler/loopopts/superword/ProdRed_Float.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/ProdRed_Float.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar product reduction optimizations : float test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 ProdRed_Float
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 ProdRed_Float
--- a/hotspot/test/compiler/loopopts/superword/ProdRed_Int.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/ProdRed_Int.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar product reduction optimizations : int test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 ProdRed_Int
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 ProdRed_Int
--- a/hotspot/test/compiler/loopopts/superword/ReductionPerf.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/ReductionPerf.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar product reduction optimizations : int test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:CompileThresholdScaling=0.1 -XX:CompileCommand=exclude,ReductionPerf::main ReductionPerf
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:CompileThresholdScaling=0.1 -XX:CompileCommand=exclude,ReductionPerf::main ReductionPerf
--- a/hotspot/test/compiler/loopopts/superword/SumRedSqrt_Double.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/SumRedSqrt_Double.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,7 +26,7 @@
 * @test
 * @bug 8135028
 * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : double sqrt test
-* @requires os.arch=="x86" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
+* @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
 *
 * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRedSqrt_Double
 * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRedSqrt_Double
--- a/hotspot/test/compiler/loopopts/superword/SumRed_Double.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/SumRed_Double.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : double test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRed_Double
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRed_Double
--- a/hotspot/test/compiler/loopopts/superword/SumRed_Float.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/SumRed_Float.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : float test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRed_Float
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRed_Float
--- a/hotspot/test/compiler/loopopts/superword/SumRed_Int.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/SumRed_Int.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8074981
  * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : int test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64" | os.arch=="aarch64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRed_Int
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=2 -XX:CompileThresholdScaling=0.1 SumRed_Int
--- a/hotspot/test/compiler/loopopts/superword/SumRed_Long.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/compiler/loopopts/superword/SumRed_Long.java	Thu Feb 25 11:27:59 2016 -0800
@@ -26,6 +26,7 @@
  * @test
  * @bug 8076276
  * @summary Add C2 x86 Superword support for scalar sum reduction optimizations : long test
+ * @requires os.arch=="x86" | os.arch=="i386" | os.arch=="amd64" | os.arch=="x86_64"
  *
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=4 -XX:CompileThresholdScaling=0.1 SumRed_Long
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-SuperWordReductions -XX:LoopUnrollLimit=250 -XX:LoopMaxUnroll=4 -XX:CompileThresholdScaling=0.1 SumRed_Long
--- a/hotspot/test/runtime/CommandLine/VMDeprecatedOptions.java	Thu Feb 25 09:41:40 2016 -0800
+++ b/hotspot/test/runtime/CommandLine/VMDeprecatedOptions.java	Thu Feb 25 11:27:59 2016 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,8 @@
         // deprecated non-alias flags:
         {"MaxGCMinorPauseMillis", "1032"},
         {"UseParNewGC", "false"},
+        {"ConvertSleepToYield", "false" },
+        {"ConvertYieldToSleep", "false" },
 
         // deprecated alias flags (see also aliased_jvm_flags):
         {"DefaultMaxRAMFraction", "4"},