Merge

author      cfang
date        Tue, 10 Nov 2009 17:00:18 -0800
changeset   4433 2a23318c15bf
parent      4144 09e0b33177af (current diff)
parent      4432 29b057bf202d (diff)
child       4439 dfe376b4f7df
child       4440 e53e962bd403

description:
Merge

files:
hotspot/src/cpu/x86/vm/assembler_x86.cpp
hotspot/src/cpu/x86/vm/assembler_x86.hpp
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -189,14 +189,17 @@
   Register OSR_buf = osrBufferPointer()->as_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+      (2 * BytesPerWord) * (number_of_locks - 1);
+    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+    // the OSR buffer using 2 word entries: first the lock and then
+    // the oop.
     for (int i = 0; i < number_of_locks; i++) {
-      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
         __ cmp(G0, O7);
         __ br(Assembler::notEqual, false, Assembler::pt, L);
         __ delayed()->nop();
@@ -205,9 +208,9 @@
       }
 #endif // ASSERT
       // Copy the lock field into the compiled activation.
-      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
+      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
-      __ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
+      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
       __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
     }
   }
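
For context on the new constants: SharedRuntime::OSR_migration_begin() lays each interpreter monitor out in the OSR buffer as two machine words, the lock word first and the object second, which is where the 2*BytesPerWord stride and the +0 / +1*BytesPerWord offsets come from. A minimal standalone sketch of that layout and offset arithmetic, using hypothetical values (not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for one packed OSR buffer monitor entry.
    struct OsrMonitorSlot {
        intptr_t lock;  // word 0: the lock (displaced header) word
        intptr_t oop;   // word 1: the locked object
    };

    int main() {
        const int BytesPerWord    = sizeof(intptr_t);
        const int max_locals      = 4;   // assumed values for illustration
        const int number_of_locks = 2;

        // Mirrors: monitor_offset = BytesPerWord*max_locals + (2*BytesPerWord)*(number_of_locks-1)
        int monitor_offset = BytesPerWord * max_locals +
                             (2 * BytesPerWord) * (number_of_locks - 1);

        for (int i = 0; i < number_of_locks; i++) {
            int slot_offset = monitor_offset - (i * 2) * BytesPerWord;
            std::printf("monitor %d: lock @ +%d, oop @ +%d\n",
                        i, slot_offset + 0, slot_offset + 1 * BytesPerWord);
        }
        return 0;
    }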
@@ -953,9 +956,11 @@
         } else {
 #ifdef _LP64
           assert(base != to_reg->as_register_lo(), "can't handle this");
+          assert(O7 != to_reg->as_register_lo(), "can't handle this");
           __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
+          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
           __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
-          __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
+          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
 #else
           if (base == to_reg->as_register_lo()) {
             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
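
The rewritten LP64 path loads the high word into the destination register, the low word into the scratch register O7, then shifts and ORs the halves together, so a base register that happens to alias the destination is read before it is clobbered. A rough C++ analogue of the ld/lduw/sllx/or3 sequence, for illustration only (combine_words is an illustrative name):

    #include <cassert>
    #include <cstdint>

    // High word goes into the destination, low word into the scratch (O7),
    // then the two halves are combined. The low half is zero-extended, as
    // lduw does, so its sign bit cannot smear into the upper 32 bits.
    static int64_t combine_words(int32_t hi, uint32_t lo) {
        uint64_t dst     = (uint32_t)hi;   // ld   [base+hi_off], to_reg
        uint64_t scratch = lo;             // lduw [base+lo_off], O7
        dst <<= 32;                        // sllx to_reg, 32, to_reg
        return (int64_t)(dst | scratch);   // or3  to_reg, O7, to_reg
    }

    int main() {
        assert(combine_words(0x00000001, 0x80000000u) == INT64_C(0x180000000));
        return 0;
    }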
@@ -976,8 +981,8 @@
           FloatRegister reg = to_reg->as_double_reg();
           // split unaligned loads
           if (unaligned || PatchALot) {
-            __ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
-            __ ldf(FloatRegisterImpl::S, base, offset,                reg);
+            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
+            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
           } else {
             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
           }
@@ -2200,6 +2205,7 @@
   Register len     = O2;
 
   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
+  LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
   if (shift == 0) {
     __ add(src_ptr, src_pos, src_ptr);
   } else {
@@ -2208,6 +2214,7 @@
   }
 
   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
+  LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
   if (shift == 0) {
     __ add(dst_ptr, dst_pos, dst_ptr);
   } else {
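
The added sra with a shift count of 0 sign-extends the 32-bit position into the full 64-bit register before it is added to a pointer; without it, stale upper bits would corrupt the address. A small sketch of the failure the instruction prevents, with an assumed garbage pattern in the upper half:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Simulate a 64-bit register whose low 32 bits hold the array index
        // and whose upper 32 bits still contain stale data from earlier use.
        uint64_t reg = 0xdeadbeef00000007ULL;          // logical index is 7

        // Equivalent of "sra reg, 0, reg": take the low 32 bits and
        // sign-extend them to the full register width.
        int64_t pos = (int32_t)(reg & 0xffffffffULL);

        char array[16];
        assert(array + pos == &array[7]);              // now safe in pointer math
        return 0;
    }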
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -144,17 +144,17 @@
   if (index->is_register()) {
     // apply the shift and accumulate the displacement
     if (shift > 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index, shift, tmp);
       index = tmp;
     }
     if (disp != 0) {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       if (Assembler::is_simm13(disp)) {
-        __ add(tmp, LIR_OprFact::intConst(disp), tmp);
+        __ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
         index = tmp;
       } else {
-        __ move(LIR_OprFact::intConst(disp), tmp);
+        __ move(LIR_OprFact::intptrConst(disp), tmp);
         __ add(tmp, index, tmp);
         index = tmp;
       }
@@ -162,8 +162,8 @@
     }
   } else if (disp != 0 && !Assembler::is_simm13(disp)) {
     // index is illegal so replace it with the displacement loaded into a register
-    index = new_register(T_INT);
-    __ move(LIR_OprFact::intConst(disp), index);
+    index = new_pointer_register();
+    __ move(LIR_OprFact::intptrConst(disp), index);
     disp = 0;
   }
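
Switching the temporaries from T_INT registers and intConst displacements to pointer-width ones keeps large displacements from being truncated on LP64. A tiny illustration of the truncation this avoids, with a hypothetical displacement value (results as on common LP64 targets):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // A displacement that needs more than 32 bits (hypothetical value).
        int64_t  disp   = INT64_C(0x100000000);
        int32_t  as_int = (int32_t)disp;   // what a T_INT temporary could hold
        intptr_t as_ptr = (intptr_t)disp;  // what a pointer-width register holds

        std::printf("T_INT temp   : %d\n", as_int);               // truncated to 0
        std::printf("pointer temp : %lld\n", (long long)as_ptr);  // preserved on LP64
        return 0;
    }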
 
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -150,8 +150,7 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
-  assert(!unbox, "NYI");//6815692//
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   address compiled_entry = __ pc();
   Label cont;
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -2251,6 +2251,7 @@
   emit_byte(0x9D);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::popl(Address dst) {
   // NOTE: this will adjust stack by 8byte on 64bits
   InstructionMark im(this);
@@ -2258,6 +2259,7 @@
   emit_byte(0x8F);
   emit_operand(rax, dst);
 }
+#endif
 
 void Assembler::prefetch_prefix(Address src) {
   prefix(src);
@@ -2428,6 +2430,7 @@
   emit_byte(0x9C);
 }
 
+#ifndef _LP64 // no 32bit push/pop on amd64
 void Assembler::pushl(Address src) {
   // Note this will push 64bit on 64bit
   InstructionMark im(this);
@@ -2435,6 +2438,7 @@
   emit_byte(0xFF);
   emit_operand(rsi, src);
 }
+#endif
 
 void Assembler::pxor(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@@ -5591,7 +5595,12 @@
 }
 
 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
-  andpd(dst, as_Address(src));
+  if (reachable(src)) {
+    andpd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    andpd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::andptr(Register dst, int32_t imm32) {
@@ -6078,11 +6087,21 @@
 }
 
 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
-  comisd(dst, as_Address(src));
+  if (reachable(src)) {
+    comisd(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comisd(dst, Address(rscratch1, 0));
+  }
 }
 
 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
-  comiss(dst, as_Address(src));
+  if (reachable(src)) {
+    comiss(dst, as_Address(src));
+  } else {
+    lea(rscratch1, src);
+    comiss(dst, Address(rscratch1, 0));
+  }
 }
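
Each of these MacroAssembler wrappers now follows the usual x86_64 pattern for AddressLiterals: use the address directly while it fits a RIP-relative (signed 32-bit) displacement, otherwise materialize it into rscratch1 with lea and address the operand as Address(rscratch1, 0). A standalone sketch of the reachability test, with hypothetical addresses and an illustrative helper name (assumed 64-bit host):

    #include <cstdint>

    // "Reachable" here means: the target fits in a signed 32-bit displacement
    // from the current code position, so it can be encoded RIP-relative.
    static bool reachable(uintptr_t code_pos, uintptr_t target) {
        intptr_t disp = (intptr_t)(target - code_pos);
        return disp == (intptr_t)(int32_t)disp;
    }

    int main() {
        uintptr_t code_pos = 0x10000000;                       // assumed code address
        bool near_ok = reachable(code_pos, code_pos + 0x1000); // fits: emit directly
        bool far_ok  = reachable(code_pos, code_pos + (UINT64_C(3) << 30)); // 3 GB away
        return (near_ok && !far_ok) ? 0 : 1;                   // far case takes the lea path
    }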
 
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1244,7 +1244,9 @@
   void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
   void pcmpestri(XMMRegister xmm1, Address src, int imm8);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void popl(Address dst);
+#endif
 
 #ifdef _LP64
   void popq(Address dst);
@@ -1285,7 +1287,9 @@
   // Interleave Low Bytes
   void punpcklbw(XMMRegister dst, XMMRegister src);
 
+#ifndef _LP64 // no 32bit push/pop on amd64
   void pushl(Address src);
+#endif
 
   void pushq(Address src);
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -301,22 +301,25 @@
   Register OSR_buf = osrBufferPointer()->as_pointer_register();
   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
     int monitor_offset = BytesPerWord * method()->max_locals() +
-      (BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
+      (2 * BytesPerWord) * (number_of_locks - 1);
+    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
+    // the OSR buffer using 2 word entries: first the lock and then
+    // the oop.
     for (int i = 0; i < number_of_locks; i++) {
-      int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
+      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 #ifdef ASSERT
       // verify the interpreter's monitor has a non-null object
       {
         Label L;
-        __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
+        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
         __ jcc(Assembler::notZero, L);
         __ stop("locked object is NULL");
         __ bind(L);
       }
 #endif
-      __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
+      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
       __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
-      __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
+      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
       __ movptr(frame_map()->address_for_monitor_object(i), rbx);
     }
   }
@@ -785,7 +788,13 @@
           ShouldNotReachHere();
           __ movoop(as_Address(addr, noreg), c->as_jobject());
         } else {
+#ifdef _LP64
+          __ movoop(rscratch1, c->as_jobject());
+          null_check_here = code_offset();
+          __ movptr(as_Address_lo(addr), rscratch1);
+#else
           __ movoop(as_Address(addr), c->as_jobject());
+#endif
         }
       }
       break;
@@ -1118,8 +1127,14 @@
       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
     } else {
+#ifndef _LP64
       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+#else
+      //no pushl on 64bits
+      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
+      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
+#endif
     }
 
   } else if (src->is_double_stack()) {
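
The new LP64 branch replaces the 32-bit push/pop pair, which has no memory-operand form on x86_64, with a load and a store through rscratch1. A trivial model of that replacement sequence (illustrative helper, not Assembler code):

    #include <cassert>
    #include <cstdint>

    // A 32-bit stack-slot to stack-slot move done as load + store through a
    // scratch register, the way the #else branch above does it.
    static void copy_slot_32(int32_t* dst_slot, const int32_t* src_slot) {
        int32_t scratch = *src_slot;   // movl rscratch1, [src_slot]
        *dst_slot = scratch;           // movl [dst_slot], rscratch1
    }

    int main() {
        int32_t src = 42, dst = 0;
        copy_slot_32(&dst, &src);
        assert(dst == 42);
        return 0;
    }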
@@ -3136,8 +3151,10 @@
 
 #ifdef _LP64
   assert_different_registers(c_rarg0, dst, dst_pos, length);
+  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
   __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
   assert_different_registers(c_rarg1, length);
+  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
   __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
   __ mov(c_rarg2, length);
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -755,8 +755,19 @@
   }
 
   LIR_Opr addr = new_pointer_register();
-  __ move(obj.result(), addr);
-  __ add(addr, offset.result(), addr);
+  LIR_Address* a;
+  if(offset.result()->is_constant()) {
+    a = new LIR_Address(obj.result(),
+                        NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
+                        as_BasicType(type));
+  } else {
+    a = new LIR_Address(obj.result(),
+                        offset.result(),
+                        LIR_Address::times_1,
+                        0,
+                        as_BasicType(type));
+  }
+  __ leal(LIR_OprFact::address(a), addr);
 
   if (type == objectType) {  // Write-barrier needed for Object fields.
     // Do the pre-write barrier, if any.
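
Rather than copying the object pointer and adding the offset in two LIR operations, the generator now builds a single LIR_Address, with a constant displacement when the offset is a compile-time constant and a base-plus-index form otherwise, and materializes it with one leal. A rough model of the two address shapes, using plain integers instead of LIR types:

    #include <cassert>
    #include <cstdint>

    // What the single leal computes for each LIR_Address shape built above.
    static intptr_t lea_base_disp (intptr_t base, intptr_t disp)  { return base + disp; }
    static intptr_t lea_base_index(intptr_t base, intptr_t index) { return base + index; }

    int main() {
        char object[64];
        intptr_t base = (intptr_t)object;
        // Either shape yields the same effective field address for offset 16.
        assert(lea_base_disp(base, 16) == lea_base_index(base, 16));
        return 0;
    }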
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -155,15 +155,8 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   TosState incoming_state = state;
-  if (EnableInvokeDynamic) {
-    if (unbox) {
-      incoming_state = atos;
-    }
-  } else {
-    assert(!unbox, "old behavior");
-  }
 
   Label interpreter_entry;
   address compiled_entry = __ pc();
@@ -216,46 +209,6 @@
   __ restore_bcp();
   __ restore_locals();
 
-  Label L_fail;
-
-  if (unbox && state != atos) {
-    // cast and unbox
-    BasicType type = as_BasicType(state);
-    if (type == T_BYTE)  type = T_BOOLEAN; // FIXME
-    KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
-    __ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
-    __ testl(rax, rax);
-    Label L_got_value, L_get_value;
-    // convert nulls to zeroes (avoid NPEs here)
-    if (!(type == T_FLOAT || type == T_DOUBLE)) {
-      // if rax already contains zero bits, forge ahead
-      __ jcc(Assembler::zero, L_got_value);
-    } else {
-      __ jcc(Assembler::notZero, L_get_value);
-      __ fldz();
-      __ jmp(L_got_value);
-    }
-    __ bind(L_get_value);
-    __ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
-    __ jcc(Assembler::notEqual, L_fail);
-    int offset = java_lang_boxing_object::value_offset_in_bytes(type);
-    // Cf. TemplateTable::getfield_or_static
-    switch (type) {
-      case T_BYTE:     // fall through:
-      case T_BOOLEAN:  __ load_signed_byte(rax, Address(rax, offset));    break;
-      case T_CHAR:     __ load_unsigned_short(rax, Address(rax, offset)); break;
-      case T_SHORT:    __ load_signed_short(rax, Address(rax, offset));   break;
-      case T_INT:      __ movl(rax, Address(rax, offset));                break;
-      case T_FLOAT:    __ fld_s(Address(rax, offset));                    break;
-      case T_DOUBLE:   __ fld_d(Address(rax, offset));                    break;
-      // Access to java.lang.Double.value does not need to be atomic:
-      case T_LONG:   { __ movl(rdx, Address(rax, offset + 4));
-                       __ movl(rax, Address(rax, offset + 0));  }         break;
-      default: ShouldNotReachHere();
-    }
-    __ bind(L_got_value);
-  }
-
   Label L_got_cache, L_giant_index;
   if (EnableInvokeDynamic) {
     __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
@@ -263,32 +216,6 @@
   }
   __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
   __ bind(L_got_cache);
-  if (unbox && state == atos) {
-    // insert a casting conversion, to keep verifier sane
-    Label L_ok, L_ok_pops;
-    __ testl(rax, rax);
-    __ jcc(Assembler::zero, L_ok);
-    __ push(rax);               // save the object to check
-    __ push(rbx);               // save CP cache reference
-    __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
-    __ movl(rbx, Address(rbx, rcx,
-                      Address::times_4, constantPoolCacheOopDesc::base_offset() +
-                      ConstantPoolCacheEntry::f1_offset()));
-    __ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
-    __ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
-    __ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
-    __ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
-    __ pop(rcx);                // pop and discard CP cache
-    __ mov(rbx, rax);           // target supertype into rbx for L_fail
-    __ pop(rax);                // failed object into rax for L_fail
-    __ jmp(L_fail);
-
-    __ bind(L_ok_pops);
-    // restore pushed temp regs:
-    __ pop(rbx);
-    __ pop(rax);
-    __ bind(L_ok);
-  }
   __ movl(rbx, Address(rbx, rcx,
                     Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
                     ConstantPoolCacheEntry::flags_offset()));
@@ -301,14 +228,6 @@
     __ bind(L_giant_index);
     __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
     __ jmp(L_got_cache);
-
-    if (unbox) {
-      __ bind(L_fail);
-      __ push(rbx);             // missed klass (required)
-      __ push(rax);             // bad object (actual)
-      __ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
-      __ call(rdx);
-    }
   }
 
   return entry;
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -166,8 +166,7 @@
 
 
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
-                                                                int step, bool unbox) {
-  assert(!unbox, "NYI");//6815692//
+                                                                int step) {
 
   // amd64 doesn't need to do anything special about compiled returns
   // to the interpreter so the code that exists on x86 to place a sentinel
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -2890,9 +2890,6 @@
 
 
 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
-  bool is_invdyn_bootstrap = (byte_no < 0);
-  if (is_invdyn_bootstrap)  byte_no = -byte_no;
-
   // determine flags
   Bytecodes::Code code = bytecode();
   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
@@ -2907,8 +2904,6 @@
   const Register flags  = rdx;
   assert_different_registers(method, index, recv, flags);
 
-  assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");
-
   // save 'interpreter return address'
   __ save_bcp();
 
@@ -2944,9 +2939,7 @@
   // load return address
   {
     address table_addr;
-    if (is_invdyn_bootstrap)
-      table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
-    else if (is_invokeinterface || is_invokedynamic)
+    if (is_invokeinterface || is_invokedynamic)
       table_addr = (address)Interpreter::return_5_addrs_by_index_table();
     else
       table_addr = (address)Interpreter::return_3_addrs_by_index_table();
@@ -3154,53 +3147,10 @@
   }
 
   Label handle_unlinked_site;
-  __ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
-  __ testptr(rcx, rcx);
-  __ jcc(Assembler::zero, handle_unlinked_site);
-
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
   __ prepare_to_jump_from_interpreted();
   __ jump_to_method_handle_entry(rcx, rdx);
-
-  // Initial calls come here...
-  __ bind(handle_unlinked_site);
-  __ pop(rcx);                 // remove return address pushed by prepare_invoke
-
-  // box stacked arguments into an array for the bootstrap method
-  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
-  __ restore_bcp();      // rsi must be correct for call_VM
-  __ call_VM(rax, entry, rax);
-  __ movl(rdi, rax);            // protect bootstrap MH from prepare_invoke
-
-  // recompute return address
-  __ restore_bcp();      // rsi must be correct for prepare_invoke
-  prepare_invoke(rax, rbx, -byte_no);  // smashes rcx, rdx
-  // rax: CallSite object (f1)
-  // rbx: unused (f2)
-  // rdi: bootstrap MH
-  // rdx: flags
-
-  // now load up the arglist, which has been neatly boxed
-  __ get_thread(rcx);
-  __ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
-  __ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
-  __ verify_oop(rdx);
-  // rdx = arglist
-
-  // save SP now, before we add the bootstrap call to the stack
-  // We must preserve a fiction that the original arguments are outgoing,
-  // because the return sequence will reset the stack to this point
-  // and then pop all those arguments.  It seems error-prone to use
-  // a different argument list size just for bootstrapping.
-  __ prepare_to_jump_from_interpreted();
-
-  // Now let's play adapter, pushing the real arguments on the stack.
-  __ pop(rbx);                  // return PC
-  __ push(rdi);                 // boot MH
-  __ push(rax);                 // call site
-  __ push(rdx);                 // arglist
-  __ push(rbx);                 // return PC, again
-  __ mov(rcx, rdi);
-  __ jump_to_method_handle_entry(rcx, rdx);
 }
 
 //----------------------------------------------------------------------------------------------------
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -255,6 +255,8 @@
   if (!VM_Version::supports_sse2()) {
     vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
   }
+  // in 64 bit the use of SSE2 is the minimum
+  if (UseSSE < 2) UseSSE = 2;
 #endif
 
   // If the OS doesn't support SSE, we can't use this feature even if the HW does
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -365,7 +365,7 @@
     if (_next_loop_index < 31) _next_loop_index++;
   } else {
     // block already marked as loop header
-    assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
+    assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
   }
 }
 
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1855,12 +1855,26 @@
     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
   } else {
 #ifdef X86
+#ifdef _LP64
+    if (!index_op->is_illegal() && index_op->type() == T_INT) {
+      LIR_Opr tmp = new_pointer_register();
+      __ convert(Bytecodes::_i2l, index_op, tmp);
+      index_op = tmp;
+    }
+#endif
     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
 #else
     if (index_op->is_illegal() || log2_scale == 0) {
+#ifdef _LP64
+      if (!index_op->is_illegal() && index_op->type() == T_INT) {
+        LIR_Opr tmp = new_pointer_register();
+        __ convert(Bytecodes::_i2l, index_op, tmp);
+        index_op = tmp;
+      }
+#endif
       addr = new LIR_Address(base_op, index_op, dst_type);
     } else {
-      LIR_Opr tmp = new_register(T_INT);
+      LIR_Opr tmp = new_pointer_register();
       __ shift_left(index_op, log2_scale, tmp);
       addr = new LIR_Address(base_op, tmp, dst_type);
     }
@@ -1915,10 +1929,25 @@
   LIR_Opr index_op = idx.result();
   if (log2_scale != 0) {
     // temporary fix (platform dependent code without shift on Intel would be better)
-    index_op = new_register(T_INT);
-    __ move(idx.result(), index_op);
+    index_op = new_pointer_register();
+#ifdef _LP64
+    if(idx.result()->type() == T_INT) {
+      __ convert(Bytecodes::_i2l, idx.result(), index_op);
+    } else {
+#endif
+      __ move(idx.result(), index_op);
+#ifdef _LP64
+    }
+#endif
     __ shift_left(index_op, log2_scale, index_op);
   }
+#ifdef _LP64
+  else if(!index_op->is_illegal() && index_op->type() == T_INT) {
+    LIR_Opr tmp = new_pointer_register();
+    __ convert(Bytecodes::_i2l, index_op, tmp);
+    index_op = tmp;
+  }
+#endif
 
   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
   __ move(value.result(), addr);
--- a/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LinearScan.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -2464,6 +2464,10 @@
 
     case T_LONG: // fall through
     case T_DOUBLE: {
+#ifdef _LP64
+      scope_values->append(&_int_0_scope_value);
+      scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
+#else
       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
@@ -2471,7 +2475,7 @@
         scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
         scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
       }
-
+#endif
       return 2;
     }
 
@@ -2503,17 +2507,18 @@
   } else if (opr->is_single_cpu()) {
     bool is_oop = opr->is_oop_register();
     int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
+    Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
 
     ScopeValue* sv = _scope_value_cache.at(cache_idx);
     if (sv == NULL) {
-      Location::Type loc_type = is_oop ? Location::oop : Location::normal;
+      Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
       VMReg rname = frame_map()->regname(opr);
       sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
       _scope_value_cache.at_put(cache_idx, sv);
     }
 
     // check if cached value is correct
-    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
+    DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
 
     scope_values->append(sv);
     return 1;
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -690,10 +690,8 @@
   ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
 
   // Get the method's name and signature.
-  int nt_index = cpool->name_and_type_ref_index_at(index);
-  int sig_index = cpool->signature_ref_index_at(nt_index);
   symbolOop name_sym = cpool->name_ref_at(index);
-  symbolOop sig_sym = cpool->symbol_at(sig_index);
+  symbolOop sig_sym  = cpool->signature_ref_at(index);
 
   if (holder_is_accessible) { // Our declared holder is loaded.
     instanceKlass* lookup = declared_holder->get_instanceKlass();
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -2430,15 +2430,15 @@
 }
 
 
-// Support for sun_dyn_CallSiteImpl
-
-int sun_dyn_CallSiteImpl::_type_offset;
-int sun_dyn_CallSiteImpl::_target_offset;
-int sun_dyn_CallSiteImpl::_vmmethod_offset;
-
-void sun_dyn_CallSiteImpl::compute_offsets() {
+// Support for java_dyn_CallSite
+
+int java_dyn_CallSite::_type_offset;
+int java_dyn_CallSite::_target_offset;
+int java_dyn_CallSite::_vmmethod_offset;
+
+void java_dyn_CallSite::compute_offsets() {
   if (!EnableInvokeDynamic)  return;
-  klassOop k = SystemDictionary::CallSiteImpl_klass();
+  klassOop k = SystemDictionary::CallSite_klass();
   if (k != NULL) {
     compute_offset(_type_offset,   k, vmSymbols::type_name(),   vmSymbols::java_dyn_MethodType_signature(), true);
     compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
@@ -2446,23 +2446,23 @@
   }
 }
 
-oop sun_dyn_CallSiteImpl::type(oop site) {
+oop java_dyn_CallSite::type(oop site) {
   return site->obj_field(_type_offset);
 }
 
-oop sun_dyn_CallSiteImpl::target(oop site) {
+oop java_dyn_CallSite::target(oop site) {
   return site->obj_field(_target_offset);
 }
 
-void sun_dyn_CallSiteImpl::set_target(oop site, oop target) {
+void java_dyn_CallSite::set_target(oop site, oop target) {
   site->obj_field_put(_target_offset, target);
 }
 
-oop sun_dyn_CallSiteImpl::vmmethod(oop site) {
+oop java_dyn_CallSite::vmmethod(oop site) {
   return site->obj_field(_vmmethod_offset);
 }
 
-void sun_dyn_CallSiteImpl::set_vmmethod(oop site, oop ref) {
+void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
   site->obj_field_put(_vmmethod_offset, ref);
 }
 
@@ -2811,7 +2811,7 @@
     java_dyn_MethodTypeForm::compute_offsets();
   }
   if (EnableInvokeDynamic) {
-    sun_dyn_CallSiteImpl::compute_offsets();
+    java_dyn_CallSite::compute_offsets();
   }
   java_security_AccessControlContext::compute_offsets();
   // Initialize reflection classes. The layouts of these classes
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1061,9 +1061,9 @@
 };
 
 
-// Interface to sun.dyn.CallSiteImpl objects
+// Interface to java.dyn.CallSite objects
 
-class sun_dyn_CallSiteImpl: AllStatic {
+class java_dyn_CallSite: AllStatic {
   friend class JavaClasses;
 
 private:
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1973,7 +1973,7 @@
   WKID indy_group_end   = WK_KLASS_ENUM_NAME(Dynamic_klass);
   initialize_wk_klasses_until(indy_group_start, scan, CHECK);
   if (EnableInvokeDynamic) {
-    initialize_wk_klasses_through(indy_group_start, scan, CHECK);
+    initialize_wk_klasses_through(indy_group_end, scan, CHECK);
   }
   if (_well_known_klasses[indy_group_start] == NULL) {
     // Skip the rest of the dynamic typing classes, if Linkage is not loaded.
@@ -2404,7 +2404,7 @@
                                                 methodHandle mh_invdyn,
                                                 TRAPS) {
   Handle empty;
-  // call sun.dyn.CallSiteImpl::makeSite(caller, name, mtype, cmid, cbci)
+  // call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
   oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
   JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
   args.push_oop(name_str_oop);
@@ -2413,17 +2413,19 @@
   args.push_int(caller_bci);
   JavaValue result(T_OBJECT);
   JavaCalls::call_static(&result,
-                         SystemDictionary::CallSiteImpl_klass(),
+                         SystemDictionary::CallSite_klass(),
                          vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
                          &args, CHECK_(empty));
   oop call_site_oop = (oop) result.get_jobject();
   assert(call_site_oop->is_oop()
-         /*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
-  sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
+         /*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
+  java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
   if (TraceMethodHandles) {
+#ifndef PRODUCT
     tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
     call_site_oop->print();
     tty->cr();
+#endif //PRODUCT
   }
   return call_site_oop;
 }
@@ -2436,9 +2438,17 @@
 
   instanceKlassHandle ik(THREAD, caller());
 
-  if (ik->bootstrap_method() != NULL) {
-    return Handle(THREAD, ik->bootstrap_method());
+  oop boot_method_oop = ik->bootstrap_method();
+  if (boot_method_oop != NULL) {
+    if (TraceMethodHandles) {
+      tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
+    }
+    NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
+    assert(boot_method_oop->is_oop()
+           && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
+    return Handle(THREAD, boot_method_oop);
   }
+  boot_method_oop = NULL;  // GC safety
 
   // call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
   JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
@@ -2452,9 +2462,18 @@
                          vmSymbols::findBootstrapMethod_name(),
                          vmSymbols::findBootstrapMethod_signature(),
                          &args, CHECK_(empty));
-  oop boot_method_oop = (oop) result.get_jobject();
+  boot_method_oop = (oop) result.get_jobject();
 
   if (boot_method_oop != NULL) {
+    if (TraceMethodHandles) {
+#ifndef PRODUCT
+      tty->print_cr("--------");
+      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
+      ik()->print();
+      boot_method_oop->print();
+      tty->print_cr("========");
+#endif //PRODUCT
+    }
     assert(boot_method_oop->is_oop()
            && java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
     // probably no race conditions, but let's be careful:
@@ -2463,6 +2482,14 @@
     else
       boot_method_oop = ik->bootstrap_method();
   } else {
+    if (TraceMethodHandles) {
+#ifndef PRODUCT
+      tty->print_cr("--------");
+      tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
+      ik()->print();
+      tty->print_cr("========");
+#endif //PRODUCT
+    }
     boot_method_oop = ik->bootstrap_method();
   }
 
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -144,7 +144,6 @@
   template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
   template(Linkage_klass,                java_dyn_Linkage,               Opt) \
   template(CallSite_klass,               java_dyn_CallSite,              Opt) \
-  template(CallSiteImpl_klass,           sun_dyn_CallSiteImpl,     Opt) \
   template(Dynamic_klass,                java_dyn_Dynamic,               Opt) \
   /* Note: MethodHandle must be first, and Dynamic last in group */           \
                                                                               \
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1903,17 +1903,8 @@
   verify_cp_type(index, cp, types, CHECK_VERIFY(this));
 
   // Get method name and signature
-  symbolHandle method_name;
-  symbolHandle method_sig;
-  if (opcode == Bytecodes::_invokedynamic) {
-    int name_index = cp->name_ref_index_at(index);
-    int sig_index  = cp->signature_ref_index_at(index);
-    method_name = symbolHandle(THREAD, cp->symbol_at(name_index));
-    method_sig  = symbolHandle(THREAD, cp->symbol_at(sig_index));
-  } else {
-    method_name = symbolHandle(THREAD, cp->name_ref_at(index));
-    method_sig  = symbolHandle(THREAD, cp->signature_ref_at(index));
-  }
+  symbolHandle method_name(THREAD, cp->name_ref_at(index));
+  symbolHandle method_sig(THREAD, cp->signature_ref_at(index));
 
   if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
     class_format_error(
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -233,10 +233,9 @@
   template(sun_dyn_AdapterMethodHandle,               "sun/dyn/AdapterMethodHandle")              \
   template(sun_dyn_BoundMethodHandle,                 "sun/dyn/BoundMethodHandle")                \
   template(sun_dyn_DirectMethodHandle,                "sun/dyn/DirectMethodHandle")               \
-  template(sun_dyn_CallSiteImpl,                      "sun/dyn/CallSiteImpl")                     \
   template(makeImpl_name,                             "makeImpl") /*MethodType::makeImpl*/        \
   template(makeImpl_signature,    "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
-  template(makeSite_name,                             "makeSite") /*CallSiteImpl::makeImpl*/       \
+  template(makeSite_name,                             "makeSite") /*CallSite::makeSite*/          \
   template(makeSite_signature,    "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
   template(findBootstrapMethod_name,                  "findBootstrapMethod")                      \
   template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
--- a/hotspot/src/share/vm/includeDB_core	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_core	Tue Nov 10 17:00:18 2009 -0800
@@ -1291,6 +1291,7 @@
 cpCacheOop.cpp                          markSweep.inline.hpp
 cpCacheOop.cpp                          objArrayOop.hpp
 cpCacheOop.cpp                          oop.inline.hpp
+cpCacheOop.cpp                          rewriter.hpp
 cpCacheOop.cpp                          universe.inline.hpp
 
 cpCacheOop.hpp                          allocation.hpp
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -282,18 +282,21 @@
   constantPoolOop constants = method()->constants();
   constantTag tag = constants->tag_at(i);
 
+  int nt_index = -1;
+
   switch (tag.value()) {
   case JVM_CONSTANT_InterfaceMethodref:
   case JVM_CONSTANT_Methodref:
   case JVM_CONSTANT_Fieldref:
+  case JVM_CONSTANT_NameAndType:
     break;
   default:
     st->print_cr(" bad tag=%d at %d", tag.value(), i);
     return;
   }
 
-  symbolOop name = constants->name_ref_at(orig_i);
-  symbolOop signature = constants->signature_ref_at(orig_i);
+  symbolOop name = constants->uncached_name_ref_at(i);
+  symbolOop signature = constants->uncached_signature_ref_at(i);
   st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
 }
 
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -314,6 +314,20 @@
       break;
     }
 
+   case Bytecodes::_invokedynamic: {
+      Thread *thread = Thread::current();
+      ResourceMark rm(thread);
+      methodHandle mh(thread, method);
+      type = Bytecode_invoke_at(mh, bci)->result_type(thread);
+      // since the cache entry might not be initialized:
+      // (NOT needed for the old calling convention)

+      if (!is_top_frame) {
+        int index = Bytes::get_native_u4(bcp+1);
+        method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
+      }
+      break;
+    }
+
     case Bytecodes::_ldc   :
       type = constant_pool_type( method, *(bcp+1) );
       break;
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -681,7 +681,7 @@
 IRT_END
 
 
-// First time execution:  Resolve symbols, create a permanent CallSiteImpl object.
+// First time execution:  Resolve symbols, create a permanent CallSite object.
 IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
   ResourceMark rm(thread);
 
@@ -708,21 +708,16 @@
   constantPoolHandle pool(thread, caller_method->constants());
   pool->set_invokedynamic();    // mark header to flag active call sites
 
-  int raw_index = four_byte_index(thread);
-  assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially");
-
-  // there are two CPC entries that are of interest:
-  int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index);
-  int main_index = pool->cache()->entry_at(site_index)->main_entry_index();
-  // and there is one CP entry, a NameAndType:
-  int nt_index = pool->map_instruction_operand_to_index(raw_index);
+  int site_index = four_byte_index(thread);
+  // there is a second CPC entry that is of interest; it caches signature info:
+  int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
 
   // first resolve the signature to a MH.invoke methodOop
   if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
     JvmtiHideSingleStepping jhss(thread);
     CallInfo info;
     LinkResolver::resolve_invoke(info, Handle(), pool,
-                                 raw_index, bytecode, CHECK);
+                                 site_index, bytecode, CHECK);
     // The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
     // as a common reference point for all invokedynamic call sites with
     // that exact call descriptor.  We will link it in the CP cache exactly
@@ -741,7 +736,7 @@
   assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
          "correct result from LinkResolver::resolve_invokedynamic");
 
-  symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index));
+  symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
   Handle call_site
     = SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
                                                caller_method->method_idnum(),
@@ -753,61 +748,11 @@
   // In the secondary entry, the f1 field is the call site, and the f2 (index)
   // field is some data about the invoke site.
   int extra_data = 0;
-  pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
+  pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
 }
 IRT_END
 
 
-// Called on first time execution, and also whenever the CallSite.target is null.
-// FIXME:  Do more of this in Java code.
-IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) {
-  methodHandle   mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site));
-  Handle         mh_type(thread,   mh_invdyn->method_handle_type());
-  objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type()));
-
-  // squish the arguments down to a single array
-  int nargs = mh_ptypes->length();
-  objArrayHandle arg_array;
-  {
-    objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK);
-    arg_array = objArrayHandle(thread, aaoop);
-  }
-  frame fr = thread->last_frame();
-  assert(fr.interpreter_frame_bcp() != NULL, "sanity");
-  int tos_offset = 0;
-  for (int i = nargs; --i >= 0; ) {
-    intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++);
-    oop ptype = mh_ptypes->obj_at(i);
-    oop arg = NULL;
-    if (!java_lang_Class::is_primitive(ptype)) {
-      arg = *(oop*) slot_addr;
-    } else {
-      BasicType bt = java_lang_Class::primitive_type(ptype);
-      assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code");
-      jvalue value;
-      Interpreter::get_jvalue_in_slot(slot_addr, bt, &value);
-      tos_offset += type2size[bt]-1;
-      arg = java_lang_boxing_object::create(bt, &value, CHECK);
-      // FIXME:  These boxing objects are not canonicalized under
-      // the Java autoboxing rules.  They should be...
-      // The best approach would be to push the arglist creation into Java.
-      // The JVM should use a lower-level interface to communicate argument lists.
-    }
-    arg_array->obj_at_put(i, arg);
-  }
-
-  // now find the bootstrap method
-  oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method();
-  assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM");
-
-  // return the bootstrap method and argument array via vm_result/_2
-  thread->set_vm_result(bootstrap_mh_oop);
-  thread->set_vm_result_2(arg_array());
-}
-IRT_END
-
-
-
 //------------------------------------------------------------------------------------------------------------------------
 // Miscellaneous
 
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -91,7 +91,6 @@
   // Calls
   static void    resolve_invoke       (JavaThread* thread, Bytecodes::Code bytecode);
   static void    resolve_invokedynamic(JavaThread* thread);
-  static void  bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site);
 
   // Breakpoints
   static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp);
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1015,11 +1015,8 @@
 
   // This guy is reached from InterpreterRuntime::resolve_invokedynamic.
 
-  assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "must be secondary index");
-  int nt_index = pool->map_instruction_operand_to_index(raw_index);
-
   // At this point, we only need the signature, and can ignore the name.
-  symbolHandle method_signature(THREAD, pool->nt_signature_ref_at(nt_index));
+  symbolHandle method_signature(THREAD, pool->signature_ref_at(raw_index));  // raw_index works directly
   symbolHandle method_name = vmSymbolHandles::invoke_name();
   KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
 
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -48,16 +48,6 @@
 }
 
 
-int Rewriter::add_extra_cp_cache_entry(int main_entry) {
-  // Hack: We put it on the map as an encoded value.
-  // The only place that consumes this is ConstantPoolCacheEntry::set_initial_state
-  int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry);
-  int plain_secondary_index = _cp_cache_map.append(encoded);
-  return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index);
-}
-
-
-
 // Creates a constant pool cache given a CPC map
 // This creates the constant pool cache initially in a state
 // that is unsafe for concurrent GC processing but sets it to
@@ -127,7 +117,7 @@
   assert(p[-1] == Bytecodes::_invokedynamic, "");
   int cp_index = Bytes::get_Java_u2(p);
   int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
-  int cpc2 = add_extra_cp_cache_entry(cpc);
+  int cpc2 = add_secondary_cp_cache_entry(cpc);
 
   // Replace the trailing four bytes with a CPC index for the dynamic
   // call site.  Unlike other CPC entries, there is one per bytecode,
@@ -137,7 +127,7 @@
   // all these entries.  That is the main reason invokedynamic
   // must have a five-byte instruction format.  (Of course, other JVM
   // implementations can use the bytes for other purposes.)
-  Bytes::put_native_u4(p, cpc2);
+  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
   // Note: We use native_u4 format exclusively for 4-byte indexes.
 }
 
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -43,13 +43,18 @@
   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
   int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? _cp_map[i] : add_cp_cache_entry(i); }
   int add_cp_cache_entry(int cp_index) {
+    assert((cp_index & _secondary_entry_tag) == 0, "bad tag");
     assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
     int cache_index = _cp_cache_map.append(cp_index);
     _cp_map.at_put(cp_index, cache_index);
     assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
     return cache_index;
   }
-  int add_extra_cp_cache_entry(int main_entry);
+  int add_secondary_cp_cache_entry(int main_cpc_entry) {
+    assert(main_cpc_entry < _cp_cache_map.length(), "must be earlier CP cache entry");
+    int cache_index = _cp_cache_map.append(main_cpc_entry | _secondary_entry_tag);
+    return cache_index;
+  }
 
   // All the work goes in here:
   Rewriter(instanceKlassHandle klass, TRAPS);
@@ -65,4 +70,8 @@
  public:
   // Driver routine:
   static void rewrite(instanceKlassHandle klass, TRAPS);
+
+  enum {
+    _secondary_entry_tag = nth_bit(30)
+  };
 };
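
The new _secondary_entry_tag lets a secondary constant-pool-cache entry carry the index of its main entry with bit 30 set, so both kinds of entries can share the same map. A minimal encode/decode sketch of that tagging scheme; the helper names are illustrative, not HotSpot's:

    #include <cassert>

    const int secondary_entry_tag = 1 << 30;   // mirrors nth_bit(30) above

    static int  make_secondary(int main_cpc_entry) { return main_cpc_entry | secondary_entry_tag; }
    static bool is_secondary(int stored)           { return (stored & secondary_entry_tag) != 0; }
    static int  main_entry_of(int stored)          { return stored & ~secondary_entry_tag; }

    int main() {
        int stored = make_secondary(17);
        assert(is_secondary(stored));
        assert(main_entry_of(stored) == 17);
        return 0;
    }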
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -178,14 +178,12 @@
 #endif // !PRODUCT
 EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
 EntryPoint TemplateInterpreter::_earlyret_entry;
-EntryPoint TemplateInterpreter::_return_unbox_entry;
 EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
 EntryPoint TemplateInterpreter::_continuation_entry;
 EntryPoint TemplateInterpreter::_safept_entry;
 
 address    TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
 address    TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
-address    TemplateInterpreter::_return_5_unbox_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
 
 DispatchTable TemplateInterpreter::_active_table;
 DispatchTable TemplateInterpreter::_normal_table;
@@ -253,22 +251,6 @@
     }
   }
 
-  if (EnableInvokeDynamic) {
-    CodeletMark cm(_masm, "unboxing return entry points");
-    Interpreter::_return_unbox_entry =
-      EntryPoint(
-        generate_return_unbox_entry_for(btos, 5),
-        generate_return_unbox_entry_for(ctos, 5),
-        generate_return_unbox_entry_for(stos, 5),
-        generate_return_unbox_entry_for(atos, 5), // cast conversion
-        generate_return_unbox_entry_for(itos, 5),
-        generate_return_unbox_entry_for(ltos, 5),
-        generate_return_unbox_entry_for(ftos, 5),
-        generate_return_unbox_entry_for(dtos, 5),
-        Interpreter::_return_entry[5].entry(vtos) // no unboxing for void
-      );
-  }
-
   { CodeletMark cm(_masm, "earlyret entry points");
     Interpreter::_earlyret_entry =
       EntryPoint(
@@ -319,8 +301,6 @@
     int index = Interpreter::TosState_as_index(states[j]);
     Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
     Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
-    if (EnableInvokeDynamic)
-      Interpreter::_return_5_unbox_addrs_by_index[index] = Interpreter::return_unbox_entry(states[j], 5);
   }
 
   { CodeletMark cm(_masm, "continuation entry points");
@@ -547,18 +527,6 @@
 }
 
 
-address TemplateInterpreter::return_unbox_entry(TosState state, int length) {
-  assert(EnableInvokeDynamic, "");
-  if (state == vtos) {
-    // no unboxing to do, actually
-    return return_entry(state, length);
-  } else {
-    assert(length == 5, "unboxing entries generated for invokedynamic only");
-    return _return_unbox_entry.entry(state);
-  }
-}
-
-
 address TemplateInterpreter::deopt_entry(TosState state, int length) {
   guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
   return _deopt_entry[length].entry(state);
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -110,14 +110,12 @@
 #endif // !PRODUCT
   static EntryPoint _return_entry[number_of_return_entries];    // entry points to return to from a call
   static EntryPoint _earlyret_entry;                            // entry point to return early from a call
-  static EntryPoint _return_unbox_entry;                        // entry point to unbox a return value from a call
   static EntryPoint _deopt_entry[number_of_deopt_entries];      // entry points to return to from a deoptimization
   static EntryPoint _continuation_entry;
   static EntryPoint _safept_entry;
 
   static address    _return_3_addrs_by_index[number_of_return_addrs];     // for invokevirtual   return entries
   static address    _return_5_addrs_by_index[number_of_return_addrs];     // for invokeinterface return entries
-  static address    _return_5_unbox_addrs_by_index[number_of_return_addrs]; // for invokedynamic bootstrap methods
 
   static DispatchTable _active_table;                           // the active    dispatch table (used by the interpreter for dispatch)
   static DispatchTable _normal_table;                           // the normal    dispatch table (used to set the active table in normal mode)
@@ -159,12 +157,10 @@
   // Support for invokes
   static address*   return_3_addrs_by_index_table()             { return _return_3_addrs_by_index; }
   static address*   return_5_addrs_by_index_table()             { return _return_5_addrs_by_index; }
-  static address*   return_5_unbox_addrs_by_index_table()       { return _return_5_unbox_addrs_by_index; }
   static int        TosState_as_index(TosState state);          // computes index into return_3_entry_by_index table
 
   static address    return_entry  (TosState state, int length);
   static address    deopt_entry   (TosState state, int length);
-  static address    return_unbox_entry(TosState state, int length);
 
   // Safepoint support
   static void       notice_safepoints();                        // stops the thread when reaching a safepoint
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -51,10 +51,7 @@
   address generate_WrongMethodType_handler();
   address generate_ArrayIndexOutOfBounds_handler(const char* name);
   address generate_continuation_for(TosState state);
-  address generate_return_entry_for(TosState state, int step, bool unbox = false);
-  address generate_return_unbox_entry_for(TosState state, int step) {
-    return generate_return_entry_for(state, step, true);
-  }
+  address generate_return_entry_for(TosState state, int step);
   address generate_earlyret_entry_for(TosState state);
   address generate_deopt_entry_for(TosState state, int step);
   address generate_safept_entry_for(TosState state, address runtime_entry);
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -744,22 +744,22 @@
 static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 
 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
+  size_t base = 0;
 #ifdef _LP64
   if (UseCompressedOops) {
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
+    const size_t total_size = heap_size + HeapBaseMinAddress;
     // Return specified base for the first request.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
-      return (char*)HeapBaseMinAddress;
-    }
-    const size_t total_size = heap_size + HeapBaseMinAddress;
-    if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
+      base = HeapBaseMinAddress;
+    } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
       if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
           (Universe::narrow_oop_shift() == 0)) {
         // Use 32-bits oops without encoding and
         // place heap's top on the 4Gb boundary
-        return (char*)(NarrowOopHeapMax - heap_size);
+        base = (NarrowOopHeapMax - heap_size);
       } else {
         // Can't reserve with NarrowOopShift == 0
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -768,16 +768,38 @@
           // Use zero based compressed oops with encoding and
           // place heap's top on the 32Gb boundary in case
           // total_size > 4Gb or failed to reserve below 4Gb.
-          return (char*)(OopEncodingHeapMax - heap_size);
+          base = (OopEncodingHeapMax - heap_size);
         }
       }
     } else {
       // Can't reserve below 32Gb.
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
     }
+    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
+    // used in ReservedHeapSpace() constructors.
+    // The final values will be set in initialize_heap() below.
+    if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
+      // Use zero based compressed oops
+      Universe::set_narrow_oop_base(NULL);
+      // Don't need guard page for implicit checks in indexed
+      // addressing mode with zero based Compressed Oops.
+      Universe::set_narrow_oop_use_implicit_null_checks(true);
+    } else {
+      // Set to a non-NULL value so the ReservedSpace ctor computes
+      // the correct no-access prefix.
+      // The final value will be set in initialize_heap() below.
+      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
+#ifdef _WIN64
+      if (UseLargePages) {
+        // Cannot allocate guard pages for implicit checks in indexed
+        // addressing mode when large pages are specified on windows.
+        Universe::set_narrow_oop_use_implicit_null_checks(false);
+      }
+#endif //  _WIN64
+    }
   }
 #endif
-  return NULL; // also return NULL (don't care) for 32-bit VM
+  return (char*)base; // also return NULL (don't care) for 32-bit VM
 }
 
 jint Universe::initialize_heap() {
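
For orientation, the rewritten preferred_heap_base() tries three placements in order: an unscaled encoding with the heap's top on the 4Gb boundary, a zero-based scaled encoding with the top on the 32Gb boundary, and finally a heap-based encoding where the narrow oop base stays non-NULL. The standalone sketch below (not part of the patch) mirrors that decision in simplified form, assuming 8-byte object alignment and ignoring the mode and flag checks the real code performs:

#include <stdint.h>
#include <stdio.h>

// Simplified sketch of the compressed-oops placement policy above; the 4Gb and
// 32Gb limits assume 8-byte object alignment (shift of 3), the usual default.
static const uint64_t NarrowOopHeapMax   = (uint64_t)1 << 32;       // 4Gb: 32-bit oops, no shift
static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << 3;   // 32Gb: 32-bit oops, shift by 3

static uint64_t preferred_heap_base(uint64_t heap_size, uint64_t heap_base_min_address) {
  uint64_t total_size = heap_size + heap_base_min_address;
  if (total_size <= NarrowOopHeapMax) {
    return NarrowOopHeapMax - heap_size;    // unscaled: place the heap's top on the 4Gb boundary
  } else if (total_size <= OopEncodingHeapMax) {
    return OopEncodingHeapMax - heap_size;  // zero based: place the heap's top on the 32Gb boundary
  }
  return 0;                                 // heap based: let the OS choose, base set up later
}

int main() {
  uint64_t min_addr = 2 * 1024 * 1024;      // arbitrary HeapBaseMinAddress for illustration
  printf("2Gb heap  -> base 0x%llx\n", (unsigned long long) preferred_heap_base((uint64_t)2  << 30, min_addr));
  printf("16Gb heap -> base 0x%llx\n", (unsigned long long) preferred_heap_base((uint64_t)16 << 30, min_addr));
  return 0;
}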
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -262,25 +262,48 @@
 
 
 int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
-  jint ref_index = field_or_method_at(which, uncached);
+  int i = which;
+  if (!uncached && cache() != NULL) {
+    if (constantPoolCacheOopDesc::is_secondary_index(which))
+      // Invokedynamic indexes are always processed in native order
+      // so there is no question of reading a native u2 in Java order here.
+      return cache()->main_entry_at(which)->constant_pool_index();
+    // change byte-ordering and go via cache
+    i = remap_instruction_operand_from_cache(which);
+  } else {
+    if (tag_at(which).is_name_and_type())
+      // invokedynamic index is a simple name-and-type
+      return which;
+  }
+  assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+  jint ref_index = *int_at_addr(i);
   return extract_high_short_from_int(ref_index);
 }
 
 
 int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
-  jint ref_index = field_or_method_at(which, uncached);
+  guarantee(!constantPoolCacheOopDesc::is_secondary_index(which),
+            "an invokedynamic instruction does not have a klass");
+  int i = which;
+  if (!uncached && cache() != NULL) {
+    // change byte-ordering and go via cache
+    i = remap_instruction_operand_from_cache(which);
+  }
+  assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
+  jint ref_index = *int_at_addr(i);
   return extract_low_short_from_int(ref_index);
 }
 
 
 
-int constantPoolOopDesc::map_instruction_operand_to_index(int operand) {
-  if (constantPoolCacheOopDesc::is_secondary_index(operand)) {
-    return cache()->main_entry_at(operand)->constant_pool_index();
-  }
+int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
+  // Operand was fetched by a stream using get_Java_u2, yet was stored
+  // by Rewriter::rewrite_member_reference in native order.
+  // So now we have to fix the damage by swapping back to native order.
   assert((int)(u2)operand == operand, "clean u2");
-  int index = Bytes::swap_u2(operand);
-  return cache()->entry_at(index)->constant_pool_index();
+  int cpc_index = Bytes::swap_u2(operand);
+  int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
+  return member_index;
 }
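
The remapping above hinges on one detail: the Rewriter stores the cache index as a native-order u2, while the bytecode stream fetches it with get_Java_u2, so the operand must be byte-swapped before it can be used as a cache index. A minimal illustration of that swap as it behaves on a little-endian host (the real Bytes::swap_u2 handles byte order in a platform-specific way):

#include <stdint.h>
#include <assert.h>

// Illustrative stand-in for the byte swap: reverse the two bytes of a u2 operand.
static inline uint16_t swap_u2(uint16_t x) {
  return (uint16_t)((x >> 8) | (x << 8));
}

int main() {
  // A cache index stored in native (little-endian) order as 0x0102 reads back
  // as 0x0201 through a Java-order u2 fetch; swapping recovers the stored index.
  uint16_t fetched_in_java_order = 0x0201;
  assert(swap_u2(fetched_in_java_order) == 0x0102);
  return 0;
}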
 
 
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -342,12 +342,14 @@
   }
 
   // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
-  // name_and_type_ref_index_at) all expect constant pool indices
-  // from the bytecodes to be passed in, which are actually potentially byte-swapped
-  // or rewritten constant pool cache indices.  They all call map_instruction_operand_to_index.
-  int map_instruction_operand_to_index(int operand);
+  // name_and_type_ref_index_at) all expect to be passed indices obtained
+  // directly from the bytecode, and extracted according to Java byte order.
+  // If the indices are meant to refer to fields or methods, they are
+  // actually potentially byte-swapped, rewritten constant pool cache indices.
+  // The routine remap_instruction_operand_from_cache manages the adjustment
+  // of these values back to constant pool indices.
 
-  // There are also "uncached" versions which do not map the operand index; see below.
+  // There are also "uncached" versions which do not adjust the operand index; see below.
 
   // Lookup for entries consisting of (klass_index, name_and_type index)
   klassOop klass_ref_at(int which, TRAPS);
@@ -361,8 +363,6 @@
   // Lookup for entries consisting of (name_index, signature_index)
   int name_ref_index_at(int which_nt);            // ==  low-order jshort of name_and_type_at(which_nt)
   int signature_ref_index_at(int which_nt);       // == high-order jshort of name_and_type_at(which_nt)
-  symbolOop nt_name_ref_at(int which_nt)          { return symbol_at(name_ref_index_at(which_nt)); }
-  symbolOop nt_signature_ref_at(int which_nt)     { return symbol_at(signature_ref_index_at(which_nt)); }
 
   BasicType basic_type_for_signature_at(int which);
 
@@ -425,18 +425,7 @@
   int       impl_klass_ref_index_at(int which, bool uncached);
   int       impl_name_and_type_ref_index_at(int which, bool uncached);
 
-  // Takes either a constant pool cache index in possibly byte-swapped
-  // byte order (which comes from the bytecodes after rewriting) or,
-  // if "uncached" is true, a vanilla constant pool index
-  jint field_or_method_at(int which, bool uncached) {
-    int i = which;
-    if (!uncached && cache() != NULL) {
-      // change byte-ordering and go via cache
-      i = map_instruction_operand_to_index(which);
-    }
-    assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
-    return *int_at_addr(i);
-  }
+  int remap_instruction_operand_from_cache(int operand);
 
   // Used while constructing constant pool (only by ClassFileParser)
   jint klass_index_at(int which) {
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -28,21 +28,17 @@
 
 // Implementation of ConstantPoolCacheEntry
 
-void ConstantPoolCacheEntry::set_initial_state(int index) {
-  if (constantPoolCacheOopDesc::is_secondary_index(index)) {
-    // Hack:  The rewriter is trying to say that this entry itself
-    // will be a secondary entry.
-    int main_index = constantPoolCacheOopDesc::decode_secondary_index(index);
-    assert(0 <= main_index && main_index < 0x10000, "sanity check");
-    _indices = (main_index << 16);
-    assert(main_entry_index() == main_index, "");
-    return;
-  }
+void ConstantPoolCacheEntry::initialize_entry(int index) {
   assert(0 < index && index < 0x10000, "sanity check");
   _indices = index;
   assert(constant_pool_index() == index, "");
 }
 
+void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
+  assert(0 <= main_index && main_index < 0x10000, "sanity check");
+  _indices = (main_index << 16);
+  assert(main_entry_index() == main_index, "");
+}
 
 int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                     bool is_vfinal, bool is_volatile,
@@ -223,10 +219,10 @@
 
 
 void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
-  methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site());
+  methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
   assert(method->is_method(), "must be initialized properly");
   int param_size = method->size_of_parameters();
-  assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver");
+  assert(param_size >= 1, "method argument size must include MH.this");
   param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
   if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
     // racing threads might be trying to install their own favorites
@@ -439,7 +435,18 @@
 
 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
   assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
-  for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
+  for (int i = 0; i < length(); i++) {
+    ConstantPoolCacheEntry* e = entry_at(i);
+    int original_index = inverse_index_map[i];
+    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
+      int main_index = (original_index - Rewriter::_secondary_entry_tag);
+      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
+      e->initialize_secondary_entry(main_index);
+    } else {
+      e->initialize_entry(original_index);
+    }
+    assert(entry_at(i) == e, "sanity");
+  }
 }
 
 // RedefineClasses() API support:
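
To make the tagging in initialize() concrete: each slot of the inverse index map holds either a plain constant pool index or a main-entry index tagged with Rewriter::_secondary_entry_tag. The sketch below (separate from the patch) mirrors that loop with a hypothetical tag bit, since the tag's actual value is defined in the Rewriter and not shown here:

#include <assert.h>

// Hypothetical tag bit standing in for Rewriter::_secondary_entry_tag,
// whose real value is defined in the Rewriter, outside this patch.
static const int kSecondaryEntryTag = 1 << 30;

struct Entry {
  bool secondary;
  int  index;  // constant pool index (primary) or main entry index (secondary)
};

// Mirrors the shape of constantPoolCacheOopDesc::initialize(): each slot of the
// inverse index map is either a plain constant pool index or a tagged main-entry index.
static void initialize_cache(Entry* cache, const int* inverse_index_map, int length) {
  for (int i = 0; i < length; i++) {
    int original_index = inverse_index_map[i];
    if ((original_index & kSecondaryEntryTag) != 0) {
      cache[i].secondary = true;
      cache[i].index     = original_index - kSecondaryEntryTag;
    } else {
      cache[i].secondary = false;
      cache[i].index     = original_index;
    }
  }
}

int main() {
  Entry cache[3];
  int map[3] = { 5, 7, 1 | kSecondaryEntryTag };
  initialize_cache(cache, map, 3);
  assert(cache[2].secondary && cache[2].index == 1);  // slot 2 is a secondary entry pointing at main entry 1
  return 0;
}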
--- a/hotspot/src/share/vm/oops/cpCacheOop.hpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/oops/cpCacheOop.hpp	Tue Nov 10 17:00:18 2009 -0800
@@ -154,7 +154,8 @@
   };
 
   // Initialization
-  void set_initial_state(int index);             // sets entry to initial state
+  void initialize_entry(int original_index);     // initialize primary entry
+  void initialize_secondary_entry(int main_index); // initialize secondary entry
 
   void set_field(                                // sets entry to resolved field state
     Bytecodes::Code get_code,                    // the bytecode used for reading the field
@@ -251,6 +252,7 @@
 
   // Code generation support
   static WordSize size()                         { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
+  static ByteSize size_in_bytes()                { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
   static ByteSize indices_offset()               { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
   static ByteSize f1_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
   static ByteSize f2_offset()                    { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
@@ -321,6 +323,7 @@
   ConstantPoolCacheEntry* base() const           { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }
 
   friend class constantPoolCacheKlass;
+  friend class ConstantPoolCacheEntry;
 
  public:
   // Initialization
@@ -329,7 +332,8 @@
   // Secondary indexes.
   // They must look completely different from normal indexes.
   // The main reason is that byte swapping is sometimes done on normal indexes.
-  // Also, it is helpful for debugging to tell the two apart.
+  // Also, some of the CP accessors do different things for secondary indexes.
+  // Finally, it is helpful for debugging to tell the two apart.
   static bool is_secondary_index(int i) { return (i < 0); }
   static int  decode_secondary_index(int i) { assert(is_secondary_index(i),  ""); return ~i; }
   static int  encode_secondary_index(int i) { assert(!is_secondary_index(i), ""); return ~i; }
@@ -337,18 +341,35 @@
   // Accessors
   void set_constant_pool(constantPoolOop pool)   { oop_store_without_check((oop*)&_constant_pool, (oop)pool); }
   constantPoolOop constant_pool() const          { return _constant_pool; }
-  ConstantPoolCacheEntry* entry_at(int i) const  { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; }
+  // Fetches the entry at the given index.
+  // The entry may be either primary or secondary.
+  // In either case the index must not be encoded or byte-swapped in any way.
+  ConstantPoolCacheEntry* entry_at(int i) const {
+    assert(0 <= i && i < length(), "index out of bounds");
+    return base() + i;
+  }
+  // Fetches the secondary entry referred to by index.
+  // The index may be a secondary index, and must not be byte-swapped.
+  ConstantPoolCacheEntry* secondary_entry_at(int i) const {
+    int raw_index = i;
+    if (is_secondary_index(i)) {  // correct these on the fly
+      raw_index = decode_secondary_index(i);
+    }
+    assert(entry_at(raw_index)->is_secondary_entry(), "not a secondary entry");
+    return entry_at(raw_index);
+  }
+  // Given a primary or secondary index, fetch the corresponding primary entry.
+  // Indirect through the secondary entry, if the index is encoded as a secondary index.
+  // The index must not be byte-swapped.
   ConstantPoolCacheEntry* main_entry_at(int i) const {
-    ConstantPoolCacheEntry* e;
+    int primary_index = i;
     if (is_secondary_index(i)) {
       // run through an extra level of indirection:
-      i = decode_secondary_index(i);
-      e = entry_at(i);
-      i = e->main_entry_index();
+      int raw_index = decode_secondary_index(i);
+      primary_index = entry_at(raw_index)->main_entry_index();
     }
-    e = entry_at(i);
-    assert(!e->is_secondary_entry(), "only one level of indirection");
-    return e;
+    assert(!entry_at(primary_index)->is_secondary_entry(), "only one level of indirection");
+    return entry_at(primary_index);
   }
 
   // GC support
@@ -359,6 +380,12 @@
 
   // Code generation
   static ByteSize base_offset()                  { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); }
+  static ByteSize entry_offset(int raw_index) {
+    int index = raw_index;
+    if (is_secondary_index(raw_index))
+      index = decode_secondary_index(raw_index);
+    return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
+  }
 
   // RedefineClasses() API support:
   // If any entry of this constantPoolCache points to any of
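
The secondary-index scheme the new accessors rely on is simple: a secondary index is the bitwise complement of a non-negative raw index, so it is always negative and can never be mistaken for (or byte-swapped like) a normal index. A tiny standalone sketch of the encoding, mirroring the static helpers shown above:

#include <assert.h>

// Mirrors the scheme in constantPoolCacheOopDesc: a secondary index is the bitwise
// complement of a non-negative raw index, so it is always negative.
static bool is_secondary_index(int i)     { return i < 0; }
static int  encode_secondary_index(int i) { assert(!is_secondary_index(i)); return ~i; }
static int  decode_secondary_index(int i) { assert(is_secondary_index(i));  return ~i; }

int main() {
  int raw = 42;
  int encoded = encode_secondary_index(raw);   // ~42 == -43
  assert(is_secondary_index(encoded));
  assert(decode_secondary_index(encoded) == raw);
  return 0;
}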
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1556,13 +1556,13 @@
     case Bytecodes::_getfield:          do_field(true,  false, itr->get_index_big(), itr->bci()); break;
     case Bytecodes::_putfield:          do_field(false, false, itr->get_index_big(), itr->bci()); break;
 
-    case Bytecodes::_invokevirtual:
-    case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_invokedynamic:     do_method(false, true,  itr->get_index_int(), itr->bci()); break;
-    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;
-    case Bytecodes::_newarray:
-    case Bytecodes::_anewarray:         pp_new_ref(vCTS, itr->bci()); break;
+    case Bytecodes::_invokevirtual:
+    case Bytecodes::_invokespecial:     do_method(false, false, itr->get_index_big(), itr->bci()); break;
+    case Bytecodes::_invokestatic:      do_method(true,  false, itr->get_index_big(), itr->bci()); break;
+    case Bytecodes::_invokedynamic:     do_method(true,  false, itr->get_index_int(), itr->bci()); break;
+    case Bytecodes::_invokeinterface:   do_method(false, true,  itr->get_index_big(), itr->bci()); break;
+    case Bytecodes::_newarray:
+    case Bytecodes::_anewarray:         pp_new_ref(vCTS, itr->bci()); break;
     case Bytecodes::_checkcast:         do_checkcast(); break;
     case Bytecodes::_arraylength:
     case Bytecodes::_instanceof:        pp(rCTS, vCTS); break;
@@ -1900,11 +1900,9 @@
 }
 
 void GenerateOopMap::do_method(int is_static, int is_interface, int idx, int bci) {
-  // Dig up signature for field in constant pool
-  constantPoolOop cp    = _method->constants();
-  int nameAndTypeIdx    = cp->name_and_type_ref_index_at(idx);
-  int signatureIdx      = cp->signature_ref_index_at(nameAndTypeIdx);  // @@@@@
-  symbolOop signature   = cp->symbol_at(signatureIdx);
+  // Dig up the signature for the method in the constant pool
+  constantPoolOop cp  = _method->constants();
+  symbolOop signature = cp->signature_ref_at(idx);
 
   // Parse method signature
   CellTypeState out[4];
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -317,6 +317,11 @@
     pm->claim_or_forward_breadth(sg_addr);
   }
 
+  oop* bsm_addr = ik->adr_bootstrap_method();
+  if (PSScavenge::should_scavenge(bsm_addr)) {
+    pm->claim_or_forward_breadth(bsm_addr);
+  }
+
   klassKlass::oop_copy_contents(pm, obj);
 }
 
@@ -345,6 +350,11 @@
     pm->claim_or_forward_depth(sg_addr);
   }
 
+  oop* bsm_addr = ik->adr_bootstrap_method();
+  if (PSScavenge::should_scavenge(bsm_addr)) {
+    pm->claim_or_forward_depth(bsm_addr);
+  }
+
   klassKlass::oop_copy_contents(pm, obj);
 }
 
--- a/hotspot/src/share/vm/opto/escape.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/opto/escape.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -537,8 +537,9 @@
   }
 
   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
-  // Do NOT remove the next call: ensure an new alias index is allocated
-  // for the instance type
+  // Do NOT remove the next line: ensure a new alias index is allocated
+  // for the instance type. Note: the C++ compiler will not remove it since
+  // the call has a side effect.
   int alias_idx = _compile->get_alias_index(tinst);
   igvn->set_type(addp, tinst);
   // record the allocation in the node map
--- a/hotspot/src/share/vm/opto/matcher.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1832,67 +1832,23 @@
       case Op_Binary:         // These are introduced in the Post_Visit state.
         ShouldNotReachHere();
         break;
-      case Op_StoreB:         // Do match these, despite no ideal reg
-      case Op_StoreC:
-      case Op_StoreCM:
-      case Op_StoreD:
-      case Op_StoreF:
-      case Op_StoreI:
-      case Op_StoreL:
-      case Op_StoreP:
-      case Op_StoreN:
-      case Op_Store16B:
-      case Op_Store8B:
-      case Op_Store4B:
-      case Op_Store8C:
-      case Op_Store4C:
-      case Op_Store2C:
-      case Op_Store4I:
-      case Op_Store2I:
-      case Op_Store2L:
-      case Op_Store4F:
-      case Op_Store2F:
-      case Op_Store2D:
       case Op_ClearArray:
       case Op_SafePoint:
         mem_op = true;
         break;
-      case Op_LoadB:
-      case Op_LoadUS:
-      case Op_LoadD:
-      case Op_LoadF:
-      case Op_LoadI:
-      case Op_LoadKlass:
-      case Op_LoadNKlass:
-      case Op_LoadL:
-      case Op_LoadS:
-      case Op_LoadP:
-      case Op_LoadN:
-      case Op_LoadRange:
-      case Op_LoadD_unaligned:
-      case Op_LoadL_unaligned:
-      case Op_Load16B:
-      case Op_Load8B:
-      case Op_Load4B:
-      case Op_Load4C:
-      case Op_Load2C:
-      case Op_Load8C:
-      case Op_Load8S:
-      case Op_Load4S:
-      case Op_Load2S:
-      case Op_Load4I:
-      case Op_Load2I:
-      case Op_Load2L:
-      case Op_Load4F:
-      case Op_Load2F:
-      case Op_Load2D:
-        mem_op = true;
-        // Must be root of match tree due to prior load conflict
-        if( C->subsume_loads() == false ) {
-          set_shared(n);
+      default:
+        if( n->is_Store() ) {
+          // Do match stores, despite no ideal reg
+          mem_op = true;
+          break;
+        }
+        if( n->is_Mem() ) { // Loads and LoadStores
+          mem_op = true;
+          // Loads must be root of match tree due to prior load conflict
+          if( C->subsume_loads() == false )
+            set_shared(n);
         }
         // Fall into default case
-      default:
         if( !n->ideal_reg() )
           set_dontcare(n);  // Unmatchable Nodes
       } // end_switch
@@ -1913,15 +1869,15 @@
           continue; // for(int i = ...)
         }
 
-        // Clone addressing expressions as they are "free" in most instructions
+        if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
+          // Bases used in addresses must be shared but since
+          // they are shared through a DecodeN they may appear
+          // to have a single use so force sharing here.
+          set_shared(m->in(AddPNode::Base)->in(1));
+        }
+
+        // Clone addressing expressions as they are "free" in memory access instructions
         if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
-          if (m->in(AddPNode::Base)->Opcode() == Op_DecodeN) {
-            // Bases used in addresses must be shared but since
-            // they are shared through a DecodeN they may appear
-            // to have a single use so force sharing here.
-            set_shared(m->in(AddPNode::Base)->in(1));
-          }
-
           // Some inputs for address expression are not put on stack
           // to avoid marking them as shared and forcing them into register
           // if they are used only in address expressions.
--- a/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -255,6 +255,13 @@
     return NodeSentinel; // caller will return NULL
   }
 
+  // Do NOT remove or optimize the next lines: ensure a new alias index
+  // is allocated for an oop pointer type before Escape Analysis.
+  // Note: the C++ compiler will not remove it since the call has a side effect.
+  if ( t_adr->isa_oopptr() ) {
+    int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
+  }
+
 #ifdef ASSERT
   Node* base = NULL;
   if (address->is_AddP())
--- a/hotspot/src/share/vm/opto/superword.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/opto/superword.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1921,6 +1921,11 @@
   }
   // Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant)
   Node* base = adr->in(AddPNode::Base);
+  // An unsafe reference cannot be aligned appropriately without runtime checking.
+  if (base == NULL || base->bottom_type() == Type::TOP) {
+    assert(!valid(), "unsafe access");
+    return;
+  }
   for (int i = 0; i < 3; i++) {
     if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) {
       assert(!valid(), "too complex");
--- a/hotspot/src/share/vm/prims/jvm.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -2257,10 +2257,8 @@
   switch (cp->tag_at(cp_index).value()) {
     case JVM_CONSTANT_InterfaceMethodref:
     case JVM_CONSTANT_Methodref:
+    case JVM_CONSTANT_NameAndType:  // for invokedynamic
       return cp->uncached_name_ref_at(cp_index)->as_utf8();
-    case JVM_CONSTANT_NameAndType:
-      // for invokedynamic
-      return cp->nt_name_ref_at(cp_index)->as_utf8();
     default:
       fatal("JVM_GetCPMethodNameUTF: illegal constant");
   }
@@ -2277,10 +2275,8 @@
   switch (cp->tag_at(cp_index).value()) {
     case JVM_CONSTANT_InterfaceMethodref:
     case JVM_CONSTANT_Methodref:
+    case JVM_CONSTANT_NameAndType:  // for invokedynamic
       return cp->uncached_signature_ref_at(cp_index)->as_utf8();
-    case JVM_CONSTANT_NameAndType:
-      // for invokedynamic
-      return cp->nt_signature_ref_at(cp_index)->as_utf8();
     default:
       fatal("JVM_GetCPMethodSignatureUTF: illegal constant");
   }
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -2347,9 +2347,9 @@
 JVM_ENTRY(void, MH_linkCallSite(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
   // No special action required, yet.
   oop site_oop = JNIHandles::resolve(site_jh);
-  if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSiteImpl_klass())
+  if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSite_klass())
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "call site");
-  sun_dyn_CallSiteImpl::set_target(site_oop, JNIHandles::resolve(target_jh));
+  java_dyn_CallSite::set_target(site_oop, JNIHandles::resolve(target_jh));
 }
 JVM_END
 
@@ -2365,6 +2365,7 @@
 #define OBJ   LANG"Object;"
 #define CLS   LANG"Class;"
 #define STRG  LANG"String;"
+#define CST   JDYN"CallSite;"
 #define MT    JDYN"MethodType;"
 #define MH    JDYN"MethodHandle;"
 #define MHI   IDYN"MethodHandleImpl;"
@@ -2372,7 +2373,6 @@
 #define AMH   IDYN"AdapterMethodHandle;"
 #define BMH   IDYN"BoundMethodHandle;"
 #define DMH   IDYN"DirectMethodHandle;"
-#define CSTI  IDYN"CallSiteImpl;"
 
 #define CC (char*)  /*cast a literal from (const char*)*/
 #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
@@ -2398,7 +2398,7 @@
 
 // More entry points specifically for EnableInvokeDynamic.
 static JNINativeMethod methods2[] = {
-  {CC"linkCallSite",            CC"("CSTI MH")V",               FN_PTR(MH_linkCallSite)}
+  {CC"linkCallSite",            CC"("CST MH")V",                FN_PTR(MH_linkCallSite)}
 };
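
For readers decoding the signature macros: the JNI descriptor for linkCallSite is built by string-literal concatenation, so replacing CSTI with CST changes the expected first argument from sun.dyn.CallSiteImpl to java.dyn.CallSite. A hedged illustration follows, assuming JDYN expands to "Ljava/dyn/" (its definition sits outside this hunk):

#include <stdio.h>
#include <string.h>
#include <assert.h>

// Assumed expansions; the real definitions live elsewhere in methodHandles.cpp.
#define JDYN "Ljava/dyn/"
#define CST  JDYN "CallSite;"
#define MH   JDYN "MethodHandle;"
#define CC (char*)

int main() {
  // Same concatenation pattern as the methods2[] table entry for linkCallSite.
  char* sig = CC "(" CST MH ")V";
  assert(strcmp(sig, "(Ljava/dyn/CallSite;Ljava/dyn/MethodHandle;)V") == 0);
  printf("%s\n", sig);
  return 0;
}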
 
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 17:03:05 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Tue Nov 10 17:00:18 2009 -0800
@@ -1234,9 +1234,11 @@
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
+#ifndef COMPILER1
     if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
       FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
+#endif
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {
       // Cannot allocate guard pages for implicit checks in indexed addressing
@@ -2675,6 +2677,10 @@
     }
   }
 
+#if defined(_LP64) && defined(COMPILER1)
+  UseCompressedOops = false;
+#endif
+
 #ifdef SERIALGC
   set_serial_gc_flags();
 #endif // SERIALGC
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6769124/TestArrayCopy6769124.java	Tue Nov 10 17:00:18 2009 -0800
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6769124
+ * @summary arraycopy may crash the VM with c1 on 64 bit
+ */
+
+public class TestArrayCopy6769124 {
+
+    public static void main(String[] args) {
+
+        int k = 1 << 31;
+
+
+        for (int j = 0; j < 1000000; j++) {
+            int i = -1;
+            while (i < 10) {
+                i++;
+            }
+
+            int m = k * i;
+
+            int[] O1 = new int[20];
+            int[] O2 = new int[20];
+
+            System.arraycopy(O1, i, O2, i, 1); // will crash on amd64
+            System.arraycopy(O1, m, O2, m, 1); // will crash on sparcv9
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6769124/TestDeoptInt6769124.java	Tue Nov 10 17:00:18 2009 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6769124
+ * @summary int value might not be correctly decoded on deopt with c1 on 64 bit
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=TestDeoptInt6769124.m TestDeoptInt6769124
+ */
+
+public class TestDeoptInt6769124 {
+
+    static class A {
+        volatile int vl;
+        A(int v) {
+            vl = v;
+        }
+    }
+
+    static void m(int b) {
+        A a = new A(10);
+        int c;
+        c = b + a.vl; // accessing a volatile field of a class not loaded at compile time forces a deopt
+        if(c != 20) {
+            System.out.println("a (= " + a.vl + ") + b (= " + b + ") = c (= " + c + ") != 20");
+            throw new InternalError();
+        }
+    }
+
+    public static void main(String[] args) {
+        m(10);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6769124/TestUnalignedLoad6769124.java	Tue Nov 10 17:00:18 2009 -0800
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6769124
+ * @summary unaligned load may fail with c1 on 64 bit
+ */
+
+public class TestUnalignedLoad6769124 {
+
+    static long l1v = 0x200000003L;
+    static long l2v = 0x400000005L;
+    static double d1v = Double.MAX_VALUE;
+    static double d2v = Double.MIN_VALUE;
+
+    public static void main(String[] args) {
+        long l1 = l1v;
+        double d1 = d1v;
+        long l2 = l2v;
+        double d2 = d2v;
+
+        // Run long enough to induce an OSR
+        for (int i = 0; i < 10000000; i++) {
+        }
+        boolean error = false;
+
+        if (l1 != l1v) {
+            System.out.println(l1 + " != " + l1v);
+            error = true;
+        }
+        if (l2 != l2v) {
+            System.out.println(l2 + " != " + l2v);
+            error = true;
+        }
+        if (d1 != d1v) {
+            System.out.println(d1 + " != " + d1v);
+            error = true;
+        }
+        if (d2 != d2v) {
+            System.out.println(d2 + " != " + d2v);
+            error = true;
+        }
+        if (error) {
+            throw new InternalError();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6852078/Test6852078.java	Tue Nov 10 17:00:18 2009 -0800
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6852078
+ * @summary Disable SuperWord optimization for unsafe read/write
+ *
+ * @run main/othervm Test6852078
+ */
+
+import java.util.*;
+import java.nio.ByteBuffer;
+import com.sun.corba.se.impl.encoding.ByteBufferWithInfo;
+import com.sun.jndi.toolkit.corba.CorbaUtils;
+
+public class Test6852078 {
+
+    public Test6852078(String [] args) {
+
+        int capacity = 128;
+        ByteBuffer bb = ByteBuffer.allocateDirect(capacity);
+        ByteBufferWithInfo bbwi = new ByteBufferWithInfo( CorbaUtils.getOrb(null, -1, new Hashtable()), bb);
+        byte[] tmpBuf;
+        tmpBuf = new byte[bbwi.buflen];
+
+        for (int i = 0; i < capacity; i++)
+            tmpBuf[i] = bbwi.byteBuffer.get(i);
+    }
+
+    public static void main(String [] args) {
+        for (int i=0; i<2000; i++) {
+            Test6852078 t = new Test6852078(args);
+        }
+    }
+}