Merge
author twisti
Thu, 04 Feb 2010 03:34:05 -0800
changeset 4759 bbc25efafb8a
parent 4744 40fc0ab5cd15 (current diff)
parent 4758 5911cc4cb361 (diff)
child 4760 70aa3bc938c2
Merge
hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -357,7 +357,7 @@
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
@@ -373,13 +373,10 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
-
 
   if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
     __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
@@ -390,11 +387,13 @@
   __ delayed()->nop();
   debug_only(__ stop("should have gone to the caller");)
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
@@ -408,23 +407,18 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
-
   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
   __ delayed()->nop();
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   debug_only(__ stop("should have gone to the caller");)
-
   __ end_a_stub();
+
+  return offset;
 }
 
 
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -366,8 +366,9 @@
   // as get_original_pc() needs correct value for unextended_sp()
   if (_pc != NULL) {
     _cb = CodeCache::find_blob(_pc);
-    if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-      _pc = ((nmethod*)_cb)->get_original_pc(this);
+    address original_pc = nmethod::get_deopt_original_pc(this);
+    if (original_pc != NULL) {
+      _pc = original_pc;
       _deopt_state = is_deoptimized;
     } else {
       _deopt_state = not_deoptimized;
@@ -519,9 +520,9 @@
   _cb = CodeCache::find_blob(pc);
   *O7_addr() = pc - pc_return_offset;
   _cb = CodeCache::find_blob(_pc);
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    address orig = ((nmethod*)_cb)->get_original_pc(this);
-    assert(orig == _pc, "expected original to be stored before patching");
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    assert(original_pc == _pc, "expected original to be stored before patching");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1681,11 +1681,8 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-
     // Record the receiver type.
-    record_klass_in_profile(receiver, scratch);
+    record_klass_in_profile(receiver, scratch, true);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
@@ -1695,9 +1692,13 @@
 
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register scratch,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        int start_row, Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1714,6 +1715,7 @@
     // See if the receiver is receiver[n].
     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
+    // delayed()->tst(scratch);
 
     // The receiver is receiver[n].  Increment count[n].
     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
@@ -1723,20 +1725,31 @@
     bind(next_test);
 
     if (test_for_null_also) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        brx(Assembler::notZero, false, Assembler::pt, done);
-        delayed()->nop();
+        if (is_virtual_call) {
+          brx(Assembler::zero, false, Assembler::pn, found_null);
+          delayed()->nop();
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
+          ba(false, done);
+          delayed()->nop();
+          bind(found_null);
+        } else {
+          brx(Assembler::notZero, false, Assembler::pt, done);
+          delayed()->nop();
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       brx(Assembler::zero, false, Assembler::pn, found_null);
       delayed()->nop();
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1753,16 +1766,18 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   mov(DataLayout::counter_increment, scratch);
   set_mdp_data_at(count_offset, scratch);
-  ba(false, done);
-  delayed()->nop();
+  if (start_row > 0) {
+    ba(false, done);
+    delayed()->nop();
+  }
 }
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register scratch) {
+                                                        Register scratch, bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, scratch, 0, done);
+  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1840,7 +1855,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, scratch);
+      record_klass_in_profile(klass, scratch, false);
     }
 
     // The method data pointer needs to be updated.
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -290,9 +290,9 @@
   void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
                         Register scratch);
 
-  void record_klass_in_profile(Register receiver, Register scratch);
+  void record_klass_in_profile(Register receiver, Register scratch, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch,
-                                      int start_row, Label& done);
+                                      int start_row, Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(int offset_of_disp, Register scratch);
   void update_mdp_by_offset(Register reg, int offset_of_disp,
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -418,13 +418,12 @@
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
@@ -432,13 +431,10 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
 
   // if the method does not have an exception handler, then there is
   // no reason to search for one
@@ -474,19 +470,19 @@
   // unwind activation and forward exception to caller
   // rax,: exception
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
-
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
@@ -494,23 +490,17 @@
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   InternalAddress here(__ pc());
   __ pushptr(here.addr());
-
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   __ end_a_stub();
 
+  return offset;
 }
 
 
@@ -3219,7 +3209,6 @@
   Register mdo  = op->mdo()->as_register();
   __ movoop(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  __ addl(counter_addr, DataLayout::counter_increment);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
@@ -3286,14 +3275,18 @@
         __ jcc(Assembler::notEqual, next_test);
         __ movptr(recv_addr, recv);
         __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ jmp(update_done);
-        }
+        __ jmp(update_done);
         __ bind(next_test);
       }
+      // Receiver did not match any saved receiver and there is no empty row for it.
+      // Increment total counter to indicate polymorphic case.
+      __ addl(counter_addr, DataLayout::counter_increment);
 
       __ bind(update_done);
     }
+  } else {
+    // Static call
+    __ addl(counter_addr, DataLayout::counter_increment);
   }
 }
 
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,9 +222,9 @@
   }
   ((address *)sp())[-1] = pc;
   _cb = CodeCache::find_blob(pc);
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    address orig = (((nmethod*)_cb)->get_original_pc(this));
-    assert(orig == _pc, "expected original to be stored before patching");
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    assert(original_pc == _pc, "expected original PC to be stored before patching");
     _deopt_state = is_deoptimized;
     // leave _pc as is
   } else {
@@ -323,19 +323,61 @@
   return fr;
 }
 
+
+//------------------------------------------------------------------------------
+// frame::verify_deopt_original_pc
+//
+// Verifies the calculated original PC of a deoptimization PC for the
+// given unextended SP.  The unextended SP might also be the saved SP
+// for MethodHandle call sites.
+#if ASSERT
+void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
+  frame fr;
+
+  // This is ugly but it's better than to change {get,set}_original_pc
+  // to take an SP value as argument.  And it's only a debugging
+  // method anyway.
+  fr._unextended_sp = unextended_sp;
+
+  address original_pc = nm->get_original_pc(&fr);
+  assert(nm->code_contains(original_pc), "original PC must be in nmethod");
+  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
+}
+#endif
+
+
+//------------------------------------------------------------------------------
+// frame::sender_for_interpreter_frame
 frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
-  // sp is the raw sp from the sender after adapter or interpreter extension
-  intptr_t* sp = (intptr_t*) addr_at(sender_sp_offset);
+  // SP is the raw SP from the sender after adapter or interpreter
+  // extension.
+  intptr_t* sender_sp = this->sender_sp();
 
   // This is the sp before any possible extension (adapter/locals).
   intptr_t* unextended_sp = interpreter_frame_sender_sp();
 
+  // Stored FP.
+  intptr_t* saved_fp = link();
+
   address sender_pc = this->sender_pc();
   CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
   assert(sender_cb, "sanity");
   nmethod* sender_nm = sender_cb->as_nmethod_or_null();
-  if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
-    unextended_sp = (intptr_t*) at(link_offset);
+
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
+      unextended_sp = saved_fp;
+    }
+    else if (sender_nm->is_deopt_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(sender_pc)) {
+      unextended_sp = saved_fp;
+    }
   }
 
   // The interpreter and compiler(s) always save EBP/RBP in a known
@@ -359,40 +401,51 @@
     }
 #endif // AMD64
   }
-#endif /* COMPILER2 */
-  return frame(sp, unextended_sp, link(), sender_pc);
+#endif // COMPILER2
+
+  return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
 }
 
 
-//------------------------------sender_for_compiled_frame-----------------------
+//------------------------------------------------------------------------------
+// frame::sender_for_compiled_frame
 frame frame::sender_for_compiled_frame(RegisterMap* map) const {
   assert(map != NULL, "map must be set");
-  const bool c1_compiled = _cb->is_compiled_by_c1();
 
   // frame owned by optimizing compiler
-  intptr_t* sender_sp = NULL;
-
   assert(_cb->frame_size() >= 0, "must have non-zero frame size");
-  sender_sp = unextended_sp() + _cb->frame_size();
+  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
+  intptr_t* unextended_sp = sender_sp;
 
   // On Intel the return_address is always the word on the stack
   address sender_pc = (address) *(sender_sp-1);
 
-  // This is the saved value of ebp which may or may not really be an fp.
-  // it is only an fp if the sender is an interpreter frame (or c1?)
-
-  intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+  // This is the saved value of EBP which may or may not really be an FP.
+  // It is only an FP if the sender is an interpreter frame (or C1?).
+  intptr_t* saved_fp = (intptr_t*) *(sender_sp - frame::sender_sp_offset);
 
-  intptr_t* unextended_sp = sender_sp;
-  // If we are returning to a compiled method handle call site,
-  // the saved_fp will in fact be a saved value of the unextended SP.
-  // The simplest way to tell whether we are returning to such a call
-  // site is as follows:
+  // If we are returning to a compiled MethodHandle call site, the
+  // saved_fp will in fact be a saved value of the unextended SP.  The
+  // simplest way to tell whether we are returning to such a call site
+  // is as follows:
   CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
   assert(sender_cb, "sanity");
   nmethod* sender_nm = sender_cb->as_nmethod_or_null();
-  if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
-    unextended_sp = saved_fp;
+
+  if (sender_nm != NULL) {
+    // If the sender PC is a deoptimization point, get the original
+    // PC.  For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    if (sender_nm->is_deopt_mh_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, saved_fp));
+      unextended_sp = saved_fp;
+    }
+    else if (sender_nm->is_deopt_entry(sender_pc)) {
+      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, unextended_sp));
+    }
+    else if (sender_nm->is_method_handle_return(sender_pc)) {
+      unextended_sp = saved_fp;
+    }
   }
 
   if (map->update_map()) {
@@ -403,7 +456,7 @@
     if (_cb->oop_maps() != NULL) {
       OopMapSet::update_register_map(this, map);
     }
-    // Since the prolog does the save and restore of epb there is no oopmap
+    // Since the prolog does the save and restore of EBP there is no oopmap
     // for it so we must fill in its location as if there was an oopmap entry
     // since if our caller was compiled code there could be live jvm state in it.
     map->set_location(rbp->as_VMReg(), (address) (sender_sp - frame::sender_sp_offset));
@@ -422,6 +475,9 @@
   return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
 }
 
+
+//------------------------------------------------------------------------------
+// frame::sender
 frame frame::sender(RegisterMap* map) const {
   // Default is we done have to follow them. The sender_for_xxx will
   // update it accordingly
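
The hunks in frame_sparc.cpp and frame_x86.cpp above replace the open-coded test `_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)` with a single nmethod::get_deopt_original_pc() helper. Its body is not part of this section of the diff; a minimal sketch reconstructed from the pattern it replaces would be:

    // Sketch only -- reconstructed from the code this changeset removes.
    // Returns the original PC if fr currently points at a deopt stub,
    // otherwise NULL.
    address nmethod::get_deopt_original_pc(const frame* fr) {
      if (fr->cb() == NULL)  return NULL;            // PC not in the code cache
      nmethod* nm = fr->cb()->as_nmethod_or_null();  // only nmethods deoptimize
      if (nm != NULL && nm->is_deopt_pc(fr->pc())) {
        return nm->get_original_pc(fr);              // PC saved before patching
      }
      return NULL;
    }

Centralizing the check is what lets the callers above treat a NULL result as "not deoptimized" with no casts.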
--- a/hotspot/src/cpu/x86/vm/frame_x86.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -163,6 +163,14 @@
     return (intptr_t*) addr_at(offset);
   }
 
+#if ASSERT
+  // Used in frame::sender_for_{interpreter,compiled}_frame
+  static void verify_deopt_original_pc(   nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return = false);
+  static void verify_deopt_mh_original_pc(nmethod* nm, intptr_t* unextended_sp) {
+    verify_deopt_original_pc(nm, unextended_sp, true);
+  }
+#endif
+
  public:
   // Constructors
 
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,32 +35,35 @@
   _deopt_state = unknown;
 }
 
-inline frame:: frame(intptr_t* sp, intptr_t* fp, address pc) {
+inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
   }
 }
 
-inline frame:: frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
+inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc) {
   _sp = sp;
   _unextended_sp = unextended_sp;
   _fp = fp;
   _pc = pc;
   assert(pc != NULL, "no pc?");
   _cb = CodeCache::find_blob(pc);
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
+    assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod");
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
@@ -86,9 +89,9 @@
 
   _cb = CodeCache::find_blob(_pc);
 
-  _deopt_state = not_deoptimized;
-  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
-    _pc = (((nmethod*)_cb)->get_original_pc(this));
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
     _deopt_state = is_deoptimized;
   } else {
     _deopt_state = not_deoptimized;
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1239,17 +1239,19 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
-
     Label skip_receiver_profile;
     if (receiver_can_be_null) {
+      Label not_null;
       testptr(receiver, receiver);
-      jcc(Assembler::zero, skip_receiver_profile);
+      jccb(Assembler::notZero, not_null);
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+      jmp(skip_receiver_profile);
+      bind(not_null);
     }
 
     // Record the receiver type.
-    record_klass_in_profile(receiver, mdp, reg2);
+    record_klass_in_profile(receiver, mdp, reg2, true);
     bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
@@ -1263,10 +1265,14 @@
 
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register mdp,
-                                        Register reg2,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        Register reg2, int start_row,
+                                        Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1294,19 +1300,28 @@
     bind(next_test);
 
     if (row == start_row) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       testptr(reg2, reg2);
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        jcc(Assembler::notZero, done);
+        if (is_virtual_call) {
+          jccb(Assembler::zero, found_null);
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+          jmp(done);
+          bind(found_null);
+        } else {
+          jcc(Assembler::notZero, done);
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       jcc(Assembler::zero, found_null);
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1323,16 +1338,18 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   movptr(reg2, (int32_t)DataLayout::counter_increment);
   set_mdp_data_at(mdp, count_offset, reg2);
-  jmp(done);
+  if (start_row > 0) {
+    jmp(done);
+  }
 }
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register mdp,
-                                                        Register reg2) {
+                                                        Register mdp, Register reg2,
+                                                        bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
+  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1425,7 +1442,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, mdp, reg2);
+      record_klass_in_profile(klass, mdp, reg2, false);
       assert(reg2 == rdi, "we know how to fix this blown reg");
       restore_locals();         // Restore EDI
     }
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -213,10 +213,10 @@
                         Label& not_equal_continue);
 
   void record_klass_in_profile(Register receiver, Register mdp,
-                               Register reg2);
+                               Register reg2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register mdp,
-                                      Register reg2,
-                                      int start_row, Label& done);
+                                      Register reg2, int start_row,
+                                      Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
   void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1262,17 +1262,19 @@
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);
 
-    // We are making a call.  Increment the count.
-    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
-
     Label skip_receiver_profile;
     if (receiver_can_be_null) {
+      Label not_null;
       testptr(receiver, receiver);
-      jcc(Assembler::zero, skip_receiver_profile);
+      jccb(Assembler::notZero, not_null);
+      // We are making a call.  Increment the count for null receiver.
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+      jmp(skip_receiver_profile);
+      bind(not_null);
     }
 
     // Record the receiver type.
-    record_klass_in_profile(receiver, mdp, reg2);
+    record_klass_in_profile(receiver, mdp, reg2, true);
     bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
@@ -1296,10 +1298,14 @@
 // See below for example code.
 void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                         Register receiver, Register mdp,
-                                        Register reg2,
-                                        int start_row, Label& done) {
-  if (TypeProfileWidth == 0)
+                                        Register reg2, int start_row,
+                                        Label& done, bool is_virtual_call) {
+  if (TypeProfileWidth == 0) {
+    if (is_virtual_call) {
+      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+    }
     return;
+  }
 
   int last_row = VirtualCallData::row_limit() - 1;
   assert(start_row <= last_row, "must be work left to do");
@@ -1327,19 +1333,28 @@
     bind(next_test);
 
     if (test_for_null_also) {
+      Label found_null;
       // Failed the equality check on receiver[n]...  Test for null.
       testptr(reg2, reg2);
       if (start_row == last_row) {
         // The only thing left to do is handle the null case.
-        jcc(Assembler::notZero, done);
+        if (is_virtual_call) {
+          jccb(Assembler::zero, found_null);
+          // Receiver did not match any saved receiver and there is no empty row for it.
+          // Increment total counter to indicate polymorphic case.
+          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+          jmp(done);
+          bind(found_null);
+        } else {
+          jcc(Assembler::notZero, done);
+        }
         break;
       }
       // Since null is rare, make it be the branch-taken case.
-      Label found_null;
       jcc(Assembler::zero, found_null);
 
       // Put all the "Case 3" tests here.
-      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done);
+      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
 
       // Found a null.  Keep searching for a matching receiver,
       // but remember that this is an empty (unused) slot.
@@ -1356,7 +1371,9 @@
   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
   movl(reg2, DataLayout::counter_increment);
   set_mdp_data_at(mdp, count_offset, reg2);
-  jmp(done);
+  if (start_row > 0) {
+    jmp(done);
+  }
 }
 
 // Example state machine code for three profile rows:
@@ -1368,7 +1385,7 @@
 //     if (row[1].rec != NULL) {
 //       // degenerate decision tree, rooted at row[2]
 //       if (row[2].rec == rec) { row[2].incr(); goto done; }
-//       if (row[2].rec != NULL) { goto done; } // overflow
+//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
 //       row[2].init(rec); goto done;
 //     } else {
 //       // remember row[1] is empty
@@ -1381,14 +1398,15 @@
 //     if (row[2].rec == rec) { row[2].incr(); goto done; }
 //     row[0].init(rec); goto done;
 //   }
+//   done:
 
 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
-                                                        Register mdp,
-                                                        Register reg2) {
+                                                        Register mdp, Register reg2,
+                                                        bool is_virtual_call) {
   assert(ProfileInterpreter, "must be profiling");
   Label done;
 
-  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done);
+  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
 
   bind (done);
 }
@@ -1484,7 +1502,7 @@
       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
 
       // Record the object type.
-      record_klass_in_profile(klass, mdp, reg2);
+      record_klass_in_profile(klass, mdp, reg2, false);
     }
     update_mdp_by_constant(mdp, mdp_delta);
 
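The example state machine above now matches the new counting policy: a receiver-row hit bumps only that row's counter, and the total CounterData count is incremented only for a null receiver, for TypeProfileWidth == 0, or when a virtual call overflows every row. A small self-contained C++ model of that policy (plain data standing in for the real MDO layout; names hypothetical):

    // Hypothetical model of one VirtualCallData profile: N receiver rows
    // plus the total counter.  Models the policy, not the real MDO layout.
    struct ProfileModel {
      static const int kRows = 2;           // cf. VirtualCallData::row_limit()
      const void* receiver[kRows] = {};     // recorded klasses (nullptr = empty)
      long row_count[kRows] = {};           // per-receiver hit counts
      long total_count = 0;                 // the CounterData::count_offset() slot

      void record(const void* klass, bool is_virtual_call) {
        for (int i = 0; i < kRows; i++) {
          if (receiver[i] == klass) { row_count[i]++; return; }  // row hit
        }
        for (int i = 0; i < kRows; i++) {
          if (receiver[i] == nullptr) {                          // empty row
            receiver[i] = klass; row_count[i] = 1; return;
          }
        }
        // No match and no empty row: only virtual calls bump the total
        // counter, flagging the polymorphic case.
        if (is_virtual_call) total_count++;
      }
    };

The compiler-side change in ciMethod.cpp further down relies on exactly this: a clean mono- or bimorphic site now shows total_count == 0.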
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -222,10 +222,10 @@
                         Label& not_equal_continue);
 
   void record_klass_in_profile(Register receiver, Register mdp,
-                               Register reg2);
+                               Register reg2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register mdp,
-                                      Register reg2,
-                                      int start_row, Label& done);
+                                      Register reg2, int start_row,
+                                      Label& done, bool is_virtual_call);
 
   void update_mdp_by_offset(Register mdp_in, int offset_of_offset);
   void update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp);
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Feb 04 03:34:05 2010 -0800
@@ -235,6 +235,11 @@
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
+source_hpp %{
+// Must be visible to the DFA in dfa_x86_32.cpp
+extern bool is_operand_hi32_zero(Node* n);
+%}
+
 source %{
 #define   RELOC_IMM32    Assembler::imm_operand
 #define   RELOC_DISP32   Assembler::disp32_operand
@@ -1485,6 +1490,21 @@
   return EBP_REG_mask;
 }
 
+// Returns true if the high 32 bits of the value is known to be zero.
+bool is_operand_hi32_zero(Node* n) {
+  int opc = n->Opcode();
+  if (opc == Op_LoadUI2L) {
+    return true;
+  }
+  if (opc == Op_AndL) {
+    Node* o2 = n->in(2);
+    if (o2->is_Con() && (o2->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
+      return true;
+    }
+  }
+  return false;
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -8599,6 +8619,63 @@
   ins_pipe( pipe_slow );
 %}
 
+// Multiply Register Long where the left operand's high 32 bits are zero
+instruct mulL_eReg_lhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(1)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr, TEMP tmp);
+  ins_cost(2*100+2*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) + lo(x_lo * y_hi) where lo(x_hi * y_lo) = 0 because x_hi = 0
+  format %{ "MOV    $tmp,$src.hi\n\t"
+            "IMUL   $tmp,EAX\n\t"
+            "MUL    EDX:EAX,$src.lo\n\t"
+            "ADD    EDX,$tmp" %}
+  ins_encode %{
+    __ movl($tmp$$Register, HIGH_FROM_LOW($src$$Register));
+    __ imull($tmp$$Register, rax);
+    __ mull($src$$Register);
+    __ addl(rdx, $tmp$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Multiply Register Long where the right operand's high 32 bits are zero
+instruct mulL_eReg_rhi0(eADXRegL dst, eRegL src, eRegI tmp, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(2)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr, TEMP tmp);
+  ins_cost(2*100+2*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) where lo(x_lo * y_hi) = 0 because y_hi = 0
+  format %{ "MOV    $tmp,$src.lo\n\t"
+            "IMUL   $tmp,EDX\n\t"
+            "MUL    EDX:EAX,$src.lo\n\t"
+            "ADD    EDX,$tmp" %}
+  ins_encode %{
+    __ movl($tmp$$Register, $src$$Register);
+    __ imull($tmp$$Register, rdx);
+    __ mull($src$$Register);
+    __ addl(rdx, $tmp$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+// Multiply Register Long where the left and the right operands' high 32 bits are zero
+instruct mulL_eReg_hi0(eADXRegL dst, eRegL src, eFlagsReg cr) %{
+  predicate(is_operand_hi32_zero(n->in(1)) && is_operand_hi32_zero(n->in(2)));
+  match(Set dst (MulL dst src));
+  effect(KILL cr);
+  ins_cost(1*400);
+// Basic idea: lo(result) = lo(x_lo * y_lo)
+//             hi(result) = hi(x_lo * y_lo) where lo(x_hi * y_lo) = 0 and lo(x_lo * y_hi) = 0 because x_hi = 0 and y_hi = 0
+  format %{ "MUL    EDX:EAX,$src.lo\n\t" %}
+  ins_encode %{
+    __ mull($src$$Register);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
 // Multiply Register Long by small constant
 instruct mulL_eReg_con(eADXRegL dst, immL_127 src, eRegI tmp, eFlagsReg cr) %{
   match(Set dst (MulL dst src));
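
The three new rules depend on the identity stated in their "Basic idea" comments: splitting 64-bit operands into 32-bit halves gives hi(x*y) = hi(x_lo*y_lo) + lo(x_lo*y_hi) + lo(x_hi*y_lo) mod 2^32, so a term drops out whenever is_operand_hi32_zero() proves an operand's high half zero. A standalone C++ check of the one-sided case, mirroring the MOV/IMUL/MUL/ADD sequence of mulL_eReg_lhi0:

    #include <cstdint>
    #include <cassert>

    int main() {
      uint64_t x = 0x00000000deadbeefULL;   // high 32 bits zero, e.g. a LoadUI2L
      uint64_t y = 0x12345678cafebabeULL;
      uint32_t x_lo = (uint32_t)x;
      uint32_t y_lo = (uint32_t)y;
      uint32_t y_hi = (uint32_t)(y >> 32);

      uint64_t lo_prod = (uint64_t)x_lo * y_lo;        // MUL  EDX:EAX,$src.lo
      uint32_t hi = (uint32_t)(lo_prod >> 32)
                  + x_lo * y_hi;                       // IMUL $tmp,EAX; ADD EDX,$tmp
      uint64_t result = ((uint64_t)hi << 32) | (uint32_t)lo_prod;

      assert(result == x * y);                         // identity holds mod 2^64
      return 0;
    }

The predicates simply decide at match time which of the three partial products may be dropped.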
--- a/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/zero/vm/interpreter_zero.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,10 @@
   return ShouldNotCallThisEntry();
 }
 
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  return true;
+}
+
 int AbstractInterpreter::size_activation(methodOop method,
                                          int tempcount,
                                          int popframe_extra_args,
--- a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,8 +47,10 @@
                         int total_args_passed,
                         int comp_args_on_stack,
                         const BasicType *sig_bt,
-                        const VMRegPair *regs) {
-  return new AdapterHandlerEntry(
+                        const VMRegPair *regs,
+                        AdapterFingerPrint *fingerprint) {
+  return AdapterHandlerLibrary::new_entry(
+    fingerprint,
     ShouldNotCallThisStub(),
     ShouldNotCallThisStub(),
     ShouldNotCallThisStub());
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1496,7 +1496,7 @@
   unsigned      i;
 
   // Generate Expand function header
-  fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list) {\n", node->_ident);
+  fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list, Node* mem) {\n", node->_ident);
   fprintf(fp,"Compile* C = Compile::current();\n");
   // Generate expand code
   if( node->expands() ) {
@@ -1546,15 +1546,16 @@
     // Build a mapping from operand index to input edges
     fprintf(fp,"  unsigned idx0 = oper_input_base();\n");
 
-    // The order in which inputs are added to a node is very
+    // The order in which the memory input is added to a node is very
     // strange.  Store nodes get a memory input before Expand is
-    // called and all other nodes get it afterwards so
-    // oper_input_base is wrong during expansion.  This code adjusts
-    // is so that expansion will work correctly.
-    bool missing_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames) &&
-                               node->is_ideal_store() == Form::none;
-    if (missing_memory_edge) {
-      fprintf(fp,"  idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+    // called and other nodes get it afterwards or before depending on
+    // match order so oper_input_base is wrong during expansion.  This
+    // code adjusts it so that expansion will work correctly.
+    int has_memory_edge = node->_matrule->needs_ideal_memory_edge(_globalNames);
+    if (has_memory_edge) {
+      fprintf(fp,"  if (mem == (Node*)1) {\n");
+      fprintf(fp,"    idx0--; // Adjust base because memory edge hasn't been inserted yet\n");
+      fprintf(fp,"  }\n");
     }
 
     for( i = 0; i < node->num_opnds(); i++ ) {
@@ -1611,9 +1612,11 @@
         int node_mem_op = node->memory_operand(_globalNames);
         assert( node_mem_op != InstructForm::NO_MEMORY_OPERAND,
                 "expand rule member needs memory but top-level inst doesn't have any" );
-        if (!missing_memory_edge) {
+        if (has_memory_edge) {
           // Copy memory edge
-          fprintf(fp,"  n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+          fprintf(fp,"  if (mem != (Node*)1) {\n");
+          fprintf(fp,"    n%d->add_req(_in[1]);\t// Add memory edge\n", cnt);
+          fprintf(fp,"  }\n");
         }
       }
 
@@ -1689,7 +1692,7 @@
       } // done iterating over a new instruction's operands
 
       // Invoke Expand() for the newly created instruction.
-      fprintf(fp,"  result = n%d->Expand( state, proj_list );\n", cnt);
+      fprintf(fp,"  result = n%d->Expand( state, proj_list, mem );\n", cnt);
       assert( !new_inst->expands(), "Do not have complete support for recursive expansion");
     } // done iterating over new instructions
     fprintf(fp,"\n");
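
With the new mem parameter threaded through, the generated Expand() bodies guard both the idx0 adjustment and the memory-edge copy at run time instead of adlc deciding statically. Reassembled from the fprintf format strings above, the emitted code takes roughly this shape (instruction name and node numbering are placeholders):

    // Shape of the code adlc now emits for an instruction with a memory edge.
    MachNode* fooNode::Expand(State* state, Node_List& proj_list, Node* mem) {
      Compile* C = Compile::current();
      unsigned idx0 = oper_input_base();
      if (mem == (Node*)1) {
        idx0--;  // Adjust base because memory edge hasn't been inserted yet
      }
      // ... allocate and wire the expanded nodes n0, n1, ... here ...
      if (mem != (Node*)1) {
        n0->add_req(_in[1]);  // Add memory edge
      }
      // Invoke Expand() for each newly created instruction:
      MachNode* result = n0->Expand(state, proj_list, mem);
      return result;
    }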
--- a/hotspot/src/share/vm/adlc/output_h.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/adlc/output_h.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1754,7 +1754,7 @@
         instr->has_temps() ||
         instr->_matrule != NULL &&
         instr->num_opnds() != instr->num_unique_opnds() ) {
-      fprintf(fp,"  virtual MachNode      *Expand(State *state, Node_List &proj_list);\n");
+      fprintf(fp,"  virtual MachNode      *Expand(State *state, Node_List &proj_list, Node* mem);\n");
     }
 
     if (instr->is_pinned(_globalNames)) {
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
                  Dtrace_trap = OSR_Entry,  // dtrace probes can never have an OSR entry so reuse it
                  Exceptions,     // Offset where exception handler lives
                  Deopt,          // Offset where deopt handler lives
+                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                  max_Entries };
 
   // special value to note codeBlobs where profile (forte) stack walking is
@@ -51,12 +52,13 @@
 
 public:
   CodeOffsets() {
-    _values[Entry] = 0;
+    _values[Entry         ] = 0;
     _values[Verified_Entry] = 0;
     _values[Frame_Complete] = frame_never_safe;
-    _values[OSR_Entry] = 0;
-    _values[Exceptions] = -1;
-    _values[Deopt] = -1;
+    _values[OSR_Entry     ] = 0;
+    _values[Exceptions    ] = -1;
+    _values[Deopt         ] = -1;
+    _values[DeoptMH       ] = -1;
   }
 
   int value(Entries e) { return _values[e]; }
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,6 +205,8 @@
 void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
   CHECK_BAILOUT();
 
+  CodeOffsets* code_offsets = assembler->offsets();
+
   // generate code or slow cases
   assembler->emit_slow_case_stubs();
   CHECK_BAILOUT();
@@ -213,10 +215,18 @@
   assembler->emit_exception_entries(exception_info_list());
   CHECK_BAILOUT();
 
-  // generate code for exception handler
-  assembler->emit_exception_handler();
+  // Generate code for exception handler.
+  code_offsets->set_value(CodeOffsets::Exceptions, assembler->emit_exception_handler());
   CHECK_BAILOUT();
-  assembler->emit_deopt_handler();
+
+  // Generate code for deopt handler.
+  code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
+  CHECK_BAILOUT();
+
+  // Generate code for MethodHandle deopt handler.  We can use the
+  // same code as for the normal deopt handler, we just need a
+  // different entry point address.
+  code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
   CHECK_BAILOUT();
 
   // done
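
Both sides of the new contract are visible in this changeset: each emit_* routine returns its handler's start offset (-1 on a "handler overflow" bailout, matching the -1 the CodeOffsets constructor uses for an absent entry), and the epilog records all three entries, calling emit_deopt_handler() twice because the MethodHandle variant only needs a distinct entry point. Condensed, using only names from the diff:

    // Condensed caller side: the epilog now owns the CodeOffsets bookkeeping.
    CodeOffsets* code_offsets = assembler->offsets();
    code_offsets->set_value(CodeOffsets::Exceptions,
                            assembler->emit_exception_handler());
    CHECK_BAILOUT();
    code_offsets->set_value(CodeOffsets::Deopt,
                            assembler->emit_deopt_handler());
    CHECK_BAILOUT();
    // Same stub code as Deopt; only the recorded entry point differs.
    code_offsets->set_value(CodeOffsets::DeoptMH,
                            assembler->emit_deopt_handler());
    CHECK_BAILOUT();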
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,9 +133,9 @@
   void add_call_info_here(CodeEmitInfo* info)                              { add_call_info(code_offset(), info); }
 
   // code patterns
-  void emit_exception_handler();
+  int  emit_exception_handler();
   void emit_exception_entries(ExceptionInfoList* info_list);
-  void emit_deopt_handler();
+  int  emit_deopt_handler();
 
   void emit_code(BlockList* hir);
   void emit_block(BlockBegin* block);
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -962,18 +962,10 @@
     if (nm == NULL) {
       // The CodeCache is full.  Print out warning and disable compilation.
       record_failure("code cache is full");
-      UseInterpreter = true;
-      if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-        warning("CodeCache is full. Compiler has been disabled");
-        if (CompileTheWorld || ExitOnFullCodeCache) {
-          before_exit(JavaThread::current());
-          exit_globals(); // will delete tty
-          vm_direct_exit(CompileTheWorld ? 0 : 1);
-        }
-#endif
-        UseCompiler               = false;
-        AlwaysCompileLoopMethods  = false;
+      {
+        MutexUnlocker ml(Compile_lock);
+        MutexUnlocker locker(MethodCompileQueue_lock);
+        CompileBroker::handle_full_code_cache();
       }
     } else {
       NOT_PRODUCT(nm->set_has_debug_info(has_debug_info); )
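
The two MutexUnlocker declarations drop Compile_lock and MethodCompileQueue_lock for the duration of the block so CompileBroker::handle_full_code_cache() can do its own locking without deadlock. MutexUnlocker is the inverse of the usual scoped locker; a generic std::mutex sketch of the idiom (not HotSpot's actual class):

    #include <mutex>

    // Inverse-RAII sketch: the constructor releases an already-held lock,
    // the destructor retakes it, so the lock is dropped for exactly the
    // enclosing scope.
    class MutexUnlockerSketch {
      std::mutex& _m;
     public:
      explicit MutexUnlockerSketch(std::mutex& m) : _m(m) { _m.unlock(); }
      ~MutexUnlockerSketch() { _m.lock(); }
      MutexUnlockerSketch(const MutexUnlockerSketch&) = delete;
      MutexUnlockerSketch& operator=(const MutexUnlockerSketch&) = delete;
    };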
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -436,15 +436,20 @@
           // we will set result._method also.
         }
         // Determine call site's morphism.
-        // The call site count could be == (receivers_count_total + 1)
-        // not only in the case of a polymorphic call but also in the case
-        // when a method data snapshot is taken after the site count was updated
-        // but before receivers counters were updated.
-        if (morphism == result._limit) {
-           // There were no array klasses and morphism <= MorphismLimit.
-           if (morphism <  ciCallProfile::MorphismLimit ||
-               morphism == ciCallProfile::MorphismLimit &&
-               (receivers_count_total+1) >= count) {
+        // The call site count is 0 with known morphism (only 1 or 2 receivers)
+        // or < 0 in the case of a type check failure for checkcast, aastore, instanceof.
+        // The call site count is > 0 in the case of a polymorphic virtual call.
+        if (morphism > 0 && morphism == result._limit) {
+           // The morphism <= MorphismLimit.
+           if ((morphism <  ciCallProfile::MorphismLimit) ||
+               (morphism == ciCallProfile::MorphismLimit && count == 0)) {
+#ifdef ASSERT
+             if (count > 0) {
+               tty->print_cr("bci: %d", bci);
+               this->print_codes();
+               assert(false, "this call site should not be polymorphic");
+             }
+#endif
              result._morphism = morphism;
            }
         }
@@ -452,10 +457,8 @@
         // zero or less, presume that this is a typecheck profile and
         // do nothing.  Otherwise, increase count to be the sum of all
         // receiver's counts.
-        if (count > 0) {
-          if (count < receivers_count_total) {
-            count = receivers_count_total;
-          }
+        if (count >= 0) {
+          count += receivers_count_total;
         }
       }
       result._count = count;
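
Combined with the interpreter changes above (row hits no longer bump the total counter), the reported profile count for a clean mono- or bimorphic site is now simply the sum of the row counters. A tiny worked example of the adjusted arithmetic in this hunk (values hypothetical):

    int count = 0;                          // CounterData total from the MDO
    int receivers_count_total = 40 + 60;    // two receiver rows' hit counts
    if (count >= 0) {
      count += receivers_count_total;       // reported count = 100
    }
    // count < 0 would mark a typecheck profile and is left untouched.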
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -96,6 +96,7 @@
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
+nmethod* CodeCache::_saved_nmethods = NULL;
 
 
 CodeBlob* CodeCache::first() {
@@ -395,6 +396,85 @@
 }
 #endif //PRODUCT
 
+
+nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  nmethod* saved = _saved_nmethods;
+  nmethod* prev = NULL;
+  while (saved != NULL) {
+    if (saved->is_in_use() && saved->method() == m) {
+      if (prev != NULL) {
+        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
+      } else {
+        _saved_nmethods = saved->saved_nmethod_link();
+      }
+      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
+      saved->set_speculatively_disconnected(false);
+      saved->set_saved_nmethod_link(NULL);
+      if (PrintMethodFlushing) {
+        saved->print_on(tty, " ### nmethod is reconnected");
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
+        xtty->method(methodOop(m));
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      return saved;
+    }
+    prev = saved;
+    saved = saved->saved_nmethod_link();
+  }
+  return NULL;
+}
+
+void CodeCache::remove_saved_code(nmethod* nm) {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
+  nmethod* saved = _saved_nmethods;
+  nmethod* prev = NULL;
+  while (saved != NULL) {
+    if (saved == nm) {
+      if (prev != NULL) {
+        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
+      } else {
+        _saved_nmethods = saved->saved_nmethod_link();
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      return;
+    }
+    prev = saved;
+    saved = saved->saved_nmethod_link();
+  }
+  ShouldNotReachHere();
+}
+
+void CodeCache::speculatively_disconnect(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
+  nm->set_saved_nmethod_link(_saved_nmethods);
+  _saved_nmethods = nm;
+  if (PrintMethodFlushing) {
+    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+  }
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
+    xtty->method(methodOop(nm->method()));
+    xtty->stamp();
+    xtty->end_elem();
+  }
+  nm->method()->clear_code();
+  nm->set_speculatively_disconnected(true);
+}
+
+
 void CodeCache::gc_prologue() {
   assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
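
The new _saved_nmethods machinery is a plain singly linked list threaded through nm->saved_nmethod_link(): speculatively_disconnect() pushes at the head, and the two removal routines walk with a trailing prev pointer. A minimal generic model (hypothetical Node type, not the real nmethod API):

    // Minimal model of the _saved_nmethods chain: push at the head,
    // unlink by walking with a trailing 'prev' pointer.
    struct Node { Node* link; int id; };

    static Node* saved_head = nullptr;

    void push_saved(Node* n) {           // cf. speculatively_disconnect()
      n->link = saved_head;
      saved_head = n;
    }

    Node* find_and_remove(int id) {      // cf. find_and_remove_saved_code()
      Node* prev = nullptr;
      for (Node* cur = saved_head; cur != nullptr; prev = cur, cur = cur->link) {
        if (cur->id == id) {
          if (prev != nullptr) prev->link = cur->link;
          else                 saved_head = cur->link;
          cur->link = nullptr;
          return cur;
        }
      }
      return nullptr;
    }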
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -46,6 +46,7 @@
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
+  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_link()
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -141,11 +142,16 @@
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
+  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
+  static nmethod* find_and_remove_saved_code(methodOop m);
+  static void remove_saved_code(nmethod* nm);
+  static void speculatively_disconnect(nmethod* nm);
+
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/hotspot/src/share/vm/code/dependencies.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/code/dependencies.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -843,13 +843,15 @@
     if (occasional_print || final_stats) {
       // Every now and then dump a little info about dependency searching.
       if (xtty != NULL) {
-        xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
+       ttyLocker ttyl;
+       xtty->elem("deps_find_witness calls='%d' steps='%d' recursions='%d' singles='%d'",
                    deps_find_witness_calls,
                    deps_find_witness_steps,
                    deps_find_witness_recursions,
                    deps_find_witness_singles);
       }
       if (final_stats || (TraceDependencies && WizardMode)) {
+        ttyLocker ttyl;
         tty->print_cr("Dependency check (find_witness) "
                       "calls=%d, steps=%d (avg=%.1f), recursions=%d, singles=%d",
                       deps_find_witness_calls,
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -587,11 +587,13 @@
     _osr_link                = NULL;
     _scavenge_root_link      = NULL;
     _scavenge_root_state     = 0;
+    _saved_nmethod_link      = NULL;
     _compiler                = NULL;
     // We have no exception handler or deopt handler make the
     // values something that will never match a pc like the nmethod vtable entry
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
     _orig_pc_offset          = 0;
 #ifdef HAVE_DTRACE_H
     _trap_offset             = 0;
@@ -682,6 +684,7 @@
     // values something that will never match a pc like the nmethod vtable entry
     _exception_offset        = 0;
     _deoptimize_offset       = 0;
+    _deoptimize_mh_offset    = 0;
     _trap_offset             = offsets->value(CodeOffsets::Dtrace_trap);
     _orig_pc_offset          = 0;
     _stub_offset             = data_offset();
@@ -794,6 +797,7 @@
     // Exception handler and deopt handler are in the stub section
     _exception_offset        = _stub_offset + offsets->value(CodeOffsets::Exceptions);
     _deoptimize_offset       = _stub_offset + offsets->value(CodeOffsets::Deopt);
+    _deoptimize_mh_offset    = _stub_offset + offsets->value(CodeOffsets::DeoptMH);
     _consts_offset           = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start());
     _scopes_data_offset      = data_offset();
     _scopes_pcs_offset       = _scopes_data_offset   + round_to(debug_info->data_size         (), oopSize);
@@ -1033,7 +1037,7 @@
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
           // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use()) ic->set_to_clean();
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
         }
         break;
       }
@@ -1043,7 +1047,7 @@
         if( cb != NULL && cb->is_nmethod() ) {
           nmethod* nm = (nmethod*)cb;
           // Clean inline caches pointing to both zombie and not_entrant methods
-          if (!nm->is_in_use()) csc->set_to_clean();
+          if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
         }
         break;
       }
@@ -1113,7 +1117,6 @@
     if (_method->code() == this) {
       _method->clear_code(); // Break a cycle
     }
-    inc_decompile_count();     // Last chance to make a mark on the MDO
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
@@ -1173,15 +1176,17 @@
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
 
-  // If the method is already zombie there is nothing to do
-  if (is_zombie()) {
-    return false;
-  }
+  bool was_alive = false;
 
   // Make sure the nmethod is not flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
 
   {
+    // If the method is already zombie there is nothing to do
+    if (is_zombie()) {
+      return false;
+    }
+
     // invalidate osr nmethod before acquiring the patching lock since
     // they both acquire leaf locks and we don't want a deadlock.
     // This logic is equivalent to the logic below for patching the
@@ -1219,6 +1224,8 @@
       assert(state == not_entrant, "other cases may need to be handled differently");
     }
 
+    was_alive = is_in_use(); // Read state under lock
+
     // Change state
     flags.state = state;
 
@@ -1245,8 +1252,11 @@
     mark_as_seen_on_stack();
   }
 
-  // It's a true state change, so mark the method as decompiled.
-  inc_decompile_count();
+  if (was_alive) {
+    // It's a true state change, so mark the method as decompiled.
+    // Do it only for transition from alive.
+    inc_decompile_count();
+  }
 
   // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
   // and it hasn't already been reported for this nmethod then report it now.
@@ -1312,7 +1322,8 @@
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
   if (PrintMethodFlushing) {
-    tty->print_cr("*flushing nmethod " INTPTR_FORMAT ". Live blobs: %d", this, CodeCache::nof_blobs());
+    tty->print_cr("*flushing nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT "/Free CodeCache:" SIZE_FORMAT "Kb",
+        _compile_id, this, CodeCache::nof_blobs(), CodeCache::unallocated_capacity()/1024);
   }
 
   // We need to deallocate any ExceptionCache data.
@@ -1330,6 +1341,10 @@
     CodeCache::drop_scavenge_root_nmethod(this);
   }
 
+  if (is_speculatively_disconnected()) {
+    CodeCache::remove_saved_code(this);
+  }
+
   ((CodeBlob*)(this))->flush();
 
   CodeCache::free(this);
@@ -2031,9 +2046,21 @@
   guarantee(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
 }
 
-bool nmethod::is_deopt_pc(address pc) {
-  bool ret =  pc == deopt_handler_begin();
-  return ret;
+
+// -----------------------------------------------------------------------------
+// nmethod::get_deopt_original_pc
+//
+// Return the original PC for the given PC if:
+// (a) the given PC belongs to a nmethod and
+// (b) it is a deopt PC
+address nmethod::get_deopt_original_pc(const frame* fr) {
+  if (fr->cb() == NULL)  return NULL;
+
+  nmethod* nm = fr->cb()->as_nmethod_or_null();
+  if (nm != NULL && nm->is_deopt_pc(fr->pc()))
+    return nm->get_original_pc(fr);
+
+  return NULL;
 }
 
 
@@ -2404,6 +2431,8 @@
   if (block_begin == verified_entry_point())    stream->print_cr("[Verified Entry Point]");
   if (block_begin == exception_begin())         stream->print_cr("[Exception Handler]");
   if (block_begin == stub_begin())              stream->print_cr("[Stub Code]");
+  if (block_begin == deopt_handler_begin())     stream->print_cr("[Deopt Handler Code]");
+  if (block_begin == deopt_mh_handler_begin())  stream->print_cr("[Deopt MH Handler Code]");
   if (block_begin == consts_begin())            stream->print_cr("[Constants]");
   if (block_begin == entry_point()) {
     methodHandle m = method();
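
[Note] The new get_deopt_original_pc() centralizes a lookup that frame_sparc.cpp and friends previously open-coded: if a frame's PC sits at one of the two deopt handler entries, recover the PC that was saved away when the frame was patched. A simplified sketch of the control flow, with stand-in types instead of the real frame/nmethod interfaces:

    #include <cstddef>

    typedef const unsigned char* address;

    struct Frame {
      address pc;                    // current (patched) PC
      address saved_original_pc;     // stand-in for the slot at _orig_pc_offset
    };

    struct NMethodLike {
      address deopt_entry;           // cf. deopt_handler_begin()
      address deopt_mh_entry;        // cf. deopt_mh_handler_begin()
      bool is_deopt_pc(address pc) const {
        return pc == deopt_entry || pc == deopt_mh_entry;
      }
    };

    // cf. nmethod::get_deopt_original_pc(): NULL unless the frame's PC is
    // inside a compiled method and parked at a deopt handler entry.
    address get_deopt_original_pc(const Frame* fr, const NMethodLike* nm) {
      if (nm == NULL) return NULL;             // PC not in an nmethod
      if (!nm->is_deopt_pc(fr->pc)) return NULL;
      return fr->saved_original_pc;
    }
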
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,6 +95,8 @@
   unsigned int has_unsafe_access:1;          // May fault due to unsafe access.
   unsigned int has_method_handle_invokes:1;  // Has this method MethodHandle invokes?
 
+  unsigned int speculatively_disconnected:1; // Marked for potential unload
+
   void clear();
 };
 
@@ -137,6 +139,7 @@
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from instanceKlass::osr_nmethods_head
   nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
+  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -145,8 +148,12 @@
 
   // Offsets for different nmethod parts
   int _exception_offset;
-  // All deoptee's will resume execution at this location described by this offset
+  // All deoptee's will resume execution at this location described by
+  // this offset.
   int _deoptimize_offset;
+  // All deoptee's at a MethodHandle call site will resume execution
+  // at this location described by this offset.
+  int _deoptimize_mh_offset;
 #ifdef HAVE_DTRACE_H
   int _trap_offset;
 #endif // def HAVE_DTRACE_H
@@ -329,24 +336,25 @@
   bool is_compiled_by_c2() const;
 
   // boundaries for different parts
-  address code_begin         () const             { return _entry_point; }
-  address code_end           () const             { return           header_begin() + _stub_offset          ; }
-  address exception_begin    () const             { return           header_begin() + _exception_offset     ; }
-  address deopt_handler_begin() const             { return           header_begin() + _deoptimize_offset    ; }
-  address stub_begin         () const             { return           header_begin() + _stub_offset          ; }
-  address stub_end           () const             { return           header_begin() + _consts_offset        ; }
-  address consts_begin       () const             { return           header_begin() + _consts_offset        ; }
-  address consts_end         () const             { return           header_begin() + _scopes_data_offset   ; }
-  address scopes_data_begin  () const             { return           header_begin() + _scopes_data_offset   ; }
-  address scopes_data_end    () const             { return           header_begin() + _scopes_pcs_offset    ; }
-  PcDesc* scopes_pcs_begin   () const             { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
-  PcDesc* scopes_pcs_end     () const             { return (PcDesc*)(header_begin() + _dependencies_offset); }
-  address dependencies_begin () const             { return           header_begin() + _dependencies_offset ; }
-  address dependencies_end   () const             { return           header_begin() + _handler_table_offset ; }
-  address handler_table_begin() const             { return           header_begin() + _handler_table_offset ; }
-  address handler_table_end  () const             { return           header_begin() + _nul_chk_table_offset   ; }
-  address nul_chk_table_begin() const             { return           header_begin() + _nul_chk_table_offset ; }
-  address nul_chk_table_end  () const             { return           header_begin() + _nmethod_end_offset   ; }
+  address code_begin            () const          { return _entry_point; }
+  address code_end              () const          { return           header_begin() + _stub_offset          ; }
+  address exception_begin       () const          { return           header_begin() + _exception_offset     ; }
+  address deopt_handler_begin   () const          { return           header_begin() + _deoptimize_offset    ; }
+  address deopt_mh_handler_begin() const          { return           header_begin() + _deoptimize_mh_offset ; }
+  address stub_begin            () const          { return           header_begin() + _stub_offset          ; }
+  address stub_end              () const          { return           header_begin() + _consts_offset        ; }
+  address consts_begin          () const          { return           header_begin() + _consts_offset        ; }
+  address consts_end            () const          { return           header_begin() + _scopes_data_offset   ; }
+  address scopes_data_begin     () const          { return           header_begin() + _scopes_data_offset   ; }
+  address scopes_data_end       () const          { return           header_begin() + _scopes_pcs_offset    ; }
+  PcDesc* scopes_pcs_begin      () const          { return (PcDesc*)(header_begin() + _scopes_pcs_offset   ); }
+  PcDesc* scopes_pcs_end        () const          { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
+  address dependencies_begin    () const          { return           header_begin() + _dependencies_offset  ; }
+  address dependencies_end      () const          { return           header_begin() + _handler_table_offset ; }
+  address handler_table_begin   () const          { return           header_begin() + _handler_table_offset ; }
+  address handler_table_end     () const          { return           header_begin() + _nul_chk_table_offset ; }
+  address nul_chk_table_begin   () const          { return           header_begin() + _nul_chk_table_offset ; }
+  address nul_chk_table_end     () const          { return           header_begin() + _nmethod_end_offset   ; }
 
   int code_size         () const                  { return      code_end         () -      code_begin         (); }
   int stub_size         () const                  { return      stub_end         () -      stub_begin         (); }
@@ -413,6 +421,9 @@
   bool  has_method_handle_invokes() const         { return flags.has_method_handle_invokes; }
   void  set_has_method_handle_invokes(bool z)     { flags.has_method_handle_invokes = z; }
 
+  bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
+  void  set_speculatively_disconnected(bool z)    { flags.speculatively_disconnected = z; }
+
   int   level() const                             { return flags.level; }
   void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }
 
@@ -437,6 +448,9 @@
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
   void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
 
+  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
+  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }
+
  public:
 
   // Sweeper support
@@ -515,7 +529,7 @@
  private:
   ScopeDesc* scope_desc_in(address begin, address end);
 
-  address* orig_pc_addr(const frame* fr ) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
+  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
 
   PcDesc* find_pc_desc_internal(address pc, bool approximate);
 
@@ -538,13 +552,17 @@
   void copy_scopes_pcs(PcDesc* pcs, int count);
   void copy_scopes_data(address buffer, int size);
 
-  // deopt
-  // return true is the pc is one would expect if the frame is being deopted.
-  bool is_deopt_pc(address pc);
+  // Deopt
+  // Return true if the PC is one we would expect if the frame is being deopted.
+  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
+  bool is_deopt_entry   (address pc) { return pc == deopt_handler_begin(); }
+  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
   // Accessor/mutator for the original pc of a frame before a frame was deopted.
   address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
   void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
 
+  static address get_deopt_original_pc(const frame* fr);
+
   // MethodHandle
   bool is_method_handle_return(address return_pc);
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,7 @@
 
 bool CompileBroker::_initialized = false;
 volatile bool CompileBroker::_should_block = false;
+volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
 
 // The installed compiler(s)
 AbstractCompiler* CompileBroker::_compilers[2];
@@ -986,6 +987,13 @@
       return method_code;
     }
     if (method->is_not_compilable(comp_level)) return NULL;
+
+    nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+    if (saved != NULL) {
+      method->set_code(method, saved);
+      return saved;
+    }
+
   } else {
     // osr compilation
 #ifndef TIERED
@@ -1037,6 +1045,14 @@
     method->jmethod_id();
   }
 
+  // If the compiler is shut off due to code cache flushing or otherwise,
+  // fail out now so blocking compiles don't hang the Java thread.
+  if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
+    method->invocation_counter()->decay();
+    method->backedge_counter()->decay();
+    return NULL;
+  }
+
   // do the compilation
   if (method->is_native()) {
     if (!PreferInterpreterNativeStubs) {
@@ -1116,7 +1132,7 @@
   // the specified level
   if (is_native &&
       (!CICompileNatives || !compiler(comp_level)->supports_native())) {
-    method->set_not_compilable();
+    method->set_not_compilable_quietly();
     return true;
   }
 
@@ -1140,7 +1156,7 @@
       method->print_short_name(tty);
       tty->cr();
     }
-    method->set_not_compilable();
+    method->set_not_compilable_quietly();
   }
 
   return false;
@@ -1173,7 +1189,7 @@
   }
 
   // Method was not in the appropriate compilation range.
-  method->set_not_compilable();
+  method->set_not_compilable_quietly();
   return 0;
 }
 
@@ -1325,26 +1341,13 @@
     {
       // We need this HandleMark to avoid leaking VM handles.
       HandleMark hm(thread);
+
       if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
-        // The CodeCache is full.  Print out warning and disable compilation.
-        UseInterpreter = true;
-        if (UseCompiler || AlwaysCompileLoopMethods ) {
-          if (log != NULL) {
-            log->begin_elem("code_cache_full");
-            log->stamp();
-            log->end_elem();
-          }
-#ifndef PRODUCT
-          warning("CodeCache is full. Compiler has been disabled");
-          if (CompileTheWorld || ExitOnFullCodeCache) {
-            before_exit(thread);
-            exit_globals(); // will delete tty
-            vm_direct_exit(CompileTheWorld ? 0 : 1);
-          }
-#endif
-          UseCompiler               = false;
-          AlwaysCompileLoopMethods  = false;
-        }
+        // the code cache is really full
+        handle_full_code_cache();
+      } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
+        // Attempt to start cleaning the code cache while there is still a little headroom
+        NMethodSweeper::handle_full_code_cache(false);
       }
 
       CompileTask* task = queue->get();
@@ -1369,7 +1372,7 @@
       // Never compile a method if breakpoints are present in it
       if (method()->number_of_breakpoints() == 0) {
         // Compile the method.
-        if (UseCompiler || AlwaysCompileLoopMethods) {
+        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
 #ifdef COMPILER1
           // Allow repeating compilations for the purpose of benchmarking
           // compile speed. This is not useful for customers.
@@ -1587,10 +1590,10 @@
     if (is_osr) {
       method->set_not_osr_compilable();
     } else {
-      method->set_not_compilable();
+      method->set_not_compilable_quietly();
     }
   } else if (compilable == ciEnv::MethodCompilable_not_at_tier) {
-    method->set_not_compilable(task->comp_level());
+    method->set_not_compilable_quietly(task->comp_level());
   }
 
   // Note that the queued_for_compilation bits are cleared without
@@ -1614,6 +1617,38 @@
 
 
 // ------------------------------------------------------------------
+// CompileBroker::handle_full_code_cache
+//
+// The CodeCache is full.  Print out a warning and disable compilation, or
+// try code cache cleaning so compilation can continue later.
+void CompileBroker::handle_full_code_cache() {
+  UseInterpreter = true;
+  if (UseCompiler || AlwaysCompileLoopMethods ) {
+    CompilerThread* thread = CompilerThread::current();
+    CompileLog* log = thread->log();
+    if (log != NULL) {
+      log->begin_elem("code_cache_full");
+      log->stamp();
+      log->end_elem();
+    }
+  #ifndef PRODUCT
+    warning("CodeCache is full. Compiler has been disabled");
+    if (CompileTheWorld || ExitOnFullCodeCache) {
+      before_exit(JavaThread::current());
+      exit_globals(); // will delete tty
+      vm_direct_exit(CompileTheWorld ? 0 : 1);
+    }
+  #endif
+    if (UseCodeCacheFlushing) {
+      NMethodSweeper::handle_full_code_cache(true);
+    } else {
+      UseCompiler               = false;
+      AlwaysCompileLoopMethods  = false;
+    }
+  }
+}
+
+// ------------------------------------------------------------------
 // CompileBroker::set_last_compile
 //
 // Record this compilation for debugging purposes.
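
[Note] With UseCodeCacheFlushing the full-cache event is no longer terminal: the sweeper is asked to reclaim space and compilation can resume later. A self-contained sketch of the decision, with plain bools standing in for the VM flags and a hypothetical stub in place of NMethodSweeper:

    #include <cstdio>

    static bool UseInterpreter_           = false;
    static bool UseCompiler_              = true;
    static bool AlwaysCompileLoopMethods_ = false;
    static bool UseCodeCacheFlushing_     = true;

    // Stub standing in for NMethodSweeper::handle_full_code_cache(bool).
    static void sweeper_handle_full_code_cache(bool is_full) {
      std::printf("sweeper asked to reclaim space (is_full=%d)\n", is_full);
    }

    // cf. CompileBroker::handle_full_code_cache(): either hand the problem
    // to the sweeper, or disable the compiler for good as the old inline
    // code in the compiler-thread loop did.
    void handle_full_code_cache() {
      UseInterpreter_ = true;
      if (UseCompiler_ || AlwaysCompileLoopMethods_) {
        if (UseCodeCacheFlushing_) {
          sweeper_handle_full_code_cache(true);
        } else {
          UseCompiler_              = false;
          AlwaysCompileLoopMethods_ = false;
        }
      }
    }
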
--- a/hotspot/src/share/vm/compiler/compileBroker.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -193,6 +193,9 @@
   static bool _initialized;
   static volatile bool _should_block;
 
+  // This flag can be used to stop compilation or turn it back on
+  static volatile jint _should_compile_new_jobs;
+
   // The installed compiler(s)
   static AbstractCompiler* _compilers[2];
 
@@ -319,6 +322,7 @@
 
   static void compiler_thread_loop();
 
+  static uint get_compilation_id() { return _compilation_id; }
   static bool is_idle();
 
   // Set _should_block.
@@ -328,6 +332,20 @@
   // Call this from the compiler at convenient points, to poll for _should_block.
   static void maybe_block();
 
+  enum {
+    // Flags for toggling compiler activity
+    stop_compilation = 0,
+    run_compilation  = 1
+  };
+
+  static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
+  static bool set_should_compile_new_jobs(jint new_state) {
+    // Return success if the current caller set it
+    jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
+    return (old == (1-new_state));
+  }
+  static void handle_full_code_cache();
+
   // Return total compilation ticks
   static jlong total_compilation_ticks() {
     return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
--- a/hotspot/src/share/vm/includeDB_compiler2	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/includeDB_compiler2	Thu Feb 04 03:34:05 2010 -0800
@@ -775,6 +775,7 @@
 output.cpp                              assembler.inline.hpp
 output.cpp                              callnode.hpp
 output.cpp                              cfgnode.hpp
+output.cpp                              compileBroker.hpp
 output.cpp                              debugInfo.hpp
 output.cpp                              debugInfoRec.hpp
 output.cpp                              handles.inline.hpp
--- a/hotspot/src/share/vm/includeDB_core	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/includeDB_core	Thu Feb 04 03:34:05 2010 -0800
@@ -1032,6 +1032,7 @@
 codeCache.cpp                           oop.inline.hpp
 codeCache.cpp                           pcDesc.hpp
 codeCache.cpp                           resourceArea.hpp
+codeCache.cpp                           xmlstream.hpp
 
 codeCache.hpp                           allocation.hpp
 codeCache.hpp                           codeBlob.hpp
@@ -1120,6 +1121,7 @@
 compileBroker.cpp                       oop.inline.hpp
 compileBroker.cpp                       os.hpp
 compileBroker.cpp                       sharedRuntime.hpp
+compileBroker.cpp                       sweeper.hpp
 compileBroker.cpp                       systemDictionary.hpp
 compileBroker.cpp                       vmSymbols.hpp
 
@@ -3719,6 +3721,7 @@
 sharedRuntime.cpp                       abstractCompiler.hpp
 sharedRuntime.cpp                       arguments.hpp
 sharedRuntime.cpp                       biasedLocking.hpp
+sharedRuntime.cpp                       compileBroker.hpp
 sharedRuntime.cpp                       compiledIC.hpp
 sharedRuntime.cpp                       compilerOracle.hpp
 sharedRuntime.cpp                       copy.hpp
@@ -3973,6 +3976,7 @@
 
 sweeper.cpp                             atomic.hpp
 sweeper.cpp                             codeCache.hpp
+sweeper.cpp                             compileBroker.hpp
 sweeper.cpp                             events.hpp
 sweeper.cpp                             methodOop.hpp
 sweeper.cpp                             mutexLocker.hpp
@@ -3980,6 +3984,8 @@
 sweeper.cpp                             os.hpp
 sweeper.cpp                             resourceArea.hpp
 sweeper.cpp                             sweeper.hpp
+sweeper.cpp                             vm_operations.hpp
+sweeper.cpp                             xmlstream.hpp
 
 symbolKlass.cpp                         gcLocker.hpp
 symbolKlass.cpp                         handles.inline.hpp
@@ -4633,6 +4639,7 @@
 vm_operations.cpp                       interfaceSupport.hpp
 vm_operations.cpp                       isGCActiveMark.hpp
 vm_operations.cpp                       resourceArea.hpp
+vm_operations.cpp                       sweeper.hpp
 vm_operations.cpp                       threadService.hpp
 vm_operations.cpp                       thread_<os_family>.inline.hpp
 vm_operations.cpp                       vmSymbols.hpp
--- a/hotspot/src/share/vm/oops/methodDataOop.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodDataOop.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1391,6 +1391,9 @@
   }
   void inc_decompile_count() {
     _nof_decompiles += 1;
+    if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
+      method()->set_not_compilable();
+    }
   }
 
   // Support for code generation
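
[Note] The cutoff test moves from is_not_compilable() (see the deletion in methodOop.cpp below) into the increment itself, so the method is marked exactly once when the count crosses PerMethodRecompilationCutoff. A minimal sketch of the new shape with stand-in types; note the real flag is an intx where -1, cast to uint, means "no cutoff":

    #include <cstdint>

    static const uint32_t kPerMethodRecompilationCutoff = 400;  // flag default

    struct MethodLike {
      bool not_compilable;
      void set_not_compilable() { not_compilable = true; }
    };

    struct MethodDataLike {
      uint32_t    nof_decompiles;
      MethodLike* method;

      // cf. inc_decompile_count(): once past the cutoff, hand the method to
      // the interpreter permanently instead of re-checking on every
      // compilation request.
      void inc_decompile_count() {
        nof_decompiles += 1;
        if (nof_decompiles > kPerMethodRecompilationCutoff) {
          method->set_not_compilable();
        }
      }
    };
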
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -575,12 +575,6 @@
     return true;
   }
 
-  methodDataOop mdo = method_data();
-  if (mdo != NULL
-      && (uint)mdo->decompile_count() > (uint)PerMethodRecompilationCutoff) {
-    // Since (uint)-1 is large, -1 really means 'no cutoff'.
-    return true;
-  }
 #ifdef COMPILER2
   if (is_tier1_compile(comp_level)) {
     if (is_not_tier1_compilable()) {
@@ -593,7 +587,16 @@
 }
 
 // call this when compiler finds that this method is not compilable
-void methodOopDesc::set_not_compilable(int comp_level) {
+void methodOopDesc::set_not_compilable(int comp_level, bool report) {
+  if (PrintCompilation && report) {
+    ttyLocker ttyl;
+    tty->print("made not compilable ");
+    this->print_short_name(tty);
+    int size = this->code_size();
+    if (size > 0)
+      tty->print(" (%d bytes)", size);
+    tty->cr();
+  }
   if ((TraceDeoptimization || LogCompilation) && (xtty != NULL)) {
     ttyLocker ttyl;
     xtty->begin_elem("make_not_compilable thread='%d'", (int) os::current_thread_id());
@@ -705,6 +708,16 @@
 // This function must not hit a safepoint!
 address methodOopDesc::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
+  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
+  if (code == NULL && UseCodeCacheFlushing) {
+    nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
+    if (saved_code != NULL) {
+      methodHandle method(this);
+      assert(!saved_code->is_osr_method(), "should not get here for osr");
+      set_code(method, saved_code);
+    }
+  }
+
   assert(_from_compiled_entry != NULL, "must be set");
   return _from_compiled_entry;
 }
@@ -733,8 +746,8 @@
   int comp_level = code->comp_level();
   // In theory there could be a race here. In practice it is unlikely
   // and not worth worrying about.
-  if (comp_level > highest_tier_compile()) {
-    set_highest_tier_compile(comp_level);
+  if (comp_level > mh->highest_tier_compile()) {
+    mh->set_highest_tier_compile(comp_level);
   }
 
   OrderAccess::storestore();
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -303,7 +303,7 @@
   bool check_code() const;      // Not inline to avoid circular ref
   nmethod* volatile code() const                 { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
   void clear_code();            // Clear out any compiled code
-  void set_code(methodHandle mh, nmethod* code);
+  static void set_code(methodHandle mh, nmethod* code);
   void set_adapter_entry(AdapterHandlerEntry* adapter) {  _adapter = adapter; }
   address get_i2c_entry();
   address get_c2i_entry();
@@ -596,7 +596,10 @@
   // whether it is not compilable for another reason like having a
   // breakpoint set in it.
   bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
-  void set_not_compilable(int comp_level = CompLevel_highest_tier);
+  void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true);
+  void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) {
+    set_not_compilable(comp_level, false);
+  }
 
   bool is_not_osr_compilable() const             { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
   void set_not_osr_compilable()                  { _access_flags.set_not_osr_compilable(); }
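
[Note] The quiet variant is a thin wrapper: the new report parameter defaults to true so every existing call site keeps the old PrintCompilation output, while broker-internal paths that would otherwise spam the log call the quiet form. A sketch of the pattern with a stand-in class:

    #include <cstdio>

    struct MethodLike {
      bool not_compilable;

      // cf. methodOopDesc::set_not_compilable(comp_level, report)
      void set_not_compilable(bool report = true) {
        if (report) std::printf("made not compilable\n");
        not_compilable = true;
      }
      // cf. set_not_compilable_quietly(): same state change, no chatter.
      void set_not_compilable_quietly() { set_not_compilable(false); }
    };
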
--- a/hotspot/src/share/vm/opto/doCall.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -182,26 +182,16 @@
             }
           }
           CallGenerator* miss_cg;
+          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
+                                    Deoptimization::Reason_bimorphic :
+                                    Deoptimization::Reason_class_check;
           if (( profile.morphism() == 1 ||
                (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
-
-              !too_many_traps(Deoptimization::Reason_class_check)
-
-              // Check only total number of traps per method to allow
-              // the transition from monomorphic to bimorphic case between
-              // compilations without falling into virtual call.
-              // A monomorphic case may have the class_check trap flag is set
-              // due to the time gap between the uncommon trap processing
-              // when flags are set in MDO and the call site bytecode execution
-              // in Interpreter when MDO counters are updated.
-              // There was also class_check trap in monomorphic case due to
-              // the bug 6225440.
-
+              !too_many_traps(jvms->method(), jvms->bci(), reason)
              ) {
             // Generate uncommon trap for class check failure path
             // in case of monomorphic or bimorphic virtual call site.
-            miss_cg = CallGenerator::for_uncommon_trap(call_method,
-                        Deoptimization::Reason_class_check,
+            miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
                         Deoptimization::Action_maybe_recompile);
           } else {
             // Generate virtual call for class check failure path
--- a/hotspot/src/share/vm/opto/ifg.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/ifg.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -736,7 +736,28 @@
         // the flags and assumes it's dead.  This keeps the (useless)
         // flag-setting behavior alive while also keeping the (useful)
         // memory update effect.
-        for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) {
+        uint begin = 1;
+        uint end = n->req();
+        if (n->Opcode() == Op_SCMemProj) {
+          begin = 0;
+        } else if (n->is_Mach()) {
+          switch (n->as_Mach()->ideal_Opcode()) {
+            case Op_MemBarAcquire:
+            case Op_MemBarVolatile:
+              if (n->len() >= MemBarNode::Precedent + 1 &&
+                  n->in(MemBarNode::Precedent) != NULL &&
+                  n->in(MemBarNode::Precedent)->outcnt() == 1) {
+                // This membar node is the single user of its input
+                // so the input won't be considered live and this node
+                // would get deleted during copy elimination so force
+                // it to be live.
+                end = MemBarNode::Precedent + 1;
+              }
+              break;
+          }
+        }
+
+        for( uint k = begin; k < end; k++ ) {
           Node *def = n->in(k);
           uint x = n2lidx(def);
           if( !x ) continue;
--- a/hotspot/src/share/vm/opto/machnode.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/machnode.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -232,7 +232,7 @@
   // Expand method for MachNode, replaces nodes representing pseudo
   // instructions with a set of nodes which represent real machine
   // instructions and compute the same value.
-  virtual MachNode *Expand( State *, Node_List &proj_list ) { return this; }
+  virtual MachNode *Expand( State *, Node_List &proj_list, Node* mem ) { return this; }
 
   // Bottom_type call; value comes from operand0
   virtual const class Type *bottom_type() const { return _opnds[0]->type(); }
--- a/hotspot/src/share/vm/opto/matcher.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1580,7 +1580,7 @@
   uint num_proj = _proj_list.size();
 
   // Perform any 1-to-many expansions required
-  MachNode *ex = mach->Expand(s,_proj_list);
+  MachNode *ex = mach->Expand(s,_proj_list, mem);
   if( ex != mach ) {
     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
     if( ex->in(1)->is_Con() )
--- a/hotspot/src/share/vm/opto/memnode.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -583,9 +583,22 @@
 // Preceeding equivalent StoreCMs may be eliminated.
 class StoreCMNode : public StoreNode {
  private:
+  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
+  virtual uint cmp( const Node &n ) const {
+    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
+      && StoreNode::cmp(n);
+  }
+  virtual uint size_of() const { return sizeof(*this); }
   int _oop_alias_idx;   // The alias_idx of OopStore
+
 public:
-  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : StoreNode(c,mem,adr,at,val,oop_store), _oop_alias_idx(oop_alias_idx) {}
+  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
+    StoreNode(c,mem,adr,at,val,oop_store),
+    _oop_alias_idx(oop_alias_idx) {
+    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
+           (_oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
+           "bad oop alias idx");
+  }
   virtual int Opcode() const;
   virtual Node *Identity( PhaseTransform *phase );
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
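
[Note] Overriding hash()/cmp()/size_of() makes GVN distinguish StoreCMs whose _oop_alias_idx differs; the invariant is that any field folded into cmp() must also feed hash(), or equal nodes could land in different value-numbering buckets. A standalone sketch of that contract, with simplified classes rather than the real Node hierarchy:

    #include <cstddef>

    struct StoreLike {
      int base;                          // whatever StoreNode::hash() covers
      virtual size_t hash() const { return (size_t)base; }
      virtual bool   cmp(const StoreLike& n) const { return base == n.base; }
      virtual ~StoreLike() {}
    };

    struct StoreCMLike : StoreLike {
      int oop_alias_idx;                 // cf. _oop_alias_idx

      // Fold the extra field into both hash and equality, as the patch does.
      virtual size_t hash() const { return StoreLike::hash() + oop_alias_idx; }
      virtual bool   cmp(const StoreLike& n) const {
        // GVN only compares nodes of the same opcode, so the downcast is
        // safe under that assumption.
        const StoreCMLike& o = static_cast<const StoreCMLike&>(n);
        return oop_alias_idx == o.oop_alias_idx && StoreLike::cmp(n);
      }
    };
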
--- a/hotspot/src/share/vm/opto/output.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/output.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1093,7 +1093,7 @@
   cb->initialize(total_req, locs_req);
 
   // Have we run out of code space?
-  if (cb->blob() == NULL) {
+  if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     turn_off_compiler(this);
     return;
   }
@@ -1314,7 +1314,7 @@
 
       // Verify that there is sufficient space remaining
       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
-      if (cb->blob() == NULL) {
+      if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
         turn_off_compiler(this);
         return;
       }
@@ -1430,10 +1430,14 @@
     _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
     // Emit the deopt handler code.
     _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
+    // Emit the MethodHandle deopt handler code.  We can use the same
+    // code as for the normal deopt handler, we just need a different
+    // entry point address.
+    _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
   }
 
   // One last check for failed CodeBuffer::expand:
-  if (cb->blob() == NULL) {
+  if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
     turn_off_compiler(this);
     return;
   }
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -414,8 +414,6 @@
 void Parse::profile_call(Node* receiver) {
   if (!method_data_update()) return;
 
-  profile_generic_call();
-
   switch (bc()) {
   case Bytecodes::_invokevirtual:
   case Bytecodes::_invokeinterface:
@@ -424,6 +422,7 @@
   case Bytecodes::_invokestatic:
   case Bytecodes::_invokedynamic:
   case Bytecodes::_invokespecial:
+    profile_generic_call();
     break;
   default: fatal("unexpected call bytecode");
   }
@@ -444,13 +443,16 @@
 void Parse::profile_receiver_type(Node* receiver) {
   assert(method_data_update(), "must be generating profile code");
 
-  // Skip if we aren't tracking receivers
-  if (TypeProfileWidth < 1) return;
-
   ciMethodData* md = method()->method_data();
   assert(md != NULL, "expected valid ciMethodData");
   ciProfileData* data = md->bci_to_data(bci());
   assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");
+
+  // Skip if we aren't tracking receivers
+  if (TypeProfileWidth < 1) {
+    increment_md_counter_at(md, data, CounterData::count_offset());
+    return;
+  }
   ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();
 
   Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));
--- a/hotspot/src/share/vm/opto/runtime.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -706,6 +706,11 @@
     // vc->set_receiver_count(empty_row, DataLayout::counter_increment);
     int count_off = ReceiverTypeData::receiver_count_cell_index(empty_row);
     *(mdp + count_off) = DataLayout::counter_increment;
+  } else {
+    // Receiver did not match any saved receiver and there is no empty row for it.
+    // Increment the total counter to indicate the polymorphic case.
+    intptr_t* count_p = (intptr_t*)(((byte*)(data)) + in_bytes(CounterData::count_offset()));
+    *count_p += DataLayout::counter_increment;
   }
 JRT_END
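
[Note] When the receiver misses every row of a full table, the new else branch folds the event into the generic counter so polymorphic call sites are still counted. A simplified sketch of the update logic, with a flat struct in place of the real MDO DataLayout:

    #include <cstddef>

    static const int kTypeProfileWidth = 2;    // cf. TypeProfileWidth

    struct ReceiverTypeRow {
      const void* klass;                       // NULL means the row is empty
      long        count;
    };

    struct ReceiverTypeData {
      long            total_count;             // generic counter
      ReceiverTypeRow rows[kTypeProfileWidth];
    };

    void profile_receiver(ReceiverTypeData* data, const void* receiver_klass) {
      for (int i = 0; i < kTypeProfileWidth; i++) {
        if (data->rows[i].klass == receiver_klass) {   // known receiver
          data->rows[i].count++;
          return;
        }
        if (data->rows[i].klass == NULL) {             // claim an empty row
          data->rows[i].klass = receiver_klass;
          data->rows[i].count = 1;
          return;
        }
      }
      // Table full, no match: record the polymorphic case in the total
      // counter instead of dropping the event (the behavior added above).
      data->total_count++;
    }
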
 
--- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -66,7 +66,7 @@
   if (!canBeCompiled(m))      return false;
 
   return !UseInterpreter ||                                              // must compile all methods
-         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops()); // eagerly compile loop methods
+         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }
 
 // Returns true if m is allowed to be compiled
@@ -137,7 +137,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
     nmethod* nm = m->code();
     if (nm == NULL ) {
       const char* comment = "count";
@@ -162,7 +162,7 @@
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
     CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
@@ -204,7 +204,7 @@
   reset_counter_for_invocation_event(m);
   const char* comment = "count";
 
-  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
+  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
     ResourceMark rm(THREAD);
     JavaThread *thread = (JavaThread*)THREAD;
     frame       fr     = thread->last_frame();
@@ -248,7 +248,7 @@
   int hot_count = m->backedge_count();
   const char* comment = "backedge_count";
 
-  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m)) {
+  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
     CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 
     NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -235,6 +235,12 @@
   assert(cb->frame_size() >= 0, "Unexpected frame size");
   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 
+  // If the deopt call site is a MethodHandle invoke call site we have
+  // to adjust the unpack_sp.
+  nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
+  if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
+    unpack_sp = deoptee.unextended_sp();
+
 #ifdef ASSERT
   assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
   Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
@@ -1332,13 +1338,14 @@
     // Whether the interpreter is producing MDO data or not, we also need
     // to use the MDO to detect hot deoptimization points and control
     // aggressive optimization.
+    bool inc_recompile_count = false;
+    ProfileData* pdata = NULL;
     if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
       assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
       uint this_trap_count = 0;
       bool maybe_prior_trap = false;
       bool maybe_prior_recompile = false;
-      ProfileData* pdata
-        = query_update_method_data(trap_mdo, trap_bci, reason,
+      pdata = query_update_method_data(trap_mdo, trap_bci, reason,
                                    //outputs:
                                    this_trap_count,
                                    maybe_prior_trap,
@@ -1374,18 +1381,7 @@
         // Detect repeated recompilation at the same BCI, and enforce a limit.
         if (make_not_entrant && maybe_prior_recompile) {
           // More than one recompile at this point.
-          trap_mdo->inc_overflow_recompile_count();
-          if (maybe_prior_trap
-              && ((uint)trap_mdo->overflow_recompile_count()
-                  > (uint)PerBytecodeRecompilationCutoff)) {
-            // Give up on the method containing the bad BCI.
-            if (trap_method() == nm->method()) {
-              make_not_compilable = true;
-            } else {
-              trap_method->set_not_compilable();
-              // But give grace to the enclosing nm->method().
-            }
-          }
+          inc_recompile_count = maybe_prior_trap;
         }
       } else {
         // For reasons which are not recorded per-bytecode, we simply
@@ -1412,7 +1408,17 @@
         reset_counters = true;
       }
 
-      if (make_not_entrant && pdata != NULL) {
+    }
+
+    // Take requested actions on the method:
+
+    // Recompile
+    if (make_not_entrant) {
+      if (!nm->make_not_entrant()) {
+        return; // the call did not change nmethod's state
+      }
+
+      if (pdata != NULL) {
         // Record the recompilation event, if any.
         int tstate0 = pdata->trap_state();
         int tstate1 = trap_state_set_recompiled(tstate0, true);
@@ -1421,7 +1427,19 @@
       }
     }
 
-    // Take requested actions on the method:
+    if (inc_recompile_count) {
+      trap_mdo->inc_overflow_recompile_count();
+      if ((uint)trap_mdo->overflow_recompile_count() >
+          (uint)PerBytecodeRecompilationCutoff) {
+        // Give up on the method containing the bad BCI.
+        if (trap_method() == nm->method()) {
+          make_not_compilable = true;
+        } else {
+          trap_method->set_not_compilable();
+          // But give grace to the enclosing nm->method().
+        }
+      }
+    }
 
     // Reset invocation counters
     if (reset_counters) {
@@ -1431,13 +1449,8 @@
         reset_invocation_counter(trap_scope);
     }
 
-    // Recompile
-    if (make_not_entrant) {
-      nm->make_not_entrant();
-    }
-
     // Give up compiling
-    if (make_not_compilable) {
+    if (make_not_compilable && !nm->method()->is_not_compilable()) {
       assert(make_not_entrant, "consistent");
       nm->method()->set_not_compilable();
     }
@@ -1510,9 +1523,11 @@
       if (tstate1 != tstate0)
         pdata->set_trap_state(tstate1);
     } else {
-      if (LogCompilation && xtty != NULL)
+      if (LogCompilation && xtty != NULL) {
+        ttyLocker ttyl;
         // Missing MDP?  Leave a small complaint in the log.
         xtty->elem("missing_mdp bci='%d'", trap_bci);
+      }
     }
   }
 
@@ -1666,6 +1681,7 @@
   "class_check",
   "array_check",
   "intrinsic",
+  "bimorphic",
   "unloaded",
   "uninitialized",
   "unreached",
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -33,12 +33,15 @@
   enum DeoptReason {
     Reason_many = -1,             // indicates presence of several reasons
     Reason_none = 0,              // indicates absence of a relevant deopt.
+    // Next 7 reasons are recorded per bytecode in DataLayout::trap_bits
     Reason_null_check,            // saw unexpected null or zero divisor (@bci)
     Reason_null_assert,           // saw unexpected non-null or non-zero (@bci)
     Reason_range_check,           // saw unexpected array index (@bci)
     Reason_class_check,           // saw unexpected object class (@bci)
     Reason_array_check,           // saw unexpected array class (aastore @bci)
     Reason_intrinsic,             // saw unexpected operand to intrinsic (@bci)
+    Reason_bimorphic,             // saw unexpected object class in bimorphic inlining (@bci)
+
     Reason_unloaded,              // unloaded class or constant pool entry
     Reason_uninitialized,         // bad class state (uninitialized)
     Reason_unreached,             // code is not reached, compiler
@@ -49,7 +52,7 @@
     Reason_predicate,             // compiler generated predicate failed
     Reason_LIMIT,
     // Note:  Keep this enum in sync. with _trap_reason_name.
-    Reason_RECORDED_LIMIT = Reason_unloaded   // some are not recorded per bc
+    Reason_RECORDED_LIMIT = Reason_bimorphic  // some are not recorded per bc
     // Note:  Reason_RECORDED_LIMIT should be < 8 to fit into 3 bits of
     // DataLayout::trap_bits.  This dependency is enforced indirectly
     // via asserts, to avoid excessive direct header-to-header dependencies.
@@ -279,7 +282,7 @@
                                        int trap_state);
 
   static bool reason_is_recorded_per_bytecode(DeoptReason reason) {
-    return reason > Reason_none && reason < Reason_RECORDED_LIMIT;
+    return reason > Reason_none && reason <= Reason_RECORDED_LIMIT;
   }
 
   static DeoptReason reason_recorded_per_bytecode_if_any(DeoptReason reason) {
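
[Note] Reason_bimorphic must stay within the per-bytecode budget: recorded reasons are stored in DataLayout::trap_bits, which is three bits wide, so Reason_RECORDED_LIMIT has to stay below 8. A compile-time sketch of the constraint and the adjusted range test:

    enum DeoptReason {
      Reason_none = 0,
      Reason_null_check, Reason_null_assert, Reason_range_check,
      Reason_class_check, Reason_array_check, Reason_intrinsic,
      Reason_bimorphic,                        // added by this change
      Reason_RECORDED_LIMIT = Reason_bimorphic
    };

    // Compile-time check mirroring the asserts referred to above: the
    // array type is ill-formed if the limit no longer fits in 3 bits.
    typedef char trap_bits_fit[(Reason_RECORDED_LIMIT < 8) ? 1 : -1];

    // cf. reason_is_recorded_per_bytecode(): the comparison becomes
    // inclusive because the limit now names a recorded reason.
    bool reason_is_recorded_per_bytecode(DeoptReason reason) {
      return reason > Reason_none && reason <= Reason_RECORDED_LIMIT;
    }
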
--- a/hotspot/src/share/vm/runtime/frame.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,7 +107,11 @@
 
 address frame::raw_pc() const {
   if (is_deoptimized_frame()) {
-    return ((nmethod*) cb())->deopt_handler_begin() - pc_return_offset;
+    nmethod* nm = cb()->as_nmethod_or_null();
+    if (nm->is_method_handle_return(pc()))
+      return nm->deopt_mh_handler_begin() - pc_return_offset;
+    else
+      return nm->deopt_handler_begin() - pc_return_offset;
   } else {
     return (pc() - pc_return_offset);
   }
@@ -269,10 +273,16 @@
   } // NeedsDeoptSuspend
 
 
-  address deopt = nm->deopt_handler_begin();
+  // If the call site is a MethodHandle call site use the MH deopt
+  // handler.
+  address deopt = nm->is_method_handle_return(pc()) ?
+    nm->deopt_mh_handler_begin() :
+    nm->deopt_handler_begin();
+
   // Save the original pc before we patch in the new one
   nm->set_original_pc(this, pc());
   patch_pc(thread, deopt);
+
 #ifdef ASSERT
   {
     RegisterMap map(thread, false);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -742,6 +742,9 @@
   diagnostic(bool, PrintAdapterHandlers, false,                             \
           "Print code generated for i2c/c2i adapters")                      \
                                                                             \
+  develop(bool, VerifyAdapterSharing, false,                                \
+          "Verify that the code for shared adapters is the equivalent")     \
+                                                                            \
   diagnostic(bool, PrintAssembly, false,                                    \
           "Print assembly code (using external disassembler.so)")           \
                                                                             \
@@ -2864,7 +2867,7 @@
   product(intx, PerMethodRecompilationCutoff, 400,                          \
           "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
                                                                             \
-  product(intx, PerBytecodeRecompilationCutoff, 100,                        \
+  product(intx, PerBytecodeRecompilationCutoff, 200,                        \
           "Per-BCI limit on repeated recompilation (-1=>'Inf')")            \
                                                                             \
   product(intx, PerMethodTrapLimit,  100,                                   \
@@ -3117,6 +3120,15 @@
   notproduct(bool, ExitOnFullCodeCache, false,                              \
           "Exit the VM if we fill the code cache.")                         \
                                                                             \
+  product(bool, UseCodeCacheFlushing, false,                                \
+          "Attempt to clean the code cache before shutting off compiler")   \
+                                                                            \
+  product(intx,  MinCodeCacheFlushingInterval, 30,                          \
+          "Min number of seconds between code cache cleaning sessions")     \
+                                                                            \
+  product(uintx,  CodeCacheFlushingMinimumFreeSpace, 1500*K,                \
+          "When less than X space left, start code cache cleaning")         \
+                                                                            \
   /* interpreter debugging */                                               \
   develop(intx, BinarySwitchThreshold, 5,                                   \
           "Minimal number of lookupswitch entries for rewriting to binary " \
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -1033,10 +1033,20 @@
   address   sender_pc = caller_frame.pc();
   CodeBlob* sender_cb = caller_frame.cb();
   nmethod*  sender_nm = sender_cb->as_nmethod_or_null();
+  bool is_mh_invoke_via_adapter = false;  // Direct c2c call or via adapter?
+  if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
+    // If the callee_target is set, then we have come here via an i2c
+    // adapter.
+    methodOop callee = thread->callee_target();
+    if (callee != NULL) {
+      assert(callee->is_method(), "sanity");
+      is_mh_invoke_via_adapter = true;
+    }
+  }
 
   if (caller_frame.is_interpreted_frame() ||
-      caller_frame.is_entry_frame() ||
-      (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc))) {
+      caller_frame.is_entry_frame()       ||
+      is_mh_invoke_via_adapter) {
     methodOop callee = thread->callee_target();
     guarantee(callee != NULL && callee->is_method(), "bad handshake");
     thread->set_vm_result(callee);
@@ -1351,7 +1361,7 @@
 // We are calling the interpreter via a c2i. Normally this would mean that
 // we were called by a compiled method. However we could have lost a race
 // where we went int -> i2c -> c2i and so the caller could in fact be
-// interpreted. If the caller is compiled we attampt to patch the caller
+// interpreted. If the caller is compiled we attempt to patch the caller
 // so he no longer calls into the interpreter.
 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
   methodOop moop(method);
@@ -1367,10 +1377,19 @@
   // we did we'd leap into space because the callsite needs to use
   // "to interpreter" stub in order to load up the methodOop. Don't
   // ask me how I know this...
-  //
 
   CodeBlob* cb = CodeCache::find_blob(caller_pc);
-  if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+  if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
+    return;
+  }
+
+  // The check above makes sure this is a nmethod.
+  nmethod* nm = cb->as_nmethod_or_null();
+  assert(nm, "must be");
+
+  // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
+  // to implement MethodHandle actions.
+  if (nm->is_method_handle_return(caller_pc)) {
     return;
   }
 
@@ -1385,7 +1404,7 @@
 
   if (moop->code() == NULL) return;
 
-  if (((nmethod*)cb)->is_in_use()) {
+  if (nm->is_in_use()) {
 
     // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
@@ -1417,7 +1436,7 @@
         if (callee == cb || callee->is_adapter_blob()) {
           // static call or optimized virtual
           if (TraceCallFixup) {
-            tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+            tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", caller_pc);
             moop->print_short_name(tty);
             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
           }
@@ -1433,7 +1452,7 @@
         }
       } else {
           if (TraceCallFixup) {
-            tty->print("already patched  callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
+            tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
             moop->print_short_name(tty);
             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
           }
@@ -1787,55 +1806,78 @@
 class AdapterFingerPrint : public CHeapObj {
  private:
   union {
-    signed char  _compact[12];
-    int          _compact_int[3];
-    intptr_t*    _fingerprint;
+    int  _compact[3];
+    int* _fingerprint;
   } _value;
-  int _length; // A negative length indicates that _value._fingerprint is the array.
-               // Otherwise it's in the compact form.
+  int _length; // A negative length indicates the fingerprint is in the compact form.
+               // Otherwise _value._fingerprint is the array.
+
+  // Remap BasicTypes that are handled equivalently by the adapters.
+  // These are correct for the current system but someday it might be
+  // necessary to make this mapping platform dependent.
+  static BasicType adapter_encoding(BasicType in) {
+    assert((~0xf & in) == 0, "must fit in 4 bits");
+    switch(in) {
+      case T_BOOLEAN:
+      case T_BYTE:
+      case T_SHORT:
+      case T_CHAR:
+        // These are all promoted to T_INT in the calling convention
+        return T_INT;
 
- public:
-  AdapterFingerPrint(int total_args_passed, VMRegPair* regs) {
-    assert(sizeof(_value._compact) == sizeof(_value._compact_int), "must match");
-    _length = total_args_passed * 2;
-    if (_length < (int)sizeof(_value._compact)) {
-      _value._compact_int[0] = _value._compact_int[1] = _value._compact_int[2] = 0;
-      // Storing the signature encoded as signed chars hits about 98%
-      // of the time.
-      signed char* ptr = _value._compact;
-      int o = 0;
-      for (int i = 0; i < total_args_passed; i++) {
-        VMRegPair pair = regs[i];
-        intptr_t v1 = pair.first()->value();
-        intptr_t v2 = pair.second()->value();
-        if (v1 == (signed char) v1 &&
-            v2 == (signed char) v2) {
-          _value._compact[o++] = v1;
-          _value._compact[o++] = v2;
-        } else {
-          goto big;
+      case T_OBJECT:
+      case T_ARRAY:
+        if (!TaggedStackInterpreter) {
+#ifdef _LP64
+          return T_LONG;
+#else
+          return T_INT;
+#endif
         }
-      }
-      _length = -_length;
-      return;
-    }
-  big:
-    _value._fingerprint = NEW_C_HEAP_ARRAY(intptr_t, _length);
-    int o = 0;
-    for (int i = 0; i < total_args_passed; i++) {
-      VMRegPair pair = regs[i];
-      intptr_t v1 = pair.first()->value();
-      intptr_t v2 = pair.second()->value();
-      _value._fingerprint[o++] = v1;
-      _value._fingerprint[o++] = v2;
+        return T_OBJECT;
+
+      case T_INT:
+      case T_LONG:
+      case T_FLOAT:
+      case T_DOUBLE:
+      case T_VOID:
+        return in;
+
+      default:
+        ShouldNotReachHere();
+        return T_CONFLICT;
     }
   }
 
-  AdapterFingerPrint(AdapterFingerPrint* orig) {
-    _length = orig->_length;
-    _value = orig->_value;
-    // take ownership of any storage by destroying the length
-    orig->_length = 0;
+ public:
+  AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
+    // The fingerprint is based on the BasicType signature encoded
+    // into an array of ints with four entries per int.
+    int* ptr;
+    int len = (total_args_passed + 3) >> 2;
+    if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
+      _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
+      // The packed signature fits in the compact in-place form about 98%
+      // of the time.
+      _length = -len;
+      ptr = _value._compact;
+    } else {
+      _length = len;
+      _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
+      ptr = _value._fingerprint;
+    }
+
+    // Now pack the BasicTypes with 4 per int
+    int sig_index = 0;
+    for (int index = 0; index < len; index++) {
+      int value = 0;
+      for (int byte = 0; byte < 4; byte++) {
+        if (sig_index < total_args_passed) {
+          value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
+        }
+      }
+      ptr[index] = value;
+    }
   }
 
   ~AdapterFingerPrint() {
@@ -1844,11 +1886,7 @@
     }
   }
 
-  AdapterFingerPrint* allocate() {
-    return new AdapterFingerPrint(this);
-  }
-
-  intptr_t value(int index) {
+  int value(int index) {
     if (_length < 0) {
       return _value._compact[index];
     }
@@ -1864,9 +1902,9 @@
   }
 
   unsigned int compute_hash() {
-    intptr_t hash = 0;
+    int hash = 0;
     for (int i = 0; i < length(); i++) {
-      intptr_t v = value(i);
+      int v = value(i);
       hash = (hash << 8) ^ v ^ (hash >> 5);
     }
     return (unsigned int)hash;
@@ -1885,9 +1923,9 @@
       return false;
     }
     if (_length < 0) {
-      return _value._compact_int[0] == other->_value._compact_int[0] &&
-             _value._compact_int[1] == other->_value._compact_int[1] &&
-             _value._compact_int[2] == other->_value._compact_int[2];
+      return _value._compact[0] == other->_value._compact[0] &&
+             _value._compact[1] == other->_value._compact[1] &&
+             _value._compact[2] == other->_value._compact[2];
     } else {
       for (int i = 0; i < _length; i++) {
         if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
@@ -1935,10 +1973,15 @@
     add_entry(index, entry);
   }
 
+  void free_entry(AdapterHandlerEntry* entry) {
+    entry->deallocate();
+    BasicHashtable::free_entry(entry);
+  }
+
   // Find an entry with the same fingerprint if it exists
-  AdapterHandlerEntry* lookup(int total_args_passed, VMRegPair* regs) {
+  AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
     debug_only(_lookups++);
-    AdapterFingerPrint fp(total_args_passed, regs);
+    AdapterFingerPrint fp(total_args_passed, sig_bt);
     unsigned int hash = fp.compute_hash();
     int index = hash_to_index(hash);
     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
@@ -2110,17 +2153,26 @@
     }
     assert(i == total_args_passed, "");
 
-    // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
-    int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+    // Lookup method signature's fingerprint
+    entry = _adapters->lookup(total_args_passed, sig_bt);
 
-    // Lookup method signature's fingerprint
-    entry = _adapters->lookup(total_args_passed, regs);
+#ifdef ASSERT
+    AdapterHandlerEntry* shared_entry = NULL;
+    if (VerifyAdapterSharing && entry != NULL) {
+      shared_entry = entry;
+      entry = NULL;
+    }
+#endif
+
     if (entry != NULL) {
       return entry;
     }
 
+    // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
+    int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
+
     // Make a C heap allocated version of the fingerprint to store in the adapter
-    fingerprint = new AdapterFingerPrint(total_args_passed, regs);
+    fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
 
     // Create I2C & C2I handlers
 
@@ -2139,6 +2191,20 @@
                                                      regs,
                                                      fingerprint);
 
+#ifdef ASSERT
+      if (VerifyAdapterSharing) {
+        if (shared_entry != NULL) {
+          assert(shared_entry->compare_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt),
+                 "code must match");
+          // Release the one just created and return the original
+          _adapters->free_entry(entry);
+          return shared_entry;
+        } else {
+          entry->save_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt);
+        }
+      }
+#endif
+
       B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
       NOT_PRODUCT(code_size = buffer.code_size());
     }
@@ -2146,19 +2212,8 @@
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some nondescript Java thread.
-      UseInterpreter = true;
-      if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-        warning("CodeCache is full. Compiler has been disabled");
-        if (CompileTheWorld || ExitOnFullCodeCache) {
-          before_exit(JavaThread::current());
-          exit_globals(); // will delete tty
-          vm_direct_exit(CompileTheWorld ? 0 : 1);
-        }
-#endif
-        UseCompiler               = false;
-        AlwaysCompileLoopMethods  = false;
-      }
+      MutexUnlocker mu(AdapterHandlerLibrary_lock);
+      CompileBroker::handle_full_code_cache();
       return NULL; // Out of CodeCache space
     }
     entry->relocate(B->instructions_begin());
@@ -2204,6 +2259,44 @@
     _c2i_unverified_entry += delta;
 }
 
+
+void AdapterHandlerEntry::deallocate() {
+  delete _fingerprint;
+#ifdef ASSERT
+  if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
+  if (_saved_sig)  FREE_C_HEAP_ARRAY(BasicType, _saved_sig);
+#endif
+}
+
+
+#ifdef ASSERT
+// Capture the code before relocation so that it can be compared
+// against other versions.  If the code is captured after relocation
+// then relative instructions won't be equivalent.
+void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
+  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
+  _code_length = length;
+  memcpy(_saved_code, buffer, length);
+  _total_args_passed = total_args_passed;
+  _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
+  memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
+}
+
+
+bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
+  if (length != _code_length) {
+    return false;
+  }
+  for (int i = 0; i < length; i++) {
+    if (buffer[i] != _saved_code[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+#endif
+
+
 // Create a native wrapper for this native method.  The wrapper converts the
 // java compiled calling convention to the native convention, handlizes
 // arguments, and transitions to native.  On return from the native we transition
@@ -2282,19 +2375,8 @@
     // CodeCache is full, disable compilation
     // Ought to log this but compile log is only per compile thread
     // and we're some nondescript Java thread.
-    UseInterpreter = true;
-    if (UseCompiler || AlwaysCompileLoopMethods ) {
-#ifndef PRODUCT
-      warning("CodeCache is full. Compiler has been disabled");
-      if (CompileTheWorld || ExitOnFullCodeCache) {
-        before_exit(JavaThread::current());
-        exit_globals(); // will delete tty
-        vm_direct_exit(CompileTheWorld ? 0 : 1);
-      }
-#endif
-      UseCompiler               = false;
-      AlwaysCompileLoopMethods  = false;
-    }
+    MutexUnlocker mu(AdapterHandlerLibrary_lock);
+    CompileBroker::handle_full_code_cache();
   }
   return nm;
 }
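
The AdapterFingerPrint change above packs one 4-bit adapter_encoding() value per signature entry, four entries to an int. The following is a minimal standalone sketch of that packing; the BasicType constants below merely mirror HotSpot's values and are defined here only so the example compiles on its own:

#include <cstdio>

// Hypothetical stand-ins for HotSpot's BasicType constants; only the
// ones used below are defined.
enum BasicType { T_BOOLEAN = 4, T_INT = 10, T_LONG = 11, T_OBJECT = 12 };

// Same remapping idea as adapter_encoding(): sub-int types promote to
// T_INT, and T_OBJECT widens to T_LONG (assuming LP64, untagged stack).
static int adapter_encoding(BasicType in) {
  switch (in) {
    case T_BOOLEAN: return T_INT;
    case T_OBJECT:  return T_LONG;
    default:        return in;
  }
}

int main() {
  BasicType sig[] = { T_BOOLEAN, T_LONG, T_OBJECT };
  const int total_args_passed = 3;
  int len = (total_args_passed + 3) >> 2;   // ints needed, 4 entries per int
  for (int index = 0, sig_index = 0; index < len; index++) {
    int value = 0;
    for (int byte = 0; byte < 4; byte++) {
      if (sig_index < total_args_passed) {
        value = (value << 4) | adapter_encoding(sig[sig_index++]);
      }
    }
    printf("word %d = 0x%08x\n", index, value);  // prints 0x00000abb
  }
  return 0;
}

Two signatures that remap to the same encoding, for example (byte) and (short), now produce identical fingerprints and therefore share one adapter; -XX:+VerifyAdapterSharing regenerates the code and byte-compares it to double-check that assumption.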
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -540,13 +540,30 @@
   address _c2i_entry;
   address _c2i_unverified_entry;
 
+#ifdef ASSERT
+  // Captures code and signature used to generate this adapter when
+  // verifying adapter equivalence.
+  unsigned char* _saved_code;
+  int            _code_length;
+  BasicType*     _saved_sig;
+  int            _total_args_passed;
+#endif
+
   void init(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
     _fingerprint = fingerprint;
     _i2c_entry = i2c_entry;
     _c2i_entry = c2i_entry;
     _c2i_unverified_entry = c2i_unverified_entry;
+#ifdef ASSERT
+    _saved_code = NULL;
+    _code_length = 0;
+    _saved_sig = NULL;
+    _total_args_passed = 0;
+#endif
   }
 
+  void deallocate();
+
   // should never be used
   AdapterHandlerEntry();
 
@@ -566,6 +583,12 @@
     return (AdapterHandlerEntry*)BasicHashtableEntry::next();
   }
 
+#ifdef ASSERT
+  // Used to verify that code generated for shared adapters is equivalent
+  void save_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
+  bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
+#endif
+
 #ifndef PRODUCT
   void print();
 #endif /* PRODUCT */
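
Under -XX:+VerifyAdapterSharing the entry keeps a private copy of its unrelocated code and signature so a freshly generated duplicate can be byte-compared against it. Below is a rough model of that bookkeeping, assuming plain malloc ownership in place of HotSpot's C-heap macros (the struct and names are illustrative only):

#include <cstdlib>
#include <cstring>

// Toy stand-in for AdapterHandlerEntry's ASSERT-only fields.
struct SavedAdapterCode {
  unsigned char* code;
  int            length;

  // Capture before relocation, as the real save_code() does, so that
  // PC-relative instructions still compare equal across copies.
  void save(const unsigned char* buffer, int len) {
    code   = static_cast<unsigned char*>(malloc(len));
    length = len;
    memcpy(code, buffer, len);
  }

  // Byte-for-byte equality; the VM version loops explicitly, but
  // memcmp expresses the same check.
  bool matches(const unsigned char* buffer, int len) const {
    return len == length && memcmp(code, buffer, len) == 0;
  }
};

Capturing before relocation matters because, as the sharedRuntime.cpp comment notes, relocated PC-relative instructions would differ between otherwise identical adapters.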
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -33,6 +33,11 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_was_full = false;
+jint      NMethodSweeper::_advise_to_sweep = 0;
+jlong     NMethodSweeper::_last_was_full = 0;
+uint      NMethodSweeper::_highest_marked = 0;
+long      NMethodSweeper::_was_full_traversal = 0;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
@@ -114,6 +119,40 @@
       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
     }
   }
+
+  if (UseCodeCacheFlushing) {
+    if (!CodeCache::needs_flushing()) {
+      // In a safepoint, no race with setters
+      _advise_to_sweep = 0;
+    }
+
+    if (was_full()) {
+      // There was some progress so attempt to restart the compiler
+      jlong now           = os::javaTimeMillis();
+      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+      jlong curr_interval = now - _last_was_full;
+      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
+        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+        set_was_full(false);
+
+        // Update the _last_was_full time so we can tell how fast the
+        // code cache is filling up
+        _last_was_full = os::javaTimeMillis();
+
+        if (PrintMethodFlushing) {
+          tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
+            CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+        }
+        if (LogCompilation && (xtty != NULL)) {
+          ttyLocker ttyl;
+          xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                           CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+          xtty->stamp();
+          xtty->end_elem();
+        }
+      }
+    }
+  }
 }
 
 
@@ -137,12 +176,12 @@
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
       if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm);
+        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
       nm->flush();
     } else {
       if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm);
+        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
       _rescan = true;
@@ -152,7 +191,7 @@
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
       if (PrintMethodFlushing && Verbose) {
-        tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm);
+        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
       _rescan = true;
@@ -167,7 +206,7 @@
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
     if (PrintMethodFlushing && Verbose)
-      tty->print_cr("### Nmethod 0x%x (unloaded) being made zombie", nm);
+      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       nm->flush();
@@ -177,7 +216,167 @@
     }
   } else {
     assert(nm->is_alive(), "should be alive");
+
+    if (UseCodeCacheFlushing) {
+      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
+          (_traversals > _was_full_traversal + 2) && ((uint)nm->compile_id() < _highest_marked) &&
+          CodeCache::needs_flushing()) {
+        // This method has not been called since the forced cleanup happened
+        nm->make_not_entrant();
+      }
+    }
+
     // Clean up all inline caches that point to zombie/not-entrant methods
     nm->cleanup_inline_caches();
   }
 }
+
+// Code cache unloading: when compilers notice the code cache is getting full,
+// they will call a vm op that comes here. This code attempts to speculatively
+// unload the oldest half of the nmethods (based on the compile job id) by
+// saving the old code in a list in the CodeCache. Execution then resumes. If
+// a method so marked is not called again by the second safepoint after the
+// current one, the nmethod is made not entrant and reclaimed by normal
+// sweeping. If the method is called, the methodOop's _code field is restored
+// and the methodOop/nmethod pair goes back to its normal state.
+void NMethodSweeper::handle_full_code_cache(bool is_full) {
+  // Only the first one to notice can advise us to start early cleaning
+  if (!is_full) {
+    jint old = Atomic::cmpxchg(1, &_advise_to_sweep, 0);
+    if (old != 0) {
+      return;
+    }
+  }
+
+  if (is_full) {
+    // Since code cache is full, immediately stop new compiles
+    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+    if (!did_set) {
+      // only the first to notice can start the cleaning,
+      // others will go back and block
+      return;
+    }
+    set_was_full(true);
+
+    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
+    jlong now = os::javaTimeMillis();
+    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_was_full;
+    if (curr_interval < max_interval) {
+      _rescan = true;
+      if (PrintMethodFlushing) {
+        tty->print_cr("### handle full too often, turning off compiler");
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("disable_compiler flushing_interval='" UINT64_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                         curr_interval/1000, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      return;
+    }
+  }
+
+  VM_HandleFullCodeCache op(is_full);
+  VMThread::execute(&op);
+
+  // rescan again as soon as possible
+  _rescan = true;
+}
+
+void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
+  // If there was a race in detecting full code cache, only run
+  // one vm op for it or keep the compiler shut off
+
+  debug_only(jlong start = os::javaTimeMillis();)
+
+  if ((!was_full()) && (is_full)) {
+    if (!CodeCache::needs_flushing()) {
+      if (PrintMethodFlushing) {
+        tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
+          CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+      }
+      if (LogCompilation && (xtty != NULL)) {
+        ttyLocker ttyl;
+        xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                         CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+        xtty->stamp();
+        xtty->end_elem();
+      }
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      return;
+    }
+  }
+
+  // Traverse the code cache trying to dump the oldest nmethods
+  uint curr_max_comp_id = CompileBroker::get_compilation_id();
+  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Cleaning code cache: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes",
+        CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+  }
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("start_cleaning_code_cache live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                      CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+
+  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
+  jint disconnected = 0;
+  jint made_not_entrant  = 0;
+  while (nm != NULL) {
+    uint curr_comp_id = nm->compile_id();
+
+    // OSR methods cannot be flushed like this. Also, don't flush native methods
+    // since they are part of the JDK in most cases
+    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
+        (!nm->is_native_method()) && (curr_comp_id < flush_target)) {
+
+      if (nm->method()->code() == nm) {
+        // This method has not been previously considered for
+        // unloading, or it was restored already
+        CodeCache::speculatively_disconnect(nm);
+        disconnected++;
+      } else if (nm->is_speculatively_disconnected()) {
+        // This method was previously considered for preemptive unloading and has not been called since
+        nm->method()->invocation_counter()->decay();
+        nm->method()->backedge_counter()->decay();
+        nm->make_not_entrant();
+        made_not_entrant++;
+      }
+
+      if (curr_comp_id > _highest_marked) {
+        _highest_marked = curr_comp_id;
+      }
+    }
+    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
+  }
+
+  if (LogCompilation && (xtty != NULL)) {
+    ttyLocker ttyl;
+    xtty->begin_elem("stop_cleaning_code_cache disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
+                      disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
+    xtty->stamp();
+    xtty->end_elem();
+  }
+
+  // Shut off the compiler. The sweeper will run when exiting this safepoint
+  // and turn the compiler back on if it clears enough space.
+  if (was_full()) {
+    _last_was_full = os::javaTimeMillis();
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+  }
+
+  // After two more traversals the sweeper will get rid of unrestored nmethods
+  _was_full_traversal = _traversals;
+#ifdef ASSERT
+  jlong end = os::javaTimeMillis();
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end - start);
+  }
+#endif
+}
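
speculative_disconnect_nmethods() aims at the older half of the compile-id range: everything below the midpoint between the last high-water mark and the newest compile id. Here is a self-contained illustration of that cutoff arithmetic; the ids are invented for the example:

#include <cstdio>

int main() {
  unsigned int highest_marked   = 200;   // hypothetical: id reached at the last unload
  unsigned int curr_max_comp_id = 1000;  // hypothetical: newest compile id now
  // Midpoint between the two: nmethods with compile_id below this are
  // candidates for speculative disconnection.
  unsigned int flush_target =
      ((curr_max_comp_id - highest_marked) >> 1) + highest_marked;
  printf("flush_target = %u\n", flush_target);  // 600: ids below 600 are "old"
  return 0;
}

An nmethod below the target is first disconnected from its methodOop; if it is still disconnected when the sweeper comes back around, it is made not entrant and reclaimed.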
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -38,6 +38,11 @@
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethods that are still on stack
 
+  static bool      _was_full;        // remember if we did emergency unloading
+  static jint      _advise_to_sweep; // flag to indicate code cache getting full
+  static jlong     _last_was_full;   // timestamp of last emergency unloading
+  static uint      _highest_marked;  // highest compile id dumped at last emergency unloading
+  static long      _was_full_traversal; // traversal number at last emergency unloading
 
   static void process_nmethod(nmethod *nm);
  public:
@@ -51,4 +56,10 @@
     // changes to false at safepoint so we can never overwrite it with false.
      _rescan = true;
   }
+
+  static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
+  static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
+
+  static void set_was_full(bool state) { _was_full = state; }
+  static bool was_full() { return _was_full; }
 };
--- a/hotspot/src/share/vm/runtime/vm_operations.cpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/vm_operations.cpp	Thu Feb 04 03:34:05 2010 -0800
@@ -151,6 +151,10 @@
 
 #endif // !PRODUCT
 
+void VM_HandleFullCodeCache::doit() {
+  NMethodSweeper::speculative_disconnect_nmethods(_is_full);
+}
+
 void VM_Verify::doit() {
   Universe::verify();
 }
--- a/hotspot/src/share/vm/runtime/vm_operations.hpp	Wed Jan 27 22:38:37 2010 -0800
+++ b/hotspot/src/share/vm/runtime/vm_operations.hpp	Thu Feb 04 03:34:05 2010 -0800
@@ -41,6 +41,7 @@
   template(DeoptimizeFrame)                       \
   template(DeoptimizeAll)                         \
   template(ZombieAll)                             \
+  template(HandleFullCodeCache)                   \
   template(Verify)                                \
   template(PrintJNI)                              \
   template(HeapDumper)                            \
@@ -241,6 +242,16 @@
   bool allow_nested_vm_operations() const        { return true;  }
 };
 
+class VM_HandleFullCodeCache: public VM_Operation {
+ private:
+  bool  _is_full;
+ public:
+  VM_HandleFullCodeCache(bool is_full)           { _is_full = is_full; }
+  VMOp_Type type() const                         { return VMOp_HandleFullCodeCache; }
+  void doit();
+  bool allow_nested_vm_operations() const        { return true; }
+};
+
 #ifndef PRODUCT
 class VM_DeoptimizeAll: public VM_Operation {
  private:
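
Taken together, the pieces form a small protocol: a compiler thread that fails a code-cache allocation calls CompileBroker::handle_full_code_cache(), which (per the sweeper.hpp comment) presumably reaches NMethodSweeper::handle_full_code_cache(); the cmpxchg on _advise_to_sweep lets only the first thread that notices a nearly-full cache schedule the VM_HandleFullCodeCache operation. A thread-level model of that gate, using std::atomic as a stand-in for HotSpot's Atomic::cmpxchg (all names here are illustrative):

#include <atomic>
#include <cstdio>

std::atomic<int> advise_to_sweep(0);  // models NMethodSweeper::_advise_to_sweep

// Returns true only for the first caller; later callers see the flag
// already set and back off, mirroring the early return in
// handle_full_code_cache() when is_full is false.
bool first_to_advise() {
  int expected = 0;
  return advise_to_sweep.compare_exchange_strong(expected, 1);
}

int main() {
  printf("%d\n", first_to_advise());  // 1: this caller schedules the sweep
  printf("%d\n", first_to_advise());  // 0: the race was already won
  return 0;
}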
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6792161/Test6792161.java	Thu Feb 04 03:34:05 2010 -0800
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6792161
+ * @summary assert("No dead instructions after post-alloc")
+ *
+ * @run main/othervm -Xcomp -XX:MaxInlineSize=120 Test6792161
+ */
+
+import java.lang.reflect.Constructor;
+public class Test6792161 {
+    static Constructor test(Class cls) throws Exception {
+        Class[] args = { String.class };
+        try {
+            return cls.getConstructor(args);
+        } catch (NoSuchMethodException e) {}
+        return cls.getConstructor(new Class[0]);
+    }
+    public static void main(final String[] args) throws Exception {
+        try {
+            for (int i = 0; i < 100000; i++) {
+                Constructor ctor = test(Class.forName("Test6792161"));
+            }
+        } catch (NoSuchMethodException e) {}
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6916644/Test6916644.java	Thu Feb 04 03:34:05 2010 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6916644
+ * @summary C2 compiler crash on x86
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test6916644.test Test6916644
+ */
+
+public class Test6916644 {
+    static int result;
+    static int i1;
+    static int i2;
+
+    static public void test(double d) {
+        result = (d <= 0.0D) ? i1 : i2;
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 100000; i++) {
+            // Use an alternating value so the test doesn't always go
+            // the same direction.  Otherwise the compiler won't
+            // transform it into a cmove.
+            test(i & 1);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6921969/TestMultiplyLongHiZero.java	Thu Feb 04 03:34:05 2010 -0800
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2010 Google, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 6921969
+ * @summary Tests shorter long multiply sequences when the high 32 bits of long operands are known to be zero on x86_32
+ * @run main/othervm -Xbatch -XX:-Inline -XX:CompileOnly=.testNormal,.testLeftOptimized,.testRightOptimized,.testOptimized,.testLeftOptimized_LoadUI2L,.testRightOptimized_LoadUI2L,.testOptimized_LoadUI2L TestMultiplyLongHiZero
+ */
+
+// This test must run without any command line arguments.
+
+public class TestMultiplyLongHiZero {
+
+  private static void check(long leftFactor, long rightFactor, long optimizedProduct, long constantProduct) {
+    long normalProduct = leftFactor * rightFactor; // unaffected by the new optimization
+    if (optimizedProduct != constantProduct || normalProduct != constantProduct) {
+      throw new RuntimeException("Not all three products are equal: " +
+                                 Long.toHexString(normalProduct) + ", " +
+                                 Long.toHexString(optimizedProduct) + ", " +
+                                 Long.toHexString(constantProduct));
+    }
+  }
+
+  private static int initInt(String[] args, int v) {
+    if (args.length > 0) {
+      try {
+        return Integer.valueOf(args[0]);
+      } catch (NumberFormatException e) { }
+    }
+    return v;
+  }
+
+  private static final long mask32 = 0x00000000FFFFFFFFL;
+
+  private static void testNormal(int leftFactor, int rightFactor, long constantProduct) {
+    check((long) leftFactor,
+          (long) rightFactor,
+          (long) leftFactor * (long) rightFactor, // unaffected by the new optimization
+          constantProduct);
+  }
+
+  private static void testLeftOptimized(int leftFactor, int rightFactor, long constantProduct) {
+    check((leftFactor & mask32),
+          (long) rightFactor,
+          (leftFactor & mask32) * (long) rightFactor, // left factor optimized
+          constantProduct);
+  }
+
+  private static void testRightOptimized(int leftFactor, int rightFactor, long constantProduct) {
+    check((long) leftFactor,
+          (rightFactor & mask32),
+          (long) leftFactor * (rightFactor & mask32), // right factor optimized
+          constantProduct);
+  }
+
+  private static void testOptimized(int leftFactor, int rightFactor, long constantProduct) {
+    check((leftFactor & mask32),
+          (rightFactor & mask32),
+          (leftFactor & mask32) * (rightFactor & mask32), // both factors optimized
+          constantProduct);
+  }
+
+  private static void testLeftOptimized_LoadUI2L(int leftFactor, int rightFactor, long constantProduct, int[] factors) {
+    check((leftFactor & mask32),
+          (long) rightFactor,
+          (factors[0] & mask32) * (long) rightFactor, // left factor optimized
+          constantProduct);
+  }
+
+  private static void testRightOptimized_LoadUI2L(int leftFactor, int rightFactor, long constantProduct, int[] factors) {
+    check((long) leftFactor,
+          (rightFactor & mask32),
+          (long) leftFactor * (factors[1] & mask32), // right factor optimized
+          constantProduct);
+  }
+
+  private static void testOptimized_LoadUI2L(int leftFactor, int rightFactor, long constantProduct, int[] factors) {
+    check((leftFactor & mask32),
+          (rightFactor & mask32),
+          (factors[0] & mask32) * (factors[1] & mask32), // both factors optimized
+          constantProduct);
+  }
+
+  private static void test(int leftFactor, int rightFactor,
+                           long normalConstantProduct,
+                           long leftOptimizedConstantProduct,
+                           long rightOptimizedConstantProduct,
+                           long optimizedConstantProduct) {
+    int[] factors = new int[2];
+    factors[0] = leftFactor;
+    factors[1] = rightFactor;
+    testNormal(leftFactor, rightFactor, normalConstantProduct);
+    testLeftOptimized(leftFactor, rightFactor, leftOptimizedConstantProduct);
+    testRightOptimized(leftFactor, rightFactor, rightOptimizedConstantProduct);
+    testOptimized(leftFactor, rightFactor, optimizedConstantProduct);
+    testLeftOptimized_LoadUI2L(leftFactor, rightFactor, leftOptimizedConstantProduct, factors);
+    testRightOptimized_LoadUI2L(leftFactor, rightFactor, rightOptimizedConstantProduct, factors);
+    testOptimized_LoadUI2L(leftFactor, rightFactor, optimizedConstantProduct, factors);
+  }
+
+  public static void main(String[] args) {
+    for (int i = 0; i < 100000; ++i) { // Trigger compilation
+      int i0 = initInt(args, 1);
+      int i1 = initInt(args, 3);
+      int i2 = initInt(args, -1);
+      int i3 = initInt(args, 0x7FFFFFFF);
+      test(i0, i1, 3L, 3L, 3L, 3L);
+      test(i0, i2, -1L, -1L, 0xFFFFFFFFL, 0xFFFFFFFFL);
+      test(i0, i3, 0x7FFFFFFFL, 0x7FFFFFFFL, 0x7FFFFFFFL, 0x7FFFFFFFL);
+      test(i1, i2, -3L, -3L, 0x2FFFFFFFDL, 0x2FFFFFFFDL);
+      test(i1, i3, 0x17FFFFFFDL, 0x17FFFFFFDL, 0x17FFFFFFDL, 0x17FFFFFFDL);
+      test(i2, i3, 0xFFFFFFFF80000001L, 0x7FFFFFFE80000001L,
+           0xFFFFFFFF80000001L, 0x7FFFFFFE80000001L);
+    }
+  }
+}
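
For context on what this test exercises: on x86_32 a 64x64-bit multiply normally takes three 32-bit multiplies, since (aH*2^32 + aL) * (bH*2^32 + bL) = aL*bL + (aH*bL + aL*bH)*2^32 (mod 2^64). When both high halves are provably zero, as after the & mask32 expressions above, only the aL*bL term survives and a single multiply suffices. A quick standalone check of that identity, with values borrowed from the i1/i2 test case:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t a = 0xFFFFFFFFull;  // high 32 bits known to be zero
  uint64_t b = 3ull;
  uint32_t aL = static_cast<uint32_t>(a);
  uint32_t bL = static_cast<uint32_t>(b);
  // A single 32x32->64 multiply suffices when aH == bH == 0.
  uint64_t product = static_cast<uint64_t>(aL) * bL;
  printf("%d\n", product == a * b);  // prints 1: 0x2FFFFFFFD both ways
  return 0;
}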