Merge

author:      kvn
date:        Mon, 14 Feb 2011 14:36:29 -0800
changeset:   8330:5f6046a69677
parent:      8123:2d683e4bb197 (current diff)
parent:      8329:96eacc5e391f (diff)
child:       8331:dfa72047c093
description: Merge

files:
hotspot/src/os/solaris/vm/os_solaris.cpp
hotspot/src/share/vm/compiler/compileBroker.cpp
hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/java.cpp
hotspot/src/share/vm/runtime/os.hpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/StubRoutines.java	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/StubRoutines.java	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,7 @@
 /** Very minimal port for now to get frames working */
 
 public class StubRoutines {
-  private static AddressField      callStubReturnAddressField;
-  private static AddressField      callStubCompiledReturnAddressField;
+  private static AddressField callStubReturnAddressField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -44,20 +43,7 @@
 
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("StubRoutines");
-
     callStubReturnAddressField = type.getAddressField("_call_stub_return_address");
-    // Only some platforms have specific return from compiled to call_stub
-    try {
-      type = db.lookupType("StubRoutines::x86");
-      if (type != null) {
-        callStubCompiledReturnAddressField = type.getAddressField("_call_stub_compiled_return");
-      }
-    } catch (RuntimeException re) {
-      callStubCompiledReturnAddressField = null;
-    }
-    if (callStubCompiledReturnAddressField == null && VM.getVM().getCPU().equals("x86")) {
-      throw new InternalError("Missing definition for _call_stub_compiled_return");
-    }
   }
 
   public StubRoutines() {
@@ -65,20 +51,10 @@
 
   public boolean returnsToCallStub(Address returnPC) {
     Address addr = callStubReturnAddressField.getValue();
-    boolean result = false;
-    if (addr == null) {
-      result = (addr == returnPC);
-    } else {
-      result = addr.equals(returnPC);
-    }
-    if (result || callStubCompiledReturnAddressField == null ) return result;
-    // Could be a return to compiled code return point
-    addr = callStubCompiledReturnAddressField.getValue();
     if (addr == null) {
       return (addr == returnPC);
     } else {
       return (addr.equals(returnPC));
     }
-
   }
 }
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -2407,14 +2407,23 @@
 #endif
 
 
-void MacroAssembler::load_sized_value(Address src, Register dst,
-                                      size_t size_in_bytes, bool is_signed) {
+void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
   switch (size_in_bytes) {
-  case  8: ldx(src, dst); break;
-  case  4: ld( src, dst); break;
-  case  2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
-  case  1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
-  default: ShouldNotReachHere();
+  case  8:  ld_long(src, dst); break;
+  case  4:  ld(     src, dst); break;
+  case  2:  is_signed ? ldsh(src, dst) : lduh(src, dst); break;
+  case  1:  is_signed ? ldsb(src, dst) : ldub(src, dst); break;
+  default:  ShouldNotReachHere();
+  }
+}
+
+void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
+  switch (size_in_bytes) {
+  case  8:  st_long(src, dst); break;
+  case  4:  st(     src, dst); break;
+  case  2:  sth(    src, dst); break;
+  case  1:  stb(    src, dst); break;
+  default:  ShouldNotReachHere();
   }
 }
 
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -2330,8 +2330,9 @@
   void lcmp( Register Ra, Register Rb, Register Rresult);
 #endif
 
-  // Loading values by size and signed-ness
-  void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
+  // Load and store values by size and signed-ness
+  void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
+  void store_sized_value(Register src, Address dst, size_t size_in_bytes);
 
   void float_cmp( bool is_float, int unordered_result,
                   FloatRegister Fa, FloatRegister Fb,
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -596,16 +596,9 @@
         __ st_ptr(O1_scratch, Address(O0_argslot, 0));
       } else {
         Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
-        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
-        if (arg_slots == 2) {
-          __ unimplemented("not yet tested");
-#ifndef _LP64
-          __ signx(O2_scratch, O3_scratch);  // Sign extend
-#endif
-          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
-        } else {
-          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
-        }
+        const int arg_size = type2aelembytes(arg_type);
+        __ load_sized_value(prim_value_addr, O2_scratch, arg_size, is_signed_subword_type(arg_type));
+        __ store_sized_value(O2_scratch, Address(O0_argslot, 0), arg_size);  // long store uses O2/O3 on !_LP64
       }
 
       if (direct_to_method) {
@@ -784,11 +777,9 @@
       switch (ek) {
       case _adapter_opt_i2l:
         {
-          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
-#ifndef _LP64
-          __ signx(O2_scratch, O3_scratch);  // Sign extend
-#endif
-          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
+          __ ldsw(arg_lsw, O2_scratch);                           // Load LSW
+          NOT_LP64(__ srlx(O2_scratch, BitsPerInt, O3_scratch));  // Move high bits to lower bits for std
+          __ st_long(O2_scratch, arg_msw);                        // Uses O2/O3 on !_LP64
         }
         break;
       case _adapter_opt_unboxl:
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Mon Feb 14 14:36:29 2011 -0800
@@ -8125,6 +8125,17 @@
 %}
 #endif
 
+instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
+  match(Set dst (CmpLTMask src zero));
+  effect(KILL ccr);
+  size(4);
+  format %{ "SRA    $src,#31,$dst\t# cmpLTMask0" %}
+  ins_encode %{
+    __ sra($src$$Register, 31, $dst$$Register);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
   match(Set dst (CmpLTMask p q));
   effect( KILL ccr );
@@ -8144,19 +8155,7 @@
 
   format %{ "SUBcc  $p,$q,$p\t! p' = p-q\n\t"
             "ADD    $p,$y,$tmp\t! g3=p-q+y\n\t"
-            "MOVl   $tmp,$p\t! p' < 0 ? p'+y : p'" %}
-  ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) );
-  ins_pipe( cadd_cmpltmask );
-%}
-
-instruct cadd_cmpLTMask2( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
-  match(Set p (AddI (SubI p q) (AndI (CmpLTMask p q) y)));
-  effect( KILL ccr, TEMP tmp);
-  ins_cost(DEFAULT_COST*3);
-
-  format %{ "SUBcc  $p,$q,$p\t! p' = p-q\n\t"
-            "ADD    $p,$y,$tmp\t! g3=p-q+y\n\t"
-            "MOVl   $tmp,$p\t! p' < 0 ? p'+y : p'" %}
+            "MOVlt  $tmp,$p\t! p' < 0 ? p'+y : p'" %}
   ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) );
   ins_pipe( cadd_cmpltmask );
 %}
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -6528,20 +6528,39 @@
   return off;
 }
 
-void MacroAssembler::load_sized_value(Register dst, Address src,
-                                      size_t size_in_bytes, bool is_signed) {
+void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
   switch (size_in_bytes) {
 #ifndef _LP64
-  // For case 8, caller is responsible for manually loading
-  // the second word into another register.
-  case  8: movl(dst, src); break;
+  case  8:
+    assert(dst2 != noreg, "second dest register required");
+    movl(dst,  src);
+    movl(dst2, src.plus_disp(BytesPerInt));
+    break;
 #else
-  case  8: movq(dst, src); break;
+  case  8:  movq(dst, src); break;
 #endif
-  case  4: movl(dst, src); break;
-  case  2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
-  case  1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
-  default: ShouldNotReachHere();
+  case  4:  movl(dst, src); break;
+  case  2:  is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
+  case  1:  is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
+  default:  ShouldNotReachHere();
+  }
+}
+
+void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
+  switch (size_in_bytes) {
+#ifndef _LP64
+  case  8:
+    assert(src2 != noreg, "second source register required");
+    movl(dst,                        src);
+    movl(dst.plus_disp(BytesPerInt), src2);
+    break;
+#else
+  case  8:  movq(dst, src); break;
+#endif
+  case  4:  movl(dst, src); break;
+  case  2:  movw(dst, src); break;
+  case  1:  movb(dst, src); break;
+  default:  ShouldNotReachHere();
   }
 }
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1522,8 +1522,9 @@
   // Support for sign-extension (hi:lo = extend_sign(lo))
   void extend_sign(Register hi, Register lo);
 
-  // Loading values by size and signed-ness
-  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
+  // Load and store values by size and signed-ness
+  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
+  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
 
   // Support for inc/dec with optimal instruction selection depending on value
 
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1899,8 +1899,6 @@
   Label do_double;
   Label done_conv;
 
-  address compiled_entry = __ pc();
-
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
   if (UseSSE < 2) {
     __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
@@ -1934,15 +1932,7 @@
     __ jmp(done_conv);
   }
 
-#if 0
-  // emit a sentinel we can test for when converting an interpreter
-  // entry point to a compiled entry point.
-  __ a_long(Interpreter::return_sentinel);
-  __ a_long((int)compiled_entry);
-#endif
-
   // Return point to interpreter from compiled/native method
-
   InternalAddress return_from_native_method(__ pc());
 
   __ bind(done_conv);
--- a/hotspot/src/cpu/x86/vm/interpreter_x86.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,14 +26,6 @@
 #define CPU_X86_VM_INTERPRETER_X86_HPP
 
  public:
-
-  // Sentinel placed in the code for interpreter returns so
-  // that i2c adapters and osr code can recognize an interpreter
-  // return address and convert the return to a specialized
-  // block of code to handle compiedl return values and cleaning
-  // the fpu stack.
-  static const int return_sentinel;
-
   static Address::ScaleFactor stackElementScale() { return Address::times_4; }
 
   // Offset from rsp (which points to the last stack element)
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,9 +51,6 @@
 
 #define __ _masm->
 
-// Initialize the sentinel used to distinguish an interpreter return address.
-const int Interpreter::return_sentinel = 0xfeedbeed;
-
 //------------------------------------------------------------------------------------------------------------------------
 
 address AbstractInterpreterGenerator::generate_slow_signature_handler() {
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -602,24 +602,18 @@
       // make room for the new argument:
       __ movl(rax_argslot, rcx_bmh_vmargslot);
       __ lea(rax_argslot, __ argument_address(rax_argslot));
-      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
-                       rax_argslot, rbx_temp, rdx_temp);
+
+      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp);
 
       // store bound argument into the new stack slot:
       __ load_heap_oop(rbx_temp, rcx_bmh_argument);
-      Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
       if (arg_type == T_OBJECT) {
         __ movptr(Address(rax_argslot, 0), rbx_temp);
       } else {
-        __ load_sized_value(rdx_temp, prim_value_addr,
-                            type2aelembytes(arg_type), is_signed_subword_type(arg_type));
-        __ movptr(Address(rax_argslot, 0), rdx_temp);
-#ifndef _LP64
-        if (arg_slots == 2) {
-          __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
-          __ movl(Address(rax_argslot, Interpreter::stackElementSize), rdx_temp);
-        }
-#endif //_LP64
+        Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
+        const int arg_size = type2aelembytes(arg_type);
+        __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp);
+        __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp);
       }
 
       if (direct_to_method) {
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -660,25 +660,6 @@
                             int comp_args_on_stack,
                             const BasicType *sig_bt,
                             const VMRegPair *regs) {
-  // we're being called from the interpreter but need to find the
-  // compiled return entry point.  The return address on the stack
-  // should point at it and we just need to pull the old value out.
-  // load up the pointer to the compiled return entry point and
-  // rewrite our return pc. The code is arranged like so:
-  //
-  // .word Interpreter::return_sentinel
-  // .word address_of_compiled_return_point
-  // return_entry_point: blah_blah_blah
-  //
-  // So we can find the appropriate return point by loading up the word
-  // just prior to the current return address we have on the stack.
-  //
-  // We will only enter here from an interpreted frame and never from after
-  // passing thru a c2i. Azul allowed this but we do not. If we lose the
-  // race and use a c2i we will remain interpreted for the race loser(s).
-  // This removes all sorts of headaches on the x86 side and also eliminates
-  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
-
 
   // Note: rsi contains the senderSP on entry. We must preserve it since
   // we may do a i2c -> c2i transition if we lose a race where compiled
@@ -687,40 +668,6 @@
   // Pick up the return address
   __ movptr(rax, Address(rsp, 0));
 
-  // If UseSSE >= 2 then no cleanup is needed on the return to the
-  // interpreter so skip fixing up the return entry point unless
-  // VerifyFPU is enabled.
-  if (UseSSE < 2 || VerifyFPU) {
-    Label skip, chk_int;
-    // If we were called from the call stub we need to do a little bit different
-    // cleanup than if the interpreter returned to the call stub.
-
-    ExternalAddress stub_return_address(StubRoutines::_call_stub_return_address);
-    __ cmpptr(rax, stub_return_address.addr());
-    __ jcc(Assembler::notEqual, chk_int);
-    assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
-    __ lea(rax, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
-    __ jmp(skip);
-
-    // It must be the interpreter since we never get here via a c2i (unlike Azul)
-
-    __ bind(chk_int);
-#ifdef ASSERT
-    {
-      Label ok;
-      __ cmpl(Address(rax, -2*wordSize), Interpreter::return_sentinel);
-      __ jcc(Assembler::equal, ok);
-      __ int3();
-      __ bind(ok);
-    }
-#endif // ASSERT
-    __ movptr(rax, Address(rax, -wordSize));
-    __ bind(skip);
-  }
-
-  // rax, now contains the compiled return entry point which will do an
-  // cleanup needed for the return from compiled to interpreted.
-
   // Must preserve original SP for loading incoming arguments because
   // we need to align the outgoing SP for compiled code.
   __ movptr(rdi, rsp);
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -610,14 +610,6 @@
                             const BasicType *sig_bt,
                             const VMRegPair *regs) {
 
-  //
-  // We will only enter here from an interpreted frame and never from after
-  // passing thru a c2i. Azul allowed this but we do not. If we lose the
-  // race and use a c2i we will remain interpreted for the race loser(s).
-  // This removes all sorts of headaches on the x86 side and also eliminates
-  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
-
-
   // Note: r13 contains the senderSP on entry. We must preserve it since
   // we may do a i2c -> c2i transition if we lose a race where compiled
   // code goes non-entrant while we get args ready.
@@ -627,6 +619,7 @@
   // save code can segv when fxsave instructions find improperly
   // aligned stack pointer.
 
+  // Pick up the return address
   __ movptr(rax, Address(rsp, 0));
 
   // Must preserve original SP for loading incoming arguments because
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -240,9 +240,30 @@
     BLOCK_COMMENT("call_stub_return_address:");
     return_address = __ pc();
 
-    Label common_return;
+#ifdef COMPILER2
+    {
+      Label L_skip;
+      if (UseSSE >= 2) {
+        __ verify_FPU(0, "call_stub_return");
+      } else {
+        for (int i = 1; i < 8; i++) {
+          __ ffree(i);
+        }
 
-    __ BIND(common_return);
+        // UseSSE <= 1 so double result should be left on TOS
+        __ movl(rsi, result_type);
+        __ cmpl(rsi, T_DOUBLE);
+        __ jcc(Assembler::equal, L_skip);
+        if (UseSSE == 0) {
+          // UseSSE == 0 so float result should be left on TOS
+          __ cmpl(rsi, T_FLOAT);
+          __ jcc(Assembler::equal, L_skip);
+        }
+        __ ffree(0);
+      }
+      __ BIND(L_skip);
+    }
+#endif // COMPILER2
 
     // store result depending on type
     // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
@@ -305,37 +326,6 @@
     }
     __ jmp(exit);
 
-    // If we call compiled code directly from the call stub we will
-    // need to adjust the return back to the call stub to a specialized
-    // piece of code that can handle compiled results and cleaning the fpu
-    // stack. compiled code will be set to return here instead of the
-    // return above that handles interpreter returns.
-
-    BLOCK_COMMENT("call_stub_compiled_return:");
-    StubRoutines::x86::set_call_stub_compiled_return( __ pc());
-
-#ifdef COMPILER2
-    if (UseSSE >= 2) {
-      __ verify_FPU(0, "call_stub_compiled_return");
-    } else {
-      for (int i = 1; i < 8; i++) {
-        __ ffree(i);
-      }
-
-      // UseSSE <= 1 so double result should be left on TOS
-      __ movl(rsi, result_type);
-      __ cmpl(rsi, T_DOUBLE);
-      __ jcc(Assembler::equal, common_return);
-      if (UseSSE == 0) {
-        // UseSSE == 0 so float result should be left on TOS
-        __ cmpl(rsi, T_FLOAT);
-        __ jcc(Assembler::equal, common_return);
-      }
-      __ ffree(0);
-    }
-#endif /* COMPILER2 */
-    __ jmp(common_return);
-
     return start;
   }
 
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,5 @@
 // Implementation of the platform-specific part of StubRoutines - for
 // a description of how to extend it, see the stubRoutines.hpp file.
 
-address StubRoutines::x86::_verify_mxcsr_entry        = NULL;
-address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry= NULL;
-address StubRoutines::x86::_call_stub_compiled_return = NULL;
+address StubRoutines::x86::_verify_mxcsr_entry         = NULL;
+address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL;
--- a/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,24 +44,14 @@
  friend class VMStructs;
 
  private:
-  // If we call compiled code directly from the call stub we will
-  // need to adjust the return back to the call stub to a specialized
-  // piece of code that can handle compiled results and cleaning the fpu
-  // stack. The variable holds that location.
-  static address _call_stub_compiled_return;
   static address _verify_mxcsr_entry;
   static address _verify_fpu_cntrl_wrd_entry;
-  static jint    _mxcsr_std;
 
  public:
   static address verify_mxcsr_entry()                        { return _verify_mxcsr_entry; }
   static address verify_fpu_cntrl_wrd_entry()                { return _verify_fpu_cntrl_wrd_entry; }
-
-  static address get_call_stub_compiled_return()             { return _call_stub_compiled_return; }
-  static void set_call_stub_compiled_return(address ret)     { _call_stub_compiled_return = ret; }
 };
 
-  static bool    returns_to_call_stub(address return_pc)     { return (return_pc == _call_stub_return_address) ||
-                                                                       return_pc == x86::get_call_stub_compiled_return(); }
+  static bool    returns_to_call_stub(address return_pc)     { return return_pc == _call_stub_return_address; }
 
 #endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -177,9 +177,7 @@
 
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   TosState incoming_state = state;
-
-  Label interpreter_entry;
-  address compiled_entry = __ pc();
+  address entry = __ pc();
 
 #ifdef COMPILER2
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
@@ -197,14 +195,6 @@
     __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
   }
 
-  __ jmp(interpreter_entry, relocInfo::none);
-  // emit a sentinel we can test for when converting an interpreter
-  // entry point to a compiled entry point.
-  __ a_long(Interpreter::return_sentinel);
-  __ a_long((int)compiled_entry);
-  address entry = __ pc();
-  __ bind(interpreter_entry);
-
   // In SSE mode, interpreter returns FP results in xmm0 but they need
   // to end up back on the FPU so it can operate on them.
   if (incoming_state == ftos && UseSSE >= 1) {
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -190,13 +190,7 @@
 }
 
 
-address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
-                                                                int step) {
-
-  // amd64 doesn't need to do anything special about compiled returns
-  // to the interpreter so the code that exists on x86 to place a sentinel
-  // here and the specialized cleanup code is not needed here.
-
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
   address entry = __ pc();
 
   // Restore stack bottom in case i2c adjusted stack
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1710,39 +1710,6 @@
       __ pop(rdi);                               // get return address
       __ mov(rsp, rdx);                          // set sp to sender sp
 
-
-      Label skip;
-      Label chkint;
-
-      // The interpreter frame we have removed may be returning to
-      // either the callstub or the interpreter. Since we will
-      // now be returning from a compiled (OSR) nmethod we must
-      // adjust the return to the return were it can handler compiled
-      // results and clean the fpu stack. This is very similar to
-      // what a i2c adapter must do.
-
-      // Are we returning to the call stub?
-
-      __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
-      __ jcc(Assembler::notEqual, chkint);
-
-      // yes adjust to the specialized call stub  return.
-      assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
-      __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
-      __ jmp(skip);
-
-      __ bind(chkint);
-
-      // Are we returning to the interpreter? Look for sentinel
-
-      __ cmpl(Address(rdi, -2*wordSize), Interpreter::return_sentinel);
-      __ jcc(Assembler::notEqual, skip);
-
-      // Adjust to compiled return back to interpreter
-
-      __ movptr(rdi, Address(rdi, -wordSize));
-      __ bind(skip);
-
       // Align stack pointer for compiled code (note that caller is
       // responsible for undoing this fixup by remembering the old SP
       // in an rbp,-relative location)
--- a/hotspot/src/cpu/zero/vm/stubRoutines_zero.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/zero/vm/stubRoutines_zero.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,7 +30,3 @@
 #ifdef TARGET_OS_FAMILY_linux
 # include "thread_linux.inline.hpp"
 #endif
-
-#ifdef IA32
-address StubRoutines::x86::_call_stub_compiled_return = NULL;
-#endif // IA32
--- a/hotspot/src/cpu/zero/vm/stubRoutines_zero.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/cpu/zero/vm/stubRoutines_zero.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -48,13 +48,4 @@
     method_handles_adapters_code_size = 0
   };
 
-#ifdef IA32
-  class x86 {
-    friend class VMStructs;
-
-   private:
-    static address _call_stub_compiled_return;
-  };
-#endif // IA32
-
 #endif // CPU_ZERO_VM_STUBROUTINES_ZERO_HPP
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1975,7 +1975,11 @@
       #ifndef RTLD_DL_SYMENT
       #define RTLD_DL_SYMENT 1
       #endif
-      Sym * info;
+#ifdef _LP64
+      Elf64_Sym * info;
+#else
+      Elf32_Sym * info;
+#endif
       if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
                        RTLD_DL_SYMENT)) {
         if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
@@ -6424,4 +6428,3 @@
    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
      os::Solaris::clear_interrupted);
 }
-
--- a/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008 Red Hat, Inc.
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -300,4 +300,18 @@
                               (intptr_t) compare_value);
 }
 
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  os::atomic_copy64(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, dest);
+}
+
 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -67,7 +67,6 @@
 inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
 #ifdef COMPILER2
   // Compiler2 does not support v8, it is used only for v9.
-  assert (VM_Version::v9_instructions_work(), "only supported on v9");
   _Atomic_move_long_v9(src, dst);
 #else
   // The branch is cheaper then emulated LDD.
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,14 +189,22 @@
               tty->print_cr("cpu_info.implementation: %s", implementation);
             }
 #endif
-            if (strncmp(implementation, "SPARC64", 7) == 0) {
+            // Convert to UPPER case before compare.
+            char* impl = strdup(implementation);
+
+            for (int i = 0; impl[i] != 0; i++)
+              impl[i] = (char)toupper((uint)impl[i]);
+            if (strstr(impl, "SPARC64") != NULL) {
               features |= sparc64_family_m;
-            } else if (strncmp(implementation, "UltraSPARC-T", 12) == 0) {
+            } else if (strstr(impl, "SPARC-T") != NULL) {
               features |= T_family_m;
-              if (strncmp(implementation, "UltraSPARC-T1", 13) == 0) {
+              if (strstr(impl, "SPARC-T1") != NULL) {
                 features |= T1_model_m;
               }
+            } else {
+              assert(strstr(impl, "SPARC") != NULL, "should be sparc");
             }
+            free((void*)impl);
             break;
           }
         } // for(
--- a/hotspot/src/share/vm/adlc/output_c.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/adlc/output_c.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1698,7 +1698,75 @@
     fprintf(fp,"\n");
   } // done generating expand rule
 
-  else if( node->_matrule != NULL ) {
+  // Generate projections for instruction's additional DEFs and KILLs
+  if( ! node->expands() && (node->needs_projections() || node->has_temps())) {
+    // Get string representing the MachNode that projections point at
+    const char *machNode = "this";
+    // Generate the projections
+    fprintf(fp,"  // Add projection edges for additional defs or kills\n");
+
+    // Examine each component to see if it is a DEF or KILL
+    node->_components.reset();
+    // Skip the first component, if already handled as (SET dst (...))
+    Component *comp = NULL;
+    // For kills, the choice of projection numbers is arbitrary
+    int proj_no = 1;
+    bool declared_def  = false;
+    bool declared_kill = false;
+
+    while( (comp = node->_components.iter()) != NULL ) {
+      // Lookup register class associated with operand type
+      Form        *form = (Form*)_globalNames[comp->_type];
+      assert( form, "component type must be a defined form");
+      OperandForm *op   = form->is_operand();
+
+      if (comp->is(Component::TEMP)) {
+        fprintf(fp, "  // TEMP %s\n", comp->_name);
+        if (!declared_def) {
+          // Define the variable "def" to hold new MachProjNodes
+          fprintf(fp, "  MachTempNode *def;\n");
+          declared_def = true;
+        }
+        if (op && op->_interface && op->_interface->is_RegInterface()) {
+          fprintf(fp,"  def = new (C) MachTempNode(state->MachOperGenerator( %s, C ));\n",
+                  machOperEnum(op->_ident));
+          fprintf(fp,"  add_req(def);\n");
+          // The operand for TEMP is already constructed during
+          // this mach node construction, see buildMachNode().
+          //
+          // int idx  = node->operand_position_format(comp->_name);
+          // fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
+          //         idx, machOperEnum(op->_ident));
+        } else {
+          assert(false, "can't have temps which aren't registers");
+        }
+      } else if (comp->isa(Component::KILL)) {
+        fprintf(fp, "  // DEF/KILL %s\n", comp->_name);
+
+        if (!declared_kill) {
+          // Define the variable "kill" to hold new MachProjNodes
+          fprintf(fp, "  MachProjNode *kill;\n");
+          declared_kill = true;
+        }
+
+        assert( op, "Support additional KILLS for base operands");
+        const char *regmask    = reg_mask(*op);
+        const char *ideal_type = op->ideal_type(_globalNames, _register);
+
+        if (!op->is_bound_register()) {
+          syntax_err(node->_linenum, "In %s only bound registers can be killed: %s %s\n",
+                     node->_ident, comp->_type, comp->_name);
+        }
+
+        fprintf(fp,"  kill = ");
+        fprintf(fp,"new (C, 1) MachProjNode( %s, %d, (%s), Op_%s );\n",
+                machNode, proj_no++, regmask, ideal_type);
+        fprintf(fp,"  proj_list.push(kill);\n");
+      }
+    }
+  }
+
+  if( !node->expands() && node->_matrule != NULL ) {
     // Remove duplicated operands and inputs which use the same name.
     // Seach through match operands for the same name usage.
     uint cur_num_opnds = node->num_opnds();
@@ -1752,72 +1820,6 @@
     }
   }
 
-
-  // Generate projections for instruction's additional DEFs and KILLs
-  if( ! node->expands() && (node->needs_projections() || node->has_temps())) {
-    // Get string representing the MachNode that projections point at
-    const char *machNode = "this";
-    // Generate the projections
-    fprintf(fp,"  // Add projection edges for additional defs or kills\n");
-
-    // Examine each component to see if it is a DEF or KILL
-    node->_components.reset();
-    // Skip the first component, if already handled as (SET dst (...))
-    Component *comp = NULL;
-    // For kills, the choice of projection numbers is arbitrary
-    int proj_no = 1;
-    bool declared_def  = false;
-    bool declared_kill = false;
-
-    while( (comp = node->_components.iter()) != NULL ) {
-      // Lookup register class associated with operand type
-      Form        *form = (Form*)_globalNames[comp->_type];
-      assert( form, "component type must be a defined form");
-      OperandForm *op   = form->is_operand();
-
-      if (comp->is(Component::TEMP)) {
-        fprintf(fp, "  // TEMP %s\n", comp->_name);
-        if (!declared_def) {
-          // Define the variable "def" to hold new MachProjNodes
-          fprintf(fp, "  MachTempNode *def;\n");
-          declared_def = true;
-        }
-        if (op && op->_interface && op->_interface->is_RegInterface()) {
-          fprintf(fp,"  def = new (C) MachTempNode(state->MachOperGenerator( %s, C ));\n",
-                  machOperEnum(op->_ident));
-          fprintf(fp,"  add_req(def);\n");
-          int idx  = node->operand_position_format(comp->_name);
-          fprintf(fp,"  set_opnd_array(%d, state->MachOperGenerator( %s, C ));\n",
-                  idx, machOperEnum(op->_ident));
-        } else {
-          assert(false, "can't have temps which aren't registers");
-        }
-      } else if (comp->isa(Component::KILL)) {
-        fprintf(fp, "  // DEF/KILL %s\n", comp->_name);
-
-        if (!declared_kill) {
-          // Define the variable "kill" to hold new MachProjNodes
-          fprintf(fp, "  MachProjNode *kill;\n");
-          declared_kill = true;
-        }
-
-        assert( op, "Support additional KILLS for base operands");
-        const char *regmask    = reg_mask(*op);
-        const char *ideal_type = op->ideal_type(_globalNames, _register);
-
-        if (!op->is_bound_register()) {
-          syntax_err(node->_linenum, "In %s only bound registers can be killed: %s %s\n",
-                     node->_ident, comp->_type, comp->_name);
-        }
-
-        fprintf(fp,"  kill = ");
-        fprintf(fp,"new (C, 1) MachProjNode( %s, %d, (%s), Op_%s );\n",
-                machNode, proj_no++, regmask, ideal_type);
-        fprintf(fp,"  proj_list.push(kill);\n");
-      }
-    }
-  }
-
   // If the node is a MachConstantNode, insert the MachConstantBaseNode edge.
   // NOTE: this edge must be the last input (see MachConstantNode::mach_constant_base_node_input).
   if (node->is_mach_constant()) {
@@ -3776,12 +3778,10 @@
       }
       dont_care = true;
       // For each operand not in the match rule, call MachOperGenerator
-      // with the enum for the opcode that needs to be built
-      // and the node just built, the parent of the operand.
+      // with the enum for the opcode that needs to be built.
       ComponentList clist = inst->_components;
       int         index  = clist.operand_position(comp->_name, comp->_usedef);
       const char *opcode = machOperEnum(comp->_type);
-      const char *parent = "node";
       fprintf(fp_cpp, "%s node->set_opnd_array(%d, ", indent, index);
       fprintf(fp_cpp, "MachOperGenerator(%s, C));\n", opcode);
       }
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -491,10 +491,11 @@
       // to start profiling on its own.
       _method->ensure_method_data();
     }
-  } else if (is_profiling() && _would_profile) {
+  } else if (is_profiling()) {
     ciMethodData *md = method->method_data_or_null();
-    assert(md != NULL, "Sanity");
-    md->set_would_profile(_would_profile);
+    if (md != NULL) {
+      md->set_would_profile(_would_profile);
+    }
   }
 }
 
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -412,13 +412,16 @@
     fail_type = _unloaded_ciinstance_klass;
   }
   KlassHandle found_klass;
-  if (!require_local) {
-    klassOop kls = SystemDictionary::find_constrained_instance_or_array_klass(
-        sym, loader, KILL_COMPILE_ON_FATAL_(fail_type));
-    found_klass = KlassHandle(THREAD, kls);
-  } else {
-    klassOop kls = SystemDictionary::find_instance_or_array_klass(
-        sym, loader, domain, KILL_COMPILE_ON_FATAL_(fail_type));
+  {
+    MutexLocker ml(Compile_lock);
+    klassOop kls;
+    if (!require_local) {
+      kls = SystemDictionary::find_constrained_instance_or_array_klass(sym, loader,
+                                                                       KILL_COMPILE_ON_FATAL_(fail_type));
+    } else {
+      kls = SystemDictionary::find_instance_or_array_klass(sym, loader, domain,
+                                                           KILL_COMPILE_ON_FATAL_(fail_type));
+    }
     found_klass = KlassHandle(THREAD, kls);
   }
 
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -46,6 +46,7 @@
   ciKlass(h_k), _non_static_fields(NULL)
 {
   assert(get_Klass()->oop_is_instance(), "wrong type");
+  assert(get_instanceKlass()->is_loaded(), "must be at least loaded");
   instanceKlass* ik = get_instanceKlass();
 
   AccessFlags access_flags = ik->access_flags();
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1290,6 +1290,15 @@
 int ClassLoader::_compile_the_world_counter = 0;
 static int _codecache_sweep_counter = 0;
 
+// Filter out all exceptions except OOMs
+static void clear_pending_exception_if_not_oom(TRAPS) {
+  if (HAS_PENDING_EXCEPTION &&
+      !PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+  // The CHECK at the caller will propagate the exception out
+}
+
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   int len = (int)strlen(name);
   if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@@ -1312,12 +1321,12 @@
         k->initialize(THREAD);
       }
       bool exception_occurred = HAS_PENDING_EXCEPTION;
-      CLEAR_PENDING_EXCEPTION;
+      clear_pending_exception_if_not_oom(CHECK);
       if (CompileTheWorldPreloadClasses && k.not_null()) {
         constantPoolKlass::preload_and_initialize_all_classes(k->constants(), THREAD);
         if (HAS_PENDING_EXCEPTION) {
           // If something went wrong in preloading we just ignore it
-          CLEAR_PENDING_EXCEPTION;
+          clear_pending_exception_if_not_oom(CHECK);
           tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer);
         }
       }
@@ -1344,7 +1353,7 @@
               CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_initial_compile,
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
-                CLEAR_PENDING_EXCEPTION;
+                clear_pending_exception_if_not_oom(CHECK);
                 tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
               }
               if (TieredCompilation) {
@@ -1358,7 +1367,7 @@
                 CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
                                               methodHandle(), 0, "CTW", THREAD);
                 if (HAS_PENDING_EXCEPTION) {
-                  CLEAR_PENDING_EXCEPTION;
+                  clear_pending_exception_if_not_oom(CHECK);
                   tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
                 }
               }
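Note: clear_pending_exception_if_not_oom above keeps an OutOfMemoryError pending and clears everything else; because the call sites pass CHECK, a surviving OOM propagates out of CompileTheWorld instead of being silently swallowed. HotSpot uses its own pending-exception mechanism (TRAPS/CHECK), not C++ exceptions, but the intended control flow resembles this standalone sketch.

    #include <exception>
    #include <new>

    // Analogy only (HotSpot does not use C++ exceptions): run one step of a
    // bulk job, drop ordinary failures, but let out-of-memory escape.
    template <typename Step>
    void run_step_ignoring_non_oom(Step step) {
      try {
        step();
      } catch (const std::bad_alloc&) {
        throw;                     // like leaving the OOM pending for CHECK to propagate
      } catch (const std::exception&) {
        // like CLEAR_PENDING_EXCEPTION: swallow the error and keep going
      }
    }
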
--- a/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/classfile/loaderConstraints.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -322,8 +322,14 @@
 klassOop LoaderConstraintTable::find_constrained_klass(Symbol* name,
                                                        Handle loader) {
   LoaderConstraintEntry *p = *(find_loader_constraint(name, loader));
-  if (p != NULL && p->klass() != NULL)
+  if (p != NULL && p->klass() != NULL) {
+    if (Klass::cast(p->klass())->oop_is_instance() && !instanceKlass::cast(p->klass())->is_loaded()) {
+      // Only return fully loaded classes.  Classes found through the
+      // constraints might still be in the process of loading.
+      return NULL;
+    }
     return p->klass();
+  }
 
   // No constraints, or else no klass loaded yet.
   return NULL;
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1690,6 +1690,8 @@
 
 void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
   assert(k.not_null(), "just checking");
+  assert_locked_or_safepoint(Compile_lock);
+
   // Link into hierachy. Make sure the vtables are initialized before linking into
   k->append_to_sibling_list();                    // add to superklass/sibling list
   k->process_interfaces(THREAD);                  // handle all "implements" declarations
@@ -2152,6 +2154,9 @@
 }
 
 
+// Try to find a class name using the loader constraints.  The
+// loader constraints might know about a class that isn't fully loaded
+// yet and these will be ignored.
 klassOop SystemDictionary::find_constrained_instance_or_array_klass(
                     Symbol* class_name, Handle class_loader, TRAPS) {
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -768,7 +768,9 @@
 // Initialize the compilation queue
 void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
+#ifndef ZERO
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
+#endif // !ZERO
   if (c2_compiler_count > 0) {
     _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
   }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,9 +112,12 @@
   yg_cur_size = MAX2(yg_cur_size, yg_min_size);
 
   og_min_size = align_size_up(og_min_size, og_align);
-  og_max_size = align_size_up(og_max_size, og_align);
+  // Align old gen size down to preserve specified heap size.
+  assert(og_align == yg_align, "sanity");
+  og_max_size = align_size_down(og_max_size, og_align);
+  og_max_size = MAX2(og_max_size, og_min_size);
   size_t og_cur_size =
-    align_size_up(_collector_policy->old_gen_size(), og_align);
+    align_size_down(_collector_policy->old_gen_size(), og_align);
   og_cur_size = MAX2(og_cur_size, og_min_size);
 
   pg_min_size = align_size_up(pg_min_size, pg_align);
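Note: the old-generation sizes above are now rounded down rather than up so that young gen plus old gen does not exceed the heap size the user requested; the added assert checks that young and old gen use the same alignment, so rounding down stays consistent. For a power-of-two alignment such as og_align, the align_size_down / align_size_up helpers behave roughly like this sketch.

    #include <assert.h>
    #include <stddef.h>

    // Round down / up to a power-of-two alignment (sketch of the align_size_down /
    // align_size_up calls used above).
    static inline size_t align_down(size_t x, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return x & ~(alignment - 1);
    }

    static inline size_t align_up(size_t x, size_t alignment) {
      return align_down(x + alignment - 1, alignment);
    }
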
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1942,7 +1942,7 @@
         constantPoolOop constants = istate->method()->constants();
         if (!constants->tag_at(index).is_unresolved_klass()) {
           // Make sure klass is initialized and doesn't have a finalizer
-          oop entry = (klassOop) *constants->obj_at_addr(index);
+          oop entry = constants->slot_at(index).get_oop();
           assert(entry->is_klass(), "Should be resolved klass");
           klassOop k_entry = (klassOop) entry;
           assert(k_entry->klass_part()->oop_is_instance(), "Should be instanceKlass");
@@ -2032,7 +2032,7 @@
             if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
               CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
             }
-            klassOop klassOf = (klassOop) *(METHOD->constants()->obj_at_addr(index));
+            klassOop klassOf = (klassOop) METHOD->constants()->slot_at(index).get_oop();
             klassOop objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
             //
             // Check for compatibilty. This check must not GC!!
@@ -2067,7 +2067,7 @@
             if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
               CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
             }
-            klassOop klassOf = (klassOop) *(METHOD->constants()->obj_at_addr(index));
+            klassOop klassOf = (klassOop) METHOD->constants()->slot_at(index).get_oop();
             klassOop objKlassOop = STACK_OBJECT(-1)->klass();
             //
             // Check for compatibilty. This check must not GC!!
--- a/hotspot/src/share/vm/libadt/vectset.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/libadt/vectset.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -350,28 +350,11 @@
   return (int)_xor;
 }
 
-//------------------------------iterate----------------------------------------
-SetI_ *VectorSet::iterate(uint &elem) const
-{
-  VSetI_ *foo = (new(ResourceObj::C_HEAP) VSetI_(this));
-  elem = foo->next();
-  return foo;
-}
-
 //=============================================================================
-//------------------------------VSetI_-----------------------------------------
-// Initialize the innards of a VectorSet iterator
-VSetI_::VSetI_( const VectorSet *vset ) : s(vset)
-{
-  i = (uint)-1L;
-  j = (uint)-1L;
-  mask = (unsigned)(1L<<31);
-}
-
 //------------------------------next-------------------------------------------
 // Find and return the next element of a vector set, or return garbage and
-// make "VSetI_::test()" fail.
-uint VSetI_::next(void)
+// make "VectorSetI::test()" fail.
+uint VectorSetI::next(void)
 {
   j++;                          // Next element in word
   mask = (mask & max_jint) << 1;// Next bit in word
--- a/hotspot/src/share/vm/libadt/vectset.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/libadt/vectset.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,6 +98,9 @@
   uint Size(void) const;            // Number of elements in the Set.
   void Sort(void);                  // Sort before iterating
   int hash() const;                 // Hash function
+  void Reset(void) {                // Reset a set
+    memset( data, 0, size*sizeof(uint32) );
+  }
 
   /* Removed for MCC BUG
      operator const VectorSet* (void) const { return this; } */
@@ -148,8 +151,7 @@
 
 
 private:
-  friend class VSetI_;
-  SetI_ *iterate(uint&) const;
+  SetI_ *iterate(uint&) const { ShouldNotCallThis(); return NULL; } // Removed
 };
 
 //------------------------------Iteration--------------------------------------
@@ -158,22 +160,26 @@
 // or may not be iterated over; untouched elements will be affected once.
 // Usage:  for( VectorSetI i(s); i.test(); i++ ) { body = i.elem; }
 
-class VSetI_ : public SetI_ {
+class VectorSetI : public StackObj {
   friend class VectorSet;
-  friend class VectorSetI;
   const VectorSet *s;
   uint i, j;
   uint32 mask;
-  VSetI_(const VectorSet *vset);
   uint next(void);
+
+public:
+  uint elem;                    // The publically accessible element
+
+  VectorSetI( const VectorSet *vset ) :
+    s(vset),
+    i((uint)-1L),
+    j((uint)-1L),
+    mask((unsigned)(1L<<31)) {
+    elem = next();
+  }
+
+  void operator ++(void) { elem = next(); }
   int test(void) { return i < s->size; }
 };
 
-class VectorSetI : public SetI {
-public:
-  VectorSetI( const VectorSet *s ) : SetI(s) { }
-  void operator ++(void) { elem = ((VSetI_*)impl)->next(); }
-  int test(void) { return ((VSetI_*)impl)->test(); }
-};
-
 #endif // SHARE_VM_LIBADT_VECTSET_HPP
--- a/hotspot/src/share/vm/memory/allocation.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -157,7 +157,7 @@
 
 void trace_heap_malloc(size_t size, const char* name, void* p) {
   // A lock is not needed here - tty uses a lock internally
-  tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
+  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
 }
 
 
@@ -573,22 +573,27 @@
   st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
 }
 
-size_t Arena::_bytes_allocated = 0;
+julong Arena::_bytes_allocated = 0;
+
+void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
 
 AllocStats::AllocStats() {
-  start_mallocs = os::num_mallocs;
-  start_frees = os::num_frees;
+  start_mallocs      = os::num_mallocs;
+  start_frees        = os::num_frees;
   start_malloc_bytes = os::alloc_bytes;
-  start_res_bytes = Arena::_bytes_allocated;
+  start_mfree_bytes  = os::free_bytes;
+  start_res_bytes    = Arena::_bytes_allocated;
 }
 
-int     AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
-size_t  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
-size_t  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
-int     AllocStats::num_frees() { return os::num_frees - start_frees; }
+julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
+julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
+julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
+julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
+julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
 void    AllocStats::print() {
-  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
-             num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
+  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
+                UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
+                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
 }
 
 
--- a/hotspot/src/share/vm/memory/allocation.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -202,10 +202,11 @@
   char *_hwm, *_max;            // High water mark and max in current chunk
   void* grow(size_t x);         // Get a new Chunk of at least size x
   NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing)
-  NOT_PRODUCT(static size_t _bytes_allocated;) // total #bytes allocated since start
+  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
   friend class AllocStats;
   debug_only(void* malloc(size_t size);)
   debug_only(void* internal_malloc_4(size_t x);)
+  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)
  public:
   Arena();
   Arena(size_t init_size);
@@ -219,7 +220,7 @@
     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
     x = ARENA_ALIGN(x);
     debug_only(if (UseMallocOnly) return malloc(x);)
-    NOT_PRODUCT(_bytes_allocated += x);
+    NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x);
     } else {
@@ -232,7 +233,7 @@
   void *Amalloc_4(size_t x) {
     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
     debug_only(if (UseMallocOnly) return malloc(x);)
-    NOT_PRODUCT(_bytes_allocated += x);
+    NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x);
     } else {
@@ -252,7 +253,7 @@
     size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
     x += delta;
 #endif
-    NOT_PRODUCT(_bytes_allocated += x);
+    NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x); // grow() returns a result aligned >= 8 bytes.
     } else {
@@ -406,15 +407,16 @@
 // for statistics
 #ifndef PRODUCT
 class AllocStats : StackObj {
-  int    start_mallocs, start_frees;
-  size_t start_malloc_bytes, start_res_bytes;
+  julong start_mallocs, start_frees;
+  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
  public:
   AllocStats();
 
-  int    num_mallocs();    // since creation of receiver
-  size_t alloc_bytes();
-  size_t resource_bytes();
-  int    num_frees();
+  julong num_mallocs();    // since creation of receiver
+  julong alloc_bytes();
+  julong num_frees();
+  julong free_bytes();
+  julong resource_bytes();
   void   print();
 };
 #endif
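
With the counters widened to julong and free_bytes added, AllocStats now reports freed bytes alongside mallocs and resource-arena usage. A minimal sketch of how such a scoped snapshot is typically used; the bracketed workload is hypothetical, not part of this change:

    #ifndef PRODUCT
    {
      AllocStats stats;        // snapshots os::num_mallocs, os::alloc_bytes,
                               // os::num_frees, os::free_bytes and
                               // Arena::_bytes_allocated at construction
      run_some_workload();     // hypothetical work that mallocs and frees
      stats.print();           // prints the deltas in MB since construction
    }
    #endif
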
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,20 @@
 void trace_heap_malloc(size_t size, const char* name, void *p);
 void trace_heap_free(void *p);
 
+#ifndef PRODUCT
+// Increments unsigned long value for statistics (not atomic on MP).
+inline void inc_stat_counter(volatile julong* dest, julong add_value) {
+#if defined(SPARC) || defined(X86)
+  // Sparc and X86 have atomic jlong (8 bytes) instructions
+  julong value = Atomic::load((volatile jlong*)dest);
+  value += add_value;
+  Atomic::store((jlong)value, (volatile jlong*)dest);
+#else
+  // possible word-tearing during load/store
+  *dest += add_value;
+#endif
+}
+#endif
 
 // allocate using malloc; will fail if no memory available
 inline char* AllocateHeap(size_t size, const char* name = NULL) {
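
The helper above keeps the new 64-bit statistics counters sane on SPARC and X86 by going through Atomic::load/Atomic::store on the underlying jlong, and simply tolerates word tearing on other platforms. A minimal, hypothetical use outside os::; the counter and function names here are illustrative only:

    #ifndef PRODUCT
    static volatile julong _chunk_bytes_allocated = 0;   // hypothetical counter

    inline void note_chunk_allocation(size_t size) {
      // Not an atomic read-modify-write on MP, but the 64-bit value is never
      // half-written on SPARC/X86, so a concurrent reader sees a sane count.
      inc_stat_counter(&_chunk_bytes_allocated, size);
    }
    #endif
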
--- a/hotspot/src/share/vm/oops/cpCacheOop.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/oops/cpCacheOop.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,15 +98,15 @@
 // Atomically sets f1 if it is still NULL, otherwise it keeps the
 // current value.
 void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
-    // Use barriers as in oop_store
-    HeapWord* f1_addr = (HeapWord*) &_f1;
-    update_barrier_set_pre(f1_addr, f1);
-    void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
-    bool success = (result == NULL);
-    if (success) {
-      update_barrier_set((void*) f1_addr, f1);
-    }
+  // Use barriers as in oop_store
+  oop* f1_addr = (oop*) &_f1;
+  update_barrier_set_pre(f1_addr, f1);
+  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
+  bool success = (result == NULL);
+  if (success) {
+    update_barrier_set(f1_addr, f1);
   }
+}
 
 #ifdef ASSERT
 // It is possible to have two different dummy methodOops created
--- a/hotspot/src/share/vm/oops/methodDataOop.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/oops/methodDataOop.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -764,11 +764,13 @@
   if (TieredCompilation) {
     _invocation_counter.init();
     _backedge_counter.init();
+    _invocation_counter_start = 0;
+    _backedge_counter_start = 0;
     _num_loops = 0;
     _num_blocks = 0;
     _highest_comp_level = 0;
     _highest_osr_comp_level = 0;
-    _would_profile = false;
+    _would_profile = true;
   }
   set_creation_mileage(mileage_of(method()));
 
--- a/hotspot/src/share/vm/oops/methodDataOop.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/oops/methodDataOop.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1224,6 +1224,9 @@
   InvocationCounter _invocation_counter;
   // Same for backedges.
   InvocationCounter _backedge_counter;
+  // Counter values at the time profiling started.
+  int               _invocation_counter_start;
+  int               _backedge_counter_start;
   // Number of loops and blocks is computed when compiling the first
   // time with C1. It is used to determine if method is trivial.
   short             _num_loops;
@@ -1333,6 +1336,28 @@
     return backedge_counter()->count();
   }
 
+  int invocation_count_start() {
+    if (invocation_counter()->carry()) {
+      return 0;
+    }
+    return _invocation_counter_start;
+  }
+
+  int backedge_count_start() {
+    if (backedge_counter()->carry()) {
+      return 0;
+    }
+    return _backedge_counter_start;
+  }
+
+  int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
+  int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
+
+  void reset_start_counters() {
+    _invocation_counter_start = invocation_count();
+    _backedge_counter_start = backedge_count();
+  }
+
   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
 
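
The new *_start fields and delta accessors let the tiered policy look at profiling activity accumulated since the last reprofile rather than lifetime totals; the carry() check means a saturated counter simply falls back to reporting the full count. A minimal sketch of the intended usage, assuming a surrounding methodOop named method:

    methodDataOop mdo = method->method_data();
    if (mdo != NULL) {
      mdo->reset_start_counters();             // e.g. after an uncommon trap
      // ... the method runs and profiles for a while ...
      int recent_invocations = mdo->invocation_count_delta();
      int recent_backedges   = mdo->backedge_count_delta();
    }
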
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -934,7 +934,7 @@
   assert(m->signature() == signature, "");
   assert(m->is_method_handle_invoke(), "");
 #ifdef CC_INTERP
-  ResultTypeFinder rtf(signature());
+  ResultTypeFinder rtf(signature);
   m->set_result_index(rtf.type());
 #endif
   m->compute_size_of_parameters(THREAD);
--- a/hotspot/src/share/vm/opto/escape.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/escape.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,9 @@
 ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
   _processed(C->comp_arena()),
+  pt_ptset(C->comp_arena()),
+  pt_visited(C->comp_arena()),
+  pt_worklist(C->comp_arena(), 4, 0, 0),
   _collecting(true),
   _progress(false),
   _compile(C),
@@ -220,9 +223,7 @@
   PointsToNode::EscapeState orig_es = es;
 
   // compute max escape state of anything this node could point to
-  VectorSet ptset(Thread::current()->resource_area());
-  PointsTo(ptset, n);
-  for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
+  for(VectorSetI i(PointsTo(n)); i.test() && es != PointsToNode::GlobalEscape; ++i) {
     uint pt = i.elem;
     PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
     if (pes > es)
@@ -236,9 +237,10 @@
   return es;
 }
 
-void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n) {
-  VectorSet visited(Thread::current()->resource_area());
-  GrowableArray<uint>  worklist;
+VectorSet* ConnectionGraph::PointsTo(Node * n) {
+  pt_ptset.Reset();
+  pt_visited.Reset();
+  pt_worklist.clear();
 
 #ifdef ASSERT
   Node *orig_n = n;
@@ -249,8 +251,8 @@
 
   // If we have a JavaObject, return just that object
   if (npt->node_type() == PointsToNode::JavaObject) {
-    ptset.set(n->_idx);
-    return;
+    pt_ptset.set(n->_idx);
+    return &pt_ptset;
   }
 #ifdef ASSERT
   if (npt->_node == NULL) {
@@ -260,10 +262,10 @@
     assert(npt->_node != NULL, "unregistered node");
   }
 #endif
-  worklist.push(n->_idx);
-  while(worklist.length() > 0) {
-    int ni = worklist.pop();
-    if (visited.test_set(ni))
+  pt_worklist.push(n->_idx);
+  while(pt_worklist.length() > 0) {
+    int ni = pt_worklist.pop();
+    if (pt_visited.test_set(ni))
       continue;
 
     PointsToNode* pn = ptnode_adr(ni);
@@ -276,10 +278,10 @@
       uint etgt = pn->edge_target(e);
       PointsToNode::EdgeType et = pn->edge_type(e);
       if (et == PointsToNode::PointsToEdge) {
-        ptset.set(etgt);
+        pt_ptset.set(etgt);
         edges_processed++;
       } else if (et == PointsToNode::DeferredEdge) {
-        worklist.push(etgt);
+        pt_worklist.push(etgt);
         edges_processed++;
       } else {
         assert(false,"neither PointsToEdge or DeferredEdge");
@@ -288,16 +290,17 @@
     if (edges_processed == 0) {
       // no deferred or pointsto edges found.  Assume the value was set
       // outside this method.  Add the phantom object to the pointsto set.
-      ptset.set(_phantom_object);
+      pt_ptset.set(_phantom_object);
     }
   }
+  return &pt_ptset;
 }
 
 void ConnectionGraph::remove_deferred(uint ni, GrowableArray<uint>* deferred_edges, VectorSet* visited) {
   // This method is most expensive during ConnectionGraph construction.
   // Reuse vectorSet and an additional growable array for deferred edges.
   deferred_edges->clear();
-  visited->Clear();
+  visited->Reset();
 
   visited->set(ni);
   PointsToNode *ptn = ptnode_adr(ni);
@@ -1009,7 +1012,6 @@
   uint new_index_start = (uint) _compile->num_alias_types();
   Arena* arena = Thread::current()->resource_area();
   VectorSet visited(arena);
-  VectorSet ptset(arena);
 
 
   //  Phase 1:  Process possible allocations from alloc_worklist.
@@ -1137,10 +1139,9 @@
         }
       }
     } else if (n->is_AddP()) {
-      ptset.Clear();
-      PointsTo(ptset, get_addp_base(n));
-      assert(ptset.Size() == 1, "AddP address is unique");
-      uint elem = ptset.getelem(); // Allocation node's index
+      VectorSet* ptset = PointsTo(get_addp_base(n));
+      assert(ptset->Size() == 1, "AddP address is unique");
+      uint elem = ptset->getelem(); // Allocation node's index
       if (elem == _phantom_object) {
         assert(false, "escaped allocation");
         continue; // Assume the value was set outside this method.
@@ -1157,10 +1158,9 @@
         assert(n->is_Phi(), "loops only through Phi's");
         continue;  // already processed
       }
-      ptset.Clear();
-      PointsTo(ptset, n);
-      if (ptset.Size() == 1) {
-        uint elem = ptset.getelem(); // Allocation node's index
+      VectorSet* ptset = PointsTo(n);
+      if (ptset->Size() == 1) {
+        uint elem = ptset->getelem(); // Allocation node's index
         if (elem == _phantom_object) {
           assert(false, "escaped allocation");
           continue; // Assume the value was set outside this method.
@@ -1434,7 +1434,7 @@
   // Update the memory inputs of MemNodes with the value we computed
   // in Phase 2 and move stores memory users to corresponding memory slices.
 #ifdef ASSERT
-  visited.Clear();
+  visited.Reset();
   Node_Stack old_mems(arena, _compile->unique() >> 2);
 #endif
   for (uint i = 0; i < nodes_size(); i++) {
@@ -1640,7 +1640,6 @@
 #undef CG_BUILD_ITER_LIMIT
 
   Arena* arena = Thread::current()->resource_area();
-  VectorSet ptset(arena);
   VectorSet visited(arena);
   worklist.clear();
 
@@ -1657,7 +1656,7 @@
       if (n->is_AddP()) {
         // Search for objects which are not scalar replaceable
         // and adjust their escape state.
-        verify_escape_state(ni, ptset, igvn);
+        adjust_escape_state(ni, igvn);
       }
     }
   }
@@ -1776,8 +1775,8 @@
   return has_non_escaping_obj;
 }
 
-// Search for objects which are not scalar replaceable.
-void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase) {
+// Adjust escape state after Connection Graph is built.
+void ConnectionGraph::adjust_escape_state(int nidx, PhaseTransform* phase) {
   PointsToNode* ptn = ptnode_adr(nidx);
   Node* n = ptn->_node;
   assert(n->is_AddP(), "Should be called for AddP nodes only");
@@ -1792,9 +1791,8 @@
 
   int offset = ptn->offset();
   Node* base = get_addp_base(n);
-  ptset.Clear();
-  PointsTo(ptset, base);
-  int ptset_size = ptset.Size();
+  VectorSet* ptset = PointsTo(base);
+  int ptset_size = ptset->Size();
 
   // Check if an oop field's initializing value is recorded and add
   // a corresponding NULL field's value if it is not recorded.
@@ -1814,7 +1812,7 @@
   // Do a simple control flow analysis to distinguish above cases.
   //
   if (offset != Type::OffsetBot && ptset_size == 1) {
-    uint elem = ptset.getelem(); // Allocation node's index
+    uint elem = ptset->getelem(); // Allocation node's index
     // It does not matter if it is not Allocation node since
     // only non-escaping allocations are scalar replaced.
     if (ptnode_adr(elem)->_node->is_Allocate() &&
@@ -1913,7 +1911,7 @@
   //
   if (ptset_size > 1 || ptset_size != 0 &&
       (has_LoadStore || offset == Type::OffsetBot)) {
-    for( VectorSetI j(&ptset); j.test(); ++j ) {
+    for( VectorSetI j(ptset); j.test(); ++j ) {
       set_escape_state(j.elem, PointsToNode::ArgEscape);
       ptnode_adr(j.elem)->_scalar_replaceable = false;
     }
@@ -1937,7 +1935,6 @@
       // Stub calls, objects do not escape but they are not scale replaceable.
       // Adjust escape state for outgoing arguments.
       const TypeTuple * d = call->tf()->domain();
-      VectorSet ptset(Thread::current()->resource_area());
       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
         const Type* at = d->field_at(i);
         Node *arg = call->in(i)->uncast();
@@ -1970,9 +1967,7 @@
             //
             arg = get_addp_base(arg);
           }
-          ptset.Clear();
-          PointsTo(ptset, arg);
-          for( VectorSetI j(&ptset); j.test(); ++j ) {
+          for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
             uint pt = j.elem;
             set_escape_state(pt, PointsToNode::ArgEscape);
           }
@@ -1990,7 +1985,6 @@
       // fall-through if not a Java method or no analyzer information
       if (call_analyzer != NULL) {
         const TypeTuple * d = call->tf()->domain();
-        VectorSet ptset(Thread::current()->resource_area());
         bool copy_dependencies = false;
         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
           const Type* at = d->field_at(i);
@@ -2015,9 +2009,7 @@
               copy_dependencies = true;
             }
 
-            ptset.Clear();
-            PointsTo(ptset, arg);
-            for( VectorSetI j(&ptset); j.test(); ++j ) {
+            for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
               uint pt = j.elem;
               if (global_escapes) {
                 //The argument global escapes, mark everything it could point to
@@ -2045,15 +2037,12 @@
     {
       // adjust escape state for  outgoing arguments
       const TypeTuple * d = call->tf()->domain();
-      VectorSet ptset(Thread::current()->resource_area());
       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
         const Type* at = d->field_at(i);
         if (at->isa_oopptr() != NULL) {
           Node *arg = call->in(i)->uncast();
           set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
-          ptset.Clear();
-          PointsTo(ptset, arg);
-          for( VectorSetI j(&ptset); j.test(); ++j ) {
+          for( VectorSetI j(PointsTo(arg)); j.test(); ++j ) {
             uint pt = j.elem;
             set_escape_state(pt, PointsToNode::GlobalEscape);
           }
@@ -2515,9 +2504,7 @@
     {
       Node *base = get_addp_base(n);
       // Create a field edge to this node from everything base could point to.
-      VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, base);
-      for( VectorSetI i(&ptset); i.test(); ++i ) {
+      for( VectorSetI i(PointsTo(base)); i.test(); ++i ) {
         uint pt = i.elem;
         add_field_edge(pt, n_idx, address_offset(n, phase));
       }
@@ -2583,10 +2570,8 @@
 
       // For everything "adr_base" could point to, create a deferred edge from
       // this node to each field with the same offset.
-      VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base);
       int offset = address_offset(adr, phase);
-      for( VectorSetI i(&ptset); i.test(); ++i ) {
+      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
         uint pt = i.elem;
         add_deferred_edge_to_fields(n_idx, pt, offset);
       }
@@ -2676,9 +2661,7 @@
       Node *val = n->in(MemNode::ValueIn)->uncast();
       // For everything "adr_base" could point to, create a deferred edge
       // to "val" from each field with the same offset.
-      VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base);
-      for( VectorSetI i(&ptset); i.test(); ++i ) {
+      for( VectorSetI i(PointsTo(adr_base)); i.test(); ++i ) {
         uint pt = i.elem;
         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
       }
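
PointsTo() now clears and refills the graph-owned pt_ptset/pt_visited/pt_worklist and returns a pointer to pt_ptset, so callers iterate the returned set directly instead of building a fresh VectorSet per query; the result is only valid until the next PointsTo() call. The call pattern reduces to the two forms already used in the hunks above:

    // Iterate everything the node could point to.
    for (VectorSetI j(PointsTo(arg)); j.test(); ++j) {
      uint pt = j.elem;
      set_escape_state(pt, PointsToNode::ArgEscape);
    }

    // Single-element queries use the returned pointer the same way.
    VectorSet* ptset = PointsTo(get_addp_base(n));
    assert(ptset->Size() == 1, "AddP address is unique");
    uint elem = ptset->getelem();
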
--- a/hotspot/src/share/vm/opto/escape.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/escape.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -268,7 +268,12 @@
   // walk the connection graph starting at the node corresponding to "n" and
   // add the index of everything it could point to, to "ptset".  This may cause
   // Phi's encountered to get (re)processed  (which requires "phase".)
-  void PointsTo(VectorSet &ptset, Node * n);
+  VectorSet* PointsTo(Node * n);
+
+  // Reused structures for PointsTo().
+  VectorSet            pt_ptset;
+  VectorSet            pt_visited;
+  GrowableArray<uint>  pt_worklist;
 
   //  Edge manipulation.  The "from_i" and "to_i" arguments are the
   //  node indices of the source and destination of the edge
@@ -334,8 +339,11 @@
   // Set the escape state of a node
   void set_escape_state(uint ni, PointsToNode::EscapeState es);
 
-  // Search for objects which are not scalar replaceable.
-  void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
+  // Adjust escape state after Connection Graph is built.
+  void adjust_escape_state(int nidx, PhaseTransform* phase);
+
+  // Compute the escape information
+  bool compute_escape();
 
 public:
   ConnectionGraph(Compile *C, PhaseIterGVN *igvn);
@@ -346,9 +354,6 @@
   // Perform escape analysis
   static void do_analysis(Compile *C, PhaseIterGVN *igvn);
 
-  // Compute the escape information
-  bool compute_escape();
-
   // escape state of a node
   PointsToNode::EscapeState escape_state(Node *n);
 
--- a/hotspot/src/share/vm/opto/indexSet.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/indexSet.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,12 +39,12 @@
 
 #ifdef ASSERT
 // Initialize statistics counters
-uint IndexSet::_alloc_new = 0;
-uint IndexSet::_alloc_total = 0;
+julong IndexSet::_alloc_new = 0;
+julong IndexSet::_alloc_total = 0;
 
-long IndexSet::_total_bits = 0;
-long IndexSet::_total_used_blocks = 0;
-long IndexSet::_total_unused_blocks = 0;
+julong IndexSet::_total_bits = 0;
+julong IndexSet::_total_used_blocks = 0;
+julong IndexSet::_total_unused_blocks = 0;
 
 // Per set, or all sets operation tracing
 int IndexSet::_serial_count = 1;
@@ -141,7 +141,7 @@
 
 #ifdef ASSERT
   if (CollectIndexSetStatistics) {
-    _alloc_new += bitblock_alloc_chunk_size;
+    inc_stat_counter(&_alloc_new, bitblock_alloc_chunk_size);
   }
 #endif
 }
@@ -154,7 +154,7 @@
 IndexSet::BitBlock *IndexSet::alloc_block() {
 #ifdef ASSERT
   if (CollectIndexSetStatistics) {
-    _alloc_total++;
+    inc_stat_counter(&_alloc_total, 1);
   }
 #endif
   Compile *compile = Compile::current();
@@ -391,13 +391,13 @@
 // Update block/bit counts to reflect that this set has been iterated over.
 
 void IndexSet::tally_iteration_statistics() const {
-  _total_bits += count();
+  inc_stat_counter(&_total_bits, count());
 
   for (uint i = 0; i < _max_blocks; i++) {
     if (_blocks[i] != &_empty_block) {
-      _total_used_blocks++;
+      inc_stat_counter(&_total_used_blocks, 1);
     } else {
-      _total_unused_blocks++;
+      inc_stat_counter(&_total_unused_blocks, 1);
     }
   }
 }
@@ -406,17 +406,17 @@
 // Print statistics about IndexSet usage.
 
 void IndexSet::print_statistics() {
-  long total_blocks = _total_used_blocks + _total_unused_blocks;
+  julong total_blocks = _total_used_blocks + _total_unused_blocks;
   tty->print_cr ("Accumulated IndexSet usage statistics:");
   tty->print_cr ("--------------------------------------");
   tty->print_cr ("  Iteration:");
-  tty->print_cr ("    blocks visited: %d", total_blocks);
-  tty->print_cr ("    blocks empty: %4.2f%%", 100.0*_total_unused_blocks/total_blocks);
-  tty->print_cr ("    bit density (bits/used blocks): %4.2f%%", (double)_total_bits/_total_used_blocks);
-  tty->print_cr ("    bit density (bits/all blocks): %4.2f%%", (double)_total_bits/total_blocks);
+  tty->print_cr ("    blocks visited: " UINT64_FORMAT, total_blocks);
+  tty->print_cr ("    blocks empty: %4.2f%%", 100.0*(double)_total_unused_blocks/total_blocks);
+  tty->print_cr ("    bit density (bits/used blocks): %4.2f", (double)_total_bits/_total_used_blocks);
+  tty->print_cr ("    bit density (bits/all blocks): %4.2f", (double)_total_bits/total_blocks);
   tty->print_cr ("  Allocation:");
-  tty->print_cr ("    blocks allocated: %d", _alloc_new);
-  tty->print_cr ("    blocks used/reused: %d", _alloc_total);
+  tty->print_cr ("    blocks allocated: " UINT64_FORMAT, _alloc_new);
+  tty->print_cr ("    blocks used/reused: " UINT64_FORMAT, _alloc_total);
 }
 
 //---------------------------- IndexSet::verify() -----------------------------
--- a/hotspot/src/share/vm/opto/indexSet.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/indexSet.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -352,13 +352,13 @@
   void tally_iteration_statistics() const;
 
   // BitBlock allocation statistics
-  static uint _alloc_new;
-  static uint _alloc_total;
+  static julong _alloc_new;
+  static julong _alloc_total;
 
   // Block density statistics
-  static long _total_bits;
-  static long _total_used_blocks;
-  static long _total_unused_blocks;
+  static julong _total_bits;
+  static julong _total_used_blocks;
+  static julong _total_unused_blocks;
 
   // Sanity tests
   void verify() const;
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1480,6 +1480,8 @@
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
 void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool do_loop_pred) {
+  ResourceMark rm;
+
   int old_progress = C->major_progress();
 
   // Reset major-progress flag for the driver's heuristics
@@ -2013,7 +2015,7 @@
   if (_dom_stk == NULL) {
     uint init_size = C->unique() / 100; // Guess that 1/100 is a reasonable initial size.
     if (init_size < 10) init_size = 10;
-    _dom_stk = new (C->node_arena()) GrowableArray<uint>(C->node_arena(), init_size, 0, 0);
+    _dom_stk = new GrowableArray<uint>(init_size);
   }
   // Compute new depth for each node.
   for (i = 0; i < _idom_size; i++) {
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -700,7 +700,7 @@
   PhaseIdealLoop( PhaseIterGVN &igvn) :
     PhaseTransform(Ideal_Loop),
     _igvn(igvn),
-    _dom_lca_tags(C->comp_arena()),
+    _dom_lca_tags(arena()), // Thread::resource_area
     _verify_me(NULL),
     _verify_only(true) {
     build_and_optimize(false, false);
@@ -721,7 +721,7 @@
   PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool do_loop_pred) :
     PhaseTransform(Ideal_Loop),
     _igvn(igvn),
-    _dom_lca_tags(C->comp_arena()),
+    _dom_lca_tags(arena()), // Thread::resource_area
     _verify_me(NULL),
     _verify_only(false) {
     build_and_optimize(do_split_ifs, do_loop_pred);
@@ -731,7 +731,7 @@
   PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
     PhaseTransform(Ideal_Loop),
     _igvn(igvn),
-    _dom_lca_tags(C->comp_arena()),
+    _dom_lca_tags(arena()), // Thread::resource_area
     _verify_me(verify_me),
     _verify_only(false) {
     build_and_optimize(false, false);
--- a/hotspot/src/share/vm/opto/node.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/node.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -743,6 +743,9 @@
 //------------------------------del_req----------------------------------------
 // Delete the required edge and compact the edge array
 void Node::del_req( uint idx ) {
+  assert( idx < _cnt, "oob");
+  assert( !VerifyHashTableKeys || _hash_lock == 0,
+          "remove node from hash table before modifying it");
   // First remove corresponding def-use edge
   Node *n = in(idx);
   if (n != NULL) n->del_out((Node *)this);
--- a/hotspot/src/share/vm/opto/phase.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/opto/phase.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,16 +99,18 @@
   tty->print_cr ("    stub compilation     : %3.3f sec.", Phase::_t_stubCompilation.seconds());
   tty->print_cr ("  Phases:");
   tty->print_cr ("    parse          : %3.3f sec", Phase::_t_parser.seconds());
-  if (DoEscapeAnalysis) {
-    tty->print_cr ("    escape analysis   : %3.3f sec", Phase::_t_escapeAnalysis.seconds());
-  }
   tty->print_cr ("    optimizer      : %3.3f sec", Phase::_t_optimizer.seconds());
   if( Verbose || WizardMode ) {
+    if (DoEscapeAnalysis) {
+      // EA is part of Optimizer.
+      tty->print_cr ("      escape analysis: %3.3f sec", Phase::_t_escapeAnalysis.seconds());
+    }
     tty->print_cr ("      iterGVN        : %3.3f sec", Phase::_t_iterGVN.seconds());
     tty->print_cr ("      idealLoop      : %3.3f sec", Phase::_t_idealLoop.seconds());
     tty->print_cr ("      idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
     tty->print_cr ("      ccp            : %3.3f sec", Phase::_t_ccp.seconds());
     tty->print_cr ("      iterGVN2       : %3.3f sec", Phase::_t_iterGVN2.seconds());
+    tty->print_cr ("      macroExpand    : %3.3f sec", Phase::_t_macroExpand.seconds());
     tty->print_cr ("      graphReshape   : %3.3f sec", Phase::_t_graphReshaping.seconds());
     double optimizer_subtotal = Phase::_t_iterGVN.seconds() +
       Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
@@ -133,18 +135,15 @@
     double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0));
     tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", regalloc_subtotal, percent_of_regalloc);
   }
-  tty->print_cr ("    macroExpand    : %3.3f sec", Phase::_t_macroExpand.seconds());
   tty->print_cr ("    blockOrdering  : %3.3f sec", Phase::_t_blockOrdering.seconds());
   tty->print_cr ("    peephole       : %3.3f sec", Phase::_t_peephole.seconds());
   tty->print_cr ("    codeGen        : %3.3f sec", Phase::_t_codeGeneration.seconds());
   tty->print_cr ("    install_code   : %3.3f sec", Phase::_t_registerMethod.seconds());
   tty->print_cr ("    -------------- : ----------");
   double phase_subtotal = Phase::_t_parser.seconds() +
-    (DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) +
     Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() +
     Phase::_t_matcher.seconds() + Phase::_t_scheduler.seconds() +
     Phase::_t_registerAllocation.seconds() + Phase::_t_blockOrdering.seconds() +
-    Phase::_t_macroExpand.seconds() + Phase::_t_peephole.seconds() +
     Phase::_t_codeGeneration.seconds() + Phase::_t_registerMethod.seconds();
   double percent_of_method_compile = ((phase_subtotal == 0.0) ? 0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0;
   // counters inside Compile::CodeGen include time for adapters and stubs
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -740,6 +740,9 @@
   develop(bool, PrintMalloc, false,                                         \
           "print all malloc/free calls")                                    \
                                                                             \
+  develop(bool, PrintMallocStatistics, false,                               \
+          "print malloc/free statistics")                                   \
+                                                                            \
   develop(bool, ZapResourceArea, trueInDebug,                               \
           "Zap freed resource/arena space with 0xABABABAB")                 \
                                                                             \
--- a/hotspot/src/share/vm/runtime/java.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/java.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -326,7 +326,7 @@
   }
 
   print_bytecode_count();
-  if (WizardMode) {
+  if (PrintMallocStatistics) {
     tty->print("allocation stats: ");
     alloc_stats.print();
     tty->cr();
--- a/hotspot/src/share/vm/runtime/os.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,9 +72,10 @@
 size_t            os::_page_sizes[os::page_sizes_max];
 
 #ifndef PRODUCT
-int os::num_mallocs = 0;            // # of calls to malloc/realloc
-size_t os::alloc_bytes = 0;         // # of bytes allocated
-int os::num_frees = 0;              // # of calls to free
+julong os::num_mallocs = 0;         // # of calls to malloc/realloc
+julong os::alloc_bytes = 0;         // # of bytes allocated
+julong os::num_frees = 0;           // # of calls to free
+julong os::free_bytes = 0;          // # of bytes freed
 #endif
 
 // Fill in buffer with current local time as an ISO-8601 string.
@@ -490,9 +491,9 @@
   }
 
   if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
-    tty->print_cr("### previous object: %p (%ld bytes)", obj, size);
+    tty->print_cr("### previous object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
   } else {
-    tty->print_cr("### previous object (not sure if correct): %p (%ld bytes)", obj, size);
+    tty->print_cr("### previous object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
   }
 
   // now find successor block
@@ -504,16 +505,16 @@
       start_of_next_block[1] == badResourceValue &&
       start_of_next_block[2] == badResourceValue &&
       start_of_next_block[3] == badResourceValue) {
-    tty->print_cr("### next object: %p (%ld bytes)", next_obj, next_size);
+    tty->print_cr("### next object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
   } else {
-    tty->print_cr("### next object (not sure if correct): %p (%ld bytes)", next_obj, next_size);
+    tty->print_cr("### next object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
   }
 }
 
 
 void report_heap_error(void* memblock, void* bad, const char* where) {
-  tty->print_cr("## nof_mallocs = %d, nof_frees = %d", os::num_mallocs, os::num_frees);
-  tty->print_cr("## memory stomp: byte at %p %s object %p", bad, where, memblock);
+  tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
+  tty->print_cr("## memory stomp: byte at " PTR_FORMAT " %s object " PTR_FORMAT, bad, where, memblock);
   print_neighbor_blocks(memblock);
   fatal("memory stomping error");
 }
@@ -538,8 +539,8 @@
 #endif
 
 void* os::malloc(size_t size) {
-  NOT_PRODUCT(num_mallocs++);
-  NOT_PRODUCT(alloc_bytes += size);
+  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
+  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
 
   if (size == 0) {
     // return a valid pointer if size is zero
@@ -562,26 +563,26 @@
 #endif
   u_char* memblock = ptr + space_before;
   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
-    tty->print_cr("os::malloc caught, %lu bytes --> %p", size, memblock);
+    tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
     breakpoint();
   }
   debug_only(if (paranoid) verify_block(memblock));
-  if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc %lu bytes --> %p", size, memblock);
+  if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
   return memblock;
 }
 
 
 void* os::realloc(void *memblock, size_t size) {
-  NOT_PRODUCT(num_mallocs++);
-  NOT_PRODUCT(alloc_bytes += size);
 #ifndef ASSERT
+  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
+  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   return ::realloc(memblock, size);
 #else
   if (memblock == NULL) {
-    return os::malloc(size);
+    return malloc(size);
   }
   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
-    tty->print_cr("os::realloc caught %p", memblock);
+    tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
     breakpoint();
   }
   verify_block(memblock);
@@ -589,13 +590,13 @@
   if (size == 0) return NULL;
   // always move the block
   void* ptr = malloc(size);
-  if (PrintMalloc) tty->print_cr("os::remalloc %lu bytes, %p --> %p", size, memblock, ptr);
+  if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
   // Copy to new memory if malloc didn't fail
   if ( ptr != NULL ) {
     memcpy(ptr, memblock, MIN2(size, get_size(memblock)));
     if (paranoid) verify_block(ptr);
     if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
-      tty->print_cr("os::realloc caught, %lu bytes --> %p", size, ptr);
+      tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
       breakpoint();
     }
     free(memblock);
@@ -606,17 +607,14 @@
 
 
 void  os::free(void *memblock) {
-  NOT_PRODUCT(num_frees++);
+  NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
 #ifdef ASSERT
   if (memblock == NULL) return;
   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
-    if (tty != NULL) tty->print_cr("os::free caught %p", memblock);
+    if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
     breakpoint();
   }
   verify_block(memblock);
-  if (PrintMalloc && tty != NULL)
-    // tty->print_cr("os::free %p", memblock);
-    fprintf(stderr, "os::free %p\n", memblock);
   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   // Added by detlefs.
   if (MallocCushion) {
@@ -627,12 +625,18 @@
       *p = (u_char)freeBlockPad;
     }
     size_t size = get_size(memblock);
+    inc_stat_counter(&free_bytes, size);
     u_char* end = ptr + space_before + size;
     for (u_char* q = end; q < end + MallocCushion; q++) {
       guarantee(*q == badResourceValue,
                 "Thing freed should be malloc result.");
       *q = (u_char)freeBlockPad;
     }
+    if (PrintMalloc && tty != NULL)
+      fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, memblock);
+  } else if (PrintMalloc && tty != NULL) {
+    // tty->print_cr("os::free %p", memblock);
+    fprintf(stderr, "os::free " PTR_FORMAT "\n", memblock);
   }
 #endif
   ::free((char*)memblock - space_before);
--- a/hotspot/src/share/vm/runtime/os.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/os.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -559,9 +559,10 @@
   static char* strdup(const char *);  // Like strdup
 
 #ifndef PRODUCT
-  static int  num_mallocs;            // # of calls to malloc/realloc
-  static size_t  alloc_bytes;         // # of bytes allocated
-  static int  num_frees;              // # of calls to free
+  static julong num_mallocs;         // # of calls to malloc/realloc
+  static julong alloc_bytes;         // # of bytes allocated
+  static julong num_frees;           // # of calls to free
+  static julong free_bytes;          // # of bytes freed
 #endif
 
   // SocketInterface (ex HPI SocketInterface )
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/simpleThresholdPolicy.hpp"
 #include "runtime/simpleThresholdPolicy.inline.hpp"
+#include "code/scopeDesc.hpp"
 
 // Print an event.
 void SimpleThresholdPolicy::print_event(EventType type, methodHandle mh, methodHandle imh,
@@ -48,6 +49,18 @@
     break;
   case COMPILE:
     tty->print("compile");
+    break;
+  case KILL:
+    tty->print("kill");
+    break;
+  case UPDATE:
+    tty->print("update");
+    break;
+  case REPROFILE:
+    tty->print("reprofile");
+    break;
+  default:
+    tty->print("unknown");
   }
 
   tty->print(" level: %d ", level);
@@ -69,13 +82,17 @@
   if (type != COMPILE) {
     methodDataHandle mdh = mh->method_data();
     int mdo_invocations = 0, mdo_backedges = 0;
+    int mdo_invocations_start = 0, mdo_backedges_start = 0;
     if (mdh() != NULL) {
       mdo_invocations = mdh->invocation_count();
       mdo_backedges = mdh->backedge_count();
+      mdo_invocations_start = mdh->invocation_count_start();
+      mdo_backedges_start = mdh->backedge_count_start();
     }
-    tty->print(" total: %d,%d mdo: %d,%d",
+    tty->print(" total: %d,%d mdo: %d(%d),%d(%d)",
                invocation_count, backedge_count,
-               mdo_invocations, mdo_backedges);
+               mdo_invocations, mdo_invocations_start,
+               mdo_backedges, mdo_backedges_start);
     tty->print(" max levels: %d,%d",
                 mh->highest_comp_level(), mh->highest_osr_comp_level());
     if (inlinee_event) {
@@ -138,6 +155,20 @@
   return compile_queue->first();
 }
 
+void SimpleThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
+  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
+    if (PrintTieredEvents) {
+      methodHandle mh(sd->method());
+      print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
+    }
+    methodDataOop mdo = sd->method()->method_data();
+    if (mdo != NULL) {
+      mdo->reset_start_counters();
+    }
+    if (sd->is_top()) break;
+  }
+}
+
 nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
                                       int branch_bci, int bci, CompLevel comp_level, TRAPS) {
   if (comp_level == CompLevel_none &&
@@ -254,46 +285,35 @@
 
 // Common transition function. Given a predicate determines if a method should transition to another level.
 CompLevel SimpleThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
+  if (is_trivial(method)) return CompLevel_simple;
+
   CompLevel next_level = cur_level;
   int i = method->invocation_count();
   int b = method->backedge_count();
 
   switch(cur_level) {
   case CompLevel_none:
-    {
-      methodDataOop mdo = method->method_data();
-      if (mdo != NULL) {
-        int mdo_i = mdo->invocation_count();
-        int mdo_b = mdo->backedge_count();
-        // If we were at full profile level, would we switch to full opt?
-        if ((this->*p)(mdo_i, mdo_b, CompLevel_full_profile)) {
-          next_level = CompLevel_full_optimization;
-        }
-      }
-    }
-    if (next_level == cur_level && (this->*p)(i, b, cur_level)) {
-      if (is_trivial(method)) {
-        next_level = CompLevel_simple;
-      } else {
-        next_level = CompLevel_full_profile;
-      }
+    // If we were at full profile level, would we switch to full opt?
+    if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
+      next_level = CompLevel_full_optimization;
+    } else if ((this->*p)(i, b, cur_level)) {
+      next_level = CompLevel_full_profile;
     }
     break;
   case CompLevel_limited_profile:
   case CompLevel_full_profile:
-    if (is_trivial(method)) {
-      next_level = CompLevel_simple;
-    } else {
+    {
       methodDataOop mdo = method->method_data();
-      guarantee(mdo != NULL, "MDO should always exist");
-      if (mdo->would_profile()) {
-        int mdo_i = mdo->invocation_count();
-        int mdo_b = mdo->backedge_count();
-        if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+      if (mdo != NULL) {
+        if (mdo->would_profile()) {
+          int mdo_i = mdo->invocation_count_delta();
+          int mdo_b = mdo->backedge_count_delta();
+          if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+            next_level = CompLevel_full_optimization;
+          }
+        } else {
           next_level = CompLevel_full_optimization;
         }
-      } else {
-        next_level = CompLevel_full_optimization;
       }
     }
     break;
@@ -303,12 +323,6 @@
 
 // Determine if a method should be compiled with a normal entry point at a different level.
 CompLevel SimpleThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
-  CompLevel highest_level = (CompLevel)method->highest_comp_level();
-  if (cur_level == CompLevel_none && highest_level > cur_level) {
-    // TODO: We may want to try to do more extensive reprofiling in this case.
-    return highest_level;
-  }
-
   CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
   CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
 
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.hpp	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
   void set_c1_count(int x) { _c1_count = x;    }
   void set_c2_count(int x) { _c2_count = x;    }
 
-  enum EventType { CALL, LOOP, COMPILE };
+  enum EventType { CALL, LOOP, COMPILE, KILL, UPDATE, REPROFILE };
   void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
   // Print policy-specific information if necessary
   virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
@@ -103,7 +103,7 @@
   virtual void disable_compilation(methodOop method) { }
   // TODO: we should honour reprofiling requests in the future. Currently reprofiling
   // would happen but not to the extent we would ideally like.
-  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) { }
+  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
   virtual nmethod* event(methodHandle method, methodHandle inlinee,
                          int branch_bci, int bci, CompLevel comp_level, TRAPS);
   // Select task is called by CompileBroker. We should return a task or NULL.
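
With reprofile() now implemented above (it walks the trap scope chain and calls reset_start_counters() on each method's MDO) and common() driven by the delta counters, a deoptimization effectively restarts the promotion clock. A hypothetical caller, sketched under the assumption that the trap handler already holds the ScopeDesc and reaches the policy through the usual CompilationPolicy::policy() accessor:

    void on_uncommon_trap_sketch(ScopeDesc* trap_scope) {   // hypothetical helper
      CompilationPolicy::policy()->reprofile(trap_scope, false /* not OSR */);
      // Subsequent SimpleThresholdPolicy::common() decisions compare the MDO's
      // invocation_count_delta()/backedge_count_delta() against the thresholds,
      // so only post-trap profiling counts toward CompLevel_full_optimization.
    }
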
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Mon Feb 14 14:36:29 2011 -0800
@@ -748,7 +748,6 @@
   /***********************************/                                                                                              \
                                                                                                                                      \
      static_field(StubRoutines,                _call_stub_return_address,                     address)                               \
-     IA32_ONLY(static_field(StubRoutines::x86,_call_stub_compiled_return,                     address))                              \
                                                                                                                                      \
   /***************************************/                                                                                          \
   /* PcDesc and other compiled code info */                                                                                          \
--- a/hotspot/test/compiler/6987555/Test6987555.java	Fri Feb 11 15:32:03 2011 -0800
+++ b/hotspot/test/compiler/6987555/Test6987555.java	Mon Feb 14 14:36:29 2011 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,8 @@
         if (DEBUG)  System.out.println("boolean=" + x);
         MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(boolean.class, boolean.class));
         MethodHandle mh2 = mh1.asType(MethodType.methodType(boolean.class, Boolean.class));
-        boolean a = mh1.<boolean>invokeExact(x);
-        boolean b = mh2.<boolean>invokeExact(Boolean.valueOf(x));
+        boolean a = (boolean) mh1.invokeExact(x);
+        boolean b = (boolean) mh2.invokeExact(Boolean.valueOf(x));
         assert a == b : a + " != " + b;
     }
 
@@ -80,8 +80,8 @@
         if (DEBUG)  System.out.println("byte=" + x);
         MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(byte.class, byte.class));
         MethodHandle mh2 = mh1.asType(MethodType.methodType(byte.class, Byte.class));
-        byte a = mh1.<byte>invokeExact(x);
-        byte b = mh2.<byte>invokeExact(Byte.valueOf(x));
+        byte a = (byte) mh1.invokeExact(x);
+        byte b = (byte) mh2.invokeExact(Byte.valueOf(x));
         assert a == b : a + " != " + b;
     }
 
@@ -104,8 +104,8 @@
         if (DEBUG)  System.out.println("char=" + x);
         MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(char.class, char.class));
         MethodHandle mh2 = mh1.asType(MethodType.methodType(char.class, Character.class));
-        char a = mh1.<char>invokeExact(x);
-        char b = mh2.<char>invokeExact(Character.valueOf(x));
+        char a = (char) mh1.invokeExact(x);
+        char b = (char) mh2.invokeExact(Character.valueOf(x));
         assert a == b : a + " != " + b;
     }
 
@@ -134,8 +134,8 @@
         if (DEBUG)  System.out.println("short=" + x);
         MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(short.class, short.class));
         MethodHandle mh2 = mh1.asType(MethodType.methodType(short.class, Short.class));
-        short a = mh1.<short>invokeExact(x);
-        short b = mh2.<short>invokeExact(Short.valueOf(x));
+        short a = (short) mh1.invokeExact(x);
+        short b = (short) mh2.invokeExact(Short.valueOf(x));
         assert a == b : a + " != " + b;
     }
 
@@ -164,8 +164,8 @@
         if (DEBUG)  System.out.println("int=" + x);
         MethodHandle mh1 = MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(int.class, int.class));
         MethodHandle mh2 = mh1.asType(MethodType.methodType(int.class, Integer.class));
-        int a = mh1.<int>invokeExact(x);
-        int b = mh2.<int>invokeExact(Integer.valueOf(x));
+        int a = (int) mh1.invokeExact(x);
+        int b = (int) mh2.invokeExact(Integer.valueOf(x));
         assert a == b : a + " != " + b;
     }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7017746/Test.java	Mon Feb 14 14:36:29 2011 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 7017746
+ * @summary Regression : C2 compiler crash due to SIGSEGV in PhaseCFG::schedule_early()
+ *
+ * @run main/othervm -Xbatch Test
+ */
+
+public class Test {
+
+  int i;
+
+  static int test(Test t, int a, int b) {
+    int j = t.i;
+    int x = a - b;
+    if (a < b) x = x + j;
+    return x - j;
+  }
+
+  public static void main(String args[]) {
+    Test t = new Test();
+    for (int n = 0; n < 1000000; n++) {
+      int i = test(t, 1, 2);
+    }
+  }
+}
+