Merge
author kvn
Fri, 24 Jul 2009 09:01:00 -0700
changeset 3277 b621e1c1f2c4
parent 3264 5d4c990fce1c (current diff)
parent 3276 bda7d4532054 (diff)
child 3280 791e5e4859ae
child 3592 857575a0a200
Merge
hotspot/src/share/vm/memory/universe.hpp
hotspot/src/share/vm/opto/cfgnode.cpp
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -1302,22 +1302,19 @@
 
   const Register ic_reg = rax;
   const Register receiver = j_rarg0;
-  const Register tmp = rdx;
 
   Label ok;
   Label exception_pending;
 
+  assert_different_registers(ic_reg, receiver, rscratch1);
   __ verify_oop(receiver);
-  __ push(tmp); // spill (any other registers free here???)
-  __ load_klass(tmp, receiver);
-  __ cmpq(ic_reg, tmp);
+  __ load_klass(rscratch1, receiver);
+  __ cmpq(ic_reg, rscratch1);
   __ jcc(Assembler::equal, ok);
 
-  __ pop(tmp);
   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
   __ bind(ok);
-  __ pop(tmp);
 
   // Verified entry point must be aligned
   __ align(8);
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -420,6 +420,13 @@
   return  _matrule->is_ideal_load();
 }
 
+// Return 'true' if the anti-dependence check should be skipped for this instruction
+bool InstructForm::skip_antidep_check() const {
+  if( _matrule == NULL ) return false;
+
+  return  _matrule->skip_antidep_check();
+}
+
 // Return 'true' if this instruction matches an ideal 'Load?' node
 Form::DataType InstructForm::is_ideal_store() const {
   if( _matrule == NULL ) return Form::none;
@@ -567,6 +574,8 @@
 
 // loads from memory, so must check for anti-dependence
 bool InstructForm::needs_anti_dependence_check(FormDict &globals) const {
+  if ( skip_antidep_check() ) return false;
+
   // Machine independent loads must be checked for anti-dependences
   if( is_ideal_load() != Form::none )  return true;
 
@@ -3957,6 +3966,28 @@
 }
 
 
+bool MatchRule::skip_antidep_check() const {
+  // Some loads operate on what is effectively immutable memory so we
+  // should skip the anti dep computations.  For some of these nodes
+  // the rewritable field keeps the anti dep logic from triggering but
+  // for certain kinds of LoadKlass it does not since they are
+  // actually reading memory which could be rewritten by the runtime,
+  // though never by generated code.  This disables it uniformly for
+  // the nodes that behave like this: LoadKlass, LoadNKlass and
+  // LoadRange.
+  if ( _opType && (strcmp(_opType,"Set") == 0) && _rChild ) {
+    const char *opType = _rChild->_opType;
+    if (strcmp("LoadKlass", opType) == 0 ||
+        strcmp("LoadNKlass", opType) == 0 ||
+        strcmp("LoadRange", opType) == 0) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+
 Form::DataType MatchRule::is_ideal_store() const {
   Form::DataType ideal_store = Form::none;
 
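
A minimal standalone sketch (not part of this changeset) of the shape test the new MatchRule::skip_antidep_check() applies: a rule such as match(Set dst (LoadKlass mem)) parses with a root _opType of "Set" and a right child whose _opType is "LoadKlass"; LoadNKlass and LoadRange are treated the same because they also read effectively immutable memory.

#include <cstring>

// Illustrative only; the real check operates on the parsed MatchRule fields above.
static bool reads_immutable_memory(const char* root_op, const char* rchild_op) {
  if (root_op == NULL || rchild_op == NULL) return false;
  return strcmp(root_op, "Set") == 0 &&
         (strcmp(rchild_op, "LoadKlass")  == 0 ||
          strcmp(rchild_op, "LoadNKlass") == 0 ||
          strcmp(rchild_op, "LoadRange")  == 0);
}
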
--- a/hotspot/src/share/vm/adlc/formssel.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -158,6 +158,9 @@
 
   virtual Form::CallType is_ideal_call() const; // matches ideal 'Call'
   virtual Form::DataType is_ideal_load() const; // node matches ideal 'LoadXNode'
+  // Should antidep checks be disabled for this Instruct
+  // See definition of MatchRule::skip_antidep_check
+  bool skip_antidep_check() const;
   virtual Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
           bool        is_ideal_mem() const { return is_ideal_load() != Form::none || is_ideal_store() != Form::none; }
   virtual uint        two_address(FormDict &globals); // output reg must match input reg
@@ -1003,6 +1006,9 @@
   bool       is_ideal_loopEnd() const; // node matches ideal 'LoopEnd'
   bool       is_ideal_bool() const;    // node matches ideal 'Bool'
   Form::DataType is_ideal_load() const;// node matches ideal 'LoadXNode'
+  // Should antidep checks be disabled for this rule
+  // See definition of MatchRule::skip_antidep_check
+  bool skip_antidep_check() const;
   Form::DataType is_ideal_store() const;// node matches ideal 'StoreXNode'
 
   // Check if 'mRule2' is a cisc-spill variant of this MatchRule
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -3231,6 +3231,16 @@
     this_klass->set_minor_version(minor_version);
     this_klass->set_major_version(major_version);
 
+    // Set up methodOop::intrinsic_id as soon as we know the names of methods.
+    // (We used to do this lazily, but now we query it in Rewriter,
+    // which is eagerly done for every method, so we might as well do it now,
+    // when everything is fresh in memory.)
+    if (methodOopDesc::klass_id_for_intrinsics(this_klass->as_klassOop()) != vmSymbols::NO_SID) {
+      for (int j = 0; j < methods->length(); j++) {
+        ((methodOop)methods->obj_at(j))->init_intrinsic_id();
+      }
+    }
+
     if (cached_class_file_bytes != NULL) {
       // JVMTI: we have an instanceKlass now, tell it about the cached bytes
       this_klass->set_cached_class_file(cached_class_file_bytes,
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -513,9 +513,6 @@
 //
 // for Emacs: (let ((c-backslash-column 120) (c-backslash-max-column 120)) (c-backslash-region (point) (point-max) nil t))
 #define VM_INTRINSICS_DO(do_intrinsic, do_class, do_name, do_signature, do_alias)                                       \
-  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,      F_R)   \
-  /*    (symbol object_initializer_name defined above) */                                                               \
-                                                                                                                        \
   do_intrinsic(_hashCode,                 java_lang_Object,       hashCode_name, void_int_signature,             F_R)   \
    do_name(     hashCode_name,                                   "hashCode")                                            \
   do_intrinsic(_getClass,                 java_lang_Object,       getClass_name, void_class_signature,           F_R)   \
@@ -635,9 +632,6 @@
   do_intrinsic(_equalsC,                  java_util_Arrays,       equals_name,    equalsC_signature,             F_S)   \
    do_signature(equalsC_signature,                               "([C[C)Z")                                             \
                                                                                                                         \
-  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
-  /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
-                                                                                                                        \
   do_intrinsic(_compareTo,                java_lang_String,       compareTo_name, string_int_signature,          F_R)   \
    do_name(     compareTo_name,                                  "compareTo")                                           \
   do_intrinsic(_indexOf,                  java_lang_String,       indexOf_name, string_int_signature,            F_R)   \
@@ -656,8 +650,6 @@
    do_name(     attemptUpdate_name,                                 "attemptUpdate")                                    \
    do_signature(attemptUpdate_signature,                            "(JJ)Z")                                            \
                                                                                                                         \
-  do_intrinsic(_fillInStackTrace,         java_lang_Throwable, fillInStackTrace_name, void_throwable_signature,  F_RNY) \
-                                                                                                                        \
   /* support for sun.misc.Unsafe */                                                                                     \
   do_class(sun_misc_Unsafe,               "sun/misc/Unsafe")                                                            \
                                                                                                                         \
@@ -819,10 +811,22 @@
    do_name(     prefetchReadStatic_name,                         "prefetchReadStatic")                                  \
   do_intrinsic(_prefetchWriteStatic,      sun_misc_Unsafe,        prefetchWriteStatic_name, prefetch_signature,  F_SN)  \
    do_name(     prefetchWriteStatic_name,                        "prefetchWriteStatic")                                 \
+    /*== LAST_COMPILER_INLINE*/                                                                                         \
+    /*the compiler does have special inlining code for these; bytecode inline is just fine */                           \
+                                                                                                                        \
+  do_intrinsic(_fillInStackTrace,         java_lang_Throwable, fillInStackTrace_name, void_throwable_signature,  F_RNY) \
+                                                                                                                        \
+  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,      F_R)   \
+  /*    (symbol object_initializer_name defined above) */                                                               \
+                                                                                                                        \
+  do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
+  /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
+                                                                                                                        \
     /*end*/
 
 
 
+
 // Class vmSymbols
 
 class vmSymbols: AllStatic {
@@ -935,6 +939,7 @@
     #undef VM_INTRINSIC_ENUM
 
     ID_LIMIT,
+    LAST_COMPILER_INLINE = _prefetchWriteStatic,
     FIRST_ID = _none + 1
   };
 
@@ -972,4 +977,7 @@
   static Flags              flags_for(ID id);
 
   static const char* short_name_as_C_string(ID id, char* buf, int size);
+
+  // Access to intrinsic methods:
+  static methodOop method_for(ID id);
 };
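
The three intrinsics moved here (_fillInStackTrace, _Object_init, _invoke) now sit after the new LAST_COMPILER_INLINE marker: they keep intrinsic IDs the runtime can recognize, and the new vmSymbols::method_for(ID) declaration gives the runtime a way to map an ID back to a methodOop, but C2 no longer tries to build a special inline for them. A minimal sketch of the intended gate, mirroring the compile.cpp hunk later in this changeset (illustrative only):

static bool wants_compiler_intrinsic(vmIntrinsics::ID id) {
  // IDs at or below the marker (== _prefetchWriteStatic) get compiler inlining;
  // the ones declared after it are bytecode-inlined or handled by the runtime.
  return id != vmIntrinsics::_none &&
         id <= vmIntrinsics::LAST_COMPILER_INLINE;
}
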
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -379,7 +379,15 @@
         if ( loc != NULL ) {
           oop *base_loc    = fr->oopmapreg_to_location(omv.content_reg(), reg_map);
           oop *derived_loc = loc;
-          derived_oop_fn(base_loc, derived_loc);
+          oop val = *base_loc;
+          if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
+            // Ignore NULL oops and decoded NULL narrow oops which are
+            // equal to Universe::narrow_oop_base when a narrow oop
+            // implicit null check is used in compiled code.
+            // The narrow_oop_base could be NULL or be the address
+            // of the page below the heap, depending on the compressed oops mode.
+          } else
+            derived_oop_fn(base_loc, derived_loc);
         }
         oms.next();
       }  while (!oms.is_done());
@@ -394,6 +402,15 @@
       oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
       if ( loc != NULL ) {
         if ( omv.type() == OopMapValue::oop_value ) {
+          oop val = *loc;
+          if (val == (oop)NULL || Universe::is_narrow_oop_base(val)) {
+            // Ignore NULL oops and decoded NULL narrow oops which are
+            // equal to Universe::narrow_oop_base when a narrow oop
+            // implicit null check is used in compiled code.
+            // The narrow_oop_base could be NULL or be the address
+            // of the page below the heap, depending on the compressed oops mode.
+            continue;
+          }
 #ifdef ASSERT
           if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
              !Universe::heap()->is_in_or_null(*loc)) {
@@ -410,6 +427,8 @@
 #endif // ASSERT
           oop_fn->do_oop(loc);
         } else if ( omv.type() == OopMapValue::value_value ) {
+          assert((*loc) == (oop)NULL || !Universe::is_narrow_oop_base(*loc),
+                 "found invalid value pointer");
           value_fn->do_oop(loc);
         } else if ( omv.type() == OopMapValue::narrowoop_value ) {
           narrowOop *nl = (narrowOop*)loc;
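
The new filter is needed because decoding a NULL narrowOop without the explicit NULL test (which compiled code omits when an implicit null check is used) yields the narrow-oop base rather than NULL. A minimal sketch of the arithmetic behind the comment (simplified, assumed helper; not HotSpot code):

#include <stdint.h>
typedef uint32_t narrowOop;

// decode(0) == base + (0 << shift) == base, so a "decoded NULL" compares equal
// to Universe::narrow_oop_base() and must not be passed to oop closures.
static inline void* decode_sketch(narrowOop v, uint8_t* base, int shift) {
  return base + ((uintptr_t)v << shift);
}
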
--- a/hotspot/src/share/vm/compiler/oopMap.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/compiler/oopMap.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -233,6 +233,10 @@
   int heap_size() const;
   void copy_to(address addr);
 
+  // Methods oops_do() and all_do() filter out NULL oops and oops
+  // equal to Universe::narrow_oop_base() before passing them
+  // to closures.
+
   // Iterates through frame for a compiled method
   static void oops_do            (const frame* fr,
                                   const RegisterMap* reg_map, OopClosure* f);
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -273,6 +273,7 @@
   compute_index_maps();
 
   if (RegisterFinalizersAtInit && _klass->name() == vmSymbols::java_lang_Object()) {
+    bool did_rewrite = false;
     int i = _methods->length();
     while (i-- > 0) {
       methodOop method = (methodOop)_methods->obj_at(i);
@@ -281,9 +282,11 @@
         // object for finalization if needed.
         methodHandle m(THREAD, method);
         rewrite_Object_init(m, CHECK);
+        did_rewrite = true;
         break;
       }
     }
+    assert(did_rewrite, "must find Object::<init> to rewrite it");
   }
 
   // rewrite methods, in two passes
--- a/hotspot/src/share/vm/memory/universe.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/memory/universe.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -343,6 +343,7 @@
   // For UseCompressedOops
   static address* narrow_oop_base_addr()              { return &_narrow_oop._base; }
   static address  narrow_oop_base()                   { return  _narrow_oop._base; }
+  static bool  is_narrow_oop_base(void* addr)         { return (narrow_oop_base() == (address)addr); }
   static int      narrow_oop_shift()                  { return  _narrow_oop._shift; }
   static void     set_narrow_oop_base(address base)   { _narrow_oop._base  = base; }
   static void     set_narrow_oop_shift(int shift)     { _narrow_oop._shift = shift; }
--- a/hotspot/src/share/vm/oops/methodKlass.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -68,7 +68,7 @@
   m->set_constants(NULL);
   m->set_max_stack(0);
   m->set_max_locals(0);
-  m->clear_intrinsic_id_cache();
+  m->set_intrinsic_id(vmIntrinsics::_none);
   m->set_method_data(NULL);
   m->set_interpreter_throwout_count(0);
   m->set_vtable_index(methodOopDesc::garbage_vtable_index);
--- a/hotspot/src/share/vm/oops/methodOop.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -962,26 +962,39 @@
   return newm;
 }
 
-vmIntrinsics::ID methodOopDesc::compute_intrinsic_id() const {
-  assert(vmIntrinsics::_none == 0, "correct coding of default case");
-  const uintptr_t max_cache_uint = right_n_bits((int)(sizeof(_intrinsic_id_cache) * BitsPerByte));
-  assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_cache_uint, "else fix cache size");
+vmSymbols::SID methodOopDesc::klass_id_for_intrinsics(klassOop holder) {
   // if loader is not the default loader (i.e., != NULL), we can't know the intrinsics
   // because we are not loading from core libraries
-  if (instanceKlass::cast(method_holder())->class_loader() != NULL) return vmIntrinsics::_none;
+  if (instanceKlass::cast(holder)->class_loader() != NULL)
+    return vmSymbols::NO_SID;   // regardless of name, no intrinsics here
 
   // see if the klass name is well-known:
-  symbolOop klass_name    = instanceKlass::cast(method_holder())->name();
-  vmSymbols::SID klass_id = vmSymbols::find_sid(klass_name);
-  if (klass_id == vmSymbols::NO_SID)  return vmIntrinsics::_none;
+  symbolOop klass_name = instanceKlass::cast(holder)->name();
+  return vmSymbols::find_sid(klass_name);
+}
+
+void methodOopDesc::init_intrinsic_id() {
+  assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
+  const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
+  assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
+
+  // the klass name is well-known:
+  vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
+  assert(klass_id != vmSymbols::NO_SID, "caller responsibility");
 
   // ditto for method and signature:
   vmSymbols::SID  name_id = vmSymbols::find_sid(name());
-  if (name_id  == vmSymbols::NO_SID)  return vmIntrinsics::_none;
+  if (name_id  == vmSymbols::NO_SID)  return;
   vmSymbols::SID   sig_id = vmSymbols::find_sid(signature());
-  if (sig_id   == vmSymbols::NO_SID)  return vmIntrinsics::_none;
+  if (sig_id   == vmSymbols::NO_SID)  return;
   jshort flags = access_flags().as_short();
 
+  vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+  if (id != vmIntrinsics::_none) {
+    set_intrinsic_id(id);
+    return;
+  }
+
   // A few slightly irregular cases:
   switch (klass_id) {
   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_StrictMath):
@@ -992,15 +1005,18 @@
     case vmSymbols::VM_SYMBOL_ENUM_NAME(sqrt_name):
       // pretend it is the corresponding method in the non-strict class:
       klass_id = vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_Math);
+      id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
       break;
     }
   }
 
-  // return intrinsic id if any
-  return vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
+  if (id != vmIntrinsics::_none) {
+    // Set up its iid.  It is an alias method.
+    set_intrinsic_id(id);
+    return;
+  }
 }
 
-
 // These two methods are static since a GC may move the methodOopDesc
 bool methodOopDesc::load_signature_classes(methodHandle m, TRAPS) {
   bool sig_is_loaded = true;
--- a/hotspot/src/share/vm/oops/methodOop.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/oops/methodOop.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -104,7 +104,7 @@
   u2                _max_stack;                  // Maximum number of entries on the expression stack
   u2                _max_locals;                 // Number of local variables used by this method
   u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
-  u1                _intrinsic_id_cache;         // Cache for intrinsic_id; 0 or 1+vmInt::ID
+  u1                _intrinsic_id;               // vmIntrinsics::ID (0 == _none)
   u1                _highest_tier_compile;       // Highest compile level this method has ever seen.
   u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   u2                _number_of_breakpoints;      // fullspeed debugging support
@@ -224,8 +224,6 @@
   int highest_tier_compile()                     { return _highest_tier_compile;}
   void set_highest_tier_compile(int level)      { _highest_tier_compile = level;}
 
-  void clear_intrinsic_id_cache() { _intrinsic_id_cache = 0; }
-
   // Count of times method was exited via exception while interpreting
   void interpreter_throwout_increment() {
     if (_interpreter_throwout_count < 65534) {
@@ -571,18 +569,12 @@
   void set_cached_itable_index(int index)           { instanceKlass::cast(method_holder())->set_cached_itable_index(method_idnum(), index); }
 
   // Support for inlining of intrinsic methods
-  vmIntrinsics::ID intrinsic_id() const { // returns zero if not an intrinsic
-    const u1& cache = _intrinsic_id_cache;
-    if (cache != 0) {
-      return (vmIntrinsics::ID)(cache - 1);
-    } else {
-      vmIntrinsics::ID id = compute_intrinsic_id();
-      *(u1*)&cache = ((u1) id) + 1;   // force the cache to be non-const
-      vmIntrinsics::verify_method(id, (methodOop) this);
-      assert((vmIntrinsics::ID)(cache - 1) == id, "proper conversion");
-      return id;
-    }
-  }
+  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id;           }
+  void     set_intrinsic_id(vmIntrinsics::ID id) {                           _intrinsic_id = (u1) id; }
+
+  // Helper routines for intrinsic_id() and vmIntrinsics::method().
+  void init_intrinsic_id();     // updates from _none if a match
+  static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
 
   // On-stack replacement support
   bool has_osr_nmethod()                         { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
@@ -635,9 +627,6 @@
   void set_size_of_parameters(int size)          { _size_of_parameters = size; }
  private:
 
-  // Helper routine for intrinsic_id().
-  vmIntrinsics::ID compute_intrinsic_id() const;
-
   // Inlined elements
   address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
   address* signature_handler_addr() const        { return native_function_addr() + 1; }
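
The removed accessor above shows the retired lazy encoding: the cache byte stored id + 1 so that 0 could mean "not computed yet", and the first query paid for compute_intrinsic_id(). The new _intrinsic_id field stores the id directly (0 == vmIntrinsics::_none) and is filled in eagerly by init_intrinsic_id() during class file parsing. A tiny illustration of the two encodings (sketch only, not HotSpot code):

#include <stdint.h>
typedef uint8_t u1;

// Old lazy cache: 0 meant "unknown"; otherwise the byte held id + 1.
static int old_cached_id(u1 cache)  { return cache == 0 ? -1 /* unknown */ : cache - 1; }

// New eager field: the byte holds the id itself; 0 is vmIntrinsics::_none.
static int new_intrinsic_id(u1 field) { return field; }
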
--- a/hotspot/src/share/vm/opto/cfgnode.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -1789,15 +1789,19 @@
 #ifdef _LP64
   // Push DecodeN down through phi.
   // The rest of phi graph will transform by split EncodeP node though phis up.
-  if (UseNewCode && UseCompressedOops && can_reshape && progress == NULL) {
+  if (UseCompressedOops && can_reshape && progress == NULL) {
     bool may_push = true;
     bool has_decodeN = false;
     Node* in_decodeN = NULL;
     for (uint i=1; i<req(); ++i) {// For all paths in
       Node *ii = in(i);
       if (ii->is_DecodeN() && ii->bottom_type() == bottom_type()) {
-        has_decodeN = true;
-        in_decodeN = ii->in(1);
+        // Note: in_decodeN is used only to define the type of the new phi.
+        // Find a non-dead path, otherwise the phi type will be wrong.
+        if (ii->in(1)->bottom_type() != Type::TOP) {
+          has_decodeN = true;
+          in_decodeN = ii->in(1);
+        }
       } else if (!ii->is_Phi()) {
         may_push = false;
       }
@@ -1805,7 +1809,6 @@
 
     if (has_decodeN && may_push) {
       PhaseIterGVN *igvn = phase->is_IterGVN();
-      // Note: in_decodeN is used only to define the type of new phi here.
       PhiNode *new_phi = PhiNode::make_blank(in(0), in_decodeN);
       uint orig_cnt = req();
       for (uint i=1; i<req(); ++i) {// For all paths in
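
For context, the block being re-enabled here (the UseNewCode guard is dropped) pushes a DecodeN below a Phi, roughly:

    Phi(DecodeN(a), DecodeN(b), ...)   ==>   DecodeN(Phi(a, b, ...))

so the decode is done once after the merge instead of on every input. The added bottom_type() != Type::TOP test is the actual fix: in_decodeN only supplies the type for the new narrow Phi, so it must be taken from a live path; typing the Phi from a dead input would give the new Phi, and hence the pushed-down DecodeN, the wrong type.
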
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -101,7 +101,8 @@
     }
   }
   // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
-  if (m->intrinsic_id() != vmIntrinsics::_none) {
+  if (m->intrinsic_id() != vmIntrinsics::_none &&
+      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
     CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
     if (cg != NULL) {
       // Save it for next time:
@@ -440,6 +441,8 @@
                   _orig_pc_slot_offset_in_bytes(0),
                   _node_bundling_limit(0),
                   _node_bundling_base(NULL),
+                  _java_calls(0),
+                  _inner_loops(0),
 #ifndef PRODUCT
                   _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                   _printer(IdealGraphPrinter::printer()),
@@ -710,6 +713,8 @@
     _code_buffer("Compile::Fill_buffer"),
     _node_bundling_limit(0),
     _node_bundling_base(NULL),
+    _java_calls(0),
+    _inner_loops(0),
 #ifndef PRODUCT
     _trace_opto_output(TraceOptoOutput),
     _printer(NULL),
@@ -1850,22 +1855,26 @@
   int  _float_count;            // count float ops requiring 24-bit precision
   int  _double_count;           // count double ops requiring more precision
   int  _java_call_count;        // count non-inlined 'java' calls
+  int  _inner_loop_count;       // count loops which need alignment
   VectorSet _visited;           // Visitation flags
   Node_List _tests;             // Set of IfNodes & PCTableNodes
 
   Final_Reshape_Counts() :
-    _call_count(0), _float_count(0), _double_count(0), _java_call_count(0),
+    _call_count(0), _float_count(0), _double_count(0),
+    _java_call_count(0), _inner_loop_count(0),
     _visited( Thread::current()->resource_area() ) { }
 
   void inc_call_count  () { _call_count  ++; }
   void inc_float_count () { _float_count ++; }
   void inc_double_count() { _double_count++; }
   void inc_java_call_count() { _java_call_count++; }
+  void inc_inner_loop_count() { _inner_loop_count++; }
 
   int  get_call_count  () const { return _call_count  ; }
   int  get_float_count () const { return _float_count ; }
   int  get_double_count() const { return _double_count; }
   int  get_java_call_count() const { return _java_call_count; }
+  int  get_inner_loop_count() const { return _inner_loop_count; }
 };
 
 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
@@ -1877,7 +1886,7 @@
 
 //------------------------------final_graph_reshaping_impl----------------------
 // Implement items 1-5 from final_graph_reshaping below.
-static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
+static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
 
   if ( n->outcnt() == 0 ) return; // dead node
   uint nop = n->Opcode();
@@ -1919,13 +1928,13 @@
   case Op_CmpF:
   case Op_CmpF3:
   // case Op_ConvL2F: // longs are split into 32-bit halves
-    fpu.inc_float_count();
+    frc.inc_float_count();
     break;
 
   case Op_ConvF2D:
   case Op_ConvD2F:
-    fpu.inc_float_count();
-    fpu.inc_double_count();
+    frc.inc_float_count();
+    frc.inc_double_count();
     break;
 
   // Count all double operations that may use FPU
@@ -1942,7 +1951,7 @@
   case Op_ConD:
   case Op_CmpD:
   case Op_CmpD3:
-    fpu.inc_double_count();
+    frc.inc_double_count();
     break;
   case Op_Opaque1:              // Remove Opaque Nodes before matching
   case Op_Opaque2:              // Remove Opaque Nodes before matching
@@ -1951,7 +1960,7 @@
   case Op_CallStaticJava:
   case Op_CallJava:
   case Op_CallDynamicJava:
-    fpu.inc_java_call_count(); // Count java call site;
+    frc.inc_java_call_count(); // Count java call site;
   case Op_CallRuntime:
   case Op_CallLeaf:
   case Op_CallLeafNoFP: {
@@ -1962,7 +1971,7 @@
     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
     if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
-      fpu.inc_call_count();   // Count the call site
+      frc.inc_call_count();   // Count the call site
     } else {                  // See if uncommon argument is shared
       Node *n = call->in(TypeFunc::Parms);
       int nop = n->Opcode();
@@ -1983,11 +1992,11 @@
   case Op_StoreD:
   case Op_LoadD:
   case Op_LoadD_unaligned:
-    fpu.inc_double_count();
+    frc.inc_double_count();
     goto handle_mem;
   case Op_StoreF:
   case Op_LoadF:
-    fpu.inc_float_count();
+    frc.inc_float_count();
     goto handle_mem;
 
   case Op_StoreB:
@@ -2324,6 +2333,12 @@
       n->subsume_by(btp);
     }
     break;
+  case Op_Loop:
+  case Op_CountedLoop:
+    if (n->as_Loop()->is_inner_loop()) {
+      frc.inc_inner_loop_count();
+    }
+    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
@@ -2332,17 +2347,17 @@
 
   // Collect CFG split points
   if (n->is_MultiBranch())
-    fpu._tests.push(n);
+    frc._tests.push(n);
 }
 
 //------------------------------final_graph_reshaping_walk---------------------
 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
 // requires that the walk visits a node's inputs before visiting the node.
-static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &fpu ) {
+static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
   ResourceArea *area = Thread::current()->resource_area();
   Unique_Node_List sfpt(area);
 
-  fpu._visited.set(root->_idx); // first, mark node as visited
+  frc._visited.set(root->_idx); // first, mark node as visited
   uint cnt = root->req();
   Node *n = root;
   uint  i = 0;
@@ -2351,7 +2366,7 @@
       // Place all non-visited non-null inputs onto stack
       Node* m = n->in(i);
       ++i;
-      if (m != NULL && !fpu._visited.test_set(m->_idx)) {
+      if (m != NULL && !frc._visited.test_set(m->_idx)) {
         if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
           sfpt.push(m);
         cnt = m->req();
@@ -2361,7 +2376,7 @@
       }
     } else {
       // Now do post-visit work
-      final_graph_reshaping_impl( n, fpu );
+      final_graph_reshaping_impl( n, frc );
       if (nstack.is_empty())
         break;             // finished
       n = nstack.node();   // Get node from stack
@@ -2442,16 +2457,16 @@
     return true;
   }
 
-  Final_Reshape_Counts fpu;
+  Final_Reshape_Counts frc;
 
   // Visit everybody reachable!
   // Allocate stack of size C->unique()/2 to avoid frequent realloc
   Node_Stack nstack(unique() >> 1);
-  final_graph_reshaping_walk(nstack, root(), fpu);
+  final_graph_reshaping_walk(nstack, root(), frc);
 
   // Check for unreachable (from below) code (i.e., infinite loops).
-  for( uint i = 0; i < fpu._tests.size(); i++ ) {
-    MultiBranchNode *n = fpu._tests[i]->as_MultiBranch();
+  for( uint i = 0; i < frc._tests.size(); i++ ) {
+    MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
     // Get number of CFG targets.
     // Note that PCTables include exception targets after calls.
     uint required_outcnt = n->required_outcnt();
@@ -2497,7 +2512,7 @@
     // Check that I actually visited all kids.  Unreached kids
     // must be infinite loops.
     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
-      if (!fpu._visited.test(n->fast_out(j)->_idx)) {
+      if (!frc._visited.test(n->fast_out(j)->_idx)) {
         record_method_not_compilable("infinite loop");
         return true;            // Found unvisited kid; must be unreach
       }
@@ -2506,13 +2521,14 @@
   // If original bytecodes contained a mixture of floats and doubles
   // check if the optimizer has made it homogenous, item (3).
   if( Use24BitFPMode && Use24BitFP &&
-      fpu.get_float_count() > 32 &&
-      fpu.get_double_count() == 0 &&
-      (10 * fpu.get_call_count() < fpu.get_float_count()) ) {
+      frc.get_float_count() > 32 &&
+      frc.get_double_count() == 0 &&
+      (10 * frc.get_call_count() < frc.get_float_count()) ) {
     set_24_bit_selection_and_mode( false,  true );
   }
 
-  set_has_java_calls(fpu.get_java_call_count() > 0);
+  set_java_calls(frc.get_java_call_count());
+  set_inner_loops(frc.get_inner_loop_count());
 
   // No infinite loops, no reason to bail out.
   return false;
--- a/hotspot/src/share/vm/opto/compile.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -223,7 +223,8 @@
   PhaseCFG*             _cfg;                   // Results of CFG finding
   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
   bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
-  bool                  _has_java_calls;        // True if the method has java calls
+  int                   _java_calls;            // Number of java calls in the method
+  int                   _inner_loops;           // Number of inner loops in the method
   Matcher*              _matcher;               // Engine to map ideal to machine instructions
   PhaseRegAlloc*        _regalloc;              // Results of register allocation.
   int                   _frame_slots;           // Size of total frame in stack slots
@@ -505,7 +506,9 @@
   PhaseCFG*         cfg()                       { return _cfg; }
   bool              select_24_bit_instr() const { return _select_24_bit_instr; }
   bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
-  bool              has_java_calls() const      { return _has_java_calls; }
+  bool              has_java_calls() const      { return _java_calls > 0; }
+  int               java_calls() const          { return _java_calls; }
+  int               inner_loops() const         { return _inner_loops; }
   Matcher*          matcher()                   { return _matcher; }
   PhaseRegAlloc*    regalloc()                  { return _regalloc; }
   int               frame_slots() const         { return _frame_slots; }
@@ -532,7 +535,8 @@
     _in_24_bit_fp_mode   = mode;
   }
 
-  void set_has_java_calls(bool z) { _has_java_calls = z; }
+  void  set_java_calls(int z) { _java_calls  = z; }
+  void set_inner_loops(int z) { _inner_loops = z; }
 
   // Instruction bits passed off to the VM
   int               code_size()                 { return _method_size; }
--- a/hotspot/src/share/vm/opto/escape.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -578,11 +578,24 @@
   if (phi_alias_idx == alias_idx) {
     return orig_phi;
   }
-  // have we already created a Phi for this alias index?
+  // Have we recently created a Phi for this alias index?
   PhiNode *result = get_map_phi(orig_phi->_idx);
   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
     return result;
   }
+  // Previous check may fail when the same wide memory Phi was split into Phis
+  // for different memory slices. Search all Phis for this region.
+  if (result != NULL) {
+    Node* region = orig_phi->in(0);
+    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
+      Node* phi = region->fast_out(i);
+      if (phi->is_Phi() &&
+          C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
+        assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
+        return phi->as_Phi();
+      }
+    }
+  }
   if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
     if (C->do_escape_analysis() == true && !C->failing()) {
       // Retry compilation without escape analysis.
@@ -595,6 +608,7 @@
   orig_phi_worklist.append_if_missing(orig_phi);
   const TypePtr *atype = C->get_adr_type(alias_idx);
   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
+  C->copy_node_notes_to(result, orig_phi);
   set_map_phi(orig_phi->_idx, result);
   igvn->set_type(result, result->bottom_type());
   record_for_optimizer(result);
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -1373,11 +1373,12 @@
   return st;
 }
 
+
 void GraphKit::pre_barrier(Node* ctl,
                            Node* obj,
                            Node* adr,
-                           uint adr_idx,
-                           Node *val,
+                           uint  adr_idx,
+                           Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt) {
   BarrierSet* bs = Universe::heap()->barrier_set();
@@ -1385,7 +1386,7 @@
   switch (bs->kind()) {
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
-        g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
+      g1_write_barrier_pre(obj, adr, adr_idx, val, val_type, bt);
       break;
 
     case BarrierSet::CardTableModRef:
@@ -1404,8 +1405,8 @@
                             Node* store,
                             Node* obj,
                             Node* adr,
-                            uint adr_idx,
-                            Node *val,
+                            uint  adr_idx,
+                            Node* val,
                             BasicType bt,
                             bool use_precise) {
   BarrierSet* bs = Universe::heap()->barrier_set();
@@ -1413,7 +1414,7 @@
   switch (bs->kind()) {
     case BarrierSet::G1SATBCT:
     case BarrierSet::G1SATBCTLogging:
-        g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
+      g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
       break;
 
     case BarrierSet::CardTableModRef:
@@ -1431,42 +1432,36 @@
   }
 }
 
-Node* GraphKit::store_oop_to_object(Node* ctl,
-                                    Node* obj,
-                                    Node* adr,
-                                    const TypePtr* adr_type,
-                                    Node *val,
-                                    const TypeOopPtr* val_type,
-                                    BasicType bt) {
+Node* GraphKit::store_oop(Node* ctl,
+                          Node* obj,
+                          Node* adr,
+                          const TypePtr* adr_type,
+                          Node* val,
+                          const TypeOopPtr* val_type,
+                          BasicType bt,
+                          bool use_precise) {
+
+  set_control(ctl);
+  if (stopped()) return top(); // Dead path ?
+
+  assert(bt == T_OBJECT, "sanity");
+  assert(val != NULL, "not dead path");
   uint adr_idx = C->get_alias_index(adr_type);
-  Node* store;
-  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-  store = store_to_memory(control(), adr, val, bt, adr_idx);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, false);
+  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
+
+  pre_barrier(control(), obj, adr, adr_idx, val, val_type, bt);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
+  post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }
 
-Node* GraphKit::store_oop_to_array(Node* ctl,
-                                   Node* obj,
-                                   Node* adr,
-                                   const TypePtr* adr_type,
-                                   Node *val,
-                                   const TypeOopPtr* val_type,
-                                   BasicType bt) {
-  uint adr_idx = C->get_alias_index(adr_type);
-  Node* store;
-  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-  store = store_to_memory(control(), adr, val, bt, adr_idx);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
-  return store;
-}
-
+// Could be an array or object we don't know at compile time (unsafe ref.)
 Node* GraphKit::store_oop_to_unknown(Node* ctl,
-                                     Node* obj,
-                                     Node* adr,
-                                     const TypePtr* adr_type,
-                                     Node *val,
-                                     BasicType bt) {
+                             Node* obj,   // containing obj
+                             Node* adr,  // actual address to store val at
+                             const TypePtr* adr_type,
+                             Node* val,
+                             BasicType bt) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {
@@ -1485,12 +1480,7 @@
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-
-  uint adr_idx = at->index();
-  pre_barrier(ctl, obj, adr, adr_idx, val, val_type, bt);
-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
-  post_barrier(control(), store, obj, adr, adr_idx, val, bt, true);
-  return store;
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
 }
 
 
@@ -1804,93 +1794,6 @@
 }
 
 
-//------------------------------store_barrier----------------------------------
-// Insert a write-barrier store.  This is to let generational GC work; we have
-// to flag all oop-stores before the next GC point.
-void GraphKit::write_barrier_post(Node* oop_store, Node* obj, Node* adr,
-                                  Node* val, bool use_precise) {
-  // No store check needed if we're storing a NULL or an old object
-  // (latter case is probably a string constant). The concurrent
-  // mark sweep garbage collector, however, needs to have all nonNull
-  // oop updates flagged via card-marks.
-  if (val != NULL && val->is_Con()) {
-    // must be either an oop or NULL
-    const Type* t = val->bottom_type();
-    if (t == TypePtr::NULL_PTR || t == Type::TOP)
-      // stores of null never (?) need barriers
-      return;
-    ciObject* con = t->is_oopptr()->const_oop();
-    if (con != NULL
-        && con->is_perm()
-        && Universe::heap()->can_elide_permanent_oop_store_barriers())
-      // no store barrier needed, because no old-to-new ref created
-      return;
-  }
-
-  if (use_ReduceInitialCardMarks()
-      && obj == just_allocated_object(control())) {
-    // We can skip marks on a freshly-allocated object.
-    // Keep this code in sync with do_eager_card_mark in runtime.cpp.
-    // That routine eagerly marks the occasional object which is produced
-    // by the slow path, so that we don't have to do it here.
-    return;
-  }
-
-  if (!use_precise) {
-    // All card marks for a (non-array) instance are in one place:
-    adr = obj;
-  }
-  // (Else it's an array (or unknown), and we want more precise card marks.)
-  assert(adr != NULL, "");
-
-  // Get the alias_index for raw card-mark memory
-  int adr_type = Compile::AliasIdxRaw;
-  // Convert the pointer to an int prior to doing math on it
-  Node* cast = _gvn.transform(new (C, 2) CastP2XNode(control(), adr));
-  // Divide by card size
-  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
-         "Only one we handle so far.");
-  CardTableModRefBS* ct =
-    (CardTableModRefBS*)(Universe::heap()->barrier_set());
-  Node *b = _gvn.transform(new (C, 3) URShiftXNode( cast, _gvn.intcon(CardTableModRefBS::card_shift) ));
-  // We store into a byte array, so do not bother to left-shift by zero
-  Node *c = byte_map_base_node();
-  // Combine
-  Node *sb_ctl = control();
-  Node *sb_adr = _gvn.transform(new (C, 4) AddPNode( top()/*no base ptr*/, c, b ));
-  Node *sb_val = _gvn.intcon(0);
-  // Smash zero into card
-  if( !UseConcMarkSweepGC ) {
-    BasicType bt = T_BYTE;
-    store_to_memory(sb_ctl, sb_adr, sb_val, bt, adr_type);
-  } else {
-    // Specialized path for CM store barrier
-    cms_card_mark( sb_ctl, sb_adr, sb_val, oop_store);
-  }
-}
-
-// Specialized path for CMS store barrier
-void GraphKit::cms_card_mark(Node* ctl, Node* adr, Node* val, Node *oop_store) {
-  BasicType bt = T_BYTE;
-  int adr_idx = Compile::AliasIdxRaw;
-  Node* mem = memory(adr_idx);
-
-  // The type input is NULL in PRODUCT builds
-  const TypePtr* type = NULL;
-  debug_only(type = C->get_adr_type(adr_idx));
-
-  // Add required edge to oop_store, optimizer does not support precedence edges.
-  // Convert required edge to precedence edge before allocation.
-  Node *store = _gvn.transform( new (C, 5) StoreCMNode(ctl, mem, adr, type, val, oop_store) );
-  set_memory(store, adr_idx);
-
-  // For CMS, back-to-back card-marks can only remove the first one
-  // and this requires DU info.  Push on worklist for optimizer.
-  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
-    record_for_igvn(store);
-}
-
-
 void GraphKit::round_double_arguments(ciMethod* dest_method) {
   // (Note:  TypeFunc::make has a cache that makes this fast.)
   const TypeFunc* tf    = TypeFunc::make(dest_method);
@@ -3215,6 +3118,79 @@
   return NULL;
 }
 
+//----------------------------- store barriers ----------------------------
+#define __ ideal.
+
+void GraphKit::sync_kit(IdealKit& ideal) {
+  // Final sync IdealKit and graphKit.
+  __ drain_delay_transform();
+  set_all_memory(__ merged_memory());
+  set_control(__ ctrl());
+}
+
+// vanilla/CMS post barrier
+// Insert a write-barrier store.  This is to let generational GC work; we have
+// to flag all oop-stores before the next GC point.
+void GraphKit::write_barrier_post(Node* oop_store,
+                                  Node* obj,
+                                  Node* adr,
+                                  Node* val,
+                                  bool use_precise) {
+  // No store check needed if we're storing a NULL or an old object
+  // (latter case is probably a string constant). The concurrent
+  // mark sweep garbage collector, however, needs to have all nonNull
+  // oop updates flagged via card-marks.
+  if (val != NULL && val->is_Con()) {
+    // must be either an oop or NULL
+    const Type* t = val->bottom_type();
+    if (t == TypePtr::NULL_PTR || t == Type::TOP)
+      // stores of null never (?) need barriers
+      return;
+    ciObject* con = t->is_oopptr()->const_oop();
+    if (con != NULL
+        && con->is_perm()
+        && Universe::heap()->can_elide_permanent_oop_store_barriers())
+      // no store barrier needed, because no old-to-new ref created
+      return;
+  }
+
+  if (!use_precise) {
+    // All card marks for a (non-array) instance are in one place:
+    adr = obj;
+  }
+  // (Else it's an array (or unknown), and we want more precise card marks.)
+  assert(adr != NULL, "");
+
+  IdealKit ideal(gvn(), control(), merged_memory(), true);
+
+  // Convert the pointer to an int prior to doing math on it
+  Node* cast = __ CastPX(__ ctrl(), adr);
+
+  // Divide by card size
+  assert(Universe::heap()->barrier_set()->kind() == BarrierSet::CardTableModRef,
+         "Only one we handle so far.");
+  Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+
+  // Combine card table base and card offset
+  Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
+
+  // Get the alias_index for raw card-mark memory
+  int adr_type = Compile::AliasIdxRaw;
+  // Smash zero into card
+  Node*   zero = __ ConI(0);
+  BasicType bt = T_BYTE;
+  if( !UseConcMarkSweepGC ) {
+    __ store(__ ctrl(), card_adr, zero, bt, adr_type);
+  } else {
+    // Specialized path for CM store barrier
+    __ storeCM(__ ctrl(), card_adr, zero, oop_store, bt, adr_type);
+  }
+
+  // Final sync IdealKit and GraphKit.
+  sync_kit(ideal);
+}
+
+// G1 pre/post barriers
 void GraphKit::g1_write_barrier_pre(Node* obj,
                                     Node* adr,
                                     uint alias_idx,
@@ -3222,10 +3198,8 @@
                                     const TypeOopPtr* val_type,
                                     BasicType bt) {
   IdealKit ideal(gvn(), control(), merged_memory(), true);
-#define __ ideal.
-  __ declares_done();
-
-  Node* thread = __ thread();
+
+  Node* tls = __ thread(); // ThreadLocalStorage
 
   Node* no_ctrl = NULL;
   Node* no_base = __ top();
@@ -3248,9 +3222,9 @@
 
   // set_control( ctl);
 
-  Node* marking_adr = __ AddP(no_base, thread, __ ConX(marking_offset));
-  Node* buffer_adr  = __ AddP(no_base, thread, __ ConX(buffer_offset));
-  Node* index_adr   = __ AddP(no_base, thread, __ ConX(index_offset));
+  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
+  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
+  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));
 
   // Now some of the values
 
@@ -3278,55 +3252,52 @@
         Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
         Node* next_indexX = next_index;
 #ifdef _LP64
-          // We could refine the type for what it's worth
-          // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-          next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
+        // We could refine the type for what it's worth
+        // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
+        next_indexX = _gvn.transform( new (C, 2) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
+#endif
 
         // Now get the buffer location we will log the original value into and store it
-
         Node *log_addr = __ AddP(no_base, buffer, next_indexX);
-        // __ store(__ ctrl(), log_addr, orig, T_OBJECT, C->get_alias_index(TypeOopPtr::BOTTOM));
         __ store(__ ctrl(), log_addr, orig, T_OBJECT, Compile::AliasIdxRaw);
 
-
         // update the index
-        // __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
-        // This is a hack to force this store to occur before the oop store that is coming up
-        __ store(__ ctrl(), index_adr, next_index, T_INT, C->get_alias_index(TypeOopPtr::BOTTOM));
+        __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
 
       } __ else_(); {
 
         // logging buffer is full, call the runtime
         const TypeFunc *tf = OptoRuntime::g1_wb_pre_Type();
-        // __ make_leaf_call(tf, OptoRuntime::g1_wb_pre_Java(), "g1_wb_pre", orig, thread);
-        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, thread);
-      } __ end_if();
-    } __ end_if();
-  } __ end_if();
-
-  __ drain_delay_transform();
-  set_control( __ ctrl());
-  set_all_memory( __ merged_memory());
-
-#undef __
+        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", orig, tls);
+      } __ end_if();  // (!index)
+    } __ end_if();  // (orig != NULL)
+  } __ end_if();  // (!marking)
+
+  // Final sync IdealKit and GraphKit.
+  sync_kit(ideal);
 }
 
 //
 // Update the card table and add card address to the queue
 //
-void GraphKit::g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store,  Node* index, Node* index_adr, Node* buffer, const TypeFunc* tf) {
-#define __ ideal->
+void GraphKit::g1_mark_card(IdealKit& ideal,
+                            Node* card_adr,
+                            Node* oop_store,
+                            Node* index,
+                            Node* index_adr,
+                            Node* buffer,
+                            const TypeFunc* tf) {
+
   Node* zero = __ ConI(0);
   Node* no_base = __ top();
   BasicType card_bt = T_BYTE;
   // Smash zero into card. MUST BE ORDERED WRT TO STORE
-  __ storeCM(__ ctrl(), card_adr, zero, store, card_bt, Compile::AliasIdxRaw);
+  __ storeCM(__ ctrl(), card_adr, zero, oop_store, card_bt, Compile::AliasIdxRaw);
 
   //  Now do the queue work
   __ if_then(index, BoolTest::ne, zero); {
 
-    Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
+    Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
     Node* next_indexX = next_index;
 #ifdef _LP64
     // We could refine the type for what it's worth
@@ -3341,10 +3312,10 @@
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
   } __ end_if();
-#undef __
+
 }
 
-void GraphKit::g1_write_barrier_post(Node* store,
+void GraphKit::g1_write_barrier_post(Node* oop_store,
                                      Node* obj,
                                      Node* adr,
                                      uint alias_idx,
@@ -3369,10 +3340,8 @@
   assert(adr != NULL, "");
 
   IdealKit ideal(gvn(), control(), merged_memory(), true);
-#define __ ideal.
-  __ declares_done();
-
-  Node* thread = __ thread();
+
+  Node* tls = __ thread(); // ThreadLocalStorage
 
   Node* no_ctrl = NULL;
   Node* no_base = __ top();
@@ -3394,8 +3363,8 @@
 
   // Pointers into the thread
 
-  Node* buffer_adr = __ AddP(no_base, thread, __ ConX(buffer_offset));
-  Node* index_adr =  __ AddP(no_base, thread, __ ConX(index_offset));
+  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
+  Node* index_adr =  __ AddP(no_base, tls, __ ConX(index_offset));
 
   // Now some values
 
@@ -3404,18 +3373,14 @@
 
 
   // Convert the store obj pointer to an int prior to doing math on it
-  // Use addr not obj gets accurate card marks
-
-  // Node* cast = __ CastPX(no_ctrl, adr /* obj */);
-
   // Must use ctrl to prevent "integerized oop" existing across safepoint
-  Node* cast =  __ CastPX(__ ctrl(), ( use_precise ? adr : obj ));
+  Node* cast =  __ CastPX(__ ctrl(), adr);
 
   // Divide pointer by card size
   Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
 
   // Combine card table base and card offset
-  Node *card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
+  Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
 
   // If we know the value being stored does it cross regions?
 
@@ -3439,18 +3404,17 @@
         Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
 
         __ if_then(card_val, BoolTest::ne, zero); {
-          g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
+          g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
         } __ end_if();
       } __ end_if();
     } __ end_if();
   } else {
-    g1_mark_card(&ideal, card_adr, store, index, index_adr, buffer, tf);
+    // Object.clone() intrinsic uses this path.
+    g1_mark_card(ideal, card_adr, oop_store, index, index_adr, buffer, tf);
   }
 
-
-  __ drain_delay_transform();
-  set_control( __ ctrl());
-  set_all_memory( __ merged_memory());
+  // Final sync IdealKit and GraphKit.
+  sync_kit(ideal);
+}
 #undef __
 
-}
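
The rewritten write_barrier_post() emits, through IdealKit, the same card-mark sequence the removed hand-rolled version built: shift the store address right by the card shift and store a zero byte into the card table. A minimal sketch of the runtime effect (assumed constants and names, not HotSpot code; in HotSpot the card shift is 9, i.e. 512-byte cards, and 0 is the value being stored here):

#include <stdint.h>

static uint8_t* byte_map_base;      // card table biased so (addr >> card_shift) indexes it;
                                    // the analogue of byte_map_base_node() above
static const int card_shift = 9;    // 2^9 = 512-byte cards

static inline void card_mark_sketch(void* store_addr) {
  byte_map_base[(uintptr_t)store_addr >> card_shift] = 0;  // "smash zero into card"
}
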
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -449,13 +449,24 @@
   //
   // If val==NULL, it is taken to be a completely unknown value. QQQ
 
+  Node* store_oop(Node* ctl,
+                  Node* obj,   // containing obj
+                  Node* adr,  // actual address to store val at
+                  const TypePtr* adr_type,
+                  Node* val,
+                  const TypeOopPtr* val_type,
+                  BasicType bt,
+                  bool use_precise);
+
   Node* store_oop_to_object(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,  // actual adress to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             const TypeOopPtr* val_type,
-                            BasicType bt);
+                            BasicType bt) {
+    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
+  }
 
   Node* store_oop_to_array(Node* ctl,
                            Node* obj,   // containing obj
@@ -463,7 +474,9 @@
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
-                           BasicType bt);
+                           BasicType bt) {
+    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
+  }
 
   // Could be an array or object we don't know at compile time (unsafe ref.)
   Node* store_oop_to_unknown(Node* ctl,
@@ -488,9 +501,6 @@
   // Return a load of array element at idx.
   Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
 
-  // CMS card-marks have an input from the corresponding oop_store
-  void  cms_card_mark(Node* ctl, Node* adr, Node* val, Node* oop_store);
-
   //---------------- Dtrace support --------------------
   void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
   void make_dtrace_method_entry(ciMethod* method) {
@@ -582,9 +592,6 @@
     return C->too_many_recompiles(method(), bci(), reason);
   }
 
-  // vanilla/CMS post barrier
-  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
-
   // Returns the object (if any) which was created the moment before.
   Node* just_allocated_object(Node* current_control);
 
@@ -593,6 +600,11 @@
             && Universe::heap()->can_elide_tlab_store_barriers());
   }
 
+  void sync_kit(IdealKit& ideal);
+
+  // vanilla/CMS post barrier
+  void write_barrier_post(Node *store, Node* obj, Node* adr, Node* val, bool use_precise);
+
   // G1 pre/post barriers
   void g1_write_barrier_pre(Node* obj,
                             Node* adr,
@@ -610,7 +622,7 @@
                              bool use_precise);
   // Helper function for g1
   private:
-  void g1_mark_card(IdealKit* ideal, Node* card_adr, Node* store,  Node* index, Node* index_adr,
+  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store,  Node* index, Node* index_adr,
                     Node* buffer, const TypeFunc* tf);
 
   public:
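
The header change above collapses store_oop_to_object() and store_oop_to_array() into inline wrappers around a single store_oop() that takes a use_precise flag (false for object fields, true for array elements). The following is a minimal, self-contained sketch of that pattern; the names and the printed card-mark text are illustrative stand-ins, not the HotSpot GraphKit API.

    #include <iostream>
    #include <string>

    // Sketch of the "one implementation, thin wrappers" refactoring above.
    // Graph, store_oop and the slot strings are stand-ins, not HotSpot code.
    struct Graph {
      void store_oop(const std::string& slot, int val, bool use_precise) {
        std::cout << "store " << val << " to " << slot
                  << (use_precise ? " [precise card mark]" : " [imprecise card mark]")
                  << '\n';
      }
      // Object fields do not need a precise card mark...
      void store_oop_to_object(const std::string& slot, int val) {
        store_oop(slot, val, /*use_precise=*/ false);
      }
      // ...array elements do, matching the two wrappers in the hunk above.
      void store_oop_to_array(const std::string& slot, int val) {
        store_oop(slot, val, /*use_precise=*/ true);
      }
    };

    int main() {
      Graph g;
      g.store_oop_to_object("obj.field", 1);
      g.store_oop_to_array("arr[3]", 2);
      return 0;
    }

Keeping the flag in one place means a change to the barrier policy touches a single function instead of every caller.
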
--- a/hotspot/src/share/vm/opto/idealKit.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/idealKit.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -34,7 +34,7 @@
 const uint IdealKit::first_var = TypeFunc::Parms + 1;
 
 //----------------------------IdealKit-----------------------------------------
-IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms) :
+IdealKit::IdealKit(PhaseGVN &gvn, Node* control, Node* mem, bool delay_all_transforms, bool has_declarations) :
   _gvn(gvn), C(gvn.C) {
   _initial_ctrl = control;
   _initial_memory = mem;
@@ -47,6 +47,9 @@
   _pending_cvstates = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
   _delay_transform  = new (C->node_arena()) GrowableArray<Node*>(C->node_arena(), init_size, 0, 0);
   DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray<int>(C->node_arena(), init_size, 0, 0));
+  if (!has_declarations) {
+     declarations_done();
+  }
 }
 
 //-------------------------------if_then-------------------------------------
@@ -97,7 +100,7 @@
 //-------------------------------end_if-------------------------------------
 // Merge the "then" and "else" cvstates.
 //
-// The if_then() pushed the current state for later use
+// The if_then() pushed a copy of the current state for later use
 // as the initial state for a future "else" clause.  The
 // current state then became the initial state for the
 // then clause.  If an "else" clause was encountered, it will
@@ -258,8 +261,8 @@
   return delay_transform(PhiNode::make(reg, n, ct));
 }
 
-//-----------------------------declares_done-----------------------------------
-void IdealKit::declares_done() {
+//-----------------------------declarations_done-------------------------------
+void IdealKit::declarations_done() {
   _cvstate = new_cvstate();   // initialize current cvstate
   set_ctrl(_initial_ctrl);    // initialize control in current cvstate
   set_all_memory(_initial_memory);// initialize memory in current cvstate
@@ -277,7 +280,9 @@
 
 //-----------------------------delay_transform-----------------------------------
 Node* IdealKit::delay_transform(Node* n) {
-  gvn().set_type(n, n->bottom_type());
+  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
+    gvn().set_type(n, n->bottom_type());
+  }
   _delay_transform->push(n);
   return n;
 }
@@ -321,7 +326,9 @@
 Node* IdealKit::memory(uint alias_idx) {
   MergeMemNode* mem = merged_memory();
   Node* p = mem->memory_at(alias_idx);
-  _gvn.set_type(p, Type::MEMORY);  // must be mapped
+  if (!gvn().is_IterGVN() || !gvn().is_IterGVN()->delay_transform()) {
+    _gvn.set_type(p, Type::MEMORY);  // must be mapped
+  }
   return p;
 }
 
@@ -462,9 +469,6 @@
   const TypePtr* adr_type = TypeRawPtr::BOTTOM;
   uint adr_idx = C->get_alias_index(adr_type);
 
-  // Clone initial memory
-  MergeMemNode* cloned_mem =  MergeMemNode::make(C, merged_memory());
-
   // Slow-path leaf call
   int size = slow_call_type->domain()->cnt();
   CallNode *call =  (CallNode*)new (C, size) CallLeafNode( slow_call_type, slow_call, leaf_name, adr_type);
@@ -489,9 +493,6 @@
 
   set_ctrl(transform( new (C, 1) ProjNode(call,TypeFunc::Control) ));
 
-  // Set the incoming clone of memory as current memory
-  set_all_memory(cloned_mem);
-
   // Make memory for the call
   Node* mem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
 
--- a/hotspot/src/share/vm/opto/idealKit.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/idealKit.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -49,7 +49,7 @@
 // Example:
 //    Node* limit = ??
 //    IdealVariable i(kit), j(kit);
-//    declares_done();
+//    declarations_done();
 //    Node* exit = make_label(1); // 1 goto
 //    set(j, ConI(0));
 //    loop(i, ConI(0), BoolTest::lt, limit); {
@@ -101,10 +101,7 @@
   Node* new_cvstate();                     // Create a new cvstate
   Node* cvstate() { return _cvstate; }     // current cvstate
   Node* copy_cvstate();                    // copy current cvstate
-  void set_ctrl(Node* ctrl) { _cvstate->set_req(TypeFunc::Control, ctrl); }
 
-  // Should this assert this is a MergeMem???
-  void set_all_memory(Node* mem){ _cvstate->set_req(TypeFunc::Memory, mem); }
   void set_memory(Node* mem, uint alias_idx );
   void do_memory_merge(Node* merging, Node* join);
   void clear(Node* m);                     // clear a cvstate
@@ -132,15 +129,17 @@
   Node* memory(uint alias_idx);
 
  public:
-  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false);
+  IdealKit(PhaseGVN &gvn, Node* control, Node* memory, bool delay_all_transforms = false, bool has_declarations = false);
   ~IdealKit() {
     stop();
     drain_delay_transform();
   }
   // Control
   Node* ctrl()                          { return _cvstate->in(TypeFunc::Control); }
+  void set_ctrl(Node* ctrl)             { _cvstate->set_req(TypeFunc::Control, ctrl); }
   Node* top()                           { return C->top(); }
   MergeMemNode* merged_memory()         { return _cvstate->in(TypeFunc::Memory)->as_MergeMem(); }
+  void set_all_memory(Node* mem)        { _cvstate->set_req(TypeFunc::Memory, mem); }
   void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); }
   Node* value(IdealVariable& v)         { return _cvstate->in(first_var + v.id()); }
   void dead(IdealVariable& v)           { set(v, (Node*)NULL); }
@@ -155,7 +154,7 @@
   Node* make_label(int goto_ct);
   void bind(Node* lab);
   void goto_(Node* lab, bool bind = false);
-  void declares_done();
+  void declarations_done();
   void drain_delay_transform();
 
   Node* IfTrue(IfNode* iff)  { return transform(new (C,1) IfTrueNode(iff)); }
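
The IdealKit constructor above gains a has_declarations flag so that callers which create IdealVariables can postpone declarations_done(), while callers with no declarations get it implicitly. Below is a small sketch of that "construct, optionally declare, then finalize" idiom, using made-up names (Builder, declare) rather than the real IdealKit interface.

    #include <cassert>
    #include <vector>

    // Stand-in for the IdealKit idiom above; not HotSpot code.
    class Builder {
      bool _finalized;
      std::vector<int> _vars;
     public:
      explicit Builder(bool has_declarations = false) : _finalized(false) {
        // No declarations expected: finalize immediately, as the new
        // constructor does when has_declarations is false.
        if (!has_declarations) declarations_done();
      }
      int declare() {                       // analogous to creating an IdealVariable
        assert(!_finalized && "declare variables before declarations_done()");
        _vars.push_back(0);
        return (int)_vars.size() - 1;
      }
      void declarations_done() { _finalized = true; }
      void set(int v, int value) { assert(_finalized); _vars[v] = value; }
    };

    int main() {
      Builder simple;                 // no declarations: ready right away
      Builder with_vars(true);        // declare first, then finalize
      int i = with_vars.declare();
      with_vars.declarations_done();
      with_vars.set(i, 42);
      return 0;
    }
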
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -378,7 +378,18 @@
 
   // Force the original merge dead
   igvn->hash_delete(r);
-  r->set_req_X(0,NULL,igvn);
+  // First, remove region's dead users.
+  for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) {
+    Node* u = r->last_out(l);
+    if( u == r ) {
+      r->set_req(0, NULL);
+    } else {
+      assert(u->outcnt() == 0, "only dead users");
+      igvn->remove_dead_node(u);
+    }
+    l -= 1;
+  }
+  igvn->remove_dead_node(r);
 
   // Now remove the bogus extra edges used to keep things alive
   igvn->remove_dead_node( hook );
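
The ifnode.cpp replacement above deletes the region's remaining users before removing the region itself, and it walks the def-use list from the last slot so that removals do not disturb the positions still to be visited. The same back-to-front idiom in plain C++, with a vector standing in for the DU list (illustrative only, not the DUIterator API):

    #include <iostream>
    #include <vector>

    int main() {
      // Pretend these are a node's users; even numbers play the "dead user" role.
      std::vector<int> users = {1, 2, 3, 4, 5};
      // Iterate from the end, as the DUIterator_Last loop above does,
      // so erasing an element never shifts the ones we have yet to visit.
      for (size_t l = users.size(); l-- > 0; ) {
        if (users[l] % 2 == 0) {
          users.erase(users.begin() + l);   // stands in for remove_dead_node()
        }
      }
      for (int u : users) std::cout << u << ' ';   // prints: 1 3 5
      std::cout << '\n';
      return 0;
    }
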
--- a/hotspot/src/share/vm/opto/library_call.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -310,11 +310,6 @@
     if (!InlineAtomicLong)  return NULL;
     break;
 
-  case vmIntrinsics::_Object_init:
-  case vmIntrinsics::_invoke:
-    // We do not intrinsify these; they are marked for other purposes.
-    return NULL;
-
   case vmIntrinsics::_getCallerClass:
     if (!UseNewReflection)  return NULL;
     if (!InlineReflectionGetCallerClass)  return NULL;
@@ -327,6 +322,8 @@
     break;
 
  default:
+    assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
+    assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
     break;
   }
 
@@ -394,18 +391,11 @@
   }
 
   if (PrintIntrinsics) {
-    switch (intrinsic_id()) {
-    case vmIntrinsics::_invoke:
-    case vmIntrinsics::_Object_init:
-      // We do not expect to inline these, so do not produce any noise about them.
-      break;
-    default:
-      tty->print("Did not inline intrinsic %s%s at bci:%d in",
-                 vmIntrinsics::name_at(intrinsic_id()),
-                 (is_virtual() ? " (virtual)" : ""), kit.bci());
-      kit.caller()->print_short_name(tty);
-      tty->print_cr(" (%d bytes)", kit.caller()->code_size());
-    }
+    tty->print("Did not inline intrinsic %s%s at bci:%d in",
+               vmIntrinsics::name_at(intrinsic_id()),
+               (is_virtual() ? " (virtual)" : ""), kit.bci());
+    kit.caller()->print_short_name(tty);
+    tty->print_cr(" (%d bytes)", kit.caller()->code_size());
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
   return NULL;
@@ -1030,7 +1020,7 @@
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
 
-  IdealKit kit(gvn(), control(), merged_memory());
+  IdealKit kit(gvn(), control(), merged_memory(), false, true);
 #define __ kit.
   Node* zero             = __ ConI(0);
   Node* one              = __ ConI(1);
@@ -1042,7 +1032,7 @@
   Node* targetOffset     = __ ConI(targetOffset_i);
   Node* sourceEnd        = __ SubI(__ AddI(sourceOffset, sourceCount), targetCountLess1);
 
-  IdealVariable rtn(kit), i(kit), j(kit); __ declares_done();
+  IdealVariable rtn(kit), i(kit), j(kit); __ declarations_done();
   Node* outer_loop = __ make_label(2 /* goto */);
   Node* return_    = __ make_label(1);
 
@@ -1079,9 +1069,9 @@
        __ bind(outer_loop);
   }__ end_loop(); __ dead(i);
   __ bind(return_);
-  __ drain_delay_transform();
-
-  set_control(__ ctrl());
+
+  // Final sync IdealKit and GraphKit.
+  sync_kit(kit);
   Node* result = __ value(rtn);
 #undef __
   C->set_has_loops(true);
@@ -2183,14 +2173,23 @@
         // of it. So we need to emit code to conditionally do the proper type of
         // store.
 
-        IdealKit kit(gvn(), control(),  merged_memory());
-        kit.declares_done();
+        IdealKit ideal(gvn(), control(),  merged_memory());
+#define __ ideal.
         // QQQ who knows what probability is here??
-        kit.if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
-          (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
-        } kit.else_(); {
-          (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
-        } kit.end_if();
+        __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
+          // Sync IdealKit and GraphKit.
+          set_all_memory( __ merged_memory());
+          set_control(__ ctrl());
+          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+          // Update IdealKit memory and control from GraphKit.
+          __ set_all_memory(merged_memory());
+          __ set_ctrl(control());
+        } __ else_(); {
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
+        } __ end_if();
+        // Final sync IdealKit and GraphKit.
+        sync_kit(ideal);
+#undef __
       }
     }
   }
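
The rewritten unsafe-store path above alternates between the two builders: it copies the IdealKit's control and memory into the GraphKit before calling store_oop_to_unknown(), copies them back afterwards, and ends with a final sync_kit(ideal). A toy sketch of that hand-off with plain structs follows; GraphSide, IdealSide and the string states are stand-ins for illustration, not the HotSpot classes.

    #include <iostream>
    #include <string>

    // Both sides keep their own view of control and memory.
    struct State { std::string ctrl, mem; };
    struct GraphSide { State s; };
    struct IdealSide { State s; };

    // Copy the IdealSide view over before using a GraphSide helper...
    void sync_to_graph(GraphSide& g, const IdealSide& i) { g.s = i.s; }
    // ...and copy the result back so the IdealSide keeps building on it.
    void sync_to_ideal(IdealSide& i, const GraphSide& g) { i.s = g.s; }

    int main() {
      GraphSide graph{{"ctrl0", "mem0"}};
      IdealSide ideal{graph.s};          // start from the GraphSide state

      ideal.s.ctrl = "if_then_taken";    // the IdealSide emits a branch
      sync_to_graph(graph, ideal);       // like set_all_memory()/set_control() above
      graph.s.mem = "mem_after_store";   // a GraphSide store updates memory
      sync_to_ideal(ideal, graph);       // like __ set_all_memory()/__ set_ctrl()

      sync_to_graph(graph, ideal);       // final sync, like sync_kit(ideal)
      std::cout << graph.s.ctrl << ' ' << graph.s.mem << '\n';
      return 0;
    }
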
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -346,7 +346,10 @@
 
     // Yes!  Reshape address expression!
     Node *inv_scale = new (C, 3) LShiftINode( add_invar, scale );
-    register_new_node( inv_scale, add_invar_ctrl );
+    Node *inv_scale_ctrl =
+      dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
+      add_invar_ctrl : scale_ctrl;
+    register_new_node( inv_scale, inv_scale_ctrl );
     Node *var_scale = new (C, 3) LShiftINode( add_var, scale );
     register_new_node( var_scale, n_ctrl );
     Node *var_add = new (C, 3) AddINode( var_scale, inv_scale );
--- a/hotspot/src/share/vm/opto/machnode.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/machnode.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -300,6 +300,12 @@
           }
         }
         adr_type = t_disp->add_offset(offset);
+      } else if( base == NULL && offset != 0 && offset != Type::OffsetBot ) {
+        // Use ideal type if it is oop ptr.
+        const TypePtr *tp = oper->type()->isa_ptr();
+        if( tp != NULL) {
+          adr_type = tp;
+        }
       }
     }
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/macro.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -198,14 +198,79 @@
 }
 
 // Eliminate a card mark sequence.  p2x is a ConvP2XNode
-void PhaseMacroExpand::eliminate_card_mark(Node *p2x) {
+void PhaseMacroExpand::eliminate_card_mark(Node* p2x) {
   assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required");
-  Node *shift = p2x->unique_out();
-  Node *addp = shift->unique_out();
-  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
-    Node *st = addp->last_out(j);
-    assert(st->is_Store(), "store required");
-    _igvn.replace_node(st, st->in(MemNode::Memory));
+  if (!UseG1GC) {
+    // vanilla/CMS post barrier
+    Node *shift = p2x->unique_out();
+    Node *addp = shift->unique_out();
+    for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
+      Node *st = addp->last_out(j);
+      assert(st->is_Store(), "store required");
+      _igvn.replace_node(st, st->in(MemNode::Memory));
+    }
+  } else {
+    // G1 pre/post barriers
+    assert(p2x->outcnt() == 2, "expects 2 users: Xor and URShift nodes");
+    // There could be only one user, the URShift node, in the Object.clone()
+    // intrinsic, but there the new allocation is passed to the arraycopy stub
+    // and cannot be scalar replaced, so we don't check that case.
+
+    // Remove G1 post barrier.
+
+    // Search for CastP2X->Xor->URShift->Cmp path which
+    // checks if the store done to a different from the value's region.
+    // And replace Cmp with #0 (false) to collapse G1 post barrier.
+    Node* xorx = NULL;
+    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
+      Node* u = p2x->fast_out(i);
+      if (u->Opcode() == Op_XorX) {
+        xorx = u;
+        break;
+      }
+    }
+    assert(xorx != NULL, "missing G1 post barrier");
+    Node* shift = xorx->unique_out();
+    Node* cmpx = shift->unique_out();
+    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
+           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
+           "missing region check in G1 post barrier");
+    _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
+
+    // Remove G1 pre barrier.
+
+    // Search "if (marking != 0)" check and set it to "false".
+    Node* this_region = p2x->in(0);
+    assert(this_region != NULL, "");
+    // There is no G1 pre barrier if previous stored value is NULL
+    // (for example, after initialization).
+    if (this_region->is_Region() && this_region->req() == 3) {
+      int ind = 1;
+      if (!this_region->in(ind)->is_IfFalse()) {
+        ind = 2;
+      }
+      if (this_region->in(ind)->is_IfFalse()) {
+        Node* bol = this_region->in(ind)->in(0)->in(1);
+        assert(bol->is_Bool(), "");
+        cmpx = bol->in(1);
+        if (bol->as_Bool()->_test._test == BoolTest::ne &&
+            cmpx->is_Cmp() && cmpx->in(2) == intcon(0) &&
+            cmpx->in(1)->is_Load()) {
+          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
+          const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
+                                              PtrQueue::byte_offset_of_active());
+          if (adr->is_AddP() && adr->in(AddPNode::Base) == top() &&
+              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
+              adr->in(AddPNode::Offset) == MakeConX(marking_offset)) {
+            _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ));
+          }
+        }
+      }
+    }
+    // Now CastP2X can be removed since it is used only on a dead path
+    // which is currently still alive until igvn optimizes it away.
+    assert(p2x->unique_out()->Opcode() == Op_URShiftX, "");
+    _igvn.replace_node(p2x, top());
   }
 }
 
@@ -760,14 +825,11 @@
           if (n->is_Store()) {
             _igvn.replace_node(n, n->in(MemNode::Memory));
           } else {
-            assert( n->Opcode() == Op_CastP2X, "CastP2X required");
             eliminate_card_mark(n);
           }
           k -= (oc2 - use->outcnt());
         }
       } else {
-        assert( !use->is_SafePoint(), "safepoint uses must have been already elimiated");
-        assert( use->Opcode() == Op_CastP2X, "CastP2X required");
         eliminate_card_mark(use);
       }
       j -= (oc1 - res->outcnt());
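
The G1 branch added above locates the XorX user of the CastP2X node by scanning its outputs, then follows unique_out() twice to reach the Cmp guarding the cross-region check. A compact sketch of that kind of def-use scan with a toy Node type; the names and opcode strings are invented for illustration, not the real HotSpot node classes:

    #include <cassert>
    #include <cstring>
    #include <iostream>
    #include <vector>

    // Toy def-use structure standing in for C2 nodes.
    struct Node {
      const char* op;
      std::vector<Node*> outs;
    };

    // Mirrors the fast_outs() loop above: find the single user with a given opcode.
    Node* unique_out_with_op(Node* n, const char* op) {
      Node* found = nullptr;
      for (Node* u : n->outs) {
        if (std::strcmp(u->op, op) == 0) {
          assert(found == nullptr && "expected exactly one such user");
          found = u;
        }
      }
      return found;
    }

    int main() {
      Node cmp{"Cmp", {}};
      Node shift{"URShiftX", {&cmp}};
      Node xorx{"XorX", {&shift}};
      Node p2x{"CastP2X", {&xorx, &shift}};   // two users, as the assert above expects
      Node* found = unique_out_with_op(&p2x, "XorX");
      std::cout << (found == &xorx) << '\n';  // prints 1
      return 0;
    }
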
--- a/hotspot/src/share/vm/opto/matcher.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -1489,8 +1489,7 @@
 #ifdef ASSERT
     // Verify adr type after matching memory operation
     const MachOper* oper = mach->memory_operand();
-    if (oper != NULL && oper != (MachOper*)-1 &&
-        mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode
+    if (oper != NULL && oper != (MachOper*)-1) {
       // It has a unique memory operand.  Find corresponding ideal mem node.
       Node* m = NULL;
       if (leaf->is_Mem()) {
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -50,6 +50,13 @@
   init_scratch_buffer_blob();
   if (failing())  return; // Out of memory
 
+  // The number of new nodes (mostly MachNop) is proportional to
+  // the number of java calls and inner loops which are aligned.
+  if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
+                            C->inner_loops()*(OptoLoopAlignment-1)),
+                           "out of nodes before code generation" ) ) {
+    return;
+  }
   // Make sure I can find the Start Node
   Block_Array& bbs = _cfg->_bbs;
   Block *entry = _cfg->_blocks[1];
@@ -1105,7 +1112,7 @@
   uint *call_returns = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
 
   uint  return_offset = 0;
-  MachNode *nop = new (this) MachNopNode();
+  int nop_size = (new (this) MachNopNode())->size(_regalloc);
 
   int previous_offset = 0;
   int current_offset  = 0;
@@ -1188,7 +1195,6 @@
         }
 
         // align the instruction if necessary
-        int nop_size = nop->size(_regalloc);
         int padding = mach->compute_padding(current_offset);
         // Make sure safepoint node for polling is distinct from a call's
         // return by adding a nop if needed.
@@ -1372,7 +1378,6 @@
 
     // If the next block is the top of a loop, pad this block out to align
     // the loop top a little. Helps prevent pipe stalls at loop back branches.
-    int nop_size = (new (this) MachNopNode())->size(_regalloc);
     if( i<_cfg->_num_blocks-1 ) {
       Block *nb = _cfg->_blocks[i+1];
       uint padding = nb->alignment_padding(current_offset);
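
The early bail-out added at the top of output.cpp budgets for the nodes (mostly MachNops) that code emission can still create: roughly three per Java call plus up to OptoLoopAlignment-1 per aligned inner loop, on top of the usual fudge factor. A worked arithmetic sketch with assumed counts (the real values come from the Compile object):

    #include <iostream>

    int main() {
      // Assumed illustrative values, not taken from a real compilation.
      const int NodeLimitFudgeFactor = 2000;
      const int OptoLoopAlignment    = 16;
      const int java_calls           = 120;
      const int inner_loops          = 8;

      // Same bound as the check_node_count() call above.
      const int budget = NodeLimitFudgeFactor
                       + java_calls * 3
                       + inner_loops * (OptoLoopAlignment - 1);
      std::cout << "extra node headroom required: " << budget << '\n';  // 2000 + 360 + 120 = 2480
      return 0;
    }
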
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -450,6 +450,8 @@
     subsume_node(old, nn);
   }
 
+  bool delay_transform() const { return _delay_transform; }
+
   void set_delay_transform(bool delay) {
     _delay_transform = delay;
   }
--- a/hotspot/src/share/vm/opto/type.hpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/opto/type.hpp	Fri Jul 24 09:01:00 2009 -0700
@@ -1216,6 +1216,8 @@
 #define Op_AndX      Op_AndL
 #define Op_AddX      Op_AddL
 #define Op_SubX      Op_SubL
+#define Op_XorX      Op_XorL
+#define Op_URShiftX  Op_URShiftL
 // conversions
 #define ConvI2X(x)   ConvI2L(x)
 #define ConvL2X(x)   (x)
@@ -1258,6 +1260,8 @@
 #define Op_AndX      Op_AndI
 #define Op_AddX      Op_AddI
 #define Op_SubX      Op_SubI
+#define Op_XorX      Op_XorI
+#define Op_URShiftX  Op_URShiftI
 // conversions
 #define ConvI2X(x)   (x)
 #define ConvL2X(x)   ConvL2I(x)
--- a/hotspot/src/share/vm/runtime/stackValue.cpp	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/src/share/vm/runtime/stackValue.cpp	Fri Jul 24 09:01:00 2009 -0700
@@ -104,7 +104,17 @@
     }
 #endif
     case Location::oop: {
-      Handle h(*(oop *)value_addr); // Wrap a handle around the oop
+      oop val = *(oop *)value_addr;
+#ifdef _LP64
+      if (Universe::is_narrow_oop_base(val)) {
+         // Compiled code may produce a decoded oop == narrow_oop_base
+         // when a narrow-oop implicit null check is used.
+         // The narrow_oop_base could be NULL or be the address
+         // of the page below the heap. Use the NULL value in both cases.
+         val = (oop)NULL;
+      }
+#endif
+      Handle h(val); // Wrap a handle around the oop
       return new StackValue(h);
     }
     case Location::addr: {
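
The stackValue.cpp fix above maps a decoded oop that equals the narrow-oop base (NULL or the page just below the heap) back to NULL before wrapping it in a Handle. A self-contained sketch of that filtering follows; oop, the base pointer and sanitize_decoded_oop() are stand-ins for illustration, not the Universe API.

    #include <cstdint>
    #include <iostream>

    using oop = void*;

    // Stand-in for the compressed-oops heap base (could itself be null).
    static char* g_narrow_oop_base = nullptr;

    // Mirrors the _LP64 branch above: compiled code relying on an implicit
    // null check may decode a null narrow oop to the raw base, so map that
    // value back to NULL.
    oop sanitize_decoded_oop(oop val) {
      return (val == static_cast<oop>(g_narrow_oop_base)) ? nullptr : val;
    }

    int main() {
      g_narrow_oop_base = reinterpret_cast<char*>(static_cast<std::uintptr_t>(0x800000000ULL));
      oop suspicious = static_cast<oop>(g_narrow_oop_base);
      std::cout << (sanitize_decoded_oop(suspicious) == nullptr) << '\n';  // prints 1
      return 0;
    }
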
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6826736/Test.java	Fri Jul 24 09:01:00 2009 -0700
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6826736
+ * @summary CMS: core dump with -XX:+UseCompressedOops
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
+ */
+
+public class Test {
+    int[] arr;
+    int[] arr2;
+    int test(int r) {
+        for (int i = 0; i < 100; i++) {
+            for (int j = i; j < 100; j++) {
+               int a = 0;
+               for (long k = 0; k < 100; k++) {
+                  a += k;
+               }
+               if (arr != null)
+                   a = arr[j];
+               r += a;
+            }
+        }
+        return r;
+    }
+
+    public static void main(String[] args) {
+        int r = 0;
+        Test t = new Test();
+        for (int i = 0; i < 100; i++) {
+            t.arr = new int[100];
+            r = t.test(r);
+        }
+        System.out.println("Warmup 1 is done.");
+        for (int i = 0; i < 100; i++) {
+            t.arr = null;
+            r = t.test(r);
+        }
+        System.out.println("Warmup 2 is done.");
+        for (int i = 0; i < 100; i++) {
+            t.arr = new int[100];
+            r = t.test(r);
+        }
+        System.out.println("Warmup is done.");
+        for (int i = 0; i < 100; i++) {
+            t.arr = new int[1000000];
+            t.arr = null;
+            r = t.test(r);
+        }
+    }
+}
--- a/hotspot/test/compiler/6837094/Test.java	Thu Jul 16 12:38:26 2009 -0700
+++ b/hotspot/test/compiler/6837094/Test.java	Fri Jul 24 09:01:00 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2009 Google Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6851282/Test.java	Fri Jul 24 09:01:00 2009 -0700
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6851282
+ * @summary JIT miscompilation results in null entry in array when using CompressedOops
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops Test
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class Test {
+  void foo(A a, A[] as) {
+    for (A a1 : as) {
+      B[] filtered = a.c(a1);
+      for (B b : filtered) {
+        if (b == null) {
+          System.out.println("bug: b == null");
+          System.exit(97);
+        }
+      }
+    }
+  }
+
+  public static void main(String[] args) {
+    List<A> as = new ArrayList<A>();
+    for (int i = 0; i < 5000; i++) {
+      List<B> bs = new ArrayList<B>();
+      for (int j = i; j < i + 1000; j++)
+        bs.add(new B(j));
+      as.add(new A(bs.toArray(new B[0])));
+    }
+    new Test().foo(as.get(0), as.subList(1, as.size()).toArray(new A[0]));
+  }
+}
+
+class A {
+  final B[] bs;
+
+  public A(B[] bs) {
+    this.bs = bs;
+  }
+
+  final B[] c(final A a) {
+    return new BoxedArray<B>(bs).filter(new Function<B, Boolean>() {
+      public Boolean apply(B arg) {
+        for (B b : a.bs) {
+          if (b.d == arg.d)
+            return true;
+        }
+        return false;
+      }
+    });
+  }
+}
+
+class BoxedArray<T> {
+
+  private final T[] array;
+
+  BoxedArray(T[] array) {
+    this.array = array;
+  }
+
+  public T[] filter(Function<T, Boolean> function) {
+    boolean[] include = new boolean[array.length];
+    int len = 0;
+    int i = 0;
+    while (i < array.length) {
+      if (function.apply(array[i])) {
+        include[i] = true;
+        len += 1;
+      }
+      i += 1;
+    }
+    T[] result = (T[]) java.lang.reflect.Array.newInstance(array.getClass().getComponentType(), len);
+    len = 0;
+    i = 0;
+    while (len < result.length) {
+      if (include[i]) {
+        result[len] = array[i];
+        len += 1;
+      }
+      i += 1;
+    }
+    return result;
+  }
+}
+
+interface Function<T, R> {
+  R apply(T arg);
+}
+
+class B {
+  final int d;
+  public B(int d) {
+    this.d = d;
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6857159/Test6857159.java	Fri Jul 24 09:01:00 2009 -0700
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6857159
+ * @summary local schedule failed with checkcast of Thread.currentThread()
+ *
+ * @run shell Test6857159.sh
+ */
+
+public class Test6857159 extends Thread {
+    static class ct0 extends Test6857159 {
+        public void message() {
+            // System.out.println("message");
+        }
+
+        public void run() {
+             message();
+             ct0 ct = (ct0) Thread.currentThread();
+             ct.message();
+        }
+    }
+    static class ct1 extends ct0 {
+        public void message() {
+            // System.out.println("message");
+        }
+    }
+    static class ct2 extends ct0 {
+        public void message() {
+            // System.out.println("message");
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        for (int i = 0; i < 100000; i++) {
+            Thread t = null;
+            switch (i % 3) {
+              case 0: t = new ct0(); break;
+              case 1: t = new ct1(); break;
+              case 2: t = new ct2(); break;
+            }
+            t.start();
+            t.join();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6857159/Test6857159.sh	Fri Jul 24 09:01:00 2009 -0700
@@ -0,0 +1,65 @@
+#!/bin/sh
+# 
+# Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+# 
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+# 
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+# 
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# 
+# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+# CA 95054 USA or visit www.sun.com if you need additional information or
+# have any questions.
+# 
+# 
+
+if [ "${TESTSRC}" = "" ]
+then
+  echo "TESTSRC not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTSRC=${TESTSRC}"
+if [ "${TESTJAVA}" = "" ]
+then
+  echo "TESTJAVA not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTJAVA=${TESTJAVA}"
+if [ "${TESTCLASSES}" = "" ]
+then
+  echo "TESTCLASSES not set.  Test cannot execute.  Failed."
+  exit 1
+fi
+echo "TESTCLASSES=${TESTCLASSES}"
+echo "CLASSPATH=${CLASSPATH}"
+
+set -x
+
+cp ${TESTSRC}/Test6857159.java .
+cp ${TESTSRC}/Test6857159.sh .
+
+${TESTJAVA}/bin/javac -d . Test6857159.java
+
+${TESTJAVA}/bin/java  ${TESTVMOPTS} -Xbatch -XX:+PrintCompilation -XX:CompileOnly=Test6857159\$ct.run Test6857159 > test.out 2>&1
+
+grep "COMPILE SKIPPED" test.out
+
+result=$?
+if [ $result -eq 1 ]
+then
+  echo "Passed"
+  exit 0
+else
+  echo "Failed"
+  exit 1
+fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6859338/Test6859338.java	Fri Jul 24 09:01:00 2009 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6859338
+ * @summary Assertion failure in sharedRuntime.cpp
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions  -XX:-InlineObjectHash -Xbatch -XX:-ProfileInterpreter Test6859338
+ */
+
+public class Test6859338 {
+    static Object[] o = new Object[] { new Object(), null };
+    public static void main(String[] args) {
+        int total = 0;
+        try {
+            // Exercise the implicit null check in the unverified entry point
+            for (int i = 0; i < 40000; i++) {
+                int limit = o.length;
+                if (i < 20000) limit = 1;
+                for (int j = 0; j < limit; j++) {
+                    total += o[j].hashCode();
+                }
+            }
+
+        } catch (NullPointerException e) {
+            // this is expected.  A true failure causes a crash
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6860469/Test.java	Fri Jul 24 09:01:00 2009 -0700
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2009 Google Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6860469
+ * @summary remix_address_expressions reshapes address expression with bad control
+ *
+ * @run main/othervm -Xcomp -XX:CompileOnly=Test.C Test
+ */
+
+public class Test {
+
+  private static final int H = 16;
+  private static final int F = 9;
+
+  static int[] fl = new int[1 << F];
+
+  static int C(int ll, int f) {
+    int max = -1;
+    int min = H + 1;
+
+    if (ll != 0) {
+      if (ll < min) {
+        min = ll;
+      }
+      if (ll > max) {
+        max = ll;
+      }
+    }
+
+    if (f > max) {
+      f = max;
+    }
+    if (min > f) {
+      min = f;
+    }
+
+    for (int mc = 1 >> max - f; mc <= 0; mc++) {
+      int i = mc << (32 - f);
+      fl[i] = max;
+    }
+
+    return min;
+  }
+
+  public static void main(String argv[]) {
+    C(0, 10);
+  }
+}