6892658: C2 should optimize some stringbuilder patterns
author never
Thu, 12 Nov 2009 09:24:21 -0800
changeset 4450 6d700b859b3e
parent 4448 d6ec2737186c
child 4451 dfc03acffc4c
6892658: C2 should optimize some stringbuilder patterns Reviewed-by: kvn, twisti
hotspot/src/share/vm/ci/ciEnv.cpp
hotspot/src/share/vm/ci/ciEnv.hpp
hotspot/src/share/vm/ci/ciInstanceKlass.cpp
hotspot/src/share/vm/ci/ciInstanceKlass.hpp
hotspot/src/share/vm/ci/ciObjectFactory.cpp
hotspot/src/share/vm/classfile/systemDictionary.hpp
hotspot/src/share/vm/classfile/vmSymbols.cpp
hotspot/src/share/vm/classfile/vmSymbols.hpp
hotspot/src/share/vm/includeDB_compiler2
hotspot/src/share/vm/includeDB_core
hotspot/src/share/vm/memory/universe.cpp
hotspot/src/share/vm/memory/universe.hpp
hotspot/src/share/vm/opto/c2_globals.cpp
hotspot/src/share/vm/opto/c2_globals.hpp
hotspot/src/share/vm/opto/callGenerator.cpp
hotspot/src/share/vm/opto/callGenerator.hpp
hotspot/src/share/vm/opto/callnode.cpp
hotspot/src/share/vm/opto/callnode.hpp
hotspot/src/share/vm/opto/compile.cpp
hotspot/src/share/vm/opto/compile.hpp
hotspot/src/share/vm/opto/doCall.cpp
hotspot/src/share/vm/opto/graphKit.cpp
hotspot/src/share/vm/opto/graphKit.hpp
hotspot/src/share/vm/opto/macro.cpp
hotspot/src/share/vm/opto/memnode.cpp
hotspot/src/share/vm/opto/node.hpp
hotspot/src/share/vm/opto/parseHelper.cpp
hotspot/src/share/vm/opto/phase.hpp
hotspot/src/share/vm/opto/phaseX.hpp
hotspot/src/share/vm/opto/stringopts.cpp
hotspot/src/share/vm/opto/stringopts.hpp
hotspot/src/share/vm/opto/type.hpp
hotspot/src/share/vm/runtime/globals.cpp
hotspot/src/share/vm/runtime/globals_extension.hpp
hotspot/src/share/vm/utilities/growableArray.hpp
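For illustration only (the Java snippet below is not part of the changeset): the new
PhaseStringOpts pass, guarded by the experimental OptimizeStringConcat flag added in
opto/c2_globals.hpp (and therefore presumably requiring -XX:+UnlockExperimentalVMOptions
to enable), targets the StringBuilder append chains that javac emits for String
concatenation.  A source expression such as

    String greet(String name, int n) {
        // javac lowers the concatenation below to roughly:
        //   new StringBuilder().append("hello ").append(name)
        //                      .append(" #").append(n).toString()
        return "hello " + name + " #" + n;
    }

reaches the compiler as the StringBuilder constructor, append(String), append(int) and
toString() calls registered as intrinsics in classfile/vmSymbols.hpp below; once the whole
chain is recognized, the intermediate StringBuilder and its copying can be optimized away.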
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -46,6 +46,9 @@
 ciInstanceKlass* ciEnv::_Thread;
 ciInstanceKlass* ciEnv::_OutOfMemoryError;
 ciInstanceKlass* ciEnv::_String;
+ciInstanceKlass* ciEnv::_StringBuffer;
+ciInstanceKlass* ciEnv::_StringBuilder;
+ciInstanceKlass* ciEnv::_Integer;
 
 ciSymbol*        ciEnv::_unloaded_cisymbol = NULL;
 ciInstanceKlass* ciEnv::_unloaded_ciinstance_klass = NULL;
@@ -110,6 +113,8 @@
   _ArrayIndexOutOfBoundsException_instance = NULL;
   _ArrayStoreException_instance = NULL;
   _ClassCastException_instance = NULL;
+  _the_null_string = NULL;
+  _the_min_jint_string = NULL;
 }
 
 ciEnv::ciEnv(Arena* arena) {
@@ -163,6 +168,8 @@
   _ArrayIndexOutOfBoundsException_instance = NULL;
   _ArrayStoreException_instance = NULL;
   _ClassCastException_instance = NULL;
+  _the_null_string = NULL;
+  _the_min_jint_string = NULL;
 }
 
 ciEnv::~ciEnv() {
@@ -248,6 +255,22 @@
   return _ClassCastException_instance;
 }
 
+ciInstance* ciEnv::the_null_string() {
+  if (_the_null_string == NULL) {
+    VM_ENTRY_MARK;
+    _the_null_string = get_object(Universe::the_null_string())->as_instance();
+  }
+  return _the_null_string;
+}
+
+ciInstance* ciEnv::the_min_jint_string() {
+  if (_the_min_jint_string == NULL) {
+    VM_ENTRY_MARK;
+    _the_min_jint_string = get_object(Universe::the_min_jint_string())->as_instance();
+  }
+  return _the_min_jint_string;
+}
+
 // ------------------------------------------------------------------
 // ciEnv::get_method_from_handle
 ciMethod* ciEnv::get_method_from_handle(jobject method) {
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -82,6 +82,9 @@
   static ciInstanceKlass* _Thread;
   static ciInstanceKlass* _OutOfMemoryError;
   static ciInstanceKlass* _String;
+  static ciInstanceKlass* _StringBuffer;
+  static ciInstanceKlass* _StringBuilder;
+  static ciInstanceKlass* _Integer;
 
   static ciSymbol*        _unloaded_cisymbol;
   static ciInstanceKlass* _unloaded_ciinstance_klass;
@@ -97,6 +100,9 @@
   ciInstance* _ArrayStoreException_instance;
   ciInstance* _ClassCastException_instance;
 
+  ciInstance* _the_null_string;      // The Java string "null"
+  ciInstance* _the_min_jint_string; // The Java string "-2147483648"
+
   // Look up a klass by name from a particular class loader (the accessor's).
   // If require_local, result must be defined in that class loader, or NULL.
   // If !require_local, a result from remote class loader may be reported,
@@ -310,6 +316,15 @@
   ciInstanceKlass* String_klass() {
     return _String;
   }
+  ciInstanceKlass* StringBuilder_klass() {
+    return _StringBuilder;
+  }
+  ciInstanceKlass* StringBuffer_klass() {
+    return _StringBuffer;
+  }
+  ciInstanceKlass* Integer_klass() {
+    return _Integer;
+  }
   ciInstance* NullPointerException_instance() {
     assert(_NullPointerException_instance != NULL, "initialization problem");
     return _NullPointerException_instance;
@@ -324,6 +339,9 @@
   ciInstance* ArrayStoreException_instance();
   ciInstance* ClassCastException_instance();
 
+  ciInstance* the_null_string();
+  ciInstance* the_min_jint_string();
+
   static ciSymbol* unloaded_cisymbol() {
     return _unloaded_cisymbol;
   }
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -341,6 +341,20 @@
 }
 
 // ------------------------------------------------------------------
+// ciInstanceKlass::get_field_by_name
+ciField* ciInstanceKlass::get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static) {
+  VM_ENTRY_MARK;
+  instanceKlass* k = get_instanceKlass();
+  fieldDescriptor fd;
+  klassOop def = k->find_field(name->get_symbolOop(), signature->get_symbolOop(), is_static, &fd);
+  if (def == NULL) {
+    return NULL;
+  }
+  ciField* field = new (CURRENT_THREAD_ENV->arena()) ciField(&fd);
+  return field;
+}
+
+// ------------------------------------------------------------------
 // ciInstanceKlass::non_static_fields.
 
 class NonStaticFieldFiller: public FieldClosure {
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -148,6 +148,7 @@
 
   ciInstanceKlass* get_canonical_holder(int offset);
   ciField* get_field_by_offset(int field_offset, bool is_static);
+  ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
 
   GrowableArray<ciField*>* non_static_fields();
 
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -168,6 +168,15 @@
   ciEnv::_String =
     get(SystemDictionary::string_klass())
       ->as_instance_klass();
+  ciEnv::_StringBuffer =
+    get(SystemDictionary::stringBuffer_klass())
+      ->as_instance_klass();
+  ciEnv::_StringBuilder =
+    get(SystemDictionary::StringBuilder_klass())
+      ->as_instance_klass();
+  ciEnv::_Integer =
+    get(SystemDictionary::int_klass())
+      ->as_instance_klass();
 
   for (int len = -1; len != _ci_objects->length(); ) {
     len = _ci_objects->length();
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -150,6 +150,7 @@
   template(vector_klass,                 java_util_Vector,               Pre) \
   template(hashtable_klass,              java_util_Hashtable,            Pre) \
   template(stringBuffer_klass,           java_lang_StringBuffer,         Pre) \
+  template(StringBuilder_klass,          java_lang_StringBuilder,        Pre) \
                                                                               \
   /* It's NULL in non-1.4 JDKs. */                                            \
   template(stackTraceElement_klass,      java_lang_StackTraceElement,    Opt) \
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -303,6 +303,11 @@
   const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
   return (flags & (req | neg)) == req;
 }
+inline bool match_F_Y(jshort flags) {
+  const int req = JVM_ACC_SYNCHRONIZED;
+  const int neg = JVM_ACC_STATIC;
+  return (flags & (req | neg)) == req;
+}
 inline bool match_F_RN(jshort flags) {
   const int req = JVM_ACC_NATIVE;
   const int neg = JVM_ACC_STATIC | JVM_ACC_SYNCHRONIZED;
@@ -361,6 +366,7 @@
   const char* sname = vmSymbols::name_for(signature_for(id));
   const char* fname = "";
   switch (flags_for(id)) {
+  case F_Y:  fname = "synchronized ";  break;
   case F_RN: fname = "native ";        break;
   case F_SN: fname = "native static "; break;
   case F_S:  fname = "static ";        break;
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -84,6 +84,7 @@
   template(java_lang_reflect_Field,                   "java/lang/reflect/Field")                  \
   template(java_lang_reflect_Array,                   "java/lang/reflect/Array")                  \
   template(java_lang_StringBuffer,                    "java/lang/StringBuffer")                   \
+  template(java_lang_StringBuilder,                   "java/lang/StringBuilder")                  \
   template(java_lang_CharSequence,                    "java/lang/CharSequence")                   \
   template(java_security_AccessControlContext,        "java/security/AccessControlContext")       \
   template(java_security_ProtectionDomain,            "java/security/ProtectionDomain")           \
@@ -334,6 +335,7 @@
   template(ptypes_name,                               "ptypes")                                   \
   template(form_name,                                 "form")                                     \
   template(erasedType_name,                           "erasedType")                               \
+  template(append_name,                               "append")                                   \
                                                                                                   \
   /* non-intrinsic name/signature pairs: */                                                       \
   template(register_method_name,                      "register")                                 \
@@ -415,6 +417,13 @@
   template(string_signature,                          "Ljava/lang/String;")                                       \
   template(reference_signature,                       "Ljava/lang/ref/Reference;")                                \
   template(concurrenthashmap_signature,               "Ljava/util/concurrent/ConcurrentHashMap;")                 \
+  template(String_StringBuilder_signature,            "(Ljava/lang/String;)Ljava/lang/StringBuilder;")            \
+  template(int_StringBuilder_signature,               "(I)Ljava/lang/StringBuilder;")                             \
+  template(char_StringBuilder_signature,              "(C)Ljava/lang/StringBuilder;")                             \
+  template(String_StringBuffer_signature,             "(Ljava/lang/String;)Ljava/lang/StringBuffer;")             \
+  template(int_StringBuffer_signature,                "(I)Ljava/lang/StringBuffer;")                              \
+  template(char_StringBuffer_signature,               "(C)Ljava/lang/StringBuffer;")                              \
+  template(int_String_signature,                      "(I)Ljava/lang/String;")                                    \
   /* signature symbols needed by intrinsics */                                                                    \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE)            \
                                                                                                                   \
@@ -814,10 +823,34 @@
     /*the compiler does have special inlining code for these; bytecode inline is just fine */                           \
                                                                                                                         \
   do_intrinsic(_fillInStackTrace,         java_lang_Throwable, fillInStackTrace_name, void_throwable_signature,  F_RNY) \
-                                                                                                                        \
-  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,      F_R)   \
-  /*    (symbol object_initializer_name defined above) */                                                               \
-                                                                                                                        \
+                                                                                                                          \
+  do_intrinsic(_StringBuilder_void,   java_lang_StringBuilder, object_initializer_name, void_method_signature,     F_R)   \
+  do_intrinsic(_StringBuilder_int,    java_lang_StringBuilder, object_initializer_name, int_void_signature,        F_R)   \
+  do_intrinsic(_StringBuilder_String, java_lang_StringBuilder, object_initializer_name, string_void_signature,     F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuilder_append_char,   java_lang_StringBuilder, append_name, char_StringBuilder_signature,   F_R)   \
+  do_intrinsic(_StringBuilder_append_int,    java_lang_StringBuilder, append_name, int_StringBuilder_signature,    F_R)   \
+  do_intrinsic(_StringBuilder_append_String, java_lang_StringBuilder, append_name, String_StringBuilder_signature, F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuilder_toString, java_lang_StringBuilder, toString_name, void_string_signature,             F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuffer_void,   java_lang_StringBuffer, object_initializer_name, void_method_signature,       F_R)   \
+  do_intrinsic(_StringBuffer_int,    java_lang_StringBuffer, object_initializer_name, int_void_signature,          F_R)   \
+  do_intrinsic(_StringBuffer_String, java_lang_StringBuffer, object_initializer_name, string_void_signature,       F_R)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuffer_append_char,   java_lang_StringBuffer, append_name, char_StringBuffer_signature,      F_Y)   \
+  do_intrinsic(_StringBuffer_append_int,    java_lang_StringBuffer, append_name, int_StringBuffer_signature,       F_Y)   \
+  do_intrinsic(_StringBuffer_append_String, java_lang_StringBuffer, append_name, String_StringBuffer_signature,    F_Y)   \
+                                                                                                                          \
+  do_intrinsic(_StringBuffer_toString,  java_lang_StringBuffer, toString_name, void_string_signature,              F_Y)   \
+                                                                                                                          \
+  do_intrinsic(_Integer_toString,      java_lang_Integer, toString_name, int_String_signature,                     F_S)   \
+                                                                                                                          \
+  do_intrinsic(_String_String, java_lang_String, object_initializer_name, string_void_signature,                   F_R)   \
+                                                                                                                          \
+  do_intrinsic(_Object_init,              java_lang_Object, object_initializer_name, void_method_signature,        F_R)   \
+  /*    (symbol object_initializer_name defined above) */                                                                 \
+                                                                                                                          \
   do_intrinsic(_invoke,                   java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
   /*   (symbols invoke_name and invoke_signature defined above) */                                                      \
                                                                                                                         \
@@ -945,11 +978,12 @@
   enum Flags {
     // AccessFlags syndromes relevant to intrinsics.
     F_none = 0,
-    F_R,                        // !static        !synchronized (R="regular")
-    F_S,                        //  static        !synchronized
-    F_RN,                       // !static native !synchronized
-    F_SN,                       //  static native !synchronized
-    F_RNY                       // !static native  synchronized
+    F_R,                        // !static ?native !synchronized (R="regular")
+    F_S,                        //  static ?native !synchronized
+    F_Y,                        // !static ?native  synchronized
+    F_RN,                       // !static  native !synchronized
+    F_SN,                       //  static  native !synchronized
+    F_RNY                       // !static  native  synchronized
   };
 
 public:
--- a/hotspot/src/share/vm/includeDB_compiler2	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/includeDB_compiler2	Thu Nov 12 09:24:21 2009 -0800
@@ -149,6 +149,7 @@
 c2compiler.hpp                          abstractCompiler.hpp
 
 callGenerator.cpp                       addnode.hpp
+callGenerator.cpp                       bcEscapeAnalyzer.hpp
 callGenerator.cpp                       callGenerator.hpp
 callGenerator.cpp                       callnode.hpp
 callGenerator.cpp                       cfgnode.hpp
@@ -321,6 +322,7 @@
 compile.cpp                             rootnode.hpp
 compile.cpp                             runtime.hpp
 compile.cpp                             signature.hpp
+compile.cpp                             stringopts.hpp
 compile.cpp                             stubRoutines.hpp
 compile.cpp                             systemDictionary.hpp
 compile.cpp                             timer.hpp
@@ -476,12 +478,16 @@
 graphKit.cpp                            runtime.hpp
 graphKit.cpp                            sharedRuntime.hpp
 
+graphKit.hpp                            addnode.hpp
 graphKit.hpp                            callnode.hpp
 graphKit.hpp                            cfgnode.hpp
 graphKit.hpp                            ciEnv.hpp
+graphKit.hpp                            divnode.hpp
 graphKit.hpp                            compile.hpp
 graphKit.hpp                            deoptimization.hpp
 graphKit.hpp                            phaseX.hpp
+graphKit.hpp                            mulnode.hpp
+graphKit.hpp                            subnode.hpp
 graphKit.hpp                            type.hpp
 
 idealKit.cpp                            addnode.hpp
@@ -490,7 +496,10 @@
 idealKit.cpp                            idealKit.hpp
 idealKit.cpp				runtime.hpp
 
+idealKit.hpp                            addnode.hpp
+idealKit.hpp                            cfgnode.hpp
 idealKit.hpp                            connode.hpp
+idealKit.hpp                            divnode.hpp
 idealKit.hpp                            mulnode.hpp
 idealKit.hpp                            phaseX.hpp
 idealKit.hpp                            subnode.hpp
@@ -641,6 +650,7 @@
 macro.cpp                               callnode.hpp
 macro.cpp                               cfgnode.hpp
 macro.cpp                               compile.hpp
+macro.cpp                              compileLog.hpp
 macro.cpp                               connode.hpp
 macro.cpp                               locknode.hpp
 macro.cpp                               loopnode.hpp
@@ -993,6 +1003,21 @@
 split_if.cpp                            connode.hpp
 split_if.cpp                            loopnode.hpp
 
+stringopts.hpp                          phaseX.hpp
+stringopts.hpp                          node.hpp
+
+stringopts.cpp                          addnode.hpp
+stringopts.cpp                          callnode.hpp
+stringopts.cpp                          callGenerator.hpp
+stringopts.cpp                          compileLog.hpp
+stringopts.cpp                          divnode.hpp
+stringopts.cpp                          idealKit.hpp
+stringopts.cpp                          graphKit.hpp
+stringopts.cpp                          rootnode.hpp
+stringopts.cpp                          runtime.hpp
+stringopts.cpp                          subnode.hpp
+stringopts.cpp                          stringopts.hpp
+
 stubGenerator_<arch_model>.cpp          runtime.hpp
 
 stubRoutines.cpp                        runtime.hpp
--- a/hotspot/src/share/vm/includeDB_core	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/includeDB_core	Thu Nov 12 09:24:21 2009 -0800
@@ -570,6 +570,7 @@
 ciEnv.hpp                               dependencies.hpp
 ciEnv.hpp                               exceptionHandlerTable.hpp
 ciEnv.hpp                               oopMap.hpp
+ciEnv.hpp                               systemDictionary.hpp
 ciEnv.hpp                               thread.hpp
 
 ciExceptionHandler.cpp                  ciExceptionHandler.hpp
--- a/hotspot/src/share/vm/memory/universe.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/memory/universe.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -67,6 +67,8 @@
 objArrayOop Universe::_the_empty_system_obj_array     = NULL;
 objArrayOop Universe::_the_empty_class_klass_array    = NULL;
 objArrayOop Universe::_the_array_interfaces_array     = NULL;
+oop Universe::_the_null_string                        = NULL;
+oop Universe::_the_min_jint_string                   = NULL;
 LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
 LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
 ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
@@ -187,6 +189,8 @@
   f->do_oop((oop*)&_the_empty_system_obj_array);
   f->do_oop((oop*)&_the_empty_class_klass_array);
   f->do_oop((oop*)&_the_array_interfaces_array);
+  f->do_oop((oop*)&_the_null_string);
+  f->do_oop((oop*)&_the_min_jint_string);
   _finalizer_register_cache->oops_do(f);
   _loader_addClass_cache->oops_do(f);
   _reflect_invoke_cache->oops_do(f);
@@ -289,6 +293,9 @@
 
     klassOop ok = SystemDictionary::object_klass();
 
+    _the_null_string            = StringTable::intern("null", CHECK);
+    _the_min_jint_string       = StringTable::intern("-2147483648", CHECK);
+
     if (UseSharedSpaces) {
       // Verify shared interfaces array.
       assert(_the_array_interfaces_array->obj_at(0) ==
--- a/hotspot/src/share/vm/memory/universe.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/memory/universe.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -169,6 +169,8 @@
   static objArrayOop  _the_empty_system_obj_array;    // Canonicalized system obj array
   static objArrayOop  _the_empty_class_klass_array;   // Canonicalized obj array of type java.lang.Class
   static objArrayOop  _the_array_interfaces_array;    // Canonicalized 2-array of cloneable & serializable klasses
+  static oop          _the_null_string;               // A cache of "null" as a Java string
+  static oop          _the_min_jint_string;          // A cache of "-2147483648" as a Java string
   static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
   static LatestMethodOopCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
   static ActiveMethodOopsCache* _reflect_invoke_cache;    // method for security checks
@@ -310,6 +312,8 @@
   static objArrayOop  the_empty_system_obj_array ()   { return _the_empty_system_obj_array;    }
   static objArrayOop  the_empty_class_klass_array ()  { return _the_empty_class_klass_array;   }
   static objArrayOop  the_array_interfaces_array()    { return _the_array_interfaces_array;    }
+  static oop          the_null_string()               { return _the_null_string;               }
+  static oop          the_min_jint_string()          { return _the_min_jint_string;          }
   static methodOop    finalizer_register_method()     { return _finalizer_register_cache->get_methodOop(); }
   static methodOop    loader_addClass_method()        { return _loader_addClass_cache->get_methodOop(); }
   static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
--- a/hotspot/src/share/vm/opto/c2_globals.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -25,4 +25,4 @@
 # include "incls/_precompiled.incl"
 # include "incls/_c2_globals.cpp.incl"
 
-C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
+C2_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_EXPERIMENTAL_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -26,7 +26,7 @@
 // Defines all globals flags used by the server compiler.
 //
 
-#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \
                                                                             \
   notproduct(intx, CompileZapFirst, 0,                                      \
           "If +ZapDeadCompiledLocals, "                                     \
@@ -394,6 +394,12 @@
   product(bool, UseOptoBiasInlining, true,                                  \
           "Generate biased locking code in C2 ideal graph")                 \
                                                                             \
+  experimental(bool, OptimizeStringConcat, false,                           \
+          "Optimize the construction of Strings by StringBuilder")          \
+                                                                            \
+  notproduct(bool, PrintOptimizeStringConcat, false,                        \
+          "Print information about transformations performed on Strings")   \
+                                                                            \
   product(intx, ValueSearchLimit, 1000,                                     \
           "Recursion limit in PhaseMacroExpand::value_from_mem_phi")        \
                                                                             \
@@ -413,4 +419,4 @@
   product(bool, BlockLayoutRotateLoops, true,                               \
           "Allow back branches to be fall throughs in the block layour")    \
 
-C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
+C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -98,12 +98,21 @@
 //---------------------------DirectCallGenerator------------------------------
 // Internal class which handles all out-of-line calls w/o receiver type checks.
 class DirectCallGenerator : public CallGenerator {
-public:
-  DirectCallGenerator(ciMethod* method)
-    : CallGenerator(method)
+ private:
+  CallStaticJavaNode* _call_node;
+  // Force separate memory and I/O projections for the exceptional
+  // paths to facilitate late inlining.
+  bool                _separate_io_proj;
+
+ public:
+  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
+    : CallGenerator(method),
+      _separate_io_proj(separate_io_proj)
   {
   }
   virtual JVMState* generate(JVMState* jvms);
+
+  CallStaticJavaNode* call_node() const { return _call_node; }
 };
 
 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
@@ -129,9 +138,10 @@
     call->set_optimized_virtual(true);
   }
   kit.set_arguments_for_java_call(call);
-  kit.set_edges_for_java_call(call);
-  Node* ret = kit.set_results_for_java_call(call);
+  kit.set_edges_for_java_call(call, false, _separate_io_proj);
+  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
   kit.push_node(method()->return_type()->basic_type(), ret);
+  _call_node = call;  // Save the call node in case we need it later
   return kit.transfer_exceptions_into_jvms();
 }
 
@@ -238,9 +248,9 @@
   return new ParseGenerator(m, expected_uses, true);
 }
 
-CallGenerator* CallGenerator::for_direct_call(ciMethod* m) {
+CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
   assert(!m->is_abstract(), "for_direct_call mismatch");
-  return new DirectCallGenerator(m);
+  return new DirectCallGenerator(m, separate_io_proj);
 }
 
 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
@@ -248,6 +258,108 @@
   return new VirtualCallGenerator(m, vtable_index);
 }
 
+// Allow inlining decisions to be delayed
+class LateInlineCallGenerator : public DirectCallGenerator {
+  CallGenerator* _inline_cg;
+
+ public:
+  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
+    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}
+
+  virtual bool      is_late_inline() const { return true; }
+
+  // Convert the CallStaticJava into an inline
+  virtual void do_late_inline();
+
+  JVMState* generate(JVMState* jvms) {
+    // Record that this call site should be revisited once the main
+    // parse is finished.
+    Compile::current()->add_late_inline(this);
+
+    // Emit the CallStaticJava and request separate projections so
+    // that the late inlining logic can distinguish between fall
+    // through and exceptional uses of the memory and io projections
+    // as is done for allocations and macro expansion.
+    return DirectCallGenerator::generate(jvms);
+  }
+
+};
+
+
+void LateInlineCallGenerator::do_late_inline() {
+  // Can't inline it
+  if (call_node() == NULL || call_node()->outcnt() == 0 ||
+      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
+    return;
+
+  CallStaticJavaNode* call = call_node();
+
+  // Make a clone of the JVMState that is appropriate to use for driving a parse
+  Compile* C = Compile::current();
+  JVMState* jvms     = call->jvms()->clone_shallow(C);
+  uint size = call->req();
+  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
+  for (uint i1 = 0; i1 < size; i1++) {
+    map->init_req(i1, call->in(i1));
+  }
+
+  // Make sure the state is a MergeMem for parsing.
+  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
+    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+  }
+
+  // Make enough space for the expression stack and transfer the incoming arguments
+  int nargs    = method()->arg_size();
+  jvms->set_map(map);
+  map->ensure_stack(jvms, jvms->method()->max_stack());
+  if (nargs > 0) {
+    for (int i1 = 0; i1 < nargs; i1++) {
+      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
+    }
+  }
+
+  CompileLog* log = C->log();
+  if (log != NULL) {
+    log->head("late_inline method='%d'", log->identify(method()));
+    JVMState* p = jvms;
+    while (p != NULL) {
+      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+      p = p->caller();
+    }
+    log->tail("late_inline");
+  }
+
+  // Setup default node notes to be picked up by the inlining
+  Node_Notes* old_nn = C->default_node_notes();
+  if (old_nn != NULL) {
+    Node_Notes* entry_nn = old_nn->clone(C);
+    entry_nn->set_jvms(jvms);
+    C->set_default_node_notes(entry_nn);
+  }
+
+  // Now perform the inlining using the synthesized JVMState
+  JVMState* new_jvms = _inline_cg->generate(jvms);
+  if (new_jvms == NULL)  return;  // no change
+  if (C->failing())      return;
+
+  // Capture any exceptional control flow
+  GraphKit kit(new_jvms);
+
+  // Find the result object
+  Node* result = C->top();
+  int   result_size = method()->return_type()->size();
+  if (result_size != 0 && !kit.stopped()) {
+    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
+  }
+
+  kit.replace_call(call, result);
+}
+
+
+CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
+  return new LateInlineCallGenerator(method, inline_cg);
+}
+
 
 //---------------------------WarmCallGenerator--------------------------------
 // Internal class which handles initial deferral of inlining decisions.
@@ -315,70 +427,7 @@
 }
 
 void WarmCallInfo::make_hot() {
-  Compile* C = Compile::current();
-  // Replace the callnode with something better.
-  CallJavaNode* call = this->call()->as_CallJava();
-  ciMethod* method   = call->method();
-  int       nargs    = method->arg_size();
-  JVMState* jvms     = call->jvms()->clone_shallow(C);
-  uint size = TypeFunc::Parms + MAX2(2, nargs);
-  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
-  for (uint i1 = 0; i1 < (uint)(TypeFunc::Parms + nargs); i1++) {
-    map->init_req(i1, call->in(i1));
-  }
-  jvms->set_map(map);
-  jvms->set_offsets(map->req());
-  jvms->set_locoff(TypeFunc::Parms);
-  jvms->set_stkoff(TypeFunc::Parms);
-  GraphKit kit(jvms);
-
-  JVMState* new_jvms = _hot_cg->generate(kit.jvms());
-  if (new_jvms == NULL)  return;  // no change
-  if (C->failing())      return;
-
-  kit.set_jvms(new_jvms);
-  Node* res = C->top();
-  int   res_size = method->return_type()->size();
-  if (res_size != 0) {
-    kit.inc_sp(-res_size);
-    res = kit.argument(0);
-  }
-  GraphKit ekit(kit.combine_and_pop_all_exception_states()->jvms());
-
-  // Replace the call:
-  for (DUIterator i = call->outs(); call->has_out(i); i++) {
-    Node* n = call->out(i);
-    Node* nn = NULL;  // replacement
-    if (n->is_Proj()) {
-      ProjNode* nproj = n->as_Proj();
-      assert(nproj->_con < (uint)(TypeFunc::Parms + (res_size ? 1 : 0)), "sane proj");
-      if (nproj->_con == TypeFunc::Parms) {
-        nn = res;
-      } else {
-        nn = kit.map()->in(nproj->_con);
-      }
-      if (nproj->_con == TypeFunc::I_O) {
-        for (DUIterator j = nproj->outs(); nproj->has_out(j); j++) {
-          Node* e = nproj->out(j);
-          if (e->Opcode() == Op_CreateEx) {
-            e->replace_by(ekit.argument(0));
-          } else if (e->Opcode() == Op_Catch) {
-            for (DUIterator k = e->outs(); e->has_out(k); k++) {
-              CatchProjNode* p = e->out(j)->as_CatchProj();
-              if (p->is_handler_proj()) {
-                p->replace_by(ekit.control());
-              } else {
-                p->replace_by(kit.control());
-              }
-            }
-          }
-        }
-      }
-    }
-    NOT_PRODUCT(if (!nn)  n->dump(2));
-    assert(nn != NULL, "don't know what to do with this user");
-    n->replace_by(nn);
-  }
+  Unimplemented();
 }
 
 void WarmCallInfo::make_cold() {
--- a/hotspot/src/share/vm/opto/callGenerator.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -57,6 +57,13 @@
   // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
   virtual bool      is_trap() const             { return false; }
 
+  // is_late_inline: supports conversion of call into an inline
+  virtual bool      is_late_inline() const      { return false; }
+  // Replace the call with an inline version of the code
+  virtual void do_late_inline() { ShouldNotReachHere(); }
+
+  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }
+
   // Note:  It is possible for a CG to be both inline and virtual.
   // (The hashCode intrinsic does a vtable check and an inlined fast path.)
 
@@ -92,9 +99,12 @@
   static CallGenerator* for_osr(ciMethod* m, int osr_bci);
 
   // How to generate vanilla out-of-line call sites:
-  static CallGenerator* for_direct_call(ciMethod* m);   // static, special
+  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);   // static, special
   static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
 
+  // How to generate a direct call that can later be replaced with an inline version
+  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
+
   // How to make a call but defer the decision whether to inline or not.
   static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                       CallGenerator* if_cold,
--- a/hotspot/src/share/vm/opto/callnode.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -693,6 +693,84 @@
 }
 
 
+void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
+  projs->fallthrough_proj      = NULL;
+  projs->fallthrough_catchproj = NULL;
+  projs->fallthrough_ioproj    = NULL;
+  projs->catchall_ioproj       = NULL;
+  projs->catchall_catchproj    = NULL;
+  projs->fallthrough_memproj   = NULL;
+  projs->catchall_memproj      = NULL;
+  projs->resproj               = NULL;
+  projs->exobj                 = NULL;
+
+  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+    ProjNode *pn = fast_out(i)->as_Proj();
+    if (pn->outcnt() == 0) continue;
+    switch (pn->_con) {
+    case TypeFunc::Control:
+      {
+        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
+        projs->fallthrough_proj = pn;
+        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
+        const Node *cn = pn->fast_out(j);
+        if (cn->is_Catch()) {
+          ProjNode *cpn = NULL;
+          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
+            cpn = cn->fast_out(k)->as_Proj();
+            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
+            if (cpn->_con == CatchProjNode::fall_through_index)
+              projs->fallthrough_catchproj = cpn;
+            else {
+              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
+              projs->catchall_catchproj = cpn;
+            }
+          }
+        }
+        break;
+      }
+    case TypeFunc::I_O:
+      if (pn->_is_io_use)
+        projs->catchall_ioproj = pn;
+      else
+        projs->fallthrough_ioproj = pn;
+      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
+        Node* e = pn->out(j);
+        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
+          assert(projs->exobj == NULL, "only one");
+          projs->exobj = e;
+        }
+      }
+      break;
+    case TypeFunc::Memory:
+      if (pn->_is_io_use)
+        projs->catchall_memproj = pn;
+      else
+        projs->fallthrough_memproj = pn;
+      break;
+    case TypeFunc::Parms:
+      projs->resproj = pn;
+      break;
+    default:
+      assert(false, "unexpected projection from call node.");
+    }
+  }
+
+  // The resproj may not exist because the result could be ignored
+  // and the exception object may not exist if an exception handler
+  // swallows the exception but all the others must exist and be found.
+  assert(projs->fallthrough_proj      != NULL, "must be found");
+  assert(projs->fallthrough_catchproj != NULL, "must be found");
+  assert(projs->fallthrough_memproj   != NULL, "must be found");
+  assert(projs->fallthrough_ioproj    != NULL, "must be found");
+  assert(projs->catchall_catchproj    != NULL, "must be found");
+  if (separate_io_proj) {
+    assert(projs->catchall_memproj      != NULL, "must be found");
+    assert(projs->catchall_ioproj       != NULL, "must be found");
+  }
+}
+
+
 //=============================================================================
 uint CallJavaNode::size_of() const { return sizeof(*this); }
 uint CallJavaNode::cmp( const Node &n ) const {
--- a/hotspot/src/share/vm/opto/callnode.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -470,6 +470,23 @@
 #endif
 };
 
+
+// Simple container for the outgoing projections of a call.  Useful
+// for serious surgery on calls.
+class CallProjections : public StackObj {
+public:
+  Node* fallthrough_proj;
+  Node* fallthrough_catchproj;
+  Node* fallthrough_memproj;
+  Node* fallthrough_ioproj;
+  Node* catchall_catchproj;
+  Node* catchall_memproj;
+  Node* catchall_ioproj;
+  Node* resproj;
+  Node* exobj;
+};
+
+
 //------------------------------CallNode---------------------------------------
 // Call nodes now subsume the function of debug nodes at callsites, so they
 // contain the functionality of a full scope chain of debug nodes.
@@ -521,6 +538,11 @@
   // or returns NULL if there is no one.
   Node *result_cast();
 
+  // Collect all the interesting edges from a call for use in
+  // replacing the call by something else.  Used by macro expansion
+  // and the late inlining support.
+  void extract_projections(CallProjections* projs, bool separate_io_proj);
+
   virtual uint match_edge(uint idx) const;
 
 #ifndef PRODUCT
@@ -529,6 +551,7 @@
 #endif
 };
 
+
 //------------------------------CallJavaNode-----------------------------------
 // Make a static or dynamic subroutine call node using Java calling
 // convention.  (The "Java" calling convention is the compiler's calling
--- a/hotspot/src/share/vm/opto/compile.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/compile.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -224,6 +224,32 @@
 }
 
 
+void Compile::gvn_replace_by(Node* n, Node* nn) {
+  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
+    Node* use = n->last_out(i);
+    bool is_in_table = initial_gvn()->hash_delete(use);
+    uint uses_found = 0;
+    for (uint j = 0; j < use->len(); j++) {
+      if (use->in(j) == n) {
+        if (j < use->req())
+          use->set_req(j, nn);
+        else
+          use->set_prec(j, nn);
+        uses_found++;
+      }
+    }
+    if (is_in_table) {
+      // reinsert into table
+      initial_gvn()->hash_find_insert(use);
+    }
+    record_for_igvn(use);
+    i -= uses_found;    // we deleted 1 or more copies of this edge
+  }
+}
+
+
+
+
 // Identify all nodes that are reachable from below, useful.
 // Use breadth-first pass that records state in a Unique_Node_List,
 // recursive traversal is slower.
@@ -554,6 +580,28 @@
       rethrow_exceptions(kit.transfer_exceptions_into_jvms());
     }
 
+    if (!failing() && has_stringbuilder()) {
+      {
+        // remove useless nodes to make the usage analysis simpler
+        ResourceMark rm;
+        PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
+      }
+
+      {
+        ResourceMark rm;
+        print_method("Before StringOpts", 3);
+        PhaseStringOpts pso(initial_gvn(), &for_igvn);
+        print_method("After StringOpts", 3);
+      }
+
+      // now inline anything that we skipped the first time around
+      while (_late_inlines.length() > 0) {
+        CallGenerator* cg = _late_inlines.pop();
+        cg->do_late_inline();
+      }
+    }
+    assert(_late_inlines.length() == 0, "should have been processed");
+
     print_method("Before RemoveUseless", 3);
 
     // Remove clutter produced by parsing.
@@ -820,6 +868,7 @@
   _fixed_slots = 0;
   set_has_split_ifs(false);
   set_has_loops(has_method() && method()->has_loops()); // first approximation
+  set_has_stringbuilder(false);
   _deopt_happens = true;  // start out assuming the worst
   _trap_can_recompile = false;  // no traps emitted yet
   _major_progress = true; // start out assuming good things will happen
@@ -2240,6 +2289,30 @@
     break;
   }
 
+  case Op_Proj: {
+    if (OptimizeStringConcat) {
+      ProjNode* p = n->as_Proj();
+      if (p->_is_io_use) {
+        // Separate projections were used for the exception path which
+        // are normally removed by a late inline.  If it wasn't inlined
+        // then they will hang around and should just be replaced with
+        // the original one.
+        Node* proj = NULL;
+        // Replace with just one
+        for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
+          Node *use = i.get();
+          if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
+            proj = use;
+            break;
+          }
+        }
+        assert(proj != NULL, "must be found");
+        p->subsume_by(proj);
+      }
+    }
+    break;
+  }
+
   case Op_Phi:
     if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
       // The EncodeP optimization may create Phi with the same edges
--- a/hotspot/src/share/vm/opto/compile.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/compile.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -149,6 +149,7 @@
   bool                  _has_loops;             // True if the method _may_ have some loops
   bool                  _has_split_ifs;         // True if the method _may_ have some split-if
   bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
+  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
   uint                  _trap_hist[trapHistLength];  // Cumulative traps
   bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
   uint                  _decompile_count;       // Cumulative decompilation counts.
@@ -219,6 +220,9 @@
   Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
   WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.
 
+  GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
+                                                // main parsing has finished.
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*             _cfg;                   // Results of CFG finding
   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
@@ -298,6 +302,8 @@
   void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
   bool              has_unsafe_access() const   { return _has_unsafe_access; }
   void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
+  bool              has_stringbuilder() const   { return _has_stringbuilder; }
+  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
   void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
   uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
   bool              trap_can_recompile() const  { return _trap_can_recompile; }
@@ -475,6 +481,7 @@
   // Decide how to build a call.
   // The profile factor is a discount to apply to this site's interp. profile.
   CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
+  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);
 
   // Report if there were too many traps at a current method and bci.
   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
@@ -495,6 +502,11 @@
   void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
   void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }
 
+  // Replace n by nn using initial_gvn, calling hash_delete and
+  // record_for_igvn as needed.
+  void gvn_replace_by(Node* n, Node* nn);
+
+
   void              identify_useful_nodes(Unique_Node_List &useful);
   void              remove_useless_nodes  (Unique_Node_List &useful);
 
@@ -502,6 +514,9 @@
   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
   WarmCallInfo* pop_warm_call();
 
+  // Record this CallGenerator for inlining at the end of parsing.
+  void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*         cfg()                       { return _cfg; }
   bool              select_24_bit_instr() const { return _select_24_bit_instr; }
--- a/hotspot/src/share/vm/opto/doCall.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -128,6 +128,12 @@
 
       if (allow_inline) {
         CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
+        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
+          // Delay the inlining of this method to give us the
+          // opportunity to perform some high level optimizations
+          // first.
+          return CallGenerator::for_late_inline(call_method, cg);
+        }
         if (cg == NULL) {
           // Fall through.
         } else if (require_inline || !InlineWarmCalls) {
@@ -225,10 +231,63 @@
   } else {
     // Class Hierarchy Analysis or Type Profile reveals a unique target,
     // or it is a static or special call.
-    return CallGenerator::for_direct_call(call_method);
+    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
   }
 }
 
+// Return true for methods that shouldn't be inlined early so that
+// they are easier to analyze and optimize as intrinsics.
+bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
+  if (has_stringbuilder()) {
+
+    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
+         call_method->holder() == C->env()->StringBuffer_klass()) &&
+        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
+         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
+      // Delay SB calls only when called from non-SB code
+      return false;
+    }
+
+    switch (call_method->intrinsic_id()) {
+      case vmIntrinsics::_StringBuilder_void:
+      case vmIntrinsics::_StringBuilder_int:
+      case vmIntrinsics::_StringBuilder_String:
+      case vmIntrinsics::_StringBuilder_append_char:
+      case vmIntrinsics::_StringBuilder_append_int:
+      case vmIntrinsics::_StringBuilder_append_String:
+      case vmIntrinsics::_StringBuilder_toString:
+      case vmIntrinsics::_StringBuffer_void:
+      case vmIntrinsics::_StringBuffer_int:
+      case vmIntrinsics::_StringBuffer_String:
+      case vmIntrinsics::_StringBuffer_append_char:
+      case vmIntrinsics::_StringBuffer_append_int:
+      case vmIntrinsics::_StringBuffer_append_String:
+      case vmIntrinsics::_StringBuffer_toString:
+      case vmIntrinsics::_Integer_toString:
+        return true;
+
+      case vmIntrinsics::_String_String:
+        {
+          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
+          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
+            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
+            ciMethod* m = csj->method();
+            if (m != NULL &&
+                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
+                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
+              // Delay String.<init>(new SB())
+              return true;
+          }
+          return false;
+        }
+
+      default:
+        return false;
+    }
+  }
+  return false;
+}
+
 
 // uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
 bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
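As a purely illustrative Java shape for the _String_String case above (not part of this
changeset), the String(String) constructor is only delayed when its argument comes straight
from a StringBuilder/StringBuffer toString(), matching the "Delay String.<init>(new SB())"
comment:

    String label(int id) {
        StringBuilder sb = new StringBuilder();
        sb.append("id-").append(id);
        // String.<init>(String) fed directly by StringBuilder.toString():
        // kept out of early inlining so PhaseStringOpts can see the whole chain.
        return new String(sb.toString());
    }

Calls that the string optimization does not absorb are inlined afterwards through the
LateInlineCallGenerator added in opto/callGenerator.cpp.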
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -1351,8 +1351,8 @@
 }
 
 //------------------------------set_all_memory_call----------------------------
-void GraphKit::set_all_memory_call(Node* call) {
-  Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) );
+void GraphKit::set_all_memory_call(Node* call, bool separate_io_proj) {
+  Node* newmem = _gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory, separate_io_proj) );
   set_all_memory(newmem);
 }
 
@@ -1573,7 +1573,7 @@
 //---------------------------set_edges_for_java_call---------------------------
 // Connect a newly created call into the current JVMS.
 // A return value node (if any) is returned from set_edges_for_java_call.
-void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw) {
+void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
 
   // Add the predefined inputs:
   call->init_req( TypeFunc::Control, control() );
@@ -1595,13 +1595,13 @@
   // Re-use the current map to produce the result.
 
   set_control(_gvn.transform(new (C, 1) ProjNode(call, TypeFunc::Control)));
-  set_i_o(    _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O    )));
-  set_all_memory_call(xcall);
+  set_i_o(    _gvn.transform(new (C, 1) ProjNode(call, TypeFunc::I_O    , separate_io_proj)));
+  set_all_memory_call(xcall, separate_io_proj);
 
   //return xcall;   // no need, caller already has it
 }
 
-Node* GraphKit::set_results_for_java_call(CallJavaNode* call) {
+Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
   if (stopped())  return top();  // maybe the call folded up?
 
   // Capture the return value, if any.
@@ -1614,8 +1614,15 @@
   // Note:  Since any out-of-line call can produce an exception,
   // we always insert an I_O projection from the call into the result.
 
-  make_slow_call_ex(call, env()->Throwable_klass(), false);
-
+  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
+
+  if (separate_io_proj) {
+    // The caller requested separate projections be used by the fall
+    // through and exceptional paths, so replace the projections for
+    // the fall through path.
+    set_i_o(_gvn.transform( new (C, 1) ProjNode(call, TypeFunc::I_O) ));
+    set_all_memory(_gvn.transform( new (C, 1) ProjNode(call, TypeFunc::Memory) ));
+  }
   return ret;
 }
 
@@ -1678,6 +1685,59 @@
   }
 }
 
+
+// Replace the call with the current state of the kit.
+void GraphKit::replace_call(CallNode* call, Node* result) {
+  JVMState* ejvms = NULL;
+  if (has_exceptions()) {
+    ejvms = transfer_exceptions_into_jvms();
+  }
+
+  SafePointNode* final_state = stop();
+
+  // Find all the needed outputs of this call
+  CallProjections callprojs;
+  call->extract_projections(&callprojs, true);
+
+  // Replace all the old call edges with the edges from the inlining result
+  C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
+  C->gvn_replace_by(callprojs.fallthrough_memproj,   final_state->in(TypeFunc::Memory));
+  C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_state->in(TypeFunc::I_O));
+
+  // Replace the result with the new result if it exists and is used
+  if (callprojs.resproj != NULL && result != NULL) {
+    C->gvn_replace_by(callprojs.resproj, result);
+  }
+
+  if (ejvms == NULL) {
+    // No exception edges, so simply kill off those paths
+    C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
+    C->gvn_replace_by(callprojs.catchall_memproj,   C->top());
+    C->gvn_replace_by(callprojs.catchall_ioproj,    C->top());
+  } else {
+    GraphKit ekit(ejvms);
+
+    // Load my combined exception state into the kit, with all phis transformed:
+    SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
+
+    Node* ex_oop = ekit.use_exception_state(ex_map);
+
+    C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
+    C->gvn_replace_by(callprojs.catchall_memproj,   ekit.reset_memory());
+    C->gvn_replace_by(callprojs.catchall_ioproj,    ekit.i_o());
+
+    // Replace the old exception object with the newly created one
+    if (callprojs.exobj != NULL) {
+      C->gvn_replace_by(callprojs.exobj, ex_oop);
+    }
+  }
+
+  // Disconnect the call from the graph
+  call->disconnect_inputs(NULL);
+  C->gvn_replace_by(call, C->top());
+}
+
+
 //------------------------------increment_counter------------------------------
 // for statistics: increment a VM counter by 1
 
@@ -3459,4 +3519,3 @@
   sync_kit(ideal);
 }
 #undef __
-
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -279,6 +279,34 @@
   }
   Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
 
+
+  // Some convenient shortcuts for common nodes
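+  // (e.g. AddI(a, b) is shorthand for _gvn.transform(new (C,3) AddINode(a, b)))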
+  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C,1) IfTrueNode(iff));      }
+  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C,1) IfFalseNode(iff));     }
+
+  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AddINode(l, r));       }
+  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C,3) SubINode(l, r));       }
+  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MulINode(l, r));       }
+  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C,3) DivINode(ctl, l, r));  }
+
+  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C,3) AndINode(l, r));       }
+  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C,3) OrINode(l, r));        }
+  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C,3) XorINode(l, r));       }
+
+  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MaxINode(l, r));       }
+  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C,3) MinINode(l, r));       }
+
+  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) LShiftINode(l, r));    }
+  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C,3) RShiftINode(l, r));    }
+  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C,3) URShiftINode(l, r));   }
+
+  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpINode(l, r));       }
+  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpLNode(l, r));       }
+  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C,3) CmpPNode(l, r));       }
+  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C,2) BoolNode(cmp, relop)); }
+
+  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C,4) AddPNode(b, a, o));    }
+
   // Convert between int and long, and size_t.
   // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
   Node* ConvI2L(Node* offset);
@@ -400,7 +428,7 @@
   void set_all_memory(Node* newmem);
 
   // Create a memory projection from the call, then set_all_memory.
-  void set_all_memory_call(Node* call);
+  void set_all_memory_call(Node* call, bool separate_io_proj = false);
 
   // Create a LoadNode, reading from the parser's memory state.
   // (Note:  require_atomic_access is useful only with T_LONG.)
@@ -543,12 +571,12 @@
   // Transform the call, and update the basics: control, i_o, memory.
   // (The next step is usually to call set_results_for_java_call.)
   void set_edges_for_java_call(CallJavaNode* call,
-                               bool must_throw = false);
+                               bool must_throw = false, bool separate_io_proj = false);
 
   // Finish up a java call that was started by set_edges_for_java_call.
   // Call add_exception on any throw arising from the call.
   // Return the call result (transformed).
-  Node* set_results_for_java_call(CallJavaNode* call);
+  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);
 
   // Similar to set_edges_for_java_call, but simplified for runtime calls.
   void  set_predefined_output_for_runtime_call(Node* call) {
@@ -559,6 +587,11 @@
                                                const TypePtr* hook_mem);
   Node* set_predefined_input_for_runtime_call(SafePointNode* call);
 
+  // Replace the call with the current state of the kit.  Requires
+  // that the call was generated with separate io_projs so that
+  // exceptional control flow can be handled properly.
+  void replace_call(CallNode* call, Node* result);
+
   // helper functions for statistics
   void increment_counter(address counter_addr);   // increment a debug counter
   void increment_counter(Node*   counter_addr);   // increment a debug counter
--- a/hotspot/src/share/vm/opto/macro.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/macro.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -912,15 +912,29 @@
     return false;
   }
 
+  CompileLog* log = C->log();
+  if (log != NULL) {
+    Node* klass = alloc->in(AllocateNode::KlassNode);
+    const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr();
+    log->head("eliminate_allocation type='%d'",
+              log->identify(tklass->klass()));
+    JVMState* p = alloc->jvms();
+    while (p != NULL) {
+      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+      p = p->caller();
+    }
+    log->tail("eliminate_allocation");
+  }
+
   process_users_of_allocation(alloc);
 
 #ifndef PRODUCT
-if (PrintEliminateAllocations) {
-  if (alloc->is_AllocateArray())
-    tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
-  else
-    tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
-}
+  if (PrintEliminateAllocations) {
+    if (alloc->is_AllocateArray())
+      tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx);
+    else
+      tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx);
+  }
 #endif
 
   return true;
@@ -1639,6 +1653,18 @@
       } // if (!oldbox->is_eliminated())
   } // if (alock->is_Lock() && !lock->is_coarsened())
 
+  CompileLog* log = C->log();
+  if (log != NULL) {
+    log->head("eliminate_lock lock='%d'",
+              alock->is_Lock());
+    JVMState* p = alock->jvms();
+    while (p != NULL) {
+      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+      p = p->caller();
+    }
+    log->tail("eliminate_lock");
+  }
+
   #ifndef PRODUCT
   if (PrintEliminateLocks) {
     if (alock->is_Lock()) {
--- a/hotspot/src/share/vm/opto/memnode.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -1503,6 +1503,8 @@
       }
     }
   } else if (tp->base() == Type::InstPtr) {
+    const TypeInstPtr* tinst = tp->is_instptr();
+    ciKlass* klass = tinst->klass();
     assert( off != Type::OffsetBot ||
             // arrays can be cast to Objects
             tp->is_oopptr()->klass()->is_java_lang_Object() ||
@@ -1510,6 +1512,25 @@
             phase->C->has_unsafe_access(),
             "Field accesses must be precise" );
     // For oop loads, we expect the _type to be precise
+    if (OptimizeStringConcat && klass == phase->C->env()->String_klass() &&
+        adr->is_AddP() && off != Type::OffsetBot) {
+      // For constant Strings treat the fields as compile time constants.
+      Node* base = adr->in(AddPNode::Base);
+      if (base->Opcode() == Op_ConP) {
+        const TypeOopPtr* t = phase->type(base)->isa_oopptr();
+        ciObject* string = t->const_oop();
+        ciConstant constant = string->as_instance()->field_value_by_offset(off);
+        if (constant.basic_type() == T_INT) {
+          return TypeInt::make(constant.as_int());
+        } else if (constant.basic_type() == T_ARRAY) {
+          if (adr->bottom_type()->is_ptr_to_narrowoop()) {
+            return TypeNarrowOop::make_from_constant(constant.as_object());
+          } else {
+            return TypeOopPtr::make_from_constant(constant.as_object());
+          }
+        }
+      }
+    }
   } else if (tp->base() == Type::KlassPtr) {
     assert( off != Type::OffsetBot ||
             // arrays can be cast to Objects
--- a/hotspot/src/share/vm/opto/node.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/node.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -661,18 +661,25 @@
     return (_flags & Flag_is_Call) != 0;
   }
 
+  CallNode* isa_Call() const {
+    return is_Call() ? as_Call() : NULL;
+  }
+
   CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
     assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
     return (CallNode*)this;
   }
 
-  #define DEFINE_CLASS_QUERY(type) \
-  bool is_##type() const { \
+  #define DEFINE_CLASS_QUERY(type)                           \
+  bool is_##type() const {                                   \
     return ((_class_id & ClassMask_##type) == Class_##type); \
-  } \
-  type##Node *as_##type() const { \
-    assert(is_##type(), "invalid node class"); \
-    return (type##Node*)this; \
+  }                                                          \
+  type##Node *as_##type() const {                            \
+    assert(is_##type(), "invalid node class");               \
+    return (type##Node*)this;                                \
+  }                                                          \
+  type##Node* isa_##type() const {                           \
+    return (is_##type()) ? as_##type() : NULL;               \
   }
 
   DEFINE_CLASS_QUERY(AbstractLock)
@@ -1249,6 +1256,24 @@
 #undef I_VDUI_ONLY
 #undef VDUI_ONLY
 
+// An Iterator that truly follows the iterator pattern.  Doesn't
+// support deletion but could be made to.
+//
+//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
+//     Node* m = i.get();
+//     ...
+//   }
+//
+class SimpleDUIterator : public StackObj {
+ private:
+  Node* node;
+  DUIterator_Fast i;
+  DUIterator_Fast imax;
+ public:
+  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
+  bool has_next() { return i < imax; }
+  void next() { i++; }
+  Node* get() { return node->fast_out(i); }
+};
+
 
 //-----------------------------------------------------------------------------
 // Map dense integer indices to Nodes.  Uses classic doubling-array trick.
@@ -1290,6 +1315,12 @@
 public:
   Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
   Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
+  bool contains(Node* n) {
+    for (uint e = 0; e < size(); e++) {
+      if (at(e) == n) return true;
+    }
+    return false;
+  }
   void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
   void remove( uint i ) { Node_Array::remove(i); _cnt--; }
   void push( Node *b ) { map(_cnt++,b); }
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -221,6 +221,14 @@
 
   // Push resultant oop onto stack
   push(obj);
+
+  // Keep track of whether opportunities exist for StringBuilder
+  // optimizations.
+  if (OptimizeStringConcat &&
+      (klass == C->env()->StringBuilder_klass() ||
+       klass == C->env()->StringBuffer_klass())) {
+    C->set_has_stringbuilder(true);
+  }
 }
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/opto/phase.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/phase.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -44,6 +44,7 @@
     BlockLayout,                // Linear ordering of blocks
     Register_Allocation,        // Register allocation, duh
     LIVE,                       // Dragon-book LIVE range problem
+    StringOpts,                 // StringBuilder related optimizations
     Interference_Graph,         // Building the IFG
     Coalesce,                   // Coalescing copies
     Ideal_Loop,                 // Find idealized trip-counted loops
--- a/hotspot/src/share/vm/opto/phaseX.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/phaseX.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -345,7 +345,11 @@
   Node  *hash_find(const Node *n) { return _table.hash_find(n); }
 
   // Used after parsing to eliminate values that are no longer in program
-  void   remove_useless_nodes(VectorSet &useful) { _table.remove_useless_nodes(useful); }
+  void   remove_useless_nodes(VectorSet &useful) {
+    _table.remove_useless_nodes(useful);
+    // this may invalidate cached constant nodes so reset the con caches
+    init_con_caches();
+  }
 
   virtual ConNode* uncached_makecon(const Type* t);  // override from PhaseTransform
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/stringopts.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -0,0 +1,1395 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_stringopts.cpp.incl"
+
+#define __ kit.
+
+class StringConcat : public ResourceObj {
+ private:
+  PhaseStringOpts*    _stringopts;
+  Node*               _string_alloc;
+  AllocateNode*       _begin;          // The allocation that begins the pattern
+  CallStaticJavaNode* _end;            // The final call of the pattern.  Will either be
+                                       // SB.toString or String.<init>(SB.toString)
+  bool                _multiple;       // indicates this is a fusion of two or more
+                                       // separate StringBuilders
+
+  Node*               _arguments;      // The list of arguments to be concatenated
+  GrowableArray<int>  _mode;           // into a String along with a mode flag
+                                       // indicating how to treat the value.
+
+  Node_List           _control;        // List of control nodes that will be deleted
+  Node_List           _uncommon_traps; // Uncommon traps that need to be rewritten
+                                       // to restart at the initial JVMState.
+ public:
+  // Mode for converting arguments to Strings
+  enum {
+    StringMode,
+    IntMode,
+    CharMode
+  };
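+  // For example, the expression "x=" + x + c (assuming an int x and a
+  // char c) would be recorded as three arguments with modes
+  // StringMode, IntMode and CharMode respectively.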
+
+  StringConcat(PhaseStringOpts* stringopts, CallStaticJavaNode* end):
+    _end(end),
+    _begin(NULL),
+    _multiple(false),
+    _string_alloc(NULL),
+    _stringopts(stringopts) {
+    _arguments = new (_stringopts->C, 1) Node(1);
+    _arguments->del_req(0);
+  }
+
+  bool validate_control_flow();
+
+  void merge_add() {
+#if 0
+    // XXX This is placeholder code for reusing an existing String
+    // allocation but the logic for checking the state safety is
+    // probably inadequate at the moment.
+    CallProjections endprojs;
+    sc->end()->extract_projections(&endprojs, false);
+    if (endprojs.resproj != NULL) {
+      for (SimpleDUIterator i(endprojs.resproj); i.has_next(); i.next()) {
+        CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
+        if (use != NULL && use->method() != NULL &&
+            use->method()->holder() == C->env()->String_klass() &&
+            use->method()->name() == ciSymbol::object_initializer_name() &&
+            use->in(TypeFunc::Parms + 1) == endprojs.resproj) {
+          // Found useless new String(sb.toString()) so reuse the newly allocated String
+          // when creating the result instead of allocating a new one.
+          sc->set_string_alloc(use->in(TypeFunc::Parms));
+          sc->set_end(use);
+        }
+      }
+    }
+#endif
+  }
+
+  StringConcat* merge(StringConcat* other, Node* arg);
+
+  void set_allocation(AllocateNode* alloc) {
+    _begin = alloc;
+  }
+
+  void append(Node* value, int mode) {
+    _arguments->add_req(value);
+    _mode.append(mode);
+  }
+  void push(Node* value, int mode) {
+    _arguments->ins_req(0, value);
+    _mode.insert_before(0, mode);
+  }
+  void push_string(Node* value) {
+    push(value, StringMode);
+  }
+  void push_int(Node* value) {
+    push(value, IntMode);
+  }
+  void push_char(Node* value) {
+    push(value, CharMode);
+  }
+
+  Node* argument(int i) {
+    return _arguments->in(i);
+  }
+  void set_argument(int i, Node* value) {
+    _arguments->set_req(i, value);
+  }
+  int num_arguments() {
+    return _mode.length();
+  }
+  int mode(int i) {
+    return _mode.at(i);
+  }
+  void add_control(Node* ctrl) {
+    assert(!_control.contains(ctrl), "only push once");
+    _control.push(ctrl);
+  }
+  CallStaticJavaNode* end() { return _end; }
+  AllocateNode* begin() { return _begin; }
+  Node* string_alloc() { return _string_alloc; }
+
+  void eliminate_unneeded_control();
+  void eliminate_initialize(InitializeNode* init);
+  void eliminate_call(CallNode* call);
+
+  void maybe_log_transform() {
+    CompileLog* log = _stringopts->C->log();
+    if (log != NULL) {
+      log->head("replace_string_concat arguments='%d' string_alloc='%d' multiple='%d'",
+                num_arguments(),
+                _string_alloc != NULL,
+                _multiple);
+      JVMState* p = _begin->jvms();
+      while (p != NULL) {
+        log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
+        p = p->caller();
+      }
+      log->tail("replace_string_concat");
+    }
+  }
+
+  void convert_uncommon_traps(GraphKit& kit, const JVMState* jvms) {
+    for (uint u = 0; u < _uncommon_traps.size(); u++) {
+      Node* uct = _uncommon_traps.at(u);
+
+      // Build a new call using the jvms state of the allocate
+      address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin();
+      const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type();
+      int size = call_type->domain()->cnt();
+      const TypePtr* no_memory_effects = NULL;
+      Compile* C = _stringopts->C;
+      CallStaticJavaNode* call = new (C, size) CallStaticJavaNode(call_type, call_addr, "uncommon_trap",
+                                                                  jvms->bci(), no_memory_effects);
+      for (int e = 0; e < TypeFunc::Parms; e++) {
+        call->init_req(e, uct->in(e));
+      }
+      // Set the trap request to record intrinsic failure if this trap
+      // is taken too many times.  Ideally we would handle these traps by
+      // doing the original bookkeeping in the MDO so that if it caused
+      // the code to be thrown out we could still recompile and use the
+      // optimization.  Failing the uncommon traps doesn't really mean
+      // that the optimization is a bad idea but there's no other way to
+      // do the MDO updates currently.
+      int trap_request = Deoptimization::make_trap_request(Deoptimization::Reason_intrinsic,
+                                                           Deoptimization::Action_make_not_entrant);
+      call->init_req(TypeFunc::Parms, __ intcon(trap_request));
+      kit.add_safepoint_edges(call);
+
+      _stringopts->gvn()->transform(call);
+      C->gvn_replace_by(uct, call);
+      uct->disconnect_inputs(NULL);
+    }
+  }
+
+  void cleanup() {
+    // disconnect the hook node
+    _arguments->disconnect_inputs(NULL);
+  }
+};
+
+
+void StringConcat::eliminate_unneeded_control() {
+  eliminate_initialize(begin()->initialization());
+  for (uint i = 0; i < _control.size(); i++) {
+    Node* n = _control.at(i);
+    if (n->is_Call()) {
+      if (n != _end) {
+        eliminate_call(n->as_Call());
+      }
+    } else if (n->is_IfTrue()) {
+      Compile* C = _stringopts->C;
+      C->gvn_replace_by(n, n->in(0)->in(0));
+      C->gvn_replace_by(n->in(0), C->top());
+    }
+  }
+}
+
+
+StringConcat* StringConcat::merge(StringConcat* other, Node* arg) {
+  StringConcat* result = new StringConcat(_stringopts, _end);
+  for (uint x = 0; x < _control.size(); x++) {
+    Node* n = _control.at(x);
+    if (n->is_Call()) {
+      result->_control.push(n);
+    }
+  }
+  for (uint x = 0; x < other->_control.size(); x++) {
+    Node* n = other->_control.at(x);
+    if (n->is_Call()) {
+      result->_control.push(n);
+    }
+  }
+  assert(result->_control.contains(other->_end), "what?");
+  assert(result->_control.contains(_begin), "what?");
+  for (int x = 0; x < num_arguments(); x++) {
+    if (argument(x) == arg) {
+      // replace the toString result with all the arguments that
+      // made up the other StringConcat
+      for (int y = 0; y < other->num_arguments(); y++) {
+        result->append(other->argument(y), other->mode(y));
+      }
+    } else {
+      result->append(argument(x), mode(x));
+    }
+  }
+  result->set_allocation(other->_begin);
+  result->_multiple = true;
+  return result;
+}
+
+
+void StringConcat::eliminate_call(CallNode* call) {
+  Compile* C = _stringopts->C;
+  CallProjections projs;
+  call->extract_projections(&projs, false);
+  if (projs.fallthrough_catchproj != NULL) {
+    C->gvn_replace_by(projs.fallthrough_catchproj, call->in(TypeFunc::Control));
+  }
+  if (projs.fallthrough_memproj != NULL) {
+    C->gvn_replace_by(projs.fallthrough_memproj, call->in(TypeFunc::Memory));
+  }
+  if (projs.catchall_memproj != NULL) {
+    C->gvn_replace_by(projs.catchall_memproj, C->top());
+  }
+  if (projs.fallthrough_ioproj != NULL) {
+    C->gvn_replace_by(projs.fallthrough_ioproj, call->in(TypeFunc::I_O));
+  }
+  if (projs.catchall_ioproj != NULL) {
+    C->gvn_replace_by(projs.catchall_ioproj, C->top());
+  }
+  if (projs.catchall_catchproj != NULL) {
+    // EA can't cope with the partially collapsed graph this
+    // creates so put it on the worklist to be collapsed later.
+    for (SimpleDUIterator i(projs.catchall_catchproj); i.has_next(); i.next()) {
+      Node *use = i.get();
+      int opc = use->Opcode();
+      if (opc == Op_CreateEx || opc == Op_Region) {
+        _stringopts->record_dead_node(use);
+      }
+    }
+    C->gvn_replace_by(projs.catchall_catchproj, C->top());
+  }
+  if (projs.resproj != NULL) {
+    C->gvn_replace_by(projs.resproj, C->top());
+  }
+  C->gvn_replace_by(call, C->top());
+}
+
+void StringConcat::eliminate_initialize(InitializeNode* init) {
+  Compile* C = _stringopts->C;
+
+  // Eliminate Initialize node.
+  assert(init->outcnt() <= 2, "only a control and memory projection expected");
+  assert(init->req() <= InitializeNode::RawStores, "no pending inits");
+  Node *ctrl_proj = init->proj_out(TypeFunc::Control);
+  if (ctrl_proj != NULL) {
+    C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control));
+  }
+  Node *mem_proj = init->proj_out(TypeFunc::Memory);
+  if (mem_proj != NULL) {
+    Node *mem = init->in(TypeFunc::Memory);
+    C->gvn_replace_by(mem_proj, mem);
+  }
+  C->gvn_replace_by(init, C->top());
+  init->disconnect_inputs(NULL);
+}
+
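+// Walk the control flow backwards from root, following control inputs
+// and the inputs of Regions, and collect every StringBuilder.toString
+// or StringBuffer.toString call as a candidate for this optimization.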
+Node_List PhaseStringOpts::collect_toString_calls() {
+  Node_List string_calls;
+  Node_List worklist;
+
+  _visited.Clear();
+
+  // Prime the worklist
+  for (uint i = 1; i < C->root()->len(); i++) {
+    Node* n = C->root()->in(i);
+    if (n != NULL && !_visited.test_set(n->_idx)) {
+      worklist.push(n);
+    }
+  }
+
+  while (worklist.size() > 0) {
+    Node* ctrl = worklist.pop();
+    if (ctrl->is_CallStaticJava()) {
+      CallStaticJavaNode* csj = ctrl->as_CallStaticJava();
+      ciMethod* m = csj->method();
+      if (m != NULL &&
+          (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
+           m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) {
+        string_calls.push(csj);
+      }
+    }
+    if (ctrl->in(0) != NULL && !_visited.test_set(ctrl->in(0)->_idx)) {
+      worklist.push(ctrl->in(0));
+    }
+    if (ctrl->is_Region()) {
+      for (uint i = 1; i < ctrl->len(); i++) {
+        if (ctrl->in(i) != NULL && !_visited.test_set(ctrl->in(i)->_idx)) {
+          worklist.push(ctrl->in(i));
+        }
+      }
+    }
+  }
+  return string_calls;
+}
+
+
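+// Starting from a toString call, walk backwards through the receiver
+// chain of append calls to the StringBuilder/StringBuffer allocation
+// and its constructor.  This is the shape javac typically emits for
+// string concatenation, e.g.
+//   "x = " + x
+// becomes roughly
+//   new StringBuilder().append("x = ").append(x).toString()
+// Returns a StringConcat describing the pattern, or NULL if the shape
+// isn't recognized.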
+StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) {
+  ciMethod* m = call->method();
+  ciSymbol* string_sig;
+  ciSymbol* int_sig;
+  ciSymbol* char_sig;
+  if (m->holder() == C->env()->StringBuilder_klass()) {
+    string_sig = ciSymbol::String_StringBuilder_signature();
+    int_sig = ciSymbol::int_StringBuilder_signature();
+    char_sig = ciSymbol::char_StringBuilder_signature();
+  } else if (m->holder() == C->env()->StringBuffer_klass()) {
+    string_sig = ciSymbol::String_StringBuffer_signature();
+    int_sig = ciSymbol::int_StringBuffer_signature();
+    char_sig = ciSymbol::char_StringBuffer_signature();
+  } else {
+    return NULL;
+  }
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat) {
+    tty->print("considering toString call in ");
+    call->jvms()->dump_spec(tty); tty->cr();
+  }
+#endif
+
+  StringConcat* sc = new StringConcat(this, call);
+
+  AllocateNode* alloc = NULL;
+  InitializeNode* init = NULL;
+
+  // possible opportunity for StringBuilder fusion
+  CallStaticJavaNode* cnode = call;
+  while (cnode) {
+    Node* recv = cnode->in(TypeFunc::Parms)->uncast();
+    if (recv->is_Proj()) {
+      recv = recv->in(0);
+    }
+    cnode = recv->isa_CallStaticJava();
+    if (cnode == NULL) {
+      alloc = recv->isa_Allocate();
+      if (alloc == NULL) {
+        break;
+      }
+      // Find the constructor call
+      Node* result = alloc->result_cast();
+      if (result == NULL || !result->is_CheckCastPP()) {
+        // strange looking allocation
+#ifndef PRODUCT
+        if (PrintOptimizeStringConcat) {
+          tty->print("giving up because allocation looks strange ");
+          alloc->jvms()->dump_spec(tty); tty->cr();
+        }
+#endif
+        break;
+      }
+      Node* constructor = NULL;
+      for (SimpleDUIterator i(result); i.has_next(); i.next()) {
+        CallStaticJavaNode *use = i.get()->isa_CallStaticJava();
+        if (use != NULL && use->method() != NULL &&
+            use->method()->name() == ciSymbol::object_initializer_name() &&
+            use->method()->holder() == m->holder()) {
+          // Matched the constructor.
+          ciSymbol* sig = use->method()->signature()->as_symbol();
+          if (sig == ciSymbol::void_method_signature() ||
+              sig == ciSymbol::int_void_signature() ||
+              sig == ciSymbol::string_void_signature()) {
+            if (sig == ciSymbol::string_void_signature()) {
+              // StringBuilder(String) so pick this up as the first argument
+              assert(use->in(TypeFunc::Parms + 1) != NULL, "what?");
+              sc->push_string(use->in(TypeFunc::Parms + 1));
+            }
+            // The int variant takes an initial size for the backing
+            // array so just treat it like the void version.
+            constructor = use;
+          } else {
+#ifndef PRODUCT
+            if (PrintOptimizeStringConcat) {
+              tty->print("unexpected constructor signature: %s", sig->as_utf8());
+            }
+#endif
+          }
+          break;
+        }
+      }
+      if (constructor == NULL) {
+        // couldn't find constructor
+#ifndef PRODUCT
+        if (PrintOptimizeStringConcat) {
+          tty->print("giving up because couldn't find constructor ");
+          alloc->jvms()->dump_spec(tty);
+        }
+#endif
+        break;
+      }
+
+      // Walked all the way back and found the constructor call, so see
+      // if this chain of calls can be converted into a direct string
+      // concatenation.
+      sc->add_control(call);
+      sc->add_control(constructor);
+      sc->add_control(alloc);
+      sc->set_allocation(alloc);
+      if (sc->validate_control_flow()) {
+        return sc;
+      } else {
+        return NULL;
+      }
+    } else if (cnode->method() == NULL) {
+      break;
+    } else if (cnode->method()->holder() == m->holder() &&
+               cnode->method()->name() == ciSymbol::append_name() &&
+               (cnode->method()->signature()->as_symbol() == string_sig ||
+                cnode->method()->signature()->as_symbol() == char_sig ||
+                cnode->method()->signature()->as_symbol() == int_sig)) {
+      sc->add_control(cnode);
+      Node* arg = cnode->in(TypeFunc::Parms + 1);
+      if (cnode->method()->signature()->as_symbol() == int_sig) {
+        sc->push_int(arg);
+      } else if (cnode->method()->signature()->as_symbol() == char_sig) {
+        sc->push_char(arg);
+      } else {
+        if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
+          CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
+          if (csj->method() != NULL &&
+              csj->method()->holder() == C->env()->Integer_klass() &&
+              csj->method()->name() == ciSymbol::toString_name()) {
+            sc->add_control(csj);
+            sc->push_int(csj->in(TypeFunc::Parms));
+            continue;
+          }
+        }
+        sc->push_string(arg);
+      }
+      continue;
+    } else {
+      // some unhandled signature
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        tty->print("giving up because encountered unexpected signature ");
+        cnode->tf()->dump(); tty->cr();
+        cnode->in(TypeFunc::Parms + 1)->dump();
+      }
+#endif
+      break;
+    }
+  }
+  return NULL;
+}
+
+
+PhaseStringOpts::PhaseStringOpts(PhaseGVN* gvn, Unique_Node_List*):
+  Phase(StringOpts),
+  _gvn(gvn),
+  _visited(Thread::current()->resource_area()) {
+
+  assert(OptimizeStringConcat, "shouldn't be here");
+
+  size_table_field = C->env()->Integer_klass()->get_field_by_name(ciSymbol::make("sizeTable"),
+                                                                  ciSymbol::make("[I"), true);
+  if (size_table_field == NULL) {
+    // Something wrong so give up.
+    assert(false, "why can't we find Integer.sizeTable?");
+    return;
+  }
+
+  // Collect the types needed to talk about the various slices of memory
+  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
+                                                     false, NULL, 0);
+
+  const TypePtr* value_field_type = string_type->add_offset(java_lang_String::value_offset_in_bytes());
+  const TypePtr* offset_field_type = string_type->add_offset(java_lang_String::offset_offset_in_bytes());
+  const TypePtr* count_field_type = string_type->add_offset(java_lang_String::count_offset_in_bytes());
+
+  value_field_idx = C->get_alias_index(value_field_type);
+  count_field_idx = C->get_alias_index(count_field_type);
+  offset_field_idx = C->get_alias_index(offset_field_type);
+  char_adr_idx = C->get_alias_index(TypeAryPtr::CHARS);
+
+  // For each locally allocated StringBuffer see if the usages can be
+  // collapsed into a single String construction.
+
+  // Run through the list of allocations looking for SB.toString to see
+  // if it's possible to fuse the usage of the SB into a single String
+  // construction.
+  GrowableArray<StringConcat*> concats;
+  Node_List toStrings = collect_toString_calls();
+  while (toStrings.size() > 0) {
+    StringConcat* sc = build_candidate(toStrings.pop()->as_CallStaticJava());
+    if (sc != NULL) {
+      concats.push(sc);
+    }
+  }
+
+  // try to coalesce separate concats
+ restart:
+  for (int c = 0; c < concats.length(); c++) {
+    StringConcat* sc = concats.at(c);
+    for (int i = 0; i < sc->num_arguments(); i++) {
+      Node* arg = sc->argument(i);
+      if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) {
+        CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava();
+        if (csj->method() != NULL &&
+            (csj->method()->holder() == C->env()->StringBuffer_klass() ||
+             csj->method()->holder() == C->env()->StringBuilder_klass()) &&
+            csj->method()->name() == ciSymbol::toString_name()) {
+          for (int o = 0; o < concats.length(); o++) {
+            if (c == o) continue;
+            StringConcat* other = concats.at(o);
+            if (other->end() == csj) {
+#ifndef PRODUCT
+              if (PrintOptimizeStringConcat) {
+                tty->print_cr("considering stacked concats");
+              }
+#endif
+
+              StringConcat* merged = sc->merge(other, arg);
+              if (merged->validate_control_flow()) {
+#ifndef PRODUCT
+                if (PrintOptimizeStringConcat) {
+                  tty->print_cr("stacking would succeed");
+                }
+#endif
+                if (c < o) {
+                  concats.remove_at(o);
+                  concats.at_put(c, merged);
+                } else {
+                  concats.remove_at(c);
+                  concats.at_put(o, merged);
+                }
+                goto restart;
+              } else {
+#ifndef PRODUCT
+                if (PrintOptimizeStringConcat) {
+                  tty->print_cr("stacking would fail");
+                }
+#endif
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+
+  for (int c = 0; c < concats.length(); c++) {
+    StringConcat* sc = concats.at(c);
+    replace_string_concat(sc);
+  }
+
+  remove_dead_nodes();
+}
+
+void PhaseStringOpts::record_dead_node(Node* dead) {
+  dead_worklist.push(dead);
+}
+
+void PhaseStringOpts::remove_dead_nodes() {
+  // Delete any dead nodes to make things clean enough that escape
+  // analysis doesn't get unhappy.
+  while (dead_worklist.size() > 0) {
+    Node* use = dead_worklist.pop();
+    int opc = use->Opcode();
+    switch (opc) {
+      case Op_Region: {
+        uint i = 1;
+        for (i = 1; i < use->req(); i++) {
+          if (use->in(i) != C->top()) {
+            break;
+          }
+        }
+        if (i >= use->req()) {
+          for (SimpleDUIterator i(use); i.has_next(); i.next()) {
+            Node* m = i.get();
+            if (m->is_Phi()) {
+              dead_worklist.push(m);
+            }
+          }
+          C->gvn_replace_by(use, C->top());
+        }
+        break;
+      }
+      case Op_AddP:
+      case Op_CreateEx: {
+        // Recursively clean up references to CreateEx so EA doesn't
+        // get unhappy about the partially collapsed graph.
+        for (SimpleDUIterator i(use); i.has_next(); i.next()) {
+          Node* m = i.get();
+          if (m->is_AddP()) {
+            dead_worklist.push(m);
+          }
+        }
+        C->gvn_replace_by(use, C->top());
+        break;
+      }
+      case Op_Phi:
+        if (use->in(0) == C->top()) {
+          C->gvn_replace_by(use, C->top());
+        }
+        break;
+    }
+  }
+}
+
+
+bool StringConcat::validate_control_flow() {
+  // We found all the calls and arguments, now let's see if it's
+  // safe to transform the graph as we would expect.
+
+  // Check to see if this resulted in too many uncommon traps previously
+  if (Compile::current()->too_many_traps(_begin->jvms()->method(), _begin->jvms()->bci(),
+                        Deoptimization::Reason_intrinsic)) {
+    return false;
+  }
+
+  // Walk backwards over the control flow from toString to the
+  // allocation and make sure all the control flow is ok.  This
+  // means it's either going to be eliminated once the calls are
+  // removed or it can safely be transformed into an uncommon
+  // trap.
+
+  int null_check_count = 0;
+  Unique_Node_List ctrl_path;
+
+  assert(_control.contains(_begin), "missing");
+  assert(_control.contains(_end), "missing");
+
+  // Collect the nodes that we know about and will eliminate into ctrl_path
+  for (uint i = 0; i < _control.size(); i++) {
+    // Push the call and its control projection
+    Node* n = _control.at(i);
+    if (n->is_Allocate()) {
+      AllocateNode* an = n->as_Allocate();
+      InitializeNode* init = an->initialization();
+      ctrl_path.push(init);
+      ctrl_path.push(init->as_Multi()->proj_out(0));
+    }
+    if (n->is_Call()) {
+      CallNode* cn = n->as_Call();
+      ctrl_path.push(cn);
+      ctrl_path.push(cn->proj_out(0));
+      ctrl_path.push(cn->proj_out(0)->unique_out());
+      ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  // Skip backwards through the control checking for unexpected control flow
+  Node* ptr = _end;
+  bool fail = false;
+  while (ptr != _begin) {
+    if (ptr->is_Call() && ctrl_path.member(ptr)) {
+      ptr = ptr->in(0);
+    } else if (ptr->is_CatchProj() && ctrl_path.member(ptr)) {
+      ptr = ptr->in(0)->in(0)->in(0);
+      assert(ctrl_path.member(ptr), "should be a known piece of control");
+    } else if (ptr->is_IfTrue()) {
+      IfNode* iff = ptr->in(0)->as_If();
+      BoolNode* b = iff->in(1)->isa_Bool();
+      Node* cmp = b->in(1);
+      Node* v1 = cmp->in(1);
+      Node* v2 = cmp->in(2);
+      Node* otherproj = iff->proj_out(1 - ptr->as_Proj()->_con);
+
+      // Null check of the return of append which can simply be eliminated
+      if (b->_test._test == BoolTest::ne &&
+          v2->bottom_type() == TypePtr::NULL_PTR &&
+          v1->is_Proj() && ctrl_path.member(v1->in(0))) {
+        // NULL check of the return value of the append
+        null_check_count++;
+        if (otherproj->outcnt() == 1) {
+          CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+          if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
+            ctrl_path.push(call);
+          }
+        }
+        _control.push(ptr);
+        ptr = ptr->in(0)->in(0);
+        continue;
+      }
+
+      // A test which leads to an uncommon trap which should be safe.
+      // Later this trap will be converted into a trap that restarts
+      // at the beginning.
+      if (otherproj->outcnt() == 1) {
+        CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
+        if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) {
+          // control flow leads to uct so should be ok
+          _uncommon_traps.push(call);
+          ctrl_path.push(call);
+          ptr = ptr->in(0)->in(0);
+          continue;
+        }
+      }
+
+#ifndef PRODUCT
+      // Some unexpected control flow we don't know how to handle.
+      if (PrintOptimizeStringConcat) {
+        tty->print_cr("failing with unknown test");
+        b->dump();
+        cmp->dump();
+        v1->dump();
+        v2->dump();
+        tty->cr();
+      }
+#endif
+      break;
+    } else if (ptr->is_Proj() && ptr->in(0)->is_Initialize()) {
+      ptr = ptr->in(0)->in(0);
+    } else if (ptr->is_Region()) {
+      Node* copy = ptr->as_Region()->is_copy();
+      if (copy != NULL) {
+        ptr = copy;
+        continue;
+      }
+      if (ptr->req() == 3 &&
+          ptr->in(1) != NULL && ptr->in(1)->is_Proj() &&
+          ptr->in(2) != NULL && ptr->in(2)->is_Proj() &&
+          ptr->in(1)->in(0) == ptr->in(2)->in(0) &&
+          ptr->in(1)->in(0) != NULL && ptr->in(1)->in(0)->is_If()) {
+        // Simple diamond.
+        // XXX should check for possibly merging stores.  simple data merges are ok.
+        ptr = ptr->in(1)->in(0)->in(0);
+        continue;
+      }
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        tty->print_cr("fusion would fail for region");
+        _begin->dump();
+        ptr->dump(2);
+      }
+#endif
+      fail = true;
+      break;
+    } else {
+      // other unknown control
+      if (!fail) {
+#ifndef PRODUCT
+        if (PrintOptimizeStringConcat) {
+          tty->print_cr("fusion would fail for");
+          _begin->dump();
+        }
+#endif
+        fail = true;
+      }
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        ptr->dump();
+      }
+#endif
+      ptr = ptr->in(0);
+    }
+  }
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat && fail) {
+    tty->cr();
+  }
+#endif
+  if (fail) return !fail;
+
+  // Validate that all the results produced are contained within
+  // this cluster of objects.  First collect all the results produced
+  // by calls in the region.
+  _stringopts->_visited.Clear();
+  Node_List worklist;
+  Node* final_result = _end->proj_out(TypeFunc::Parms);
+  for (uint i = 0; i < _control.size(); i++) {
+    CallNode* cnode = _control.at(i)->isa_Call();
+    if (cnode != NULL) {
+      _stringopts->_visited.test_set(cnode->_idx);
+    }
+    Node* result = cnode != NULL ? cnode->proj_out(TypeFunc::Parms) : NULL;
+    if (result != NULL && result != final_result) {
+      worklist.push(result);
+    }
+  }
+
+  Node* last_result = NULL;
+  while (worklist.size() > 0) {
+    Node* result = worklist.pop();
+    if (_stringopts->_visited.test_set(result->_idx))
+      continue;
+    for (SimpleDUIterator i(result); i.has_next(); i.next()) {
+      Node *use = i.get();
+      if (ctrl_path.member(use)) {
+        // already checked this
+        continue;
+      }
+      int opc = use->Opcode();
+      if (opc == Op_CmpP || opc == Op_Node) {
+        ctrl_path.push(use);
+        continue;
+      }
+      if (opc == Op_CastPP || opc == Op_CheckCastPP) {
+        for (SimpleDUIterator j(use); j.has_next(); j.next()) {
+          worklist.push(j.get());
+        }
+        worklist.push(use->in(1));
+        ctrl_path.push(use);
+        continue;
+      }
+#ifndef PRODUCT
+      if (PrintOptimizeStringConcat) {
+        if (result != last_result) {
+          last_result = result;
+          tty->print_cr("extra uses for result:");
+          last_result->dump();
+        }
+        use->dump();
+      }
+#endif
+      fail = true;
+      break;
+    }
+  }
+
+#ifndef PRODUCT
+  if (PrintOptimizeStringConcat && !fail) {
+    ttyLocker ttyl;
+    tty->cr();
+    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
+    _begin->jvms()->dump_spec(tty); tty->cr();
+    for (int i = 0; i < num_arguments(); i++) {
+      argument(i)->dump();
+    }
+    _control.dump();
+    tty->cr();
+  }
+#endif
+
+  return !fail;
+}
+
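+// Emit a load of a static field from its holder klass; used below to
+// fetch Integer.sizeTable for int_stringSize.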
+Node* PhaseStringOpts::fetch_static_field(GraphKit& kit, ciField* field) {
+  const TypeKlassPtr* klass_type = TypeKlassPtr::make(field->holder());
+  Node* klass_node = __ makecon(klass_type);
+  BasicType bt = field->layout_type();
+  ciType* field_klass = field->type();
+
+  const Type *type;
+  if( bt == T_OBJECT ) {
+    if (!field->type()->is_loaded()) {
+      type = TypeInstPtr::BOTTOM;
+    } else if (field->is_constant()) {
+      // This can happen if the constant oop is non-perm.
+      ciObject* con = field->constant_value().as_object();
+      // Do not "join" in the previous type; it doesn't add value,
+      // and may yield a vacuous result if the field is of interface type.
+      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
+      assert(type != NULL, "field singleton type must be consistent");
+    } else {
+      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
+    }
+  } else {
+    type = Type::get_const_basic_type(bt);
+  }
+
+  return kit.make_load(NULL, kit.basic_plus_adr(klass_node, field->offset_in_bytes()),
+                       type, T_OBJECT,
+                       C->get_alias_index(klass_type->add_offset(field->offset_in_bytes())));
+}
+
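+// Emit IR mirroring Integer.stringSize: compute the number of
+// characters needed to represent the int argument, including a leading
+// '-' for negative values.  Integer.MIN_VALUE is handled separately
+// and always takes 11 characters.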
+Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
+  RegionNode *final_merge = new (C, 3) RegionNode(3);
+  kit.gvn().set_type(final_merge, Type::CONTROL);
+  Node* final_size = new (C, 3) PhiNode(final_merge, TypeInt::INT);
+  kit.gvn().set_type(final_size, TypeInt::INT);
+
+  IfNode* iff = kit.create_and_map_if(kit.control(),
+                                      __ Bool(__ CmpI(arg, __ intcon(0x80000000)), BoolTest::ne),
+                                      PROB_FAIR, COUNT_UNKNOWN);
+  Node* is_min = __ IfFalse(iff);
+  final_merge->init_req(1, is_min);
+  final_size->init_req(1, __ intcon(11));
+
+  kit.set_control(__ IfTrue(iff));
+  if (kit.stopped()) {
+    final_merge->init_req(2, C->top());
+    final_size->init_req(2, C->top());
+  } else {
+
+    // int size = (i < 0) ? stringSize(-i) + 1 : stringSize(i);
+    RegionNode *r = new (C, 3) RegionNode(3);
+    kit.gvn().set_type(r, Type::CONTROL);
+    Node *phi = new (C, 3) PhiNode(r, TypeInt::INT);
+    kit.gvn().set_type(phi, TypeInt::INT);
+    Node *size = new (C, 3) PhiNode(r, TypeInt::INT);
+    kit.gvn().set_type(size, TypeInt::INT);
+    Node* chk = __ CmpI(arg, __ intcon(0));
+    Node* p = __ Bool(chk, BoolTest::lt);
+    IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_FAIR, COUNT_UNKNOWN);
+    Node* lessthan = __ IfTrue(iff);
+    Node* greaterequal = __ IfFalse(iff);
+    r->init_req(1, lessthan);
+    phi->init_req(1, __ SubI(__ intcon(0), arg));
+    size->init_req(1, __ intcon(1));
+    r->init_req(2, greaterequal);
+    phi->init_req(2, arg);
+    size->init_req(2, __ intcon(0));
+    kit.set_control(r);
+    C->record_for_igvn(r);
+    C->record_for_igvn(phi);
+    C->record_for_igvn(size);
+
+    // for (int i=0; ; i++)
+    //   if (x <= sizeTable[i])
+    //     return i+1;
+    RegionNode *loop = new (C, 3) RegionNode(3);
+    loop->init_req(1, kit.control());
+    kit.gvn().set_type(loop, Type::CONTROL);
+
+    Node *index = new (C, 3) PhiNode(loop, TypeInt::INT);
+    index->init_req(1, __ intcon(0));
+    kit.gvn().set_type(index, TypeInt::INT);
+    kit.set_control(loop);
+    Node* sizeTable = fetch_static_field(kit, size_table_field);
+
+    Node* value = kit.load_array_element(NULL, sizeTable, index, TypeAryPtr::INTS);
+    C->record_for_igvn(value);
+    Node* limit = __ CmpI(phi, value);
+    Node* limitb = __ Bool(limit, BoolTest::le);
+    IfNode* iff2 = kit.create_and_map_if(kit.control(), limitb, PROB_MIN, COUNT_UNKNOWN);
+    Node* lessEqual = __ IfTrue(iff2);
+    Node* greater = __ IfFalse(iff2);
+
+    loop->init_req(2, greater);
+    index->init_req(2, __ AddI(index, __ intcon(1)));
+
+    kit.set_control(lessEqual);
+    C->record_for_igvn(loop);
+    C->record_for_igvn(index);
+
+    final_merge->init_req(2, kit.control());
+    final_size->init_req(2, __ AddI(__ AddI(index, size), __ intcon(1)));
+  }
+
+  kit.set_control(final_merge);
+  C->record_for_igvn(final_merge);
+  C->record_for_igvn(final_size);
+
+  return final_size;
+}
+
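+// Emit IR mirroring Integer.getChars: write the decimal digits (and a
+// '-' sign for negative values) of arg into char_array, filling
+// backwards from end.  Integer.MIN_VALUE is handled by copying the
+// cached min jint String instead.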
+void PhaseStringOpts::int_getChars(GraphKit& kit, Node* arg, Node* char_array, Node* start, Node* end) {
+  RegionNode *final_merge = new (C, 4) RegionNode(4);
+  kit.gvn().set_type(final_merge, Type::CONTROL);
+  Node *final_mem = PhiNode::make(final_merge, kit.memory(char_adr_idx), Type::MEMORY, TypeAryPtr::CHARS);
+  kit.gvn().set_type(final_mem, Type::MEMORY);
+
+  // need to handle Integer.MIN_VALUE specially because negating doesn't make it positive
+  {
+    // i == MIN_VALUE
+    IfNode* iff = kit.create_and_map_if(kit.control(),
+                                        __ Bool(__ CmpI(arg, __ intcon(0x80000000)), BoolTest::ne),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+
+    Node* old_mem = kit.memory(char_adr_idx);
+
+    kit.set_control(__ IfFalse(iff));
+    if (kit.stopped()) {
+      // Statically not equal to MIN_VALUE so this path is dead
+      final_merge->init_req(3, kit.control());
+    } else {
+      copy_string(kit, __ makecon(TypeInstPtr::make(C->env()->the_min_jint_string())),
+                  char_array, start);
+      final_merge->init_req(3, kit.control());
+      final_mem->init_req(3, kit.memory(char_adr_idx));
+    }
+
+    kit.set_control(__ IfTrue(iff));
+    kit.set_memory(old_mem, char_adr_idx);
+  }
+
+
+  // Simplified version of Integer.getChars
+
+  // int q, r;
+  // int charPos = index;
+  Node* charPos = end;
+
+  // char sign = 0;
+
+  Node* i = arg;
+  Node* sign = __ intcon(0);
+
+  // if (i < 0) {
+  //     sign = '-';
+  //     i = -i;
+  // }
+  {
+    IfNode* iff = kit.create_and_map_if(kit.control(),
+                                        __ Bool(__ CmpI(arg, __ intcon(0)), BoolTest::lt),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+
+    RegionNode *merge = new (C, 3) RegionNode(3);
+    kit.gvn().set_type(merge, Type::CONTROL);
+    i = new (C, 3) PhiNode(merge, TypeInt::INT);
+    kit.gvn().set_type(i, TypeInt::INT);
+    sign = new (C, 3) PhiNode(merge, TypeInt::INT);
+    kit.gvn().set_type(sign, TypeInt::INT);
+
+    merge->init_req(1, __ IfTrue(iff));
+    i->init_req(1, __ SubI(__ intcon(0), arg));
+    sign->init_req(1, __ intcon('-'));
+    merge->init_req(2, __ IfFalse(iff));
+    i->init_req(2, arg);
+    sign->init_req(2, __ intcon(0));
+
+    kit.set_control(merge);
+
+    C->record_for_igvn(merge);
+    C->record_for_igvn(i);
+    C->record_for_igvn(sign);
+  }
+
+  // for (;;) {
+  //     q = i / 10;
+  //     r = i - ((q << 3) + (q << 1));  // r = i-(q*10) ...
+  //     buf [--charPos] = digits [r];
+  //     i = q;
+  //     if (i == 0) break;
+  // }
+
+  {
+    RegionNode *head = new (C, 3) RegionNode(3);
+    head->init_req(1, kit.control());
+    kit.gvn().set_type(head, Type::CONTROL);
+    Node *i_phi = new (C, 3) PhiNode(head, TypeInt::INT);
+    i_phi->init_req(1, i);
+    kit.gvn().set_type(i_phi, TypeInt::INT);
+    charPos = PhiNode::make(head, charPos);
+    kit.gvn().set_type(charPos, TypeInt::INT);
+    Node *mem = PhiNode::make(head, kit.memory(char_adr_idx), Type::MEMORY, TypeAryPtr::CHARS);
+    kit.gvn().set_type(mem, Type::MEMORY);
+    kit.set_control(head);
+    kit.set_memory(mem, char_adr_idx);
+
+    Node* q = __ DivI(kit.null(), i_phi, __ intcon(10));
+    Node* r = __ SubI(i_phi, __ AddI(__ LShiftI(q, __ intcon(3)),
+                                     __ LShiftI(q, __ intcon(1))));
+    Node* m1 = __ SubI(charPos, __ intcon(1));
+    Node* ch = __ AddI(r, __ intcon('0'));
+
+    Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
+                                  ch, T_CHAR, char_adr_idx);
+
+
+    IfNode* iff = kit.create_and_map_if(head, __ Bool(__ CmpI(q, __ intcon(0)), BoolTest::ne),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+    Node* ne = __ IfTrue(iff);
+    Node* eq = __ IfFalse(iff);
+
+    head->init_req(2, ne);
+    mem->init_req(2, st);
+    i_phi->init_req(2, q);
+    charPos->init_req(2, m1);
+
+    charPos = m1;
+
+    kit.set_control(eq);
+    kit.set_memory(st, char_adr_idx);
+
+    C->record_for_igvn(head);
+    C->record_for_igvn(mem);
+    C->record_for_igvn(i_phi);
+    C->record_for_igvn(charPos);
+  }
+
+  {
+    // if (sign != 0) {
+    //     buf [--charPos] = sign;
+    // }
+    IfNode* iff = kit.create_and_map_if(kit.control(),
+                                        __ Bool(__ CmpI(sign, __ intcon(0)), BoolTest::ne),
+                                        PROB_FAIR, COUNT_UNKNOWN);
+
+    final_merge->init_req(2, __ IfFalse(iff));
+    final_mem->init_req(2, kit.memory(char_adr_idx));
+
+    kit.set_control(__ IfTrue(iff));
+    if (kit.stopped()) {
+      final_merge->init_req(1, C->top());
+      final_mem->init_req(1, C->top());
+    } else {
+      Node* m1 = __ SubI(charPos, __ intcon(1));
+      Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
+                                    sign, T_CHAR, char_adr_idx);
+
+      final_merge->init_req(1, kit.control());
+      final_mem->init_req(1, st);
+    }
+
+    kit.set_control(final_merge);
+    kit.set_memory(final_mem, char_adr_idx);
+
+    C->record_for_igvn(final_merge);
+    C->record_for_igvn(final_mem);
+  }
+}
+
+
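+// Copy the characters of the String str into char_array beginning at
+// index start and return the index just past the last character
+// written.  Small constant Strings are expanded into individual char
+// stores; everything else uses a jshort_disjoint_arraycopy leaf call.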
+Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
+  Node* string = str;
+  Node* offset = kit.make_load(NULL,
+                               kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
+                               TypeInt::INT, T_INT, offset_field_idx);
+  Node* count = kit.make_load(NULL,
+                              kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
+                              TypeInt::INT, T_INT, count_field_idx);
+  const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
+                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
+                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
+  Node* value = kit.make_load(NULL,
+                              kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
+                              value_type, T_OBJECT, value_field_idx);
+
+  // copy the contents
+  if (offset->is_Con() && count->is_Con() && value->is_Con() && count->get_int() < unroll_string_copy_length) {
+    // For small constant strings just emit individual stores.
+    // A length of 6 seems like a good space/speed tradeoff.
+    int c = count->get_int();
+    int o = offset->get_int();
+    const TypeOopPtr* t = kit.gvn().type(value)->isa_oopptr();
+    ciTypeArray* value_array = t->const_oop()->as_type_array();
+    for (int e = 0; e < c; e++) {
+      __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+                         __ intcon(value_array->char_at(o + e)), T_CHAR, char_adr_idx);
+      start = __ AddI(start, __ intcon(1));
+    }
+  } else {
+    Node* src_ptr = kit.array_element_address(value, offset, T_CHAR);
+    Node* dst_ptr = kit.array_element_address(char_array, start, T_CHAR);
+    Node* c = count;
+    Node* extra = NULL;
+#ifdef _LP64
+    c = __ ConvI2L(c);
+    extra = C->top();
+#endif
+    Node* call = kit.make_runtime_call(GraphKit::RC_LEAF|GraphKit::RC_NO_FP,
+                                       OptoRuntime::fast_arraycopy_Type(),
+                                       CAST_FROM_FN_PTR(address, StubRoutines::jshort_disjoint_arraycopy()),
+                                       "jshort_disjoint_arraycopy", TypeAryPtr::CHARS,
+                                       src_ptr, dst_ptr, c, extra);
+    start = __ AddI(start, count);
+  }
+  return start;
+}
+
+
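+// Replace a validated StringConcat with straight-line code: build a
+// new starting state from the allocation's JVMState, remove the old
+// StringBuilder calls, compute the total length of the result,
+// allocate the char[] and copy each argument into it before
+// constructing the result String.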
+void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
+  // Log a little info about the transformation
+  sc->maybe_log_transform();
+
+  // pull the JVMState of the allocation into a SafePointNode to serve
+  // as a shim for the insertion of the new code.
+  JVMState* jvms     = sc->begin()->jvms()->clone_shallow(C);
+  uint size = sc->begin()->req();
+  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
+
+  // copy the control and memory state from the final call into our
+  // new starting state.  This allows any preceding tests to feed
+  // into the new section of code.
+  for (uint i1 = 0; i1 < TypeFunc::Parms; i1++) {
+    map->init_req(i1, sc->end()->in(i1));
+  }
+  // blow away old allocation arguments
+  for (uint i1 = TypeFunc::Parms; i1 < jvms->debug_start(); i1++) {
+    map->init_req(i1, C->top());
+  }
+  // Copy the rest of the inputs for the JVMState
+  for (uint i1 = jvms->debug_start(); i1 < sc->begin()->req(); i1++) {
+    map->init_req(i1, sc->begin()->in(i1));
+  }
+  // Make sure the memory state is a MergeMem for parsing.
+  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
+    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+  }
+
+  jvms->set_map(map);
+  map->ensure_stack(jvms, jvms->method()->max_stack());
+
+
+  // disconnect all the old StringBuilder calls from the graph
+  sc->eliminate_unneeded_control();
+
+  // At this point all the old work has been completely removed from
+  // the graph and the saved JVMState exists at the point where the
+  // final toString call used to be.
+  GraphKit kit(jvms);
+
+  // There may be uncommon traps which are still using the
+  // intermediate states and these need to be rewritten to point at
+  // the JVMState at the beginning of the transformation.
+  sc->convert_uncommon_traps(kit, jvms);
+
+  // Now insert the logic to compute the size of the string followed
+  // by all the logic to construct array and resulting string.
+
+  Node* null_string = __ makecon(TypeInstPtr::make(C->env()->the_null_string()));
+
+  // Create a region for the overflow checks to merge into.
+  int args = MAX2(sc->num_arguments(), 1);
+  RegionNode* overflow = new (C, args) RegionNode(args);
+  kit.gvn().set_type(overflow, Type::CONTROL);
+
+  // Create a hook node to hold onto the individual sizes since they
+  // are needed for the copying phase.
+  Node* string_sizes = new (C, args) Node(args);
+
+  Node* length = __ intcon(0);
+  for (int argi = 0; argi < sc->num_arguments(); argi++) {
+    Node* arg = sc->argument(argi);
+    switch (sc->mode(argi)) {
+      case StringConcat::IntMode: {
+        Node* string_size = int_stringSize(kit, arg);
+
+        // accumulate total
+        length = __ AddI(length, string_size);
+
+        // Cache this value for later use by int_getChars
+        string_sizes->init_req(argi, string_size);
+        break;
+      }
+      case StringConcat::StringMode: {
+        const Type* type = kit.gvn().type(arg);
+        if (type == TypePtr::NULL_PTR) {
+          // replace the argument with the null-checked version
+          arg = null_string;
+          sc->set_argument(argi, arg);
+        } else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
+          // s = s != null ? s : "null";
+          // length = length + (s.count - s.offset);
+          RegionNode *r = new (C, 3) RegionNode(3);
+          kit.gvn().set_type(r, Type::CONTROL);
+          Node *phi = new (C, 3) PhiNode(r, type->join(TypeInstPtr::NOTNULL));
+          kit.gvn().set_type(phi, phi->bottom_type());
+          Node* p = __ Bool(__ CmpP(arg, kit.null()), BoolTest::ne);
+          IfNode* iff = kit.create_and_map_if(kit.control(), p, PROB_MIN, COUNT_UNKNOWN);
+          Node* notnull = __ IfTrue(iff);
+          Node* isnull =  __ IfFalse(iff);
+          r->init_req(1, notnull);
+          phi->init_req(1, arg);
+          r->init_req(2, isnull);
+          phi->init_req(2, null_string);
+          kit.set_control(r);
+          C->record_for_igvn(r);
+          C->record_for_igvn(phi);
+          // replace the argument with the null-checked version
+          arg = phi;
+          sc->set_argument(argi, arg);
+        }
+        //         Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
+        //                                      TypeInt::INT, T_INT, offset_field_idx);
+        Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
+                                    TypeInt::INT, T_INT, count_field_idx);
+        length = __ AddI(length, count);
+        string_sizes->init_req(argi, NULL);
+        break;
+      }
+      case StringConcat::CharMode: {
+        // one character only
+        length = __ AddI(length, __ intcon(1));
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+    if (argi > 0) {
+      // Check that the sum hasn't overflowed
+      IfNode* iff = kit.create_and_map_if(kit.control(),
+                                          __ Bool(__ CmpI(length, __ intcon(0)), BoolTest::lt),
+                                          PROB_MIN, COUNT_UNKNOWN);
+      kit.set_control(__ IfFalse(iff));
+      overflow->set_req(argi, __ IfTrue(iff));
+    }
+  }
+
+  {
+    // Hook
+    PreserveJVMState pjvms(&kit);
+    kit.set_control(overflow);
+    kit.uncommon_trap(Deoptimization::Reason_intrinsic,
+                      Deoptimization::Action_make_not_entrant);
+  }
+
+  // length now contains the number of characters needed for the
+  // char[], so create a new AllocateArray for the char[]
+  Node* char_array = NULL;
+  {
+    PreserveReexecuteState preexecs(&kit);
+    // The original jvms is for an allocation of either a String or
+    // StringBuffer so no stack adjustment is necessary for proper
+    // reexecution.  If we deoptimize in the slow path the bytecode
+    // will be reexecuted and the char[] allocation will be thrown away.
+    kit.jvms()->set_should_reexecute(true);
+    char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
+                               length, 1);
+  }
+
+  // Mark the allocation so that zeroing is skipped since the code
+  // below will overwrite the entire array
+  AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
+  char_alloc->maybe_set_complete(_gvn);
+
+  // Now copy the string representations into the final char[]
+  Node* start = __ intcon(0);
+  for (int argi = 0; argi < sc->num_arguments(); argi++) {
+    Node* arg = sc->argument(argi);
+    switch (sc->mode(argi)) {
+      case StringConcat::IntMode: {
+        Node* end = __ AddI(start, string_sizes->in(argi));
+        // getChars works backwards, so pass the ending point as well as the start
+        int_getChars(kit, arg, char_array, start, end);
+        start = end;
+        break;
+      }
+      case StringConcat::StringMode: {
+        start = copy_string(kit, arg, char_array, start);
+        break;
+      }
+      case StringConcat::CharMode: {
+        __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+                           arg, T_CHAR, char_adr_idx);
+        start = __ AddI(start, __ intcon(1));
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+  }
+
+  // If we're not reusing an existing String allocation then allocate one here.
+  Node* result = sc->string_alloc();
+  if (result == NULL) {
+    PreserveReexecuteState preexecs(&kit);
+    // The original jvms is for an allocation of either a String or
+    // StringBuffer so no stack adjustment is necessary for proper
+    // reexecution.
+    kit.jvms()->set_should_reexecute(true);
+    result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
+  }
+
+  // Initialize the String
+  kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::offset_offset_in_bytes()),
+                      __ intcon(0), T_INT, offset_field_idx);
+  kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::count_offset_in_bytes()),
+                      length, T_INT, count_field_idx);
+  kit.store_to_memory(kit.control(), kit.basic_plus_adr(result, java_lang_String::value_offset_in_bytes()),
+                      char_array, T_OBJECT, value_field_idx);
+
+  // hook up the outgoing control and result
+  kit.replace_call(sc->end(), result);
+
+  // Unhook any hook nodes
+  string_sizes->disconnect_inputs(NULL);
+  sc->cleanup();
+}
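
A standalone C++ sketch (not from the patch) of roughly the shape replace_string_concat emits: sum the argument lengths with an overflow check that feeds an uncommon trap, then allocate the backing char[] once and copy every argument straight into place before wiring up the String fields.  The name concat_pieces and the use of std::string/std::vector are illustrative only, and the overflow test is done up front here because wrapping signed arithmetic is well defined for Java ints but not for C++ ints.

#include <climits>
#include <cstring>
#include <stdexcept>
#include <string>
#include <vector>

std::string concat_pieces(const std::vector<std::string>& pieces) {
  // Phase 1: compute the total length, trapping on overflow the way the
  // generated code branches to an uncommon trap when the running sum goes bad.
  int length = 0;
  for (const std::string& p : pieces) {
    if (p.size() > static_cast<size_t>(INT_MAX - length)) {
      throw std::overflow_error("combined string length exceeds INT_MAX");
    }
    length += static_cast<int>(p.size());
  }

  // Phase 2: a single allocation for the backing storage, then direct copies,
  // the analogue of the one AllocateArray plus the per-argument copy loop.
  std::string result(static_cast<size_t>(length), '\0');
  int start = 0;
  for (const std::string& p : pieces) {
    if (!p.empty()) {
      std::memcpy(&result[start], p.data(), p.size());
    }
    start += static_cast<int>(p.size());
  }
  return result;
}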
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/opto/stringopts.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+class StringConcat;
+
+class PhaseStringOpts : public Phase {
+  friend class StringConcat;
+
+ private:
+  PhaseGVN* _gvn;
+
+  // List of dead nodes to clean up aggressively at the end
+  Unique_Node_List dead_worklist;
+
+  // Memory slices needed for code gen
+  int char_adr_idx;
+  int value_field_idx;
+  int count_field_idx;
+  int offset_field_idx;
+
+  // Integer.sizeTable - used for int to String conversion
+  ciField* size_table_field;
+
+  // A set for use by various stages
+  VectorSet _visited;
+
+  // Collect a list of all SB.toString calls
+  Node_List collect_toString_calls();
+
+  // Examine the use of the SB alloc to see if it can be replaced with
+  // a single string construction.
+  StringConcat* build_candidate(CallStaticJavaNode* call);
+
+  // Replace all the SB calls in concat with an optimized String allocation
+  void replace_string_concat(StringConcat* concat);
+
+  // Load the value of a static field, performing any constant folding.
+  Node* fetch_static_field(GraphKit& kit, ciField* field);
+
+  // Compute the number of characters required to represent the int value
+  Node* int_stringSize(GraphKit& kit, Node* value);
+
+  // Copy the characters representing value into char_array starting at start
+  void int_getChars(GraphKit& kit, Node* value, Node* char_array, Node* start, Node* end);
+
+  // Copy the contents of the String str into char_array starting at index start.
+  Node* copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start);
+
+  // Clean up any leftover nodes
+  void record_dead_node(Node* node);
+  void remove_dead_nodes();
+
+  PhaseGVN* gvn() { return _gvn; }
+
+  enum {
+    // max length of constant string copy unrolling in copy_string
+    unroll_string_copy_length = 6
+  };
+
+ public:
+  PhaseStringOpts(PhaseGVN* gvn, Unique_Node_List* worklist);
+};
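
The int_stringSize and int_getChars helpers declared above are the two halves of int-to-String conversion: first decide how many chars the value needs (an Integer.sizeTable lookup), then write the digits backwards from the precomputed end index.  The following simplified, standalone C++ sketch shows that pairing; the real generated code emits digits two at a time like Integer.getChars, and the names int_string_size, int_get_chars, and size_table are illustrative only.

#include <cassert>
#include <climits>

// Analogue of Integer.sizeTable: size_table[i] is the largest value printable
// in i+1 decimal digits.
static const int size_table[] = {
  9, 99, 999, 9999, 99999, 999999, 9999999, 99999999, 999999999, INT_MAX
};

// How many chars are needed to print x, counting a leading '-' for negatives.
int int_string_size(int x) {
  if (x == INT_MIN) return 11;             // "-2147483648" cannot be negated below
  int prefix = 0;
  if (x < 0) { prefix = 1; x = -x; }
  for (int i = 0; ; i++) {
    if (x <= size_table[i]) return prefix + i + 1;
  }
}

// Write x into buf so the last digit lands at buf[end - 1].  Working backwards
// from 'end' is why the transformation computes end = start + size first and
// passes both indices to the getChars helper.
void int_get_chars(int x, char* buf, int start, int end) {
  bool negative = (x < 0);
  long long v = negative ? -static_cast<long long>(x) : x;  // widen so INT_MIN is safe
  int pos = end;
  do {
    buf[--pos] = static_cast<char>('0' + (v % 10));
    v /= 10;
  } while (v != 0);
  if (negative) buf[--pos] = '-';
  assert(pos == start);                    // holds when buf was sized via int_string_size
}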
--- a/hotspot/src/share/vm/opto/type.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/opto/type.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -847,9 +847,6 @@
   // Constant pointer to array
   static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
 
-  // Convenience
-  static const TypeAryPtr *make(ciObject* o);
-
   // Return a 'ptr' version of this type
   virtual const Type *cast_to_ptr_type(PTR ptr) const;
 
--- a/hotspot/src/share/vm/runtime/globals.cpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Thu Nov 12 09:24:21 2009 -0800
@@ -46,7 +46,8 @@
 bool Flag::is_unlocked() const {
   if (strcmp(kind, "{diagnostic}") == 0) {
     return UnlockDiagnosticVMOptions;
-  } else if (strcmp(kind, "{experimental}") == 0) {
+  } else if (strcmp(kind, "{experimental}") == 0 ||
+             strcmp(kind, "{C2 experimental}") == 0) {
     return UnlockExperimentalVMOptions;
   } else {
     return true;
@@ -169,6 +170,7 @@
 #define C2_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 product}", DEFAULT },
 #define C2_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, "{C2 pd product}", DEFAULT },
 #define C2_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 diagnostic}", DEFAULT },
+#define C2_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, "{C2 experimental}", DEFAULT },
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
@@ -190,7 +192,7 @@
  C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
 #endif
 #ifdef COMPILER2
- C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
+ C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
 #endif
  {0, NULL, NULL}
 };
--- a/hotspot/src/share/vm/runtime/globals_extension.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/runtime/globals_extension.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -64,6 +64,7 @@
 #define C2_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
 #define C2_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
 #define C2_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
+#define C2_EXPERIMENTAL_FLAG_MEMBER(type, name, value, doc)    FLAG_MEMBER(name),
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
@@ -84,7 +85,7 @@
  C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
 #endif
 #ifdef COMPILER2
- C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
+ C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
 #endif
  NUM_CommandLineFlag
 } CommandLineFlag;
@@ -130,6 +131,7 @@
 #define C2_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
 #define C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
 #define C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
+#define C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
 #ifdef PRODUCT
   #define C2_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
   #define C2_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
@@ -181,6 +183,7 @@
           C2_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C2_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
           C2_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
+          C2_EXPERIMENTAL_FLAG_MEMBER_WITH_TYPE,
           C2_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
 #endif
  NUM_CommandLineFlagWithType
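
Taken together, the globals.cpp and globals_extension.hpp hunks thread a new C2_EXPERIMENTAL_* macro through every X-macro expansion of C2_FLAGS, so a flag declared with the new kind is tagged "{C2 experimental}" and is only writable when UnlockExperimentalVMOptions is set, just like "{experimental}" flags.  A self-contained toy version of that X-macro pattern follows; all DEMO_* names and the two sample flags are made up for illustration and do not appear in the patch.

#include <cstdio>

// A miniature flag list: one product flag and one experimental flag.  The real
// C2_FLAGS takes one macro argument per flag kind; this demo takes just two.
#define DEMO_C2_FLAGS(product, experimental)                            \
  product(int,  DemoProductFlag,      7,     "a product flag")          \
  experimental(bool, DemoExperimentalFlag, false, "an experimental flag")

// Expansion 1: declare the flag variables.
#define DEMO_PRODUCT_DECL(type, name, value, doc)      type name = value;
#define DEMO_EXPERIMENTAL_DECL(type, name, value, doc) type name = value;
DEMO_C2_FLAGS(DEMO_PRODUCT_DECL, DEMO_EXPERIMENTAL_DECL)

// Expansion 2: build the descriptor table, tagging each entry with its kind.
// "{C2 experimental}" is the tag Flag::is_unlocked now maps to
// UnlockExperimentalVMOptions.
struct DemoFlag { const char* name; const char* kind; };
#define DEMO_PRODUCT_STRUCT(type, name, value, doc)      { #name, "{C2 product}" },
#define DEMO_EXPERIMENTAL_STRUCT(type, name, value, doc) { #name, "{C2 experimental}" },
static DemoFlag demo_flag_table[] = {
  DEMO_C2_FLAGS(DEMO_PRODUCT_STRUCT, DEMO_EXPERIMENTAL_STRUCT)
  { 0, 0 }
};

int main() {
  for (const DemoFlag* f = demo_flag_table; f->name != 0; f++) {
    std::printf("%-22s %s\n", f->name, f->kind);
  }
  return 0;
}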
--- a/hotspot/src/share/vm/utilities/growableArray.hpp	Fri Nov 27 07:56:58 2009 -0800
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp	Thu Nov 12 09:24:21 2009 -0800
@@ -278,6 +278,17 @@
     _len--;
   }
 
+  // inserts the given element before the element at index idx
+  void insert_before(const int idx, const E& elem) {
+    check_nesting();
+    if (_len == _max) grow(_len);
+    for (int j = _len - 1; j >= idx; j--) {
+      _data[j + 1] = _data[j];
+    }
+    _len++;
+    _data[idx] = elem;
+  }
+
   void appendAll(const GrowableArray<E>* l) {
     for (int i = 0; i < l->_len; i++) {
       raw_at_put_grow(_len, l->_data[i], 0);
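
A minimal usage sketch of the insert_before method added in the hunk above: grow by one, shift the elements at idx and above one slot to the right, then store the new element at idx.  std::vector stands in for GrowableArray so the snippet compiles on its own; the free function and its name are illustrative.

#include <cassert>
#include <vector>

// Same semantics as the new GrowableArray::insert_before, expressed over
// std::vector.
template <typename E>
void insert_before(std::vector<E>& a, int idx, const E& elem) {
  a.push_back(E());                                   // like grow(_len): make room
  for (int j = static_cast<int>(a.size()) - 2; j >= idx; j--) {
    a[j + 1] = a[j];                                  // shift the tail right
  }
  a[idx] = elem;
}

int main() {
  std::vector<int> a = {1, 2, 4};
  insert_before(a, 2, 3);                             // insert 3 before index 2
  assert((a == std::vector<int>{1, 2, 3, 4}));
  return 0;
}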