7172640: C2: intrinsic implementations in LibraryCallKit should use argument() instead of pop()
author twisti
Mon, 26 Nov 2012 17:25:11 -0800
changeset 14621 fd9265ab0f67
parent 14620 45167d2bc15a
child 14622 8e94e4186d35
7172640: C2: intrinsic implementations in LibraryCallKit should use argument() instead of pop() Reviewed-by: kvn, jrose
hotspot/src/share/vm/ci/ciMethod.cpp
hotspot/src/share/vm/ci/ciMethod.hpp
hotspot/src/share/vm/ci/ciSignature.hpp
hotspot/src/share/vm/interpreter/bytecodes.hpp
hotspot/src/share/vm/opto/callGenerator.cpp
hotspot/src/share/vm/opto/callnode.hpp
hotspot/src/share/vm/opto/doCall.cpp
hotspot/src/share/vm/opto/graphKit.cpp
hotspot/src/share/vm/opto/graphKit.hpp
hotspot/src/share/vm/opto/library_call.cpp
hotspot/src/share/vm/opto/locknode.cpp
hotspot/src/share/vm/opto/parse1.cpp
hotspot/src/share/vm/opto/parse2.cpp
hotspot/src/share/vm/opto/parse3.cpp
hotspot/src/share/vm/opto/parseHelper.cpp
hotspot/src/share/vm/opto/type.hpp
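
The change in a nutshell, distilled from the library_call.cpp hunks below (abridged; only the argument-handling pattern is shown): intrinsics used to pop their arguments and patch _sp by hand around null checks and traps, which put the checks at the wrong stack depth and could produce invalid stack traces; they now read arguments in place with argument(i), and uncommon_trap() restores the stack from reexecute_sp().

    // Before: pop arguments, then fake the stack depth for the null checks.
    _sp += 2;
    Node* argument = pop();   // pop non-receiver first: it was pushed second
    Node* receiver = pop();
    _sp += 2;
    receiver = do_null_check(receiver, T_OBJECT);
    argument = do_null_check(argument, T_OBJECT);
    _sp -= 2;

    // After: read arguments in place; no manual _sp bookkeeping.
    Node* receiver = null_check(argument(0));
    Node* arg      = null_check(argument(1));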
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -742,6 +742,24 @@
 }
 
 // ------------------------------------------------------------------
+// ciMethod::get_field_at_bci
+ciField* ciMethod::get_field_at_bci(int bci, bool &will_link) {
+  ciBytecodeStream iter(this);
+  iter.reset_to_bci(bci);
+  iter.next();
+  return iter.get_field(will_link);
+}
+
+// ------------------------------------------------------------------
+// ciMethod::get_method_at_bci
+ciMethod* ciMethod::get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature) {
+  ciBytecodeStream iter(this);
+  iter.reset_to_bci(bci);
+  iter.next();
+  return iter.get_method(will_link, declared_signature);
+}
+
+// ------------------------------------------------------------------
 // Adjust a CounterData count to be commensurate with
 // interpreter_invocation_count.  If the MDO exists for
 // only 25% of the time the method exists, then the
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Mon Nov 26 17:25:11 2012 -0800
@@ -226,6 +226,9 @@
   ciCallProfile call_profile_at_bci(int bci);
   int           interpreter_call_site_count(int bci);
 
+  ciField*      get_field_at_bci( int bci, bool &will_link);
+  ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
+
   // Given a certain calling environment, find the monomorphic target
   // for the call.  Return NULL if the call is not monomorphic in
   // its calling environment.
--- a/hotspot/src/share/vm/ci/ciSignature.hpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/ci/ciSignature.hpp	Mon Nov 26 17:25:11 2012 -0800
@@ -57,12 +57,14 @@
   ciSymbol* as_symbol() const                    { return _symbol; }
   ciKlass*  accessing_klass() const              { return _accessing_klass; }
 
-  ciType* return_type() const;
-  ciType* type_at(int index) const;
+  ciType*   return_type() const;
+  ciType*   type_at(int index) const;
 
   int       size() const                         { return _size; }
   int       count() const                        { return _count; }
 
+  int       arg_size_for_bc(Bytecodes::Code bc)  { return size() + (Bytecodes::has_receiver(bc) ? 1 : 0); }
+
   bool equals(ciSignature* that);
 
   void print_signature();
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp	Mon Nov 26 17:25:11 2012 -0800
@@ -423,7 +423,9 @@
   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
   static bool        is_invoke      (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
-
+  static bool        has_receiver   (Code code)    { assert(is_invoke(code), "");  return code == _invokevirtual ||
+                                                                                          code == _invokespecial ||
+                                                                                          code == _invokeinterface; }
   static bool        has_optional_appendix(Code code) { return code == _invokedynamic || code == _invokehandle; }
 
   static int         compute_flags  (const char* format, int more_flags = 0);  // compute the flags
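
Taken together, Bytecodes::has_receiver() and ciSignature::arg_size_for_bc() (both added above) give the number of incoming stack slots at a call site. Two worked examples (methods chosen for illustration only):

    // invokevirtual  int String.indexOf(int ch):
    //   signature size() == 1, has_receiver => 1 + 1 = 2 slots
    // invokestatic   long Math.max(long a, long b):
    //   size() == 4 (each long takes 2 slots), no receiver => 4 slots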
--- a/hotspot/src/share/vm/opto/callGenerator.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -139,7 +139,7 @@
   if (!is_static) {
     // Make an explicit receiver null_check as part of this call.
     // Since we share a map with the caller, his JVMS gets adjusted.
-    kit.null_check_receiver(method());
+    kit.null_check_receiver_before_call(method());
     if (kit.stopped()) {
       // And dump it back to the caller, decorated with any exceptions:
       return kit.transfer_exceptions_into_jvms();
@@ -207,7 +207,7 @@
        >= (uint)ImplicitNullCheckThreshold))) {
     // Make an explicit receiver null_check as part of this call.
     // Since we share a map with the caller, his JVMS gets adjusted.
-    receiver = kit.null_check_receiver(method());
+    receiver = kit.null_check_receiver_before_call(method());
     if (kit.stopped()) {
       // And dump it back to the caller, decorated with any exceptions:
       return kit.transfer_exceptions_into_jvms();
@@ -491,7 +491,7 @@
               jvms->bci(), log->identify(_predicted_receiver));
   }
 
-  receiver = kit.null_check_receiver(method());
+  receiver = kit.null_check_receiver_before_call(method());
   if (kit.stopped()) {
     return kit.transfer_exceptions_into_jvms();
   }
@@ -597,7 +597,7 @@
   switch (iid) {
   case vmIntrinsics::_invokeBasic:
     {
-      // get MethodHandle receiver
+      // Get MethodHandle receiver:
       Node* receiver = kit.argument(0);
       if (receiver->Opcode() == Op_ConP) {
         const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
@@ -618,7 +618,7 @@
   case vmIntrinsics::_linkToSpecial:
   case vmIntrinsics::_linkToInterface:
     {
-      // pop MemberName argument
+      // Get MemberName argument:
       Node* member_name = kit.argument(callee->arg_size() - 1);
       if (member_name->Opcode() == Op_ConP) {
         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
--- a/hotspot/src/share/vm/opto/callnode.hpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/callnode.hpp	Mon Nov 26 17:25:11 2012 -0800
@@ -344,17 +344,26 @@
   OopMap *oop_map() const { return _oop_map; }
   void set_oop_map(OopMap *om) { _oop_map = om; }
 
+ private:
+  void verify_input(JVMState* jvms, uint idx) const {
+    assert(verify_jvms(jvms), "jvms must match");
+    Node* n = in(idx);
+    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
+           in(idx + 1)->is_top(), "2nd half of long/double");
+  }
+
+ public:
   // Functionality from old debug nodes which has changed
   Node *local(JVMState* jvms, uint idx) const {
-    assert(verify_jvms(jvms), "jvms must match");
+    verify_input(jvms, jvms->locoff() + idx);
     return in(jvms->locoff() + idx);
   }
   Node *stack(JVMState* jvms, uint idx) const {
-    assert(verify_jvms(jvms), "jvms must match");
+    verify_input(jvms, jvms->stkoff() + idx);
     return in(jvms->stkoff() + idx);
   }
   Node *argument(JVMState* jvms, uint idx) const {
-    assert(verify_jvms(jvms), "jvms must match");
+    verify_input(jvms, jvms->argoff() + idx);
     return in(jvms->argoff() + idx);
   }
   Node *monitor_box(JVMState* jvms, uint idx) const {
--- a/hotspot/src/share/vm/opto/doCall.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -350,7 +350,7 @@
   // Set frequently used booleans
   const bool is_virtual = bc() == Bytecodes::_invokevirtual;
   const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
-  const bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
+  const bool has_receiver = Bytecodes::has_receiver(bc());
 
   // Find target being called
   bool             will_link;
@@ -380,6 +380,8 @@
   // Note:  In the absence of miranda methods, an abstract class K can perform
   // an invokevirtual directly on an interface method I.m if K implements I.
 
+  // orig_callee is the resolved callee, whose signature includes the
+  // appendix argument.
   const int nargs = orig_callee->arg_size();
 
   // Push appendix argument (MethodType, CallSite, etc.), if one.
@@ -572,7 +574,7 @@
       }
       // If there is going to be a trap, put it at the next bytecode:
       set_bci(iter().next_bci());
-      do_null_assert(peek(), T_OBJECT);
+      null_assert(peek());
       set_bci(iter().cur_bci()); // put it back
     }
   }
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -93,6 +93,16 @@
   return jvms;
 }
 
+//--------------------------------sync_jvms_for_reexecute---------------------
+// Make sure our current jvms agrees with our parse state.  This version
+// uses the reexecute_sp for reexecuting bytecodes.
+JVMState* GraphKit::sync_jvms_for_reexecute() {
+  JVMState* jvms = this->jvms();
+  jvms->set_bci(bci());          // Record the new bci in the JVMState
+  jvms->set_sp(reexecute_sp());  // Record the new sp in the JVMState
+  return jvms;
+}
+
 #ifdef ASSERT
 bool GraphKit::jvms_in_sync() const {
   Parse* parse = is_Parse();
@@ -826,7 +836,16 @@
   // Walk the inline list to fill in the correct set of JVMState's
   // Also fill in the associated edges for each JVMState.
 
-  JVMState* youngest_jvms = sync_jvms();
+  // If the bytecode needs to be reexecuted we need to put
+  // the arguments back on the stack.
+  const bool should_reexecute = jvms()->should_reexecute();
+  JVMState* youngest_jvms = should_reexecute ? sync_jvms_for_reexecute() : sync_jvms();
+
+  // NOTE: set_bci (called from sync_jvms) might reset the reexecute bit to
+  // undefined if the bci is different.  This is normal for Parse but it
+  // should not happen for LibraryCallKit because only one bci is processed.
+  assert(!is_LibraryCallKit() || (jvms()->should_reexecute() == should_reexecute),
+         "in LibraryCallKit the reexecute bit should not change");
 
   // If we are guaranteed to throw, we can prune everything but the
   // input to the current bytecode.
@@ -860,7 +879,7 @@
   }
 
   // Presize the call:
-  debug_only(uint non_debug_edges = call->req());
+  DEBUG_ONLY(uint non_debug_edges = call->req());
   call->add_req_batch(top(), youngest_jvms->debug_depth());
   assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
 
@@ -965,7 +984,7 @@
   assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
 }
 
-bool GraphKit::compute_stack_effects(int& inputs, int& depth, bool for_parse) {
+bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
   Bytecodes::Code code = java_bc();
   if (code == Bytecodes::_wide) {
     code = method()->java_code_at_bci(bci() + 1);
@@ -1005,14 +1024,11 @@
   case Bytecodes::_getfield:
   case Bytecodes::_putfield:
     {
+      bool ignored_will_link;
+      ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
+      int      size  = field->type()->size();
       bool is_get = (depth >= 0), is_static = (depth & 1);
-      ciBytecodeStream iter(method());
-      iter.reset_to_bci(bci());
-      iter.next();
-      bool ignored_will_link;
-      ciField* field = iter.get_field(ignored_will_link);
-      int      size  = field->type()->size();
-      inputs  = (is_static ? 0 : 1);
+      inputs = (is_static ? 0 : 1);
       if (is_get) {
         depth = size - inputs;
       } else {
@@ -1028,26 +1044,11 @@
   case Bytecodes::_invokedynamic:
   case Bytecodes::_invokeinterface:
     {
-      ciBytecodeStream iter(method());
-      iter.reset_to_bci(bci());
-      iter.next();
       bool ignored_will_link;
       ciSignature* declared_signature = NULL;
-      ciMethod* callee = iter.get_method(ignored_will_link, &declared_signature);
+      ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
       assert(declared_signature != NULL, "cannot be null");
-      // (Do not use ciMethod::arg_size(), because
-      // it might be an unloaded method, which doesn't
-      // know whether it is static or not.)
-      if (for_parse) {
-        // Case 1: When called from parse we are *before* the invoke (in the
-        //         caller) and need to to adjust the inputs by an appendix
-        //         argument that will be pushed implicitly.
-        inputs = callee->invoke_arg_size(code) - (iter.has_appendix() ? 1 : 0);
-      } else {
-        // Case 2: Here we are *after* the invoke (in the callee) and need to
-        //         remove any appendix arguments that were popped.
-        inputs = callee->invoke_arg_size(code) - (callee->has_member_arg() ? 1 : 0);
-      }
+      inputs   = declared_signature->arg_size_for_bc(code);
       int size = declared_signature->return_type()->size();
       depth = size - inputs;
     }
@@ -1178,7 +1179,7 @@
   Node *chk = NULL;
   switch(type) {
     case T_LONG   : chk = new (C) CmpLNode(value, _gvn.zerocon(T_LONG)); break;
-    case T_INT    : chk = new (C) CmpINode( value, _gvn.intcon(0)); break;
+    case T_INT    : chk = new (C) CmpINode(value, _gvn.intcon(0)); break;
     case T_ARRAY  : // fall through
       type = T_OBJECT;  // simplify further tests
     case T_OBJECT : {
@@ -1229,7 +1230,8 @@
       break;
     }
 
-    default      : ShouldNotReachHere();
+    default:
+      fatal(err_msg_res("unexpected type: %s", type2name(type)));
   }
   assert(chk != NULL, "sanity check");
   chk = _gvn.transform(chk);
@@ -1861,15 +1863,17 @@
   // occurs here, the runtime will make sure an MDO exists.  There is
   // no need to call method()->ensure_method_data() at this point.
 
+  // Set the stack pointer to the right value for reexecution:
+  set_sp(reexecute_sp());
+
 #ifdef ASSERT
   if (!must_throw) {
     // Make sure the stack has at least enough depth to execute
     // the current bytecode.
-    int inputs, ignore;
-    if (compute_stack_effects(inputs, ignore)) {
-      assert(sp() >= inputs, "must have enough JVMS stack to execute");
-      // It is a frequent error in library_call.cpp to issue an
-      // uncommon trap with the _sp value already popped.
+    int inputs, ignored_depth;
+    if (compute_stack_effects(inputs, ignored_depth)) {
+      assert(sp() >= inputs, err_msg_res("must have enough JVMS stack to execute %s: sp=%d, inputs=%d",
+             Bytecodes::name(java_bc()), sp(), inputs));
     }
   }
 #endif
@@ -1900,7 +1904,8 @@
   case Deoptimization::Action_make_not_compilable:
     break;
   default:
-    assert(false, "bad action");
+    fatal(err_msg_res("unknown action %d: %s", action, Deoptimization::trap_action_name(action)));
+    break;
 #endif
   }
 
@@ -2667,7 +2672,7 @@
       case SSC_always_false:
         // It needs a null check because a null will *pass* the cast check.
         // A non-null value will always produce an exception.
-        return do_null_assert(obj, T_OBJECT);
+        return null_assert(obj);
       }
     }
   }
@@ -2786,7 +2791,7 @@
   mb->init_req(TypeFunc::Control, control());
   mb->init_req(TypeFunc::Memory,  reset_memory());
   Node* membar = _gvn.transform(mb);
-  set_control(_gvn.transform(new (C) ProjNode(membar,TypeFunc::Control) ));
+  set_control(_gvn.transform(new (C) ProjNode(membar, TypeFunc::Control)));
   set_all_memory_call(membar);
   return membar;
 }
@@ -3148,7 +3153,7 @@
     Node* cmp_lh = _gvn.transform( new(C) CmpINode(layout_val, intcon(layout_con)) );
     Node* bol_lh = _gvn.transform( new(C) BoolNode(cmp_lh, BoolTest::eq) );
     { BuildCutout unless(this, bol_lh, PROB_MAX);
-      _sp += nargs;
+      inc_sp(nargs);
       uncommon_trap(Deoptimization::Reason_class_check,
                     Deoptimization::Action_maybe_recompile);
     }
@@ -3391,7 +3396,7 @@
   {
     PreserveJVMState pjvms(this);
     set_control(iffalse);
-    _sp += nargs;
+    inc_sp(nargs);
     uncommon_trap(reason, Deoptimization::Action_maybe_recompile);
   }
   Node* iftrue = _gvn.transform(new (C) IfTrueNode(iff));
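
How the reexecution pieces above fit together: uncommon_trap() now calls set_sp(reexecute_sp()) before recording debug info, and add_safepoint_edges() switches to sync_jvms_for_reexecute() when the reexecute bit is set, so the deoptimized frame shows the invoke's arguments back on the stack and the interpreter reexecutes the whole bytecode. A hedged sketch of a bail-out in an intrinsic under the new scheme (the intrinsic body and its condition are hypothetical; argument(), uncommon_trap(), and set_result() are this changeset's API):

    bool LibraryCallKit::inline_example() {    // hypothetical intrinsic
      Node* recv = null_check(argument(0));    // arguments stay at argument(0..n-1)
      if (stopped())  return true;
      if (!fast_path_possible) {               // illustrative bail-out condition
        // No inc_sp/dec_sp dance: uncommon_trap() picks up reexecute_sp() itself.
        uncommon_trap(Deoptimization::Reason_intrinsic,
                      Deoptimization::Action_make_not_compilable);
        return true;
      }
      set_result(recv);
      return true;
    }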
--- a/hotspot/src/share/vm/opto/graphKit.hpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/graphKit.hpp	Mon Nov 26 17:25:11 2012 -0800
@@ -41,6 +41,7 @@
 class FastLockNode;
 class FastUnlockNode;
 class IdealKit;
+class LibraryCallKit;
 class Parse;
 class RootNode;
 
@@ -60,11 +61,13 @@
   PhaseGVN         &_gvn;       // Some optimizations while parsing
   SafePointNode*    _map;       // Parser map from JVM to Nodes
   SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
-  int               _sp;        // JVM Expression Stack Pointer
   int               _bci;       // JVM Bytecode Pointer
   ciMethod*         _method;    // JVM Current Method
 
  private:
+  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!
+
+ private:
   SafePointNode*     map_not_null() const {
     assert(_map != NULL, "must call stopped() to test for reset compiler map");
     return _map;
@@ -80,7 +83,8 @@
   }
 #endif
 
-  virtual Parse* is_Parse() const { return NULL; }
+  virtual Parse*          is_Parse()          const { return NULL; }
+  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }
 
   ciEnv*        env()           const { return _env; }
   PhaseGVN&     gvn()           const { return _gvn; }
@@ -141,7 +145,7 @@
                                         _bci = jvms->bci();
                                         _method = jvms->has_method() ? jvms->method() : NULL; }
   void set_map(SafePointNode* m)      { _map = m; debug_only(verify_map()); }
-  void set_sp(int i)                  { assert(i >= 0, "must be non-negative"); _sp = i; }
+  void set_sp(int sp)                 { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
   void clean_stack(int from_sp); // clear garbage beyond from_sp to top
 
   void inc_sp(int i)                  { set_sp(sp() + i); }
@@ -149,7 +153,9 @@
   void set_bci(int bci)               { _bci = bci; }
 
   // Make sure jvms has current bci & sp.
-  JVMState* sync_jvms()     const;
+  JVMState* sync_jvms() const;
+  JVMState* sync_jvms_for_reexecute();
+
 #ifdef ASSERT
   // Make sure JVMS has an updated copy of bci and sp.
   // Also sanity-check method, depth, and monitor depth.
@@ -286,7 +292,7 @@
   // How many stack inputs does the current BC consume?
   // And, how does the stack change after the bytecode?
   // Returns false if unknown.
-  bool compute_stack_effects(int& inputs, int& depth, bool for_parse = false);
+  bool compute_stack_effects(int& inputs, int& depth);
 
   // Add a fixed offset to a pointer
   Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
@@ -337,20 +343,37 @@
   Node* load_object_klass(Node* object);
   // Find out the length of an array.
   Node* load_array_length(Node* array);
+
+
   // Helper function to do a NULL pointer check or ZERO check based on type.
-  Node* null_check_common(Node* value, BasicType type,
-                          bool assert_null, Node* *null_control);
   // Throw an exception if a given value is null.
   // Return the value cast to not-null.
   // Be clever about equivalent dominating null checks.
-  Node* do_null_check(Node* value, BasicType type) {
-    return null_check_common(value, type, false, NULL);
+  Node* null_check_common(Node* value, BasicType type,
+                          bool assert_null = false, Node* *null_control = NULL);
+  Node* null_check(Node* value, BasicType type = T_OBJECT) {
+    return null_check_common(value, type);
+  }
+  Node* null_check_receiver() {
+    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
+    return null_check(argument(0));
+  }
+  Node* zero_check_int(Node* value) {
+    assert(value->bottom_type()->basic_type() == T_INT,
+        err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
+    return null_check_common(value, T_INT);
+  }
+  Node* zero_check_long(Node* value) {
+    assert(value->bottom_type()->basic_type() == T_LONG,
+        err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
+    return null_check_common(value, T_LONG);
   }
   // Throw an uncommon trap if a given value is __not__ null.
   // Return the value cast to null, and be clever about dominating checks.
-  Node* do_null_assert(Node* value, BasicType type) {
-    return null_check_common(value, type, true, NULL);
+  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
+    return null_check_common(value, type, true);
   }
+
   // Null check oop.  Return null-path control into (*null_control).
   // Return a cast-not-null node which depends on the not-null control.
   // If never_see_null, use an uncommon trap (*null_control sees a top).
@@ -371,9 +394,9 @@
   // Replace all occurrences of one node by another.
   void replace_in_map(Node* old, Node* neww);
 
-  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++, n); }
-  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp); }
-  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1); }
+  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms,   _sp++        , n); }
+  Node* pop()             { map_not_null(); return _map->stack(    _map->_jvms, --_sp             ); }
+  Node* peek(int off = 0) { map_not_null(); return _map->stack(    _map->_jvms,   _sp - off - 1   ); }
 
   void push_pair(Node* ldval) {
     push(ldval);
@@ -580,19 +603,15 @@
 
   //---------- help for generating calls --------------
 
-  // Do a null check on the receiver, which is in argument(0).
-  Node* null_check_receiver(ciMethod* callee) {
+  // Do a null check on the receiver as it would happen before the call to
+  // callee (with all arguments still on the stack).
+  Node* null_check_receiver_before_call(ciMethod* callee) {
     assert(!callee->is_static(), "must be a virtual method");
-    int nargs = 1 + callee->signature()->size();
-    // Null check on self without removing any arguments.  The argument
-    // null check technically happens in the wrong place, which can lead to
-    // invalid stack traces when the primitive is inlined into a method
-    // which handles NullPointerExceptions.
-    Node* receiver = argument(0);
-    _sp += nargs;
-    receiver = do_null_check(receiver, T_OBJECT);
-    _sp -= nargs;
-    return receiver;
+    const int nargs = callee->arg_size();
+    inc_sp(nargs);
+    Node* n = null_check_receiver();
+    dec_sp(nargs);
+    return n;
   }
 
   // Fill in argument edges for the call from argument(0), argument(1), ...
@@ -645,6 +664,9 @@
                   klass, reason_string, must_throw, keep_exact_action);
   }
 
+  // SP when bytecode needs to be reexecuted.
+  virtual int reexecute_sp() { return sp(); }
+
   // Report if there were too many traps at the current method and bci.
   // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
   // If there is no MDO at all, report no trap unless told to assume it.
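
A hedged usage sketch of the renamed null-check family above, as it reads inside an intrinsic (set_result() is the LibraryCallKit helper introduced in library_call.cpp below; the particular values are illustrative):

    Node* receiver = null_check_receiver();    // null-checks argument(0)
    Node* array    = null_check(argument(1));  // type defaults to T_OBJECT
    if (stopped())  return true;               // a null took the trap path
    set_result(load_array_length(array));      // 'array' is cast not-null here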
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -67,30 +67,59 @@
 // Local helper class for LibraryIntrinsic:
 class LibraryCallKit : public GraphKit {
  private:
-  LibraryIntrinsic* _intrinsic;   // the library intrinsic being called
+  LibraryIntrinsic* _intrinsic;     // the library intrinsic being called
+  Node*             _result;        // the result node, if any
+  int               _reexecute_sp;  // the stack pointer when bytecode needs to be reexecuted
 
   const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
 
  public:
-  LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
-    : GraphKit(caller),
-      _intrinsic(intrinsic)
+  LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic)
+    : GraphKit(jvms),
+      _intrinsic(intrinsic),
+      _result(NULL)
   {
+    // Find out how many arguments the interpreter needs when deoptimizing
+    // and save the stack pointer value so it can be used by uncommon_trap.
+    // We find the argument count by looking at the declared signature.
+    bool ignored_will_link;
+    ciSignature* declared_signature = NULL;
+    ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
+    const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci()));
+    _reexecute_sp = sp() + nargs;  // "push" arguments back on stack
   }
 
+  virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; }
+
   ciMethod*         caller()    const    { return jvms()->method(); }
   int               bci()       const    { return jvms()->bci(); }
   LibraryIntrinsic* intrinsic() const    { return _intrinsic; }
   vmIntrinsics::ID  intrinsic_id() const { return _intrinsic->intrinsic_id(); }
   ciMethod*         callee()    const    { return _intrinsic->method(); }
-  ciSignature*      signature() const    { return callee()->signature(); }
-  int               arg_size()  const    { return callee()->arg_size(); }
 
   bool try_to_inline();
   Node* try_to_predicate();
 
+  void push_result() {
+    // Push the result onto the stack.
+    if (!stopped() && result() != NULL) {
+      BasicType bt = result()->bottom_type()->basic_type();
+      push_node(bt, result());
+    }
+  }
+
+ private:
+  void fatal_unexpected_iid(vmIntrinsics::ID iid) {
+    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+  }
+
+  void  set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; }
+  void  set_result(RegionNode* region, PhiNode* value);
+  Node*     result() { return _result; }
+
+  virtual int reexecute_sp() { return _reexecute_sp; }
+
   // Helper functions to inline natives
-  void push_result(RegionNode* region, PhiNode* value);
   Node* generate_guard(Node* test, RegionNode* region, float true_prob);
   Node* generate_slow_guard(Node* test, RegionNode* region);
   Node* generate_fair_guard(Node* test, RegionNode* region);
@@ -108,21 +137,19 @@
                               bool disjoint_bases, const char* &name, bool dest_uninitialized);
   Node* load_mirror_from_klass(Node* klass);
   Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
-                                      int nargs,
                                       RegionNode* region, int null_path,
                                       int offset);
-  Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs,
+  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                                RegionNode* region, int null_path) {
     int offset = java_lang_Class::klass_offset_in_bytes();
-    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
+    return load_klass_from_mirror_common(mirror, never_see_null,
                                          region, null_path,
                                          offset);
   }
   Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
-                                     int nargs,
                                      RegionNode* region, int null_path) {
     int offset = java_lang_Class::array_klass_offset_in_bytes();
-    return load_klass_from_mirror_common(mirror, never_see_null, nargs,
+    return load_klass_from_mirror_common(mirror, never_see_null,
                                          region, null_path,
                                          offset);
   }
@@ -161,16 +188,14 @@
   bool inline_string_indexOf();
   Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i);
   bool inline_string_equals();
-  Node* pop_math_arg();
+  Node* round_double_node(Node* n);
   bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName);
   bool inline_math_native(vmIntrinsics::ID id);
   bool inline_trig(vmIntrinsics::ID id);
-  bool inline_trans(vmIntrinsics::ID id);
-  bool inline_abs(vmIntrinsics::ID id);
-  bool inline_sqrt(vmIntrinsics::ID id);
+  bool inline_math(vmIntrinsics::ID id);
+  bool inline_exp();
+  bool inline_pow();
   void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName);
-  bool inline_pow(vmIntrinsics::ID id);
-  bool inline_exp(vmIntrinsics::ID id);
   bool inline_min_max(vmIntrinsics::ID id);
   Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
   // This returns Type::AnyPtr, RawPtr, or OopPtr.
@@ -179,7 +204,7 @@
   // Helper for inline_unsafe_access.
   // Generates the guards that check whether the result of
   // Unsafe.getObject should be recorded in an SATB log buffer.
-  void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, int nargs, bool need_mem_bar);
+  void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
   bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
   bool inline_unsafe_allocate();
@@ -253,11 +278,7 @@
   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
   bool inline_unsafe_ordered_store(BasicType type);
   bool inline_fp_conversions(vmIntrinsics::ID id);
-  bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
-  bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
-  bool inline_bitCount(vmIntrinsics::ID id);
-  bool inline_reverseBytes(vmIntrinsics::ID id);
-
+  bool inline_number_methods(vmIntrinsics::ID id);
   bool inline_reference_get();
   bool inline_aescrypt_Block(vmIntrinsics::ID id);
   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
@@ -321,15 +342,18 @@
   switch (id) {
   case vmIntrinsics::_compareTo:
     if (!SpecialStringCompareTo)  return NULL;
+    if (!Matcher::match_rule_supported(Op_StrComp))  return NULL;
     break;
   case vmIntrinsics::_indexOf:
     if (!SpecialStringIndexOf)  return NULL;
     break;
   case vmIntrinsics::_equals:
     if (!SpecialStringEquals)  return NULL;
+    if (!Matcher::match_rule_supported(Op_StrEquals))  return NULL;
     break;
   case vmIntrinsics::_equalsC:
     if (!SpecialArraysEquals)  return NULL;
+    if (!Matcher::match_rule_supported(Op_AryEq))  return NULL;
     break;
   case vmIntrinsics::_arraycopy:
     if (!InlineArrayCopy)  return NULL;
@@ -382,6 +406,19 @@
     if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL;
     break;
 
+  case vmIntrinsics::_reverseBytes_c:
+    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
+    break;
+  case vmIntrinsics::_reverseBytes_s:
+    if (!Matcher::match_rule_supported(Op_ReverseBytesS))  return NULL;
+    break;
+  case vmIntrinsics::_reverseBytes_i:
+    if (!Matcher::match_rule_supported(Op_ReverseBytesI))  return NULL;
+    break;
+  case vmIntrinsics::_reverseBytes_l:
+    if (!Matcher::match_rule_supported(Op_ReverseBytesL))  return NULL;
+    break;
+
   case vmIntrinsics::_Reference_get:
     // Use the intrinsic version of Reference.get() so that the value in
     // the referent field can be registered by the G1 pre-barrier code.
@@ -488,10 +525,13 @@
     tty->print_cr("Intrinsic %s", str);
   }
 #endif
-
+  ciMethod* callee = kit.callee();
+  const int bci    = kit.bci();
+
+  // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
     if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-      CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
@@ -500,6 +540,8 @@
                      (is_virtual() ? " virtual='1'" : ""),
                      C->unique() - nodes);
     }
+    // Push the result from the inlined method onto the stack.
+    kit.push_result();
     return kit.transfer_exceptions_into_jvms();
   }
 
@@ -508,12 +550,12 @@
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
-      CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg);
+      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg);
     } else {
       // Root compile
       tty->print("Did not generate intrinsic %s%s at bci:%d in",
                vmIntrinsics::name_at(intrinsic_id()),
-               (is_virtual() ? " (virtual)" : ""), kit.bci());
+               (is_virtual() ? " (virtual)" : ""), bci);
     }
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
@@ -532,9 +574,15 @@
     tty->print_cr("Predicate for intrinsic %s", str);
   }
 #endif
+  ciMethod* callee = kit.callee();
+  const int bci    = kit.bci();
 
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
+    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+    }
+    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
       C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'",
                      vmIntrinsics::name_at(intrinsic_id()),
@@ -549,12 +597,12 @@
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
-      CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg);
+      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg);
     } else {
       // Root compile
       tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
                vmIntrinsics::name_at(intrinsic_id()),
-               (is_virtual() ? " (virtual)" : ""), kit.bci());
+               (is_virtual() ? " (virtual)" : ""), bci);
     }
   }
   C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
@@ -566,6 +614,7 @@
   const bool is_store       = true;
   const bool is_native_ptr  = true;
   const bool is_static      = true;
+  const bool is_volatile    = true;
 
   if (!jvms()->has_method()) {
     // Root JVMState has a null method.
@@ -575,13 +624,11 @@
   }
   assert(merged_memory(), "");
 
+
   switch (intrinsic_id()) {
-  case vmIntrinsics::_hashCode:
-    return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
-  case vmIntrinsics::_identityHashCode:
-    return inline_native_hashcode(/*!virtual*/ false, is_static);
-  case vmIntrinsics::_getClass:
-    return inline_native_getClass();
+  case vmIntrinsics::_hashCode:                 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static);
+  case vmIntrinsics::_identityHashCode:         return inline_native_hashcode(/*!virtual*/ false,         is_static);
+  case vmIntrinsics::_getClass:                 return inline_native_getClass();
 
   case vmIntrinsics::_dsin:
   case vmIntrinsics::_dcos:
@@ -592,203 +639,114 @@
   case vmIntrinsics::_dexp:
   case vmIntrinsics::_dlog:
   case vmIntrinsics::_dlog10:
-  case vmIntrinsics::_dpow:
-    return inline_math_native(intrinsic_id());
+  case vmIntrinsics::_dpow:                     return inline_math_native(intrinsic_id());
 
   case vmIntrinsics::_min:
-  case vmIntrinsics::_max:
-    return inline_min_max(intrinsic_id());
-
-  case vmIntrinsics::_arraycopy:
-    return inline_arraycopy();
-
-  case vmIntrinsics::_compareTo:
-    return inline_string_compareTo();
-  case vmIntrinsics::_indexOf:
-    return inline_string_indexOf();
-  case vmIntrinsics::_equals:
-    return inline_string_equals();
-
-  case vmIntrinsics::_getObject:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false);
-  case vmIntrinsics::_getBoolean:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false);
-  case vmIntrinsics::_getByte:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false);
-  case vmIntrinsics::_getShort:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false);
-  case vmIntrinsics::_getChar:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false);
-  case vmIntrinsics::_getInt:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false);
-  case vmIntrinsics::_getLong:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false);
-  case vmIntrinsics::_getFloat:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false);
-  case vmIntrinsics::_getDouble:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false);
-
-  case vmIntrinsics::_putObject:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false);
-  case vmIntrinsics::_putBoolean:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false);
-  case vmIntrinsics::_putByte:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false);
-  case vmIntrinsics::_putShort:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false);
-  case vmIntrinsics::_putChar:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false);
-  case vmIntrinsics::_putInt:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false);
-  case vmIntrinsics::_putLong:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false);
-  case vmIntrinsics::_putFloat:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false);
-  case vmIntrinsics::_putDouble:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false);
-
-  case vmIntrinsics::_getByte_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false);
-  case vmIntrinsics::_getShort_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false);
-  case vmIntrinsics::_getChar_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false);
-  case vmIntrinsics::_getInt_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false);
-  case vmIntrinsics::_getLong_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false);
-  case vmIntrinsics::_getFloat_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false);
-  case vmIntrinsics::_getDouble_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false);
-  case vmIntrinsics::_getAddress_raw:
-    return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false);
-
-  case vmIntrinsics::_putByte_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false);
-  case vmIntrinsics::_putShort_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false);
-  case vmIntrinsics::_putChar_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false);
-  case vmIntrinsics::_putInt_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_INT, false);
-  case vmIntrinsics::_putLong_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false);
-  case vmIntrinsics::_putFloat_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false);
-  case vmIntrinsics::_putDouble_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false);
-  case vmIntrinsics::_putAddress_raw:
-    return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false);
-
-  case vmIntrinsics::_getObjectVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true);
-  case vmIntrinsics::_getBooleanVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true);
-  case vmIntrinsics::_getByteVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true);
-  case vmIntrinsics::_getShortVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true);
-  case vmIntrinsics::_getCharVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true);
-  case vmIntrinsics::_getIntVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true);
-  case vmIntrinsics::_getLongVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true);
-  case vmIntrinsics::_getFloatVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true);
-  case vmIntrinsics::_getDoubleVolatile:
-    return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true);
-
-  case vmIntrinsics::_putObjectVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true);
-  case vmIntrinsics::_putBooleanVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true);
-  case vmIntrinsics::_putByteVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true);
-  case vmIntrinsics::_putShortVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true);
-  case vmIntrinsics::_putCharVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true);
-  case vmIntrinsics::_putIntVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true);
-  case vmIntrinsics::_putLongVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true);
-  case vmIntrinsics::_putFloatVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true);
-  case vmIntrinsics::_putDoubleVolatile:
-    return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true);
-
-  case vmIntrinsics::_prefetchRead:
-    return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
-  case vmIntrinsics::_prefetchWrite:
-    return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static);
-  case vmIntrinsics::_prefetchReadStatic:
-    return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
-  case vmIntrinsics::_prefetchWriteStatic:
-    return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
-
-  case vmIntrinsics::_compareAndSwapObject:
-    return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
-  case vmIntrinsics::_compareAndSwapInt:
-    return inline_unsafe_load_store(T_INT, LS_cmpxchg);
-  case vmIntrinsics::_compareAndSwapLong:
-    return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
-
-  case vmIntrinsics::_putOrderedObject:
-    return inline_unsafe_ordered_store(T_OBJECT);
-  case vmIntrinsics::_putOrderedInt:
-    return inline_unsafe_ordered_store(T_INT);
-  case vmIntrinsics::_putOrderedLong:
-    return inline_unsafe_ordered_store(T_LONG);
-
-  case vmIntrinsics::_getAndAddInt:
-    return inline_unsafe_load_store(T_INT, LS_xadd);
-  case vmIntrinsics::_getAndAddLong:
-    return inline_unsafe_load_store(T_LONG, LS_xadd);
-  case vmIntrinsics::_getAndSetInt:
-    return inline_unsafe_load_store(T_INT, LS_xchg);
-  case vmIntrinsics::_getAndSetLong:
-    return inline_unsafe_load_store(T_LONG, LS_xchg);
-  case vmIntrinsics::_getAndSetObject:
-    return inline_unsafe_load_store(T_OBJECT, LS_xchg);
-
-  case vmIntrinsics::_currentThread:
-    return inline_native_currentThread();
-  case vmIntrinsics::_isInterrupted:
-    return inline_native_isInterrupted();
+  case vmIntrinsics::_max:                      return inline_min_max(intrinsic_id());
+
+  case vmIntrinsics::_arraycopy:                return inline_arraycopy();
+
+  case vmIntrinsics::_compareTo:                return inline_string_compareTo();
+  case vmIntrinsics::_indexOf:                  return inline_string_indexOf();
+  case vmIntrinsics::_equals:                   return inline_string_equals();
+
+  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile);
+  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
+  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile);
+  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
+  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
+  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
+  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
+  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
+  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
+
+  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile);
+  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile);
+  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile);
+  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
+  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
+  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
+  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
+  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
+  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
+
+  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile);
+  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile);
+  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile);
+  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile);
+  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile);
+  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
+  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
+  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);
+
+  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile);
+  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile);
+  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile);
+  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile);
+  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile);
+  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
+  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
+  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile);
+
+  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
+  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
+  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
+  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
+  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
+  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
+  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
+  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
+  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);
+
+  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
+  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
+  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
+  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
+  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
+  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
+  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
+  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
+  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);
+
+  case vmIntrinsics::_prefetchRead:             return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
+  case vmIntrinsics::_prefetchWrite:            return inline_unsafe_prefetch(!is_native_ptr,  is_store, !is_static);
+  case vmIntrinsics::_prefetchReadStatic:       return inline_unsafe_prefetch(!is_native_ptr, !is_store,  is_static);
+  case vmIntrinsics::_prefetchWriteStatic:      return inline_unsafe_prefetch(!is_native_ptr,  is_store,  is_static);
+
+  case vmIntrinsics::_compareAndSwapObject:     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
+  case vmIntrinsics::_compareAndSwapInt:        return inline_unsafe_load_store(T_INT,    LS_cmpxchg);
+  case vmIntrinsics::_compareAndSwapLong:       return inline_unsafe_load_store(T_LONG,   LS_cmpxchg);
+
+  case vmIntrinsics::_putOrderedObject:         return inline_unsafe_ordered_store(T_OBJECT);
+  case vmIntrinsics::_putOrderedInt:            return inline_unsafe_ordered_store(T_INT);
+  case vmIntrinsics::_putOrderedLong:           return inline_unsafe_ordered_store(T_LONG);
+
+  case vmIntrinsics::_getAndAddInt:             return inline_unsafe_load_store(T_INT,    LS_xadd);
+  case vmIntrinsics::_getAndAddLong:            return inline_unsafe_load_store(T_LONG,   LS_xadd);
+  case vmIntrinsics::_getAndSetInt:             return inline_unsafe_load_store(T_INT,    LS_xchg);
+  case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
+  case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
+
+  case vmIntrinsics::_currentThread:            return inline_native_currentThread();
+  case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
 
 #ifdef TRACE_HAVE_INTRINSICS
-  case vmIntrinsics::_classID:
-    return inline_native_classID();
-  case vmIntrinsics::_threadID:
-    return inline_native_threadID();
-  case vmIntrinsics::_counterTime:
-    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
+  case vmIntrinsics::_classID:                  return inline_native_classID();
+  case vmIntrinsics::_threadID:                 return inline_native_threadID();
+  case vmIntrinsics::_counterTime:              return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime");
 #endif
-  case vmIntrinsics::_currentTimeMillis:
-    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
-  case vmIntrinsics::_nanoTime:
-    return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
-  case vmIntrinsics::_allocateInstance:
-    return inline_unsafe_allocate();
-  case vmIntrinsics::_copyMemory:
-    return inline_unsafe_copyMemory();
-  case vmIntrinsics::_newArray:
-    return inline_native_newArray();
-  case vmIntrinsics::_getLength:
-    return inline_native_getLength();
-  case vmIntrinsics::_copyOf:
-    return inline_array_copyOf(false);
-  case vmIntrinsics::_copyOfRange:
-    return inline_array_copyOf(true);
-  case vmIntrinsics::_equalsC:
-    return inline_array_equals();
-  case vmIntrinsics::_clone:
-    return inline_native_clone(intrinsic()->is_virtual());
-
-  case vmIntrinsics::_isAssignableFrom:
-    return inline_native_subtype_check();
+  case vmIntrinsics::_currentTimeMillis:        return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis");
+  case vmIntrinsics::_nanoTime:                 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime");
+  case vmIntrinsics::_allocateInstance:         return inline_unsafe_allocate();
+  case vmIntrinsics::_copyMemory:               return inline_unsafe_copyMemory();
+  case vmIntrinsics::_newArray:                 return inline_native_newArray();
+  case vmIntrinsics::_getLength:                return inline_native_getLength();
+  case vmIntrinsics::_copyOf:                   return inline_array_copyOf(false);
+  case vmIntrinsics::_copyOfRange:              return inline_array_copyOf(true);
+  case vmIntrinsics::_equalsC:                  return inline_array_equals();
+  case vmIntrinsics::_clone:                    return inline_native_clone(intrinsic()->is_virtual());
+
+  case vmIntrinsics::_isAssignableFrom:         return inline_native_subtype_check();
 
   case vmIntrinsics::_isInstance:
   case vmIntrinsics::_getModifiers:
@@ -797,44 +755,32 @@
   case vmIntrinsics::_isPrimitive:
   case vmIntrinsics::_getSuperclass:
   case vmIntrinsics::_getComponentType:
-  case vmIntrinsics::_getClassAccessFlags:
-    return inline_native_Class_query(intrinsic_id());
+  case vmIntrinsics::_getClassAccessFlags:      return inline_native_Class_query(intrinsic_id());
 
   case vmIntrinsics::_floatToRawIntBits:
   case vmIntrinsics::_floatToIntBits:
   case vmIntrinsics::_intBitsToFloat:
   case vmIntrinsics::_doubleToRawLongBits:
   case vmIntrinsics::_doubleToLongBits:
-  case vmIntrinsics::_longBitsToDouble:
-    return inline_fp_conversions(intrinsic_id());
+  case vmIntrinsics::_longBitsToDouble:         return inline_fp_conversions(intrinsic_id());
 
   case vmIntrinsics::_numberOfLeadingZeros_i:
   case vmIntrinsics::_numberOfLeadingZeros_l:
-    return inline_numberOfLeadingZeros(intrinsic_id());
-
   case vmIntrinsics::_numberOfTrailingZeros_i:
   case vmIntrinsics::_numberOfTrailingZeros_l:
-    return inline_numberOfTrailingZeros(intrinsic_id());
-
   case vmIntrinsics::_bitCount_i:
   case vmIntrinsics::_bitCount_l:
-    return inline_bitCount(intrinsic_id());
-
   case vmIntrinsics::_reverseBytes_i:
   case vmIntrinsics::_reverseBytes_l:
   case vmIntrinsics::_reverseBytes_s:
-  case vmIntrinsics::_reverseBytes_c:
-    return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
-
-  case vmIntrinsics::_getCallerClass:
-    return inline_native_Reflection_getCallerClass();
-
-  case vmIntrinsics::_Reference_get:
-    return inline_reference_get();
+  case vmIntrinsics::_reverseBytes_c:           return inline_number_methods(intrinsic_id());
+
+  case vmIntrinsics::_getCallerClass:           return inline_native_Reflection_getCallerClass();
+
+  case vmIntrinsics::_Reference_get:            return inline_reference_get();
 
   case vmIntrinsics::_aescrypt_encryptBlock:
-  case vmIntrinsics::_aescrypt_decryptBlock:
-    return inline_aescrypt_Block(intrinsic_id());
+  case vmIntrinsics::_aescrypt_decryptBlock:    return inline_aescrypt_Block(intrinsic_id());
 
   case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt:
   case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt:
@@ -883,13 +829,13 @@
   }
 }
 
-//------------------------------push_result------------------------------
+//------------------------------set_result-------------------------------
 // Helper function for finishing intrinsics.
-void LibraryCallKit::push_result(RegionNode* region, PhiNode* value) {
+void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
   record_for_igvn(region);
   set_control(_gvn.transform(region));
-  BasicType value_type = value->type()->basic_type();
-  push_node(value_type, _gvn.transform(value));
+  set_result(_gvn.transform(value));
+  assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
 }
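+
+// Typical use, as in finish_pow_exp() and inline_native_newArray() below:
+//
+//   RegionNode* region = new (C) RegionNode(3);
+//   PhiNode*    phi    = new (C) PhiNode(region, Type::DOUBLE);
+//   // ... init_req() each incoming path on region and phi ...
+//   set_result(region, phi);  // transforms both and records the region for IGVN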
 
 //------------------------------generate_guard---------------------------
@@ -1078,7 +1024,6 @@
 // to Int nodes containing the lengths of str1 and str2.
 //
 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) {
-
   Node* result = NULL;
   switch (opcode) {
   case Op_StrIndexOf:
@@ -1105,51 +1050,23 @@
 }
 
 //------------------------------inline_string_compareTo------------------------
+// public int java.lang.String.compareTo(String anotherString);
 bool LibraryCallKit::inline_string_compareTo() {
-
-  if (!Matcher::has_match_rule(Op_StrComp)) return false;
-
-  _sp += 2;
-  Node *argument = pop();  // pop non-receiver first:  it was pushed second
-  Node *receiver = pop();
-
-  // Null check on self without removing any arguments.  The argument
-  // null check technically happens in the wrong place, which can lead to
-  // invalid stack traces when string compare is inlined into a method
-  // which handles NullPointerExceptions.
-  _sp += 2;
-  receiver = do_null_check(receiver, T_OBJECT);
-  argument = do_null_check(argument, T_OBJECT);
-  _sp -= 2;
+  Node* receiver = null_check(argument(0));
+  Node* arg      = null_check(argument(1));
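+  // With slot-based addressing, argument(0) is the receiver and argument(1)
+  // the String operand; no _sp adjustment is needed around the null checks.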
   if (stopped()) {
     return true;
   }
-
-  Node* compare = make_string_method_node(Op_StrComp, receiver, argument);
-  push(compare);
+  set_result(make_string_method_node(Op_StrComp, receiver, arg));
   return true;
 }
 
 //------------------------------inline_string_equals------------------------
 bool LibraryCallKit::inline_string_equals() {
-
-  if (!Matcher::has_match_rule(Op_StrEquals)) return false;
-
-  int nargs = 2;
-  _sp += nargs;
-  Node* argument = pop();  // pop non-receiver first:  it was pushed second
-  Node* receiver = pop();
-
-  // Null check on self without removing any arguments.  The argument
-  // null check technically happens in the wrong place, which can lead to
-  // invalid stack traces when string compare is inlined into a method
-  // which handles NullPointerExceptions.
-  _sp += nargs;
-  receiver = do_null_check(receiver, T_OBJECT);
-  //should not do null check for argument for String.equals(), because spec
-  //allows to specify NULL as argument.
-  _sp -= nargs;
-
+  Node* receiver = null_check_receiver();
+  // NOTE: Do not null check the argument for String.equals() because the
+  // spec allows NULL as an argument.
+  Node* argument = this->argument(1);
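+  // ('this->' is required: the local 'argument' declared here shadows
+  // GraphKit::argument() for the rest of the method.)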
   if (stopped()) {
     return true;
   }
@@ -1173,9 +1090,7 @@
   ciInstanceKlass* klass = env()->String_klass();
 
   if (!stopped()) {
-    _sp += nargs;          // gen_instanceof might do an uncommon trap
     Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass)));
-    _sp -= nargs;
     Node* cmp  = _gvn.transform(new (C) CmpINode(inst, intcon(1)));
     Node* bol  = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
 
@@ -1207,7 +1122,7 @@
     Node* receiver_cnt  = load_String_length(no_ctrl, receiver);
 
     // Get start addr of argument
-    Node* argument_val   = load_String_value(no_ctrl, argument);
+    Node* argument_val    = load_String_value(no_ctrl, argument);
     Node* argument_offset = load_String_offset(no_ctrl, argument);
     Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR);
 
@@ -1236,24 +1151,15 @@
   set_control(_gvn.transform(region));
   record_for_igvn(region);
 
-  push(_gvn.transform(phi));
-
+  set_result(_gvn.transform(phi));
   return true;
 }
 
 //------------------------------inline_array_equals----------------------------
 bool LibraryCallKit::inline_array_equals() {
-
-  if (!Matcher::has_match_rule(Op_AryEq)) return false;
-
-  _sp += 2;
-  Node *argument2 = pop();
-  Node *argument1 = pop();
-
-  Node* equals =
-    _gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS),
-                                        argument1, argument2) );
-  push(equals);
+  Node* arg1 = argument(0);
+  Node* arg2 = argument(1);
+  set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2)));
   return true;
 }
 
@@ -1325,7 +1231,7 @@
   float likely   = PROB_LIKELY(0.9);
   float unlikely = PROB_UNLIKELY(0.9);
 
-  const int nargs = 2; // number of arguments to push back for uncommon trap in predicate
+  const int nargs = 0; // no arguments to push back for uncommon trap in predicate
 
   Node* source        = load_String_value(no_ctrl, string_object);
   Node* sourceOffset  = load_String_offset(no_ctrl, string_object);
@@ -1396,10 +1302,8 @@
 
 //------------------------------inline_string_indexOf------------------------
 bool LibraryCallKit::inline_string_indexOf() {
-
-  _sp += 2;
-  Node *argument = pop();  // pop non-receiver first:  it was pushed second
-  Node *receiver = pop();
+  Node* receiver = argument(0);
+  Node* arg      = argument(1);
 
   Node* result;
   // Disable the use of pcmpestri until it can be guaranteed that
@@ -1409,15 +1313,8 @@
     // Generate SSE4.2 version of indexOf
     // We currently only have match rules that use SSE4.2
 
-    // Null check on self without removing any arguments.  The argument
-    // null check technically happens in the wrong place, which can lead to
-    // invalid stack traces when string compare is inlined into a method
-    // which handles NullPointerExceptions.
-    _sp += 2;
-    receiver = do_null_check(receiver, T_OBJECT);
-    argument = do_null_check(argument, T_OBJECT);
-    _sp -= 2;
-
+    receiver = null_check(receiver);
+    arg      = null_check(arg);
     if (stopped()) {
       return true;
     }
@@ -1439,12 +1336,12 @@
     Node* source_cnt  = load_String_length(no_ctrl, receiver);
 
     // Get start addr of substring
-    Node* substr = load_String_value(no_ctrl, argument);
-    Node* substr_offset = load_String_offset(no_ctrl, argument);
+    Node* substr = load_String_value(no_ctrl, arg);
+    Node* substr_offset = load_String_offset(no_ctrl, arg);
     Node* substr_start = array_element_address(substr, substr_offset, T_CHAR);
 
     // Get length of source string
-    Node* substr_cnt  = load_String_length(no_ctrl, argument);
+    Node* substr_cnt  = load_String_length(no_ctrl, arg);
 
     // Check for substr count > string count
     Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) );
@@ -1477,10 +1374,10 @@
 
   } else { // Use LibraryCallKit::string_indexOf
     // don't intrinsify if argument isn't a constant string.
-    if (!argument->is_Con()) {
+    if (!arg->is_Con()) {
      return false;
     }
-    const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr();
+    const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr();
     if (str_type == NULL) {
       return false;
     }
@@ -1511,21 +1408,15 @@
      return false;
     }
 
-    // Null check on self without removing any arguments.  The argument
-    // null check technically happens in the wrong place, which can lead to
-    // invalid stack traces when string compare is inlined into a method
-    // which handles NullPointerExceptions.
-    _sp += 2;
-    receiver = do_null_check(receiver, T_OBJECT);
-    // No null check on the argument is needed since it's a constant String oop.
-    _sp -= 2;
+    receiver = null_check(receiver, T_OBJECT);
+    // NOTE: No null check on the argument is needed since it's a constant String oop.
     if (stopped()) {
       return true;
     }
 
     // The null string as a pattern always returns 0 (match at beginning of string)
     if (c == 0) {
-      push(intcon(0));
+      set_result(intcon(0));
       return true;
     }
 
@@ -1548,47 +1439,54 @@
 
     result = string_indexOf(receiver, pat, o, cache, md2);
   }
-
-  push(result);
+  set_result(result);
   return true;
 }
 
-//--------------------------pop_math_arg--------------------------------
-// Pop a double argument to a math function from the stack
-// rounding it if necessary.
-Node * LibraryCallKit::pop_math_arg() {
-  Node *arg = pop_pair();
-  if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 )
-    arg = _gvn.transform( new (C) RoundDoubleNode(0, arg) );
-  return arg;
+//--------------------------round_double_node--------------------------------
+// Round a double node if necessary.
+Node* LibraryCallKit::round_double_node(Node* n) {
+  if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
+    n = _gvn.transform(new (C) RoundDoubleNode(0, n));
+  return n;
+}
+
+//------------------------------inline_math-----------------------------------
+// public static double Math.abs(double)
+// public static double Math.sqrt(double)
+// public static double Math.log(double)
+// public static double Math.log10(double)
+bool LibraryCallKit::inline_math(vmIntrinsics::ID id) {
+  Node* arg = round_double_node(argument(0));
+  Node* n;
+  switch (id) {
+  case vmIntrinsics::_dabs:   n = new (C) AbsDNode(    arg);  break;
+  case vmIntrinsics::_dsqrt:  n = new (C) SqrtDNode(0, arg);  break;
+  case vmIntrinsics::_dlog:   n = new (C) LogDNode(    arg);  break;
+  case vmIntrinsics::_dlog10: n = new (C) Log10DNode(  arg);  break;
+  default:  fatal_unexpected_iid(id);  break;
+  }
+  set_result(_gvn.transform(n));
+  return true;
 }
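+
+// The rewritten intrinsics above all share the same shape; a minimal sketch
+// (illustrative names only; inline_example is not a real intrinsic):
+//
+//   bool LibraryCallKit::inline_example() {
+//     Node* arg = argument(0);  // read the incoming slot directly
+//     Node* n   = _gvn.transform(new (C) AbsDNode(arg));
+//     set_result(n);            // replaces the old push()/push_pair()
+//     return true;
+//   }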
 
 //------------------------------inline_trig----------------------------------
 // Inline sin/cos/tan instructions, if possible.  If rounding is required, do
 // argument reduction which will turn into a fast/slow diamond.
 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) {
-  _sp += arg_size();            // restore stack pointer
-  Node* arg = pop_math_arg();
-  Node* trig = NULL;
+  Node* arg = round_double_node(argument(0));
+  Node* n = NULL;
 
   switch (id) {
-  case vmIntrinsics::_dsin:
-    trig = _gvn.transform((Node*)new (C) SinDNode(arg));
-    break;
-  case vmIntrinsics::_dcos:
-    trig = _gvn.transform((Node*)new (C) CosDNode(arg));
-    break;
-  case vmIntrinsics::_dtan:
-    trig = _gvn.transform((Node*)new (C) TanDNode(arg));
-    break;
-  default:
-    assert(false, "bad intrinsic was passed in");
-    return false;
+  case vmIntrinsics::_dsin:  n = new (C) SinDNode(arg);  break;
+  case vmIntrinsics::_dcos:  n = new (C) CosDNode(arg);  break;
+  case vmIntrinsics::_dtan:  n = new (C) TanDNode(arg);  break;
+  default:  fatal_unexpected_iid(id);  break;
   }
+  n = _gvn.transform(n);
 
   // Rounding required?  Check for argument reduction!
-  if( Matcher::strict_fp_requires_explicit_rounding ) {
-
+  if (Matcher::strict_fp_requires_explicit_rounding) {
     static const double     pi_4 =  0.7853981633974483;
     static const double neg_pi_4 = -0.7853981633974483;
     // pi/2 in 80-bit extended precision
@@ -1623,8 +1521,8 @@
     // probably do the math inside the SIN encoding.
 
     // Make the merge point
-    RegionNode *r = new (C) RegionNode(3);
-    Node *phi = new (C) PhiNode(r,Type::DOUBLE);
+    RegionNode* r = new (C) RegionNode(3);
+    Node* phi = new (C) PhiNode(r, Type::DOUBLE);
 
     // Flatten arg so we need only 1 test
     Node *abs = _gvn.transform(new (C) AbsDNode(arg));
@@ -1639,7 +1537,7 @@
     set_control(opt_iff(r,iff));
 
     // Set fast path result
-    phi->init_req(2,trig);
+    phi->init_req(2, n);
 
     // Slow path - non-blocking leaf call
     Node* call = NULL;
@@ -1661,37 +1559,18 @@
       break;
     }
     assert(control()->in(0) == call, "");
-    Node* slow_result = _gvn.transform(new (C) ProjNode(call,TypeFunc::Parms));
-    r->init_req(1,control());
-    phi->init_req(1,slow_result);
+    Node* slow_result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms));
+    r->init_req(1, control());
+    phi->init_req(1, slow_result);
 
     // Post-merge
     set_control(_gvn.transform(r));
     record_for_igvn(r);
-    trig = _gvn.transform(phi);
+    n = _gvn.transform(phi);
 
     C->set_has_split_ifs(true); // Has chance for split-if optimization
   }
-  // Push result back on JVM stack
-  push_pair(trig);
-  return true;
-}
-
-//------------------------------inline_sqrt-------------------------------------
-// Inline square root instruction, if possible.
-bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_dsqrt, "Not square root");
-  _sp += arg_size();        // restore stack pointer
-  push_pair(_gvn.transform(new (C) SqrtDNode(0, pop_math_arg())));
-  return true;
-}
-
-//------------------------------inline_abs-------------------------------------
-// Inline absolute value instruction, if possible.
-bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_dabs, "Not absolute value");
-  _sp += arg_size();        // restore stack pointer
-  push_pair(_gvn.transform(new (C) AbsDNode(pop_math_arg())));
+  set_result(n);
   return true;
 }
 
@@ -1700,24 +1579,18 @@
   // result = (result.isNaN()) ? funcAddr() : result;
   // Check: If isNaN() by checking result!=result? then either trap
   // or go to runtime
-  Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result,result));
+  Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result, result));
   // Build the boolean node
-  Node* bolisnum = _gvn.transform( new (C) BoolNode(cmpisnan, BoolTest::eq) );
+  Node* bolisnum = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::eq));
 
   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
-    {
-      BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
-      // End the current control-flow path
-      push_pair(x);
-      if (y != NULL) {
-        push_pair(y);
-      }
+    { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT);
       // The pow or exp intrinsic returned a NaN, which requires a call
       // to the runtime.  Recompile with the runtime call.
       uncommon_trap(Deoptimization::Reason_intrinsic,
                     Deoptimization::Action_make_not_entrant);
     }
-    push_pair(result);
+    set_result(result);
   } else {
     // If this inlining ever returned NaN in the past, we compile a call
     // to the runtime to properly handle corner cases
@@ -1727,7 +1600,7 @@
     Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) );
 
     if (!if_slow->is_top()) {
-      RegionNode* result_region = new(C) RegionNode(3);
+      RegionNode* result_region = new (C) RegionNode(3);
       PhiNode*    result_val = new (C) PhiNode(result_region, Type::DOUBLE);
 
       result_region->init_req(1, if_fast);
@@ -1747,9 +1620,9 @@
 
       result_region->init_req(2, control());
       result_val->init_req(2, value);
-      push_result(result_region, result_val);
+      set_result(result_region, result_val);
     } else {
-      push_pair(result);
+      set_result(result);
     }
   }
 }
@@ -1757,25 +1630,19 @@
 //------------------------------inline_exp-------------------------------------
 // Inline exp instructions, if possible.  The Intel hardware only misses
 // really odd corner cases (+/- Infinity).  Just uncommon-trap them.
-bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_dexp, "Not exp");
-
-  _sp += arg_size();        // restore stack pointer
-  Node *x = pop_math_arg();
-  Node *result = _gvn.transform(new (C) ExpDNode(0,x));
-
-  finish_pow_exp(result, x, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
+bool LibraryCallKit::inline_exp() {
+  Node* arg = round_double_node(argument(0));
+  Node* n   = _gvn.transform(new (C) ExpDNode(0, arg));
+
+  finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
-
   return true;
 }
 
 //------------------------------inline_pow-------------------------------------
 // Inline power instructions, if possible.
-bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_dpow, "Not pow");
-
+bool LibraryCallKit::inline_pow() {
   // Pseudocode for pow
   // if (x <= 0.0) {
   //   long longy = (long)y;
@@ -1793,15 +1660,14 @@
   // }
   // return result;
 
-  _sp += arg_size();        // restore stack pointer
-  Node* y = pop_math_arg();
-  Node* x = pop_math_arg();
+  Node* x = round_double_node(argument(0));
+  Node* y = round_double_node(argument(2));
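+  // y starts at slot 2 because x, a double, occupies argument slots 0 and 1.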
 
   Node* result = NULL;
 
   if (!too_many_traps(Deoptimization::Reason_intrinsic)) {
     // Short form: skip the fancy tests and just check for NaN result.
-    result = _gvn.transform( new (C) PowDNode(0, x, y) );
+    result = _gvn.transform(new (C) PowDNode(0, x, y));
   } else {
     // If this inlining ever returned NaN in the past, include all
     // checks + call to the runtime.
@@ -1919,55 +1785,23 @@
     // Post merge
     set_control(_gvn.transform(r));
     record_for_igvn(r);
-    result=_gvn.transform(phi);
+    result = _gvn.transform(phi);
   }
 
   finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
-
-  return true;
-}
-
-//------------------------------inline_trans-------------------------------------
-// Inline transcendental instructions, if possible.  The Intel hardware gets
-// these right, no funny corner cases missed.
-bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) {
-  _sp += arg_size();        // restore stack pointer
-  Node* arg = pop_math_arg();
-  Node* trans = NULL;
-
-  switch (id) {
-  case vmIntrinsics::_dlog:
-    trans = _gvn.transform((Node*)new (C) LogDNode(arg));
-    break;
-  case vmIntrinsics::_dlog10:
-    trans = _gvn.transform((Node*)new (C) Log10DNode(arg));
-    break;
-  default:
-    assert(false, "bad intrinsic was passed in");
-    return false;
-  }
-
-  // Push result back on JVM stack
-  push_pair(trans);
   return true;
 }
 
 //------------------------------runtime_math-----------------------------
 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) {
-  Node* a = NULL;
-  Node* b = NULL;
-
   assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(),
          "must be (DD)D or (D)D type");
 
   // Inputs
-  _sp += arg_size();        // restore stack pointer
-  if (call_type == OptoRuntime::Math_DD_D_Type()) {
-    b = pop_math_arg();
-  }
-  a = pop_math_arg();
+  Node* a = round_double_node(argument(0));
+  Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL;
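+  // For the (DD)D case the second double starts at slot 2, since the first
+  // occupies slots 0 and 1.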
 
   const TypePtr* no_memory_effects = NULL;
   Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName,
@@ -1979,43 +1813,43 @@
   assert(value_top == top(), "second value must be top");
 #endif
 
-  push_pair(value);
+  set_result(value);
   return true;
 }
 
 //------------------------------inline_math_native-----------------------------
 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) {
+#define FN_PTR(f) CAST_FROM_FN_PTR(address, f)
   switch (id) {
     // These intrinsics are not properly supported on all hardware
-  case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) :
-    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS");
-  case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) :
-    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN");
-  case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) :
-    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN");
-
-  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) :
-    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG");
-  case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) :
-    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10");
+  case vmIntrinsics::_dcos:   return Matcher::has_match_rule(Op_CosD)   ? inline_trig(id) :
+    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos),   "COS");
+  case vmIntrinsics::_dsin:   return Matcher::has_match_rule(Op_SinD)   ? inline_trig(id) :
+    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin),   "SIN");
+  case vmIntrinsics::_dtan:   return Matcher::has_match_rule(Op_TanD)   ? inline_trig(id) :
+    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan),   "TAN");
+
+  case vmIntrinsics::_dlog:   return Matcher::has_match_rule(Op_LogD)   ? inline_math(id) :
+    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog),   "LOG");
+  case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) :
+    runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
 
     // These intrinsics are supported on all hardware
-  case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_sqrt(id) : false;
-  case vmIntrinsics::_dabs:  return Matcher::has_match_rule(Op_AbsD)  ? inline_abs(id)  : false;
-
-  case vmIntrinsics::_dexp:  return
-    Matcher::has_match_rule(Op_ExpD) ? inline_exp(id) :
-    runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP");
-  case vmIntrinsics::_dpow:  return
-    Matcher::has_match_rule(Op_PowD) ? inline_pow(id) :
-    runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW");
+  case vmIntrinsics::_dsqrt:  return Matcher::has_match_rule(Op_SqrtD)  ? inline_math(id) : false;
+  case vmIntrinsics::_dabs:   return Matcher::has_match_rule(Op_AbsD)   ? inline_math(id) : false;
+
+  case vmIntrinsics::_dexp:   return Matcher::has_match_rule(Op_ExpD)   ? inline_exp()    :
+    runtime_math(OptoRuntime::Math_D_D_Type(),  FN_PTR(SharedRuntime::dexp),  "EXP");
+  case vmIntrinsics::_dpow:   return Matcher::has_match_rule(Op_PowD)   ? inline_pow()    :
+    runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow),  "POW");
+#undef FN_PTR
 
    // These intrinsics are not yet correctly implemented
   case vmIntrinsics::_datan2:
     return false;
 
   default:
-    ShouldNotReachHere();
+    fatal_unexpected_iid(id);
     return false;
   }
 }
@@ -2030,8 +1864,7 @@
 
 //----------------------------inline_min_max-----------------------------------
 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) {
-  push(generate_min_max(id, argument(0), argument(1)));
-
+  set_result(generate_min_max(id, argument(0), argument(1)));
   return true;
 }
 
@@ -2254,99 +2087,37 @@
   }
 }
 
-//-------------------inline_numberOfLeadingZeros_int/long-----------------------
-// inline int Integer.numberOfLeadingZeros(int)
-// inline int Long.numberOfLeadingZeros(long)
-bool LibraryCallKit::inline_numberOfLeadingZeros(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_numberOfLeadingZeros_i || id == vmIntrinsics::_numberOfLeadingZeros_l, "not numberOfLeadingZeros");
-  if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false;
-  if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false;
-  _sp += arg_size();  // restore stack pointer
+//--------------------------inline_number_methods-----------------------------
+// inline int     Integer.numberOfLeadingZeros(int)
+// inline int        Long.numberOfLeadingZeros(long)
+//
+// inline int     Integer.numberOfTrailingZeros(int)
+// inline int        Long.numberOfTrailingZeros(long)
+//
+// inline int     Integer.bitCount(int)
+// inline int        Long.bitCount(long)
+//
+// inline char  Character.reverseBytes(char)
+// inline short     Short.reverseBytes(short)
+// inline int     Integer.reverseBytes(int)
+// inline long       Long.reverseBytes(long)
+bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) {
+  Node* arg = argument(0);
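+  // One fetch covers every variant: for the long flavors the value spans
+  // slots 0 and 1, and argument(0) yields the whole two-slot value.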
+  Node* n;
   switch (id) {
-  case vmIntrinsics::_numberOfLeadingZeros_i:
-    push(_gvn.transform(new (C) CountLeadingZerosINode(pop())));
-    break;
-  case vmIntrinsics::_numberOfLeadingZeros_l:
-    push(_gvn.transform(new (C) CountLeadingZerosLNode(pop_pair())));
-    break;
-  default:
-    ShouldNotReachHere();
-  }
-  return true;
-}
-
-//-------------------inline_numberOfTrailingZeros_int/long----------------------
-// inline int Integer.numberOfTrailingZeros(int)
-// inline int Long.numberOfTrailingZeros(long)
-bool LibraryCallKit::inline_numberOfTrailingZeros(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_numberOfTrailingZeros_i || id == vmIntrinsics::_numberOfTrailingZeros_l, "not numberOfTrailingZeros");
-  if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false;
-  if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false;
-  _sp += arg_size();  // restore stack pointer
-  switch (id) {
-  case vmIntrinsics::_numberOfTrailingZeros_i:
-    push(_gvn.transform(new (C) CountTrailingZerosINode(pop())));
-    break;
-  case vmIntrinsics::_numberOfTrailingZeros_l:
-    push(_gvn.transform(new (C) CountTrailingZerosLNode(pop_pair())));
-    break;
-  default:
-    ShouldNotReachHere();
+  case vmIntrinsics::_numberOfLeadingZeros_i:   n = new (C) CountLeadingZerosINode( arg);  break;
+  case vmIntrinsics::_numberOfLeadingZeros_l:   n = new (C) CountLeadingZerosLNode( arg);  break;
+  case vmIntrinsics::_numberOfTrailingZeros_i:  n = new (C) CountTrailingZerosINode(arg);  break;
+  case vmIntrinsics::_numberOfTrailingZeros_l:  n = new (C) CountTrailingZerosLNode(arg);  break;
+  case vmIntrinsics::_bitCount_i:               n = new (C) PopCountINode(          arg);  break;
+  case vmIntrinsics::_bitCount_l:               n = new (C) PopCountLNode(          arg);  break;
+  case vmIntrinsics::_reverseBytes_c:           n = new (C) ReverseBytesUSNode(0,   arg);  break;
+  case vmIntrinsics::_reverseBytes_s:           n = new (C) ReverseBytesSNode( 0,   arg);  break;
+  case vmIntrinsics::_reverseBytes_i:           n = new (C) ReverseBytesINode( 0,   arg);  break;
+  case vmIntrinsics::_reverseBytes_l:           n = new (C) ReverseBytesLNode( 0,   arg);  break;
+  default:  fatal_unexpected_iid(id);  break;
   }
-  return true;
-}
-
-//----------------------------inline_bitCount_int/long-----------------------
-// inline int Integer.bitCount(int)
-// inline int Long.bitCount(long)
-bool LibraryCallKit::inline_bitCount(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_bitCount_i || id == vmIntrinsics::_bitCount_l, "not bitCount");
-  if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false;
-  if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false;
-  _sp += arg_size();  // restore stack pointer
-  switch (id) {
-  case vmIntrinsics::_bitCount_i:
-    push(_gvn.transform(new (C) PopCountINode(pop())));
-    break;
-  case vmIntrinsics::_bitCount_l:
-    push(_gvn.transform(new (C) PopCountLNode(pop_pair())));
-    break;
-  default:
-    ShouldNotReachHere();
-  }
-  return true;
-}
-
-//----------------------------inline_reverseBytes_int/long/char/short-------------------
-// inline Integer.reverseBytes(int)
-// inline Long.reverseBytes(long)
-// inline Character.reverseBytes(char)
-// inline Short.reverseBytes(short)
-bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
-  assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l ||
-         id == vmIntrinsics::_reverseBytes_c || id == vmIntrinsics::_reverseBytes_s,
-         "not reverse Bytes");
-  if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI))  return false;
-  if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL))  return false;
-  if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false;
-  if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS))  return false;
-  _sp += arg_size();  // restore stack pointer
-  switch (id) {
-  case vmIntrinsics::_reverseBytes_i:
-    push(_gvn.transform(new (C) ReverseBytesINode(0, pop())));
-    break;
-  case vmIntrinsics::_reverseBytes_l:
-    push_pair(_gvn.transform(new (C) ReverseBytesLNode(0, pop_pair())));
-    break;
-  case vmIntrinsics::_reverseBytes_c:
-    push(_gvn.transform(new (C) ReverseBytesUSNode(0, pop())));
-    break;
-  case vmIntrinsics::_reverseBytes_s:
-    push(_gvn.transform(new (C) ReverseBytesSNode(0, pop())));
-    break;
-  default:
-    ;
-  }
+  set_result(_gvn.transform(n));
   return true;
 }
 
@@ -2356,7 +2127,7 @@
 
 // Helper that guards and inserts a pre-barrier.
 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset,
-                                        Node* pre_val, int nargs, bool need_mem_bar) {
+                                        Node* pre_val, bool need_mem_bar) {
   // We could be accessing the referent field of a reference object. If so, when G1
   // is enabled, we need to log the value in the referent field in an SATB buffer.
   // This routine performs some compile time filters and generates suitable
@@ -2406,8 +2177,8 @@
   //   }
   // }
 
-  float likely  = PROB_LIKELY(0.999);
-  float unlikely  = PROB_UNLIKELY(0.999);
+  float likely   = PROB_LIKELY(  0.999);
+  float unlikely = PROB_UNLIKELY(0.999);
 
   IdealKit ideal(this);
 #define __ ideal.
@@ -2419,9 +2190,7 @@
       sync_kit(ideal);
 
       Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass()));
-      _sp += nargs;  // gen_instanceof might do an uncommon trap
       Node* is_instof = gen_instanceof(base_oop, ref_klass_con);
-      _sp -= nargs;
 
       // Update IdealKit memory and control from graphKit.
       __ sync_kit(this);
@@ -2505,7 +2274,7 @@
   {
     ResourceMark rm;
     // Check the signatures.
-    ciSignature* sig = signature();
+    ciSignature* sig = callee()->signature();
 #ifdef ASSERT
     if (!is_store) {
       // Object getObject(Object base, int/long offset), etc.
@@ -2543,42 +2312,19 @@
 
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
 
-  int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ];
-
-  // Argument words:  "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words
-  int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0);
-  assert(callee()->arg_size() == nargs, "must be");
-
-  debug_only(int saved_sp = _sp);
-  _sp += nargs;
-
-  Node* val;
-  debug_only(val = (Node*)(uintptr_t)-1);
-
-
-  if (is_store) {
-    // Get the value being stored.  (Pop it first; it was pushed last.)
-    switch (type) {
-    case T_DOUBLE:
-    case T_LONG:
-    case T_ADDRESS:
-      val = pop_pair();
-      break;
-    default:
-      val = pop();
-    }
-  }
+  Node* receiver = argument(0);  // type: oop
 
   // Build address expression.  See the code in inline_unsafe_prefetch.
-  Node *adr;
-  Node *heap_base_oop = top();
+  Node* adr;
+  Node* heap_base_oop = top();
   Node* offset = top();
+  Node* val;
 
   if (!is_native_ptr) {
+    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
+    Node* base = argument(1);  // type: oop
     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
-    offset = pop_pair();
-    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
-    Node* base   = pop();
+    offset = argument(2);  // type: long
     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
     // to be plain byte offsets, which are also the same as those accepted
     // by oopDesc::field_base.
@@ -2588,18 +2334,14 @@
     offset = ConvL2X(offset);
     adr = make_unsafe_address(base, offset);
     heap_base_oop = base;
+    val = is_store ? argument(4) : NULL;
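+    // Slot layout: receiver=0, base=1, offset=2..3 (long), value starts at 4.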
   } else {
-    Node* ptr = pop_pair();
-    // Adjust Java long to machine word:
-    ptr = ConvL2X(ptr);
+    Node* ptr = argument(1);  // type: long
+    ptr = ConvL2X(ptr);  // adjust Java long to machine word
     adr = make_unsafe_address(NULL, ptr);
+    val = is_store ? argument(3) : NULL;
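+    // Slot layout: receiver=0, ptr=1..2 (long), value starts at 3.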
   }
 
-  // Pop receiver last:  it was pushed first.
-  Node *receiver = pop();
-
-  assert(saved_sp == _sp, "must have correct argument count");
-
   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
 
   // First guess at the value type.
@@ -2633,13 +2375,7 @@
     }
   }
 
-  // Null check on self without removing any arguments.  The argument
-  // null check technically happens in the wrong place, which can lead to
-  // invalid stack traces when the primitive is inlined into a method
-  // which handles NullPointerExceptions.
-  _sp += nargs;
-  do_null_check(receiver, T_OBJECT);
-  _sp -= nargs;
+  receiver = null_check(receiver);
   if (stopped()) {
     return true;
   }
@@ -2671,34 +2407,36 @@
 
   if (!is_store) {
     Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
-    // load value and push onto stack
+    // load value
     switch (type) {
     case T_BOOLEAN:
     case T_CHAR:
     case T_BYTE:
     case T_SHORT:
     case T_INT:
+    case T_LONG:
     case T_FLOAT:
-      push(p);
+    case T_DOUBLE:
       break;
     case T_OBJECT:
       if (need_read_barrier) {
-        insert_pre_barrier(heap_base_oop, offset, p, nargs, !(is_volatile || need_mem_bar));
+        insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
       }
-      push(p);
       break;
     case T_ADDRESS:
       // Cast to an int type.
-      p = _gvn.transform( new (C) CastP2XNode(NULL,p) );
+      p = _gvn.transform(new (C) CastP2XNode(NULL, p));
       p = ConvX2L(p);
-      push_pair(p);
+      break;
+    default:
+      fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
       break;
-    case T_DOUBLE:
-    case T_LONG:
-      push_pair( p );
-      break;
-    default: ShouldNotReachHere();
     }
+    // The load node has the control of the preceding MemBarCPUOrder.  All
+    // following nodes will have the control of the MemBarCPUOrder inserted at
+    // the end of this method.  So, pushing the load onto the stack at a later
+    // point is fine.
+    set_result(p);
   } else {
     // place effect of store into memory
     switch (type) {
@@ -2762,7 +2500,7 @@
   {
     ResourceMark rm;
     // Check the signatures.
-    ciSignature* sig = signature();
+    ciSignature* sig = callee()->signature();
 #ifdef ASSERT
     // Object getObject(Object base, int/long offset), etc.
     BasicType rtype = sig->return_type()->basic_type();
@@ -2780,19 +2518,21 @@
 
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
 
-  // Argument words:  "this" if not static, plus (oop/offset) or (lo/hi) args
-  int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3);
-
-  debug_only(int saved_sp = _sp);
-  _sp += nargs;
+  const int idx = is_static ? 0 : 1;
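+  // idx is the first address-argument slot: 0 for the static entry points,
+  // 1 otherwise (slot 0 then holds the Unsafe receiver).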
+  if (!is_static) {
+    null_check_receiver();
+    if (stopped()) {
+      return true;
+    }
+  }
 
   // Build address expression.  See the code in inline_unsafe_access.
   Node *adr;
   if (!is_native_ptr) {
+    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
+    Node* base   = argument(idx + 0);  // type: oop
     // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset
-    Node* offset = pop_pair();
-    // The base is either a Java object or a value produced by Unsafe.staticFieldBase
-    Node* base   = pop();
+    Node* offset = argument(idx + 1);  // type: long
     // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
     // to be plain byte offsets, which are also the same as those accepted
     // by oopDesc::field_base.
@@ -2802,31 +2542,11 @@
     offset = ConvL2X(offset);
     adr = make_unsafe_address(base, offset);
   } else {
-    Node* ptr = pop_pair();
-    // Adjust Java long to machine word:
-    ptr = ConvL2X(ptr);
+    Node* ptr = argument(idx + 0);  // type: long
+    ptr = ConvL2X(ptr);  // adjust Java long to machine word
     adr = make_unsafe_address(NULL, ptr);
   }
 
-  if (is_static) {
-    assert(saved_sp == _sp, "must have correct argument count");
-  } else {
-    // Pop receiver last:  it was pushed first.
-    Node *receiver = pop();
-    assert(saved_sp == _sp, "must have correct argument count");
-
-    // Null check on self without removing any arguments.  The argument
-    // null check technically happens in the wrong place, which can lead to
-    // invalid stack traces when the primitive is inlined into a method
-    // which handles NullPointerExceptions.
-    _sp += nargs;
-    do_null_check(receiver, T_OBJECT);
-    _sp -= nargs;
-    if (stopped()) {
-      return true;
-    }
-  }
-
   // Generate the read or write prefetch
   Node *prefetch;
   if (is_store) {
@@ -2841,7 +2561,22 @@
 }
 
 //----------------------------inline_unsafe_load_store----------------------------
-
+// This method serves a couple of different customers (depending on LoadStoreKind):
+//
+// LS_cmpxchg:
+//   public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x);
+//   public final native boolean compareAndSwapInt(   Object o, long offset, int    expected, int    x);
+//   public final native boolean compareAndSwapLong(  Object o, long offset, long   expected, long   x);
+//
+// LS_xadd:
+//   public int  getAndAddInt( Object o, long offset, int  delta)
+//   public long getAndAddLong(Object o, long offset, long delta)
+//
+// LS_xchg:
+//   int    getAndSet(Object o, long offset, int    newValue)
+//   long   getAndSet(Object o, long offset, long   newValue)
+//   Object getAndSet(Object o, long offset, Object newValue)
+//
 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
   // This basic scheme here is the same as inline_unsafe_access, but
   // differs in enough details that combining them would make the code
@@ -2856,7 +2591,8 @@
   BasicType rtype;
   {
     ResourceMark rm;
-    ciSignature* sig = signature();
+    // Check the signatures.
+    ciSignature* sig = callee()->signature();
     rtype = sig->return_type()->basic_type();
     if (kind == LS_xadd || kind == LS_xchg) {
       // Check the signatures.
@@ -2881,28 +2617,31 @@
   }
 #endif //PRODUCT
 
-  // number of stack slots per value argument (1 or 2)
-  int type_words = type2size[type];
-
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
 
-  // Argument words:  "this" plus oop plus offset (plus oldvalue) plus newvalue/delta;
-  int nargs = 1 + 1 + 2  + ((kind == LS_cmpxchg) ? type_words : 0) + type_words;
-
-  // pop arguments: newval, offset, base, and receiver
-  debug_only(int saved_sp = _sp);
-  _sp += nargs;
-  Node* newval   = (type_words == 1) ? pop() : pop_pair();
-  Node* oldval   = (kind == LS_cmpxchg) ? ((type_words == 1) ? pop() : pop_pair()) : NULL;
-  Node *offset   = pop_pair();
-  Node *base     = pop();
-  Node *receiver = pop();
-  assert(saved_sp == _sp, "must have correct argument count");
-
-  //  Null check receiver.
-  _sp += nargs;
-  do_null_check(receiver, T_OBJECT);
-  _sp -= nargs;
+  // Get arguments:
+  Node* receiver = NULL;
+  Node* base     = NULL;
+  Node* offset   = NULL;
+  Node* oldval   = NULL;
+  Node* newval   = NULL;
+  if (kind == LS_cmpxchg) {
+    const bool two_slot_type = type2size[type] == 2;
+    receiver = argument(0);  // type: oop
+    base     = argument(1);  // type: oop
+    offset   = argument(2);  // type: long
+    oldval   = argument(4);  // type: oop, int, or long
+    newval   = argument(two_slot_type ? 6 : 5);  // type: oop, int, or long
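+    // Slot layout: receiver=0, base=1, offset=2..3 (long), oldval from 4; a
+    // two-slot oldval pushes newval out to slot 6, otherwise it sits at 5.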
+  } else if (kind == LS_xadd || kind == LS_xchg) {
+    receiver = argument(0);  // type: oop
+    base     = argument(1);  // type: oop
+    offset   = argument(2);  // type: long
+    oldval   = NULL;
+    newval   = argument(4);  // type: oop, int, or long
+  }
+
+  // Null check receiver.
+  receiver = null_check(receiver);
   if (stopped()) {
     return true;
   }
@@ -3008,7 +2747,7 @@
     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
     break;
   default:
-    ShouldNotReachHere();
+    fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
     break;
   }
 
@@ -3029,10 +2768,14 @@
 #endif
 
   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
-  push_node(load_store->bottom_type()->basic_type(), load_store);
+  set_result(load_store);
   return true;
 }
 
+//----------------------------inline_unsafe_ordered_store----------------------
+// public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x);
+// public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x);
+// public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x);
 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
   // This is another variant of inline_unsafe_access, differing in
   // that it always issues store-store ("release") barrier and ensures
@@ -3044,7 +2787,7 @@
   {
     ResourceMark rm;
     // Check the signatures.
-    ciSignature* sig = signature();
+    ciSignature* sig = callee()->signature();
 #ifdef ASSERT
     BasicType rtype = sig->return_type()->basic_type();
     assert(rtype == T_VOID, "must return void");
@@ -3055,27 +2798,16 @@
   }
 #endif //PRODUCT
 
-  // number of stack slots per value argument (1 or 2)
-  int type_words = type2size[type];
-
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
 
-  // Argument words:  "this" plus oop plus offset plus value;
-  int nargs = 1 + 1 + 2 + type_words;
-
-  // pop arguments: val, offset, base, and receiver
-  debug_only(int saved_sp = _sp);
-  _sp += nargs;
-  Node* val      = (type_words == 1) ? pop() : pop_pair();
-  Node *offset   = pop_pair();
-  Node *base     = pop();
-  Node *receiver = pop();
-  assert(saved_sp == _sp, "must have correct argument count");
-
-  //  Null check receiver.
-  _sp += nargs;
-  do_null_check(receiver, T_OBJECT);
-  _sp -= nargs;
+  // Get arguments:
+  Node* receiver = argument(0);  // type: oop
+  Node* base     = argument(1);  // type: oop
+  Node* offset   = argument(2);  // type: long
+  Node* val      = argument(4);  // type: oop, int, or long
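+  // The long offset spans slots 2 and 3, so the stored value starts at slot 4.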
+
+  // Null check receiver.
+  receiver = null_check(receiver);
   if (stopped()) {
     return true;
   }
@@ -3092,7 +2824,7 @@
   insert_mem_bar(Op_MemBarRelease);
   insert_mem_bar(Op_MemBarCPUOrder);
   // Ensure that the store is atomic for longs:
-  bool require_atomic_access = true;
+  const bool require_atomic_access = true;
   Node* store;
   if (type == T_OBJECT) // reference stores need a store barrier.
     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
@@ -3103,20 +2835,17 @@
   return true;
 }
 
+//----------------------------inline_unsafe_allocate---------------------------
+// public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls);
 bool LibraryCallKit::inline_unsafe_allocate() {
   if (callee()->is_static())  return false;  // caller must have the capability!
-  int nargs = 1 + 1;
-  assert(signature()->size() == nargs-1, "alloc has 1 argument");
-  null_check_receiver(callee());  // check then ignore argument(0)
-  _sp += nargs;  // set original stack for use by uncommon_trap
-  Node* cls = do_null_check(argument(1), T_OBJECT);
-  _sp -= nargs;
+
+  null_check_receiver();  // null-check, then ignore
+  Node* cls = null_check(argument(1));
   if (stopped())  return true;
 
-  Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
-  _sp += nargs;  // set original stack for use by uncommon_trap
-  kls = do_null_check(kls, T_OBJECT);
-  _sp -= nargs;
+  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
+  kls = null_check(kls);
   if (stopped())  return true;  // argument was like int.class
 
   // Note:  The argument might still be an illegal value like
@@ -3127,12 +2856,11 @@
   // can generate code to load it as unsigned byte.
   Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
   Node* bits = intcon(InstanceKlass::fully_initialized);
-  Node* test = _gvn.transform( new (C) SubINode(inst, bits) );
+  Node* test = _gvn.transform(new (C) SubINode(inst, bits));
   // The 'test' is non-zero if we need to take a slow path.
 
   Node* obj = new_instance(kls, test);
-  push(obj);
-
+  set_result(obj);
   return true;
 }
 
@@ -3143,15 +2871,10 @@
  * return myklass->trace_id & ~0x3
  */
 bool LibraryCallKit::inline_native_classID() {
-  int nargs = 1 + 1;
-  null_check_receiver(callee());  // check then ignore argument(0)
-  _sp += nargs;
-  Node* cls = do_null_check(argument(1), T_OBJECT);
-  _sp -= nargs;
-  Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0);
-  _sp += nargs;
-  kls = do_null_check(kls, T_OBJECT);
-  _sp -= nargs;
+  null_check_receiver();  // null-check, then ignore
+  Node* cls = null_check(argument(1), T_OBJECT);
+  Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
+  kls = null_check(kls, T_OBJECT);
   ByteSize offset = TRACE_ID_OFFSET;
   Node* insp = basic_plus_adr(kls, in_bytes(offset));
   Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
@@ -3162,7 +2885,7 @@
 
   const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
   store_to_memory(control(), insp, orl, T_LONG, adr_type);
-  push_pair(andl);
+  set_result(andl);
   return true;
 }
 
@@ -3177,13 +2900,12 @@
   size_t thread_id_size = OSThread::thread_id_size();
   if (thread_id_size == (size_t) BytesPerLong) {
     threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
-    push(threadid);
   } else if (thread_id_size == (size_t) BytesPerInt) {
     threadid = make_load(control(), p, TypeInt::INT, T_INT);
-    push(threadid);
   } else {
     ShouldNotReachHere();
   }
+  set_result(threadid);
   return true;
 }
 #endif
@@ -3192,29 +2914,28 @@
 // inline code for System.currentTimeMillis() and System.nanoTime()
 // these have the same type and signature
 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) {
-  const TypeFunc *tf = OptoRuntime::void_long_Type();
+  const TypeFunc* tf = OptoRuntime::void_long_Type();
   const TypePtr* no_memory_effects = NULL;
   Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects);
   Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0));
 #ifdef ASSERT
-  Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms + 1));
+  Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+1));
   assert(value_top == top(), "second value must be top");
 #endif
-  push_pair(value);
+  set_result(value);
   return true;
 }
 
 //------------------------inline_native_currentThread------------------
 bool LibraryCallKit::inline_native_currentThread() {
   Node* junk = NULL;
-  push(generate_current_thread(junk));
+  set_result(generate_current_thread(junk));
   return true;
 }
 
 //------------------------inline_native_isInterrupted------------------
+// private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted);
 bool LibraryCallKit::inline_native_isInterrupted() {
-  const int nargs = 1+1;  // receiver + boolean
-  assert(nargs == arg_size(), "sanity");
   // Add a fast path to t.isInterrupted(clear_int):
   //   (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int))
   //   ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int)
@@ -3312,9 +3033,8 @@
     set_i_o(        _gvn.transform(io_phi) );
   }
 
-  push_result(result_rgn, result_val);
   C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+  set_result(result_rgn, result_val);
   return true;
 }
 
@@ -3334,7 +3054,6 @@
 // If the region is NULL, force never_see_null = true.
 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror,
                                                     bool never_see_null,
-                                                    int nargs,
                                                     RegionNode* region,
                                                     int null_path,
                                                     int offset) {
@@ -3342,7 +3061,6 @@
   Node* p = basic_plus_adr(mirror, offset);
   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
   Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) );
-  _sp += nargs; // any deopt will start just before call to enclosing method
   Node* null_ctl = top();
   kls = null_check_oop(kls, &null_ctl, never_see_null);
   if (region != NULL) {
@@ -3351,7 +3069,6 @@
   } else {
     assert(null_ctl == top(), "no loose ends");
   }
-  _sp -= nargs;
   return kls;
 }
 
@@ -3376,7 +3093,6 @@
 
 //-------------------------inline_native_Class_query-------------------
 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
-  int nargs = 1+0;  // just the Class mirror, in most cases
   const Type* return_type = TypeInt::BOOL;
   Node* prim_return_value = top();  // what happens if it's a primitive class?
   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
@@ -3384,11 +3100,14 @@
 
   enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };
 
+  Node* mirror = argument(0);
+  Node* obj    = top();
+
   switch (id) {
   case vmIntrinsics::_isInstance:
-    nargs = 1+1;  // the Class mirror, plus the object getting queried about
     // nothing is an instance of a primitive type
     prim_return_value = intcon(0);
+    obj = argument(1);
     break;
   case vmIntrinsics::_getModifiers:
     prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);
@@ -3419,12 +3138,10 @@
     return_type = TypeInt::INT;  // not bool!  6297094
     break;
   default:
-    ShouldNotReachHere();
+    fatal_unexpected_iid(id);
+    break;
   }
 
-  Node* mirror =                      argument(0);
-  Node* obj    = (nargs <= 1)? top(): argument(1);
-
   const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr();
   if (mirror_con == NULL)  return false;  // cannot happen?
 
@@ -3451,9 +3168,7 @@
   // For Reflection.getClassAccessFlags(), the null check occurs in
   // the wrong place; see inline_unsafe_access(), above, for a similar
   // situation.
-  _sp += nargs;  // set original stack for use by uncommon_trap
-  mirror = do_null_check(mirror, T_OBJECT);
-  _sp -= nargs;
+  mirror = null_check(mirror);
   // If mirror or obj is dead, only null-path is taken.
   if (stopped())  return true;
 
@@ -3461,11 +3176,10 @@
 
   // Now load the mirror's klass metaobject, and null-check it.
   // Side-effects region with the control path if the klass is null.
-  Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs,
-                                     region, _prim_path);
+  Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path);
   // If kls is null, we have a primitive mirror.
   phi->init_req(_prim_path, prim_return_value);
-  if (stopped()) { push_result(region, phi); return true; }
+  if (stopped()) { set_result(region, phi); return true; }
 
   Node* p;  // handy temp
   Node* null_ctl;
@@ -3476,9 +3190,7 @@
   switch (id) {
   case vmIntrinsics::_isInstance:
     // nothing is an instance of a primitive type
-    _sp += nargs;          // gen_instanceof might do an uncommon trap
     query_value = gen_instanceof(obj, kls);
-    _sp -= nargs;
     break;
 
   case vmIntrinsics::_getModifiers:
@@ -3553,16 +3265,16 @@
     break;
 
   default:
-    ShouldNotReachHere();
+    fatal_unexpected_iid(id);
+    break;
   }
 
   // Fall-through is the normal case of a query to a real class.
   phi->init_req(1, query_value);
   region->init_req(1, control());
 
-  push_result(region, phi);
   C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+  set_result(region, phi);
   return true;
 }
 
@@ -3570,8 +3282,6 @@
 // This intrinsic takes the JNI calls out of the heart of
 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc.
 bool LibraryCallKit::inline_native_subtype_check() {
-  int nargs = 1+1;  // the Class mirror, plus the other class getting examined
-
   // Pull both arguments off the stack.
   Node* args[2];                // two java.lang.Class mirrors: superc, subc
   args[0] = argument(0);
@@ -3602,9 +3312,7 @@
   int which_arg;
   for (which_arg = 0; which_arg <= 1; which_arg++) {
     Node* arg = args[which_arg];
-    _sp += nargs;  // set original stack for use by uncommon_trap
-    arg = do_null_check(arg, T_OBJECT);
-    _sp -= nargs;
+    arg = null_check(arg);
     if (stopped())  break;
     args[which_arg] = _gvn.transform(arg);
 
@@ -3618,9 +3326,7 @@
   for (which_arg = 0; which_arg <= 1; which_arg++) {
     Node* kls = klasses[which_arg];
     Node* null_ctl = top();
-    _sp += nargs;  // set original stack for use by uncommon_trap
     kls = null_check_oop(kls, &null_ctl, never_see_null);
-    _sp -= nargs;
     int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path);
     region->init_req(prim_path, null_ctl);
     if (stopped())  break;
@@ -3670,8 +3376,7 @@
   }
 
   set_control(_gvn.transform(region));
-  push(_gvn.transform(phi));
-
+  set_result(_gvn.transform(phi));
   return true;
 }
 
@@ -3719,14 +3424,12 @@
 
 
 //-----------------------inline_native_newArray--------------------------
+// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
 bool LibraryCallKit::inline_native_newArray() {
-  int nargs = 2;
   Node* mirror    = argument(0);
   Node* count_val = argument(1);
 
-  _sp += nargs;  // set original stack for use by uncommon_trap
-  mirror = do_null_check(mirror, T_OBJECT);
-  _sp -= nargs;
+  mirror = null_check(mirror);
   // If mirror or obj is dead, only null-path is taken.
   if (stopped())  return true;
 
@@ -3740,7 +3443,6 @@
 
   bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
   Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null,
-                                                  nargs,
                                                   result_reg, _slow_path);
   Node* normal_ctl   = control();
   Node* no_array_ctl = result_reg->in(_slow_path);
@@ -3767,7 +3469,7 @@
     // Normal case:  The array type has been cached in the java.lang.Class.
     // The following call works fine even if the array type is polymorphic.
     // It could be a dynamic mix of int[], boolean[], Object[], etc.
-    Node* obj = new_array(klass_node, count_val, nargs);
+    Node* obj = new_array(klass_node, count_val, 0);  // no arguments to push
     result_reg->init_req(_normal_path, control());
     result_val->init_req(_normal_path, obj);
     result_io ->init_req(_normal_path, i_o());
@@ -3777,23 +3479,18 @@
   // Return the combined state.
   set_i_o(        _gvn.transform(result_io)  );
   set_all_memory( _gvn.transform(result_mem) );
-  push_result(result_reg, result_val);
+
   C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+  set_result(result_reg, result_val);
   return true;
 }
 
 //----------------------inline_native_getLength--------------------------
+// public static native int java.lang.reflect.Array.getLength(Object array);
 bool LibraryCallKit::inline_native_getLength() {
   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
 
-  int nargs = 1;
-  Node* array = argument(0);
-
-  _sp += nargs;  // set original stack for use by uncommon_trap
-  array = do_null_check(array, T_OBJECT);
-  _sp -= nargs;
-
+  Node* array = null_check(argument(0));
   // If array is dead, only null-path is taken.
   if (stopped())  return true;
 
@@ -3803,7 +3500,6 @@
   if (non_array != NULL) {
     PreserveJVMState pjvms(this);
     set_control(non_array);
-    _sp += nargs;  // push the arguments back on the stack
     uncommon_trap(Deoptimization::Reason_intrinsic,
                   Deoptimization::Action_maybe_recompile);
   }
@@ -3813,19 +3509,20 @@
 
   // This works fine even if the array type is polymorphic.
   // It could be a dynamic mix of int[], boolean[], Object[], etc.
-  push( load_array_length(array) );
-
-  C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+  Node* result = load_array_length(array);
+
+  C->set_has_split_ifs(true);  // Has chance for split-if optimization
+  set_result(result);
   return true;
 }
 
 //------------------------inline_array_copyOf----------------------------
+// public static <T,U> T[] java.util.Arrays.copyOf(     U[] original, int newLength,         Class<? extends T[]> newType);
+// public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from,      int to, Class<? extends T[]> newType);
 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
   if (too_many_traps(Deoptimization::Reason_intrinsic))  return false;
 
-  // Restore the stack and pop off the arguments.
-  int nargs = 3 + (is_copyOfRange? 1: 0);
+  // Get the arguments.
   Node* original          = argument(0);
   Node* start             = is_copyOfRange? argument(1): intcon(0);
   Node* end               = is_copyOfRange? argument(2): argument(1);
@@ -3833,23 +3531,21 @@
 
   Node* newcopy;
 
-  //set the original stack and the reexecute bit for the interpreter to reexecute
-  //the bytecode that invokes Arrays.copyOf if deoptimization happens
+  // Set the original stack and the reexecute bit for the interpreter to reexecute
+  // the bytecode that invokes Arrays.copyOf if deoptimization happens.
   { PreserveReexecuteState preexecs(this);
-    _sp += nargs;
     jvms()->set_should_reexecute(true);
 
-    array_type_mirror = do_null_check(array_type_mirror, T_OBJECT);
-    original          = do_null_check(original, T_OBJECT);
+    array_type_mirror = null_check(array_type_mirror);
+    original          = null_check(original);
 
     // Check if a null path was taken unconditionally.
     if (stopped())  return true;
 
     Node* orig_length = load_array_length(original);
 
-    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0,
-                                              NULL, 0);
-    klass_node = do_null_check(klass_node, T_OBJECT);
+    Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0);
+    klass_node = null_check(klass_node);
 
     RegionNode* bailout = new (C) RegionNode(1);
     record_for_igvn(bailout);
@@ -3872,7 +3568,7 @@
 
     Node* length = end;
     if (_gvn.type(start) != TypeInt::ZERO) {
-      length = _gvn.transform( new (C) SubINode(end, start) );
+      length = _gvn.transform(new (C) SubINode(end, start));
     }
 
     // Bail out if length is negative.
@@ -3883,19 +3579,18 @@
 
     if (bailout->req() > 1) {
       PreserveJVMState pjvms(this);
-      set_control( _gvn.transform(bailout) );
+      set_control(_gvn.transform(bailout));
       uncommon_trap(Deoptimization::Reason_intrinsic,
                     Deoptimization::Action_maybe_recompile);
     }
 
     if (!stopped()) {
-
       // How many elements will we copy from the original?
       // The answer is MinI(orig_length - start, length).
-      Node* orig_tail = _gvn.transform( new(C) SubINode(orig_length, start) );
+      Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start));
       Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
 
-      newcopy = new_array(klass_node, length, 0);
+      newcopy = new_array(klass_node, length, 0);  // no arguments to push
 
       // Generate a direct call to the right arraycopy function(s).
       // We know the copy is disjoint but we might not know if the
@@ -3910,14 +3605,12 @@
                          original, start, newcopy, intcon(0), moved,
                          disjoint_bases, length_never_negative);
     }
-  } //original reexecute and sp are set back here
-
-  if(!stopped()) {
-    push(newcopy);
-  }
+  } // original reexecute is set back here
 
   C->set_has_split_ifs(true); // Has chance for split-if optimization
-
+  if (!stopped()) {
+    set_result(newcopy);
+  }
   return true;
 }
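
The length arithmetic above is easy to sanity-check outside the compiler: the new array gets end - start elements, of which min(orig_length - start, length) are copied from the original, and the zero-initialized tail covers the rest. A self-contained model in plain C++ (no HotSpot types):

#include <algorithm>
#include <cassert>

int main() {
  // copyOfRange(original, 7, 12) on a 10-element array:
  int orig_length = 10, start = 7, end = 12;
  int length = end - start;                            // SubINode  -> 5
  int moved  = std::min(orig_length - start, length);  // MinI      -> 3
  assert(length == 5 && moved == 3);                   // slots 3..4 stay zero
  return 0;
}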
 
@@ -3969,7 +3662,7 @@
                            SharedRuntime::get_resolve_static_call_stub(),
                            method, bci());
   } else if (is_virtual) {
-    null_check_receiver(method);
+    null_check_receiver();
     int vtable_index = Method::invalid_vtable_index;
     if (UseInlineCaches) {
       // Suppress the vtable call
@@ -3983,7 +3676,7 @@
                           SharedRuntime::get_resolve_virtual_call_stub(),
                           method, vtable_index, bci());
   } else {  // neither virtual nor static:  opt_virtual
-    null_check_receiver(method);
+    null_check_receiver();
     slow_call = new(C) CallStaticJavaNode(tf,
                                 SharedRuntime::get_resolve_opt_virtual_call_stub(),
                                 method, bci());
@@ -4012,7 +3705,7 @@
   Node* obj = NULL;
   if (!is_static) {
     // Check for hashing null object
-    obj = null_check_receiver(callee());
+    obj = null_check_receiver();
     if (stopped())  return true;        // unconditionally null
     result_reg->init_req(_null_path, top());
     result_val->init_req(_null_path, top());
@@ -4028,9 +3721,9 @@
 
   // Unconditionally null?  Then return right away.
   if (stopped()) {
-    set_control( result_reg->in(_null_path) );
+    set_control(result_reg->in(_null_path));
     if (!stopped())
-      push(      result_val ->in(_null_path) );
+      set_result(result_val->in(_null_path));
     return true;
   }
 
@@ -4103,8 +3796,7 @@
   if (!stopped()) {
     // No need for PreserveJVMState, because we're using up the present state.
     set_all_memory(init_mem);
-    vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode;
-    if (is_static)   hashCode_id = vmIntrinsics::_identityHashCode;
+    vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode;
     CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static);
     Node* slow_result = set_results_for_java_call(slow_call);
     // this->control() comes from set_results_for_java_call
@@ -4117,48 +3809,38 @@
   // Return the combined state.
   set_i_o(        _gvn.transform(result_io)  );
   set_all_memory( _gvn.transform(result_mem) );
-  push_result(result_reg, result_val);
-
+
+  set_result(result_reg, result_val);
   return true;
 }
 
 //---------------------------inline_native_getClass----------------------------
+// public final native Class<?> java.lang.Object.getClass();
+//
 // Build special case code for calls to getClass on an object.
 bool LibraryCallKit::inline_native_getClass() {
-  Node* obj = null_check_receiver(callee());
+  Node* obj = null_check_receiver();
   if (stopped())  return true;
-  push( load_mirror_from_klass(load_object_klass(obj)) );
+  set_result(load_mirror_from_klass(load_object_klass(obj)));
   return true;
 }
 
 //-----------------inline_native_Reflection_getCallerClass---------------------
+// public static native Class<?> sun.reflect.Reflection.getCallerClass(int realFramesToSkip);
+//
 // In the presence of deep enough inlining, getCallerClass() becomes a no-op.
 //
 // NOTE that this code must perform the same logic as
 // vframeStream::security_get_caller_frame in that it must skip
 // Method.invoke() and auxiliary frames.
-
-
-
-
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
-  ciMethod*       method = callee();
-
 #ifndef PRODUCT
   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
 
-  debug_only(int saved_sp = _sp);
-
-  // Argument words:  (int depth)
-  int nargs = 1;
-
-  _sp += nargs;
-  Node* caller_depth_node = pop();
-
-  assert(saved_sp == _sp, "must have correct argument count");
+  Node* caller_depth_node = argument(0);
 
   // The depth value must be a constant in order for the runtime call
   // to be eliminated.
@@ -4230,7 +3912,8 @@
       tty->print_cr("  Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth);
       tty->print_cr("  JVM state at this point:");
       for (int i = _depth; i >= 1; i--) {
-        tty->print_cr("   %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
+        ciMethod* m = jvms()->of_depth(i)->method();
+        tty->print_cr("   %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
       }
     }
 #endif
@@ -4240,14 +3923,17 @@
   // Acquire method holder as java.lang.Class
   ciInstanceKlass* caller_klass  = caller_jvms->method()->holder();
   ciInstance*      caller_mirror = caller_klass->java_mirror();
+
   // Push this as a constant
-  push(makecon(TypeInstPtr::make(caller_mirror)));
+  set_result(makecon(TypeInstPtr::make(caller_mirror)));
+
 #ifndef PRODUCT
   if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
     tty->print_cr("  Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth);
     tty->print_cr("  JVM state at this point:");
     for (int i = _depth; i >= 1; i--) {
-      tty->print_cr("   %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8());
+      ciMethod* m = jvms()->of_depth(i)->method();
+      tty->print_cr("   %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8());
     }
   }
 #endif
@@ -4283,36 +3969,23 @@
 }
 
 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
-  // restore the arguments
-  _sp += arg_size();
+  Node* arg = argument(0);
+  Node* result;
 
   switch (id) {
-  case vmIntrinsics::_floatToRawIntBits:
-    push(_gvn.transform( new (C) MoveF2INode(pop())));
-    break;
-
-  case vmIntrinsics::_intBitsToFloat:
-    push(_gvn.transform( new (C) MoveI2FNode(pop())));
-    break;
-
-  case vmIntrinsics::_doubleToRawLongBits:
-    push_pair(_gvn.transform( new (C) MoveD2LNode(pop_pair())));
-    break;
-
-  case vmIntrinsics::_longBitsToDouble:
-    push_pair(_gvn.transform( new (C) MoveL2DNode(pop_pair())));
-    break;
+  case vmIntrinsics::_floatToRawIntBits:    result = new (C) MoveF2INode(arg);  break;
+  case vmIntrinsics::_intBitsToFloat:       result = new (C) MoveI2FNode(arg);  break;
+  case vmIntrinsics::_doubleToRawLongBits:  result = new (C) MoveD2LNode(arg);  break;
+  case vmIntrinsics::_longBitsToDouble:     result = new (C) MoveL2DNode(arg);  break;
 
   case vmIntrinsics::_doubleToLongBits: {
-    Node* value = pop_pair();
-
     // two paths (plus control) merge in a wood
     RegionNode *r = new (C) RegionNode(3);
     Node *phi = new (C) PhiNode(r, TypeLong::LONG);
 
-    Node *cmpisnan = _gvn.transform( new (C) CmpDNode(value, value));
+    Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg));
     // Build the boolean node
-    Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) );
+    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
 
     // Branch either way.
     // NaN case is less traveled, which makes all the difference.
@@ -4330,35 +4003,30 @@
     r->init_req(1, iftrue);
 
     // Else fall through
-    Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) );
+    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
     set_control(iffalse);
 
-    phi->init_req(2, _gvn.transform( new (C) MoveD2LNode(value)));
+    phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg)));
     r->init_req(2, iffalse);
 
     // Post merge
     set_control(_gvn.transform(r));
     record_for_igvn(r);
 
-    Node* result = _gvn.transform(phi);
+    C->set_has_split_ifs(true); // Has chance for split-if optimization
+    result = phi;
     assert(result->bottom_type()->isa_long(), "must be");
-    push_pair(result);
-
-    C->set_has_split_ifs(true); // Has chance for split-if optimization
-
     break;
   }
 
   case vmIntrinsics::_floatToIntBits: {
-    Node* value = pop();
-
     // two paths (plus control) merge in a wood
     RegionNode *r = new (C) RegionNode(3);
     Node *phi = new (C) PhiNode(r, TypeInt::INT);
 
-    Node *cmpisnan = _gvn.transform( new (C) CmpFNode(value, value));
+    Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg));
     // Build the boolean node
-    Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) );
+    Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne));
 
     // Branch either way.
     // NaN case is less traveled, which makes all the difference.
@@ -4376,29 +4044,27 @@
     r->init_req(1, iftrue);
 
     // Else fall through
-    Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) );
+    Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan));
     set_control(iffalse);
 
-    phi->init_req(2, _gvn.transform( new (C) MoveF2INode(value)));
+    phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg)));
     r->init_req(2, iffalse);
 
     // Post merge
     set_control(_gvn.transform(r));
     record_for_igvn(r);
 
-    Node* result = _gvn.transform(phi);
+    C->set_has_split_ifs(true); // Has chance for split-if optimization
+    result = phi;
     assert(result->bottom_type()->isa_int(), "must be");
-    push(result);
-
-    C->set_has_split_ifs(true); // Has chance for split-if optimization
-
     break;
   }
 
   default:
-    ShouldNotReachHere();
+    fatal_unexpected_iid(id);
+    break;
   }
-
+  set_result(_gvn.transform(result));
   return true;
 }
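
The RegionNode/PhiNode diamond in inline_fp_conversions exists because doubleToLongBits (unlike doubleToRawLongBits) must collapse every NaN to the canonical pattern 0x7ff8000000000000L, and CmpD(arg, arg) with BoolTest::ne is the usual self-comparison NaN probe. The Java-level semantics the IR implements, modeled as self-contained C++:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

static int64_t raw_bits(double d) {         // doubleToRawLongBits: bit move only
  int64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

static int64_t to_long_bits(double d) {     // doubleToLongBits
  if (d != d)                               // the CmpD(arg, arg) != branch
    return INT64_C(0x7ff8000000000000);     // canonical NaN
  return raw_bits(d);                       // the MoveD2L fall-through branch
}

int main() {
  assert(to_long_bits(std::nan("0x123")) == INT64_C(0x7ff8000000000000));
  assert(to_long_bits(1.0) == raw_bits(1.0));
  return 0;
}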
 
@@ -4409,23 +4075,19 @@
 #endif //_LP64
 
 //----------------------inline_unsafe_copyMemory-------------------------
+// public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes);
 bool LibraryCallKit::inline_unsafe_copyMemory() {
   if (callee()->is_static())  return false;  // caller must have the capability!
-  int nargs = 1 + 5 + 3;  // 5 args:  (src: ptr,off, dst: ptr,off, size)
-  assert(signature()->size() == nargs-1, "copy has 5 arguments");
-  null_check_receiver(callee());  // check then ignore argument(0)
+  null_check_receiver();  // null-check receiver
   if (stopped())  return true;
 
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
 
-  Node* src_ptr = argument(1);
-  Node* src_off = ConvL2X(argument(2));
-  assert(argument(3)->is_top(), "2nd half of long");
-  Node* dst_ptr = argument(4);
-  Node* dst_off = ConvL2X(argument(5));
-  assert(argument(6)->is_top(), "2nd half of long");
-  Node* size    = ConvL2X(argument(7));
-  assert(argument(8)->is_top(), "2nd half of long");
+  Node* src_ptr =         argument(1);   // type: oop
+  Node* src_off = ConvL2X(argument(2));  // type: long
+  Node* dst_ptr =         argument(4);   // type: oop
+  Node* dst_off = ConvL2X(argument(5));  // type: long
+  Node* size    = ConvL2X(argument(7));  // type: long
 
   assert(Unsafe_field_offset_to_byte_offset(11) == 11,
          "fieldOffset must be byte-scaled");
@@ -4545,6 +4207,8 @@
 }
 
 //------------------------inline_native_clone----------------------------
+// protected native Object java.lang.Object.clone();
+//
 // Here are the simple edge cases:
 //  null receiver => normal trap
 //  virtual and clone was overridden => slow path to out-of-line clone
@@ -4561,20 +4225,16 @@
 // can be sharply typed as an object array, a type array, or an instance.
 //
 bool LibraryCallKit::inline_native_clone(bool is_virtual) {
-  int nargs = 1;
   PhiNode* result_val;
 
-  //set the original stack and the reexecute bit for the interpreter to reexecute
-  //the bytecode that invokes Object.clone if deoptimization happens
+  // Set the reexecute bit for the interpreter to reexecute
+  // the bytecode that invokes Object.clone if deoptimization happens.
   { PreserveReexecuteState preexecs(this);
     jvms()->set_should_reexecute(true);
 
-    //null_check_receiver will adjust _sp (push and pop)
-    Node* obj = null_check_receiver(callee());
+    Node* obj = null_check_receiver();
     if (stopped())  return true;
 
-    _sp += nargs;
-
     Node* obj_klass = load_object_klass(obj);
     const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr();
     const TypeOopPtr*   toop   = ((tklass != NULL)
@@ -4611,7 +4271,7 @@
       set_control(array_ctl);
       Node* obj_length = load_array_length(obj);
       Node* obj_size  = NULL;
-      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
+      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);  // no arguments to push
 
       if (!use_ReduceInitialCardMarks()) {
         // If it is an oop array, it requires very special treatment,
@@ -4711,10 +4371,9 @@
     set_control(    _gvn.transform(result_reg) );
     set_i_o(        _gvn.transform(result_i_o) );
     set_all_memory( _gvn.transform(result_mem) );
-  } //original reexecute and sp are set back here
-
-  push(_gvn.transform(result_val));
-
+  } // original reexecute is set back here
+
+  set_result(_gvn.transform(result_val));
   return true;
 }
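
inline_native_clone, like inline_array_copyOf earlier, brackets its guarded work in a PreserveReexecuteState scope with jvms()->set_should_reexecute(true), so a deoptimization anywhere inside re-runs the invoking bytecode in the interpreter; the destructor restores the previous setting, which is what the "original reexecute is set back here" comments mark. A sketch of the RAII idiom; the real class lives in GraphKit (and also restores sp), so treat this body as an assumption:

class PreserveReexecuteState {             // illustrative sketch only
  GraphKit* _kit;
  bool      _saved;
 public:
  PreserveReexecuteState(GraphKit* kit)
    : _kit(kit), _saved(kit->jvms()->should_reexecute()) { }
  ~PreserveReexecuteState() {
    _kit->jvms()->set_should_reexecute(_saved);  // runs when the scope closes
  }
};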
 
@@ -4755,25 +4414,25 @@
 
 
 //------------------------------inline_arraycopy-----------------------
+// public static native void java.lang.System.arraycopy(Object src,  int  srcPos,
+//                                                      Object dest, int destPos,
+//                                                      int length);
 bool LibraryCallKit::inline_arraycopy() {
-  // Restore the stack and pop off the arguments.
-  int nargs = 5;  // 2 oops, 3 ints, no size_t or long
-  assert(callee()->signature()->size() == nargs, "copy has 5 arguments");
-
-  Node *src         = argument(0);
-  Node *src_offset  = argument(1);
-  Node *dest        = argument(2);
-  Node *dest_offset = argument(3);
-  Node *length      = argument(4);
+  // Get the arguments.
+  Node* src         = argument(0);  // type: oop
+  Node* src_offset  = argument(1);  // type: int
+  Node* dest        = argument(2);  // type: oop
+  Node* dest_offset = argument(3);  // type: int
+  Node* length      = argument(4);  // type: int
 
   // Compile time checks.  If any of these checks cannot be verified at compile time,
   // we do not make a fast path for this call.  Instead, we let the call remain as it
   // is.  The checks we choose to mandate at compile time are:
   //
   // (1) src and dest are arrays.
-  const Type* src_type = src->Value(&_gvn);
+  const Type* src_type  = src->Value(&_gvn);
   const Type* dest_type = dest->Value(&_gvn);
-  const TypeAryPtr* top_src = src_type->isa_aryptr();
+  const TypeAryPtr* top_src  = src_type->isa_aryptr();
   const TypeAryPtr* top_dest = dest_type->isa_aryptr();
   if (top_src  == NULL || top_src->klass()  == NULL ||
       top_dest == NULL || top_dest->klass() == NULL) {
@@ -4828,15 +4487,13 @@
   record_for_igvn(slow_region);
 
   // (3) operands must not be null
-  // We currently perform our null checks with the do_null_check routine.
+  // We currently perform our null checks with the null_check routine.
   // This means that the null exceptions will be reported in the caller
   // rather than (correctly) reported inside of the native arraycopy call.
   // This should be corrected, given time.  We do our null check with the
   // stack pointer restored.
-  _sp += nargs;
-  src  = do_null_check(src,  T_ARRAY);
-  dest = do_null_check(dest, T_ARRAY);
-  _sp -= nargs;
+  src  = null_check(src,  T_ARRAY);
+  dest = null_check(dest, T_ARRAY);
 
   // (4) src_offset must not be negative.
   generate_negative_guard(src_offset, slow_region);
@@ -5179,7 +4836,7 @@
   slow_control = top();
   if (slow_region != NULL)
     slow_control = _gvn.transform(slow_region);
-  debug_only(slow_region = (RegionNode*)badAddress);
+  DEBUG_ONLY(slow_region = (RegionNode*)badAddress);
 
   set_control(checked_control);
   if (!stopped()) {
@@ -5674,33 +5331,22 @@
 }
 
 //----------------------------inline_reference_get----------------------------
-
+// public T java.lang.ref.Reference.get();
 bool LibraryCallKit::inline_reference_get() {
-  const int nargs = 1; // self
-
-  guarantee(java_lang_ref_Reference::referent_offset > 0,
-            "should have already been set");
-
-  int referent_offset = java_lang_ref_Reference::referent_offset;
-
-  // Restore the stack and pop off the argument
-  _sp += nargs;
-  Node *reference_obj = pop();
-
-  // Null check on self without removing any arguments.
-  _sp += nargs;
-  reference_obj = do_null_check(reference_obj, T_OBJECT);
-  _sp -= nargs;;
-
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "should have already been set");
+
+  // Get the argument:
+  Node* reference_obj = null_check_receiver();
   if (stopped()) return true;
 
-  Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
+  Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset);
 
   ciInstanceKlass* klass = env()->Object_klass();
   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
-  Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT);
+  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT);
 
   // Use the pre-barrier to record the value in the referent field
   pre_barrier(false /* do_load */,
@@ -5713,7 +5359,7 @@
   // across safepoint since GC can change its value.
   insert_mem_bar(Op_MemBarCPUOrder);
 
-  push(result);
+  set_result(result);
   return true;
 }
 
@@ -5770,15 +5416,11 @@
   }
   if (stubAddr == NULL) return false;
 
-  // Restore the stack and pop off the arguments.
-  int nargs = 5;  // this + 2 oop/offset combos
-  assert(callee()->signature()->size() == nargs-1, "encryptBlock has 4 arguments");
-
-  Node *aescrypt_object  = argument(0);
-  Node *src         = argument(1);
-  Node *src_offset  = argument(2);
-  Node *dest        = argument(3);
-  Node *dest_offset = argument(4);
+  Node* aescrypt_object = argument(0);
+  Node* src             = argument(1);
+  Node* src_offset      = argument(2);
+  Node* dest            = argument(3);
+  Node* dest_offset     = argument(4);
 
   // (1) src and dest are arrays.
   const Type* src_type = src->Value(&_gvn);
@@ -5829,16 +5471,12 @@
   }
   if (stubAddr == NULL) return false;
 
-
-  // Restore the stack and pop off the arguments.
-  int nargs = 6;  // this + oop/offset + len + oop/offset
-  assert(callee()->signature()->size() == nargs-1, "wrong number of arguments");
-  Node *cipherBlockChaining_object  = argument(0);
-  Node *src         = argument(1);
-  Node *src_offset  = argument(2);
-  Node *len         = argument(3);
-  Node *dest        = argument(4);
-  Node *dest_offset = argument(5);
+  Node* cipherBlockChaining_object = argument(0);
+  Node* src                        = argument(1);
+  Node* src_offset                 = argument(2);
+  Node* len                        = argument(3);
+  Node* dest                       = argument(4);
+  Node* dest_offset                = argument(5);
 
   // (1) src and dest are arrays.
   const Type* src_type = src->Value(&_gvn);
@@ -5920,11 +5558,8 @@
 //
 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) {
   // First, check receiver for NULL since it is a virtual method.
-  int nargs = arg_size();
   Node* objCBC = argument(0);
-  _sp += nargs;
-  objCBC = do_null_check(objCBC, T_OBJECT);
-  _sp -= nargs;
+  objCBC = null_check(objCBC);
 
   if (stopped()) return NULL; // Always NULL
 
@@ -5948,9 +5583,7 @@
   }
   ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
 
-  _sp += nargs;          // gen_instanceof might do an uncommon trap
   Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt)));
-  _sp -= nargs;
   Node* cmp_instof  = _gvn.transform(new (C) CmpINode(instof, intcon(1)));
   Node* bool_instof  = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne));
 
@@ -5966,7 +5599,7 @@
   RegionNode* region = new(C) RegionNode(3);
   region->init_req(1, instof_false);
   Node* src = argument(1);
-  Node *dest = argument(4);
+  Node* dest = argument(4);
   Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest));
   Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq));
   Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN);
@@ -5974,7 +5607,4 @@
 
   record_for_igvn(region);
   return _gvn.transform(region);
-
 }
-
-
--- a/hotspot/src/share/vm/opto/locknode.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/locknode.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -165,7 +165,7 @@
   kill_dead_locals();
 
   // Null check; get casted pointer.
-  Node *obj = do_null_check(peek(), T_OBJECT);
+  Node* obj = null_check(peek());
   // Check for locking null object
   if (stopped()) return;
 
--- a/hotspot/src/share/vm/opto/parse1.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -1008,7 +1008,7 @@
   // If this is an inlined method, we may have to do a receiver null check.
   if (_caller->has_method() && is_normal_parse() && !method()->is_static()) {
     GraphKit kit(_caller);
-    kit.null_check_receiver(method());
+    kit.null_check_receiver_before_call(method());
     _caller = kit.transfer_exceptions_into_jvms();
     if (kit.stopped()) {
       _exits.add_exception_states_from(_caller);
@@ -1398,7 +1398,7 @@
 #ifdef ASSERT
     int pre_bc_sp = sp();
     int inputs, depth;
-    bool have_se = !stopped() && compute_stack_effects(inputs, depth, /*for_parse*/ true);
+    bool have_se = !stopped() && compute_stack_effects(inputs, depth);
     assert(!have_se || pre_bc_sp >= inputs, err_msg_res("have enough stack to execute this BC: pre_bc_sp=%d, inputs=%d", pre_bc_sp, inputs));
 #endif //ASSERT
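
Note the distinction drawn here: intrinsics call the bare null_check_receiver(), while the inline boundary uses null_check_receiver_before_call(method()), which must account for the callee's arguments still sitting on the caller's expression stack. A plausible shape inferred from this call site; the real helper is in GraphKit and this body is an assumption:

Node* null_check_receiver_before_call(ciMethod* callee) {
  assert(!callee->is_static(), "must be a virtual method");
  const int nargs = callee->arg_size();  // receiver plus parameters
  inc_sp(nargs);                         // traps must see the full stack
  Node* receiver = null_check(argument(0));
  dec_sp(nargs);
  return receiver;
}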
 
--- a/hotspot/src/share/vm/opto/parse2.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/parse2.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -48,7 +48,7 @@
   const Type* elem = Type::TOP;
   Node* adr = array_addressing(elem_type, 0, &elem);
   if (stopped())  return;     // guaranteed null or range check
-  _sp -= 2;                   // Pop array and index
+  dec_sp(2);                  // Pop array and index
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
   Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
   push(ld);
@@ -60,7 +60,7 @@
   Node* adr = array_addressing(elem_type, 1);
   if (stopped())  return;     // guaranteed null or range check
   Node* val = pop();
-  _sp -= 2;                   // Pop array and index
+  dec_sp(2);                  // Pop array and index
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
   store_to_memory(control(), adr, val, elem_type, adr_type);
 }
@@ -73,7 +73,7 @@
   Node *ary   = peek(1+vals);   // in case of exception
 
   // Null check the array base, with correct stack contents
-  ary = do_null_check(ary, T_ARRAY);
+  ary = null_check(ary, T_ARRAY);
   // Compile-time detect of null-exception?
   if (stopped())  return top();
 
@@ -681,7 +681,7 @@
 
 void Parse::do_irem() {
   // Must keep both values on the expression-stack during null-check
-  do_null_check(peek(), T_INT);
+  zero_check_int(peek());
   // Compile-time detect of null-exception?
   if (stopped())  return;
 
@@ -958,7 +958,7 @@
   DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
   assert(argument(0) != NULL, "must exist");
   assert(bc_depth == 1 || argument(1) != NULL, "two must exist");
-  _sp += bc_depth;
+  inc_sp(bc_depth);
   return bc_depth;
 }
 
@@ -1581,8 +1581,8 @@
     set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) );
     break;
 
-  case Bytecodes::_pop:  _sp -= 1;   break;
-  case Bytecodes::_pop2: _sp -= 2;   break;
+  case Bytecodes::_pop:  dec_sp(1);   break;
+  case Bytecodes::_pop2: dec_sp(2);   break;
   case Bytecodes::_swap:
     a = pop();
     b = pop();
@@ -1650,7 +1650,7 @@
 
   case Bytecodes::_arraylength: {
     // Must do null-check with value on expression stack
-    Node *ary = do_null_check(peek(), T_ARRAY);
+    Node *ary = null_check(peek(), T_ARRAY);
     // Compile-time detect of null-exception?
     if (stopped())  return;
     a = pop();
@@ -1667,15 +1667,15 @@
   case Bytecodes::_laload: {
     a = array_addressing(T_LONG, 0);
     if (stopped())  return;     // guaranteed null or range check
-    _sp -= 2;                   // Pop array and index
-    push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
+    dec_sp(2);                  // Pop array and index
+    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
     break;
   }
   case Bytecodes::_daload: {
     a = array_addressing(T_DOUBLE, 0);
     if (stopped())  return;     // guaranteed null or range check
-    _sp -= 2;                   // Pop array and index
-    push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
+    dec_sp(2);                  // Pop array and index
+    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
     break;
   }
   case Bytecodes::_bastore: array_store(T_BYTE);  break;
@@ -1699,7 +1699,7 @@
     a = array_addressing(T_LONG, 2);
     if (stopped())  return;     // guaranteed null or range check
     c = pop_pair();
-    _sp -= 2;                   // Pop array and index
+    dec_sp(2);                  // Pop array and index
     store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
     break;
   }
@@ -1707,7 +1707,7 @@
     a = array_addressing(T_DOUBLE, 2);
     if (stopped())  return;     // guaranteed null or range check
     c = pop_pair();
-    _sp -= 2;                   // Pop array and index
+    dec_sp(2);                  // Pop array and index
     c = dstore_rounding(c);
     store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
     break;
@@ -1733,7 +1733,7 @@
     break;
   case Bytecodes::_idiv:
     // Must keep both values on the expression-stack during null-check
-    do_null_check(peek(), T_INT);
+    zero_check_int(peek());
     // Compile-time detect of null-exception?
     if (stopped())  return;
     b = pop();
@@ -2041,7 +2041,7 @@
   case Bytecodes::_lrem:
     // Must keep both values on the expression-stack during null-check
     assert(peek(0) == top(), "long word order");
-    do_null_check(peek(1), T_LONG);
+    zero_check_long(peek(1));
     // Compile-time detect of null-exception?
     if (stopped())  return;
     b = pop_pair();
@@ -2053,7 +2053,7 @@
   case Bytecodes::_ldiv:
     // Must keep both values on the expression-stack during null-check
     assert(peek(0) == top(), "long word order");
-    do_null_check(peek(1), T_LONG);
+    zero_check_long(peek(1));
     // Compile-time detect of null-exception?
     if (stopped())  return;
     b = pop_pair();
@@ -2175,7 +2175,7 @@
 
   case Bytecodes::_athrow:
     // null exception oop throws NULL pointer exception
-    do_null_check(peek(), T_OBJECT);
+    null_check(peek());
     if (stopped())  return;
     // Hook the thrown exception directly to subsequent handlers.
     if (BailoutToInterpreterForThrows) {
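
The do_null_check(peek(), T_INT) calls replaced above were never null checks: for integral types the helper compares against zero to guard idiv/irem/ldiv/lrem, and the new zero_check_int/zero_check_long names finally say so. For longs the tested word is peek(1) because the value occupies two slots with its top half at peek(0). A sketch of the renamed entry points; the shared trap plumbing is in GraphKit and these bodies are assumptions:

Node* zero_check_int(Node* value) {    // guards idiv/irem
  assert(value->bottom_type()->basic_type() == T_INT, "wrong type");
  return null_check_common(value, T_INT);   // "null" means intcon(0) here
}

Node* zero_check_long(Node* value) {   // guards ldiv/lrem
  assert(value->bottom_type()->basic_type() == T_LONG, "wrong type");
  return null_check_common(value, T_LONG);
}
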
--- a/hotspot/src/share/vm/opto/parse3.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/parse3.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -116,7 +116,7 @@
   Node* obj;
   if (is_field) {
     int obj_depth = is_get ? 0 : field->type()->size();
-    obj = do_null_check(peek(obj_depth), T_OBJECT);
+    obj = null_check(peek(obj_depth));
     // Compile-time detect of null-exception?
     if (stopped())  return;
 
@@ -126,11 +126,11 @@
 #endif
 
     if (is_get) {
-      --_sp;  // pop receiver before getting
+      (void) pop();  // pop receiver before getting
       do_get_xxx(obj, field, is_field);
     } else {
       do_put_xxx(obj, field, is_field);
-      --_sp;  // pop receiver after putting
+      (void) pop();  // pop receiver after putting
     }
   } else {
     const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
@@ -230,7 +230,7 @@
     }
     // If there is going to be a trap, put it at the next bytecode:
     set_bci(iter().next_bci());
-    do_null_assert(peek(), T_OBJECT);
+    null_assert(peek());
     set_bci(iter().cur_bci()); // put it back
   }
 
@@ -463,7 +463,7 @@
     // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
     // when AllocateArray node for newarray is created.
     { PreserveReexecuteState preexecs(this);
-      _sp += ndimensions;
+      inc_sp(ndimensions);
       // Pass 0 as nargs since uncommon trap code does not need to restore stack.
       obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
     } //original reexecute and sp are set back here
@@ -492,7 +492,7 @@
     // Create a java array for dimension sizes
     Node* dims = NULL;
     { PreserveReexecuteState preexecs(this);
-      _sp += ndimensions;
+      inc_sp(ndimensions);
       Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT))));
       dims = new_array(dims_array_klass, intcon(ndimensions), 0);
 
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Mon Nov 26 17:25:11 2012 -0800
@@ -84,7 +84,7 @@
                        C->log()->identify(tp->klass()));
       }
     }
-    do_null_assert(obj, T_OBJECT);
+    null_assert(obj);
     assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
     if (!stopped()) {
       profile_null_checkcast();
@@ -116,7 +116,7 @@
       C->log()->elem("assert_null reason='instanceof' klass='%d'",
                      C->log()->identify(klass));
     }
-    do_null_assert(peek(), T_OBJECT);
+    null_assert(peek());
     assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
     if (!stopped()) {
       // The object is now known to be null.
@@ -139,10 +139,10 @@
 // pull array from stack and check that the store is valid
 void Parse::array_store_check() {
 
-  // Shorthand access to array store elements
-  Node *obj = stack(_sp-1);
-  Node *idx = stack(_sp-2);
-  Node *ary = stack(_sp-3);
+  // Shorthand access to array store elements without popping them.
+  Node *obj = peek(0);
+  Node *idx = peek(1);
+  Node *ary = peek(2);
 
   if (_gvn.type(obj) == TypePtr::NULL_PTR) {
     // There's never a type check on null values.
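
The rewrite above relies on the equivalence stack(_sp-1-k) == peek(k): peek counts down from the top of the expression stack without popping. A toy model of that indexing in self-contained C++:

#include <cassert>
#include <vector>

struct ToyStack {                      // models only the expression stack
  std::vector<int> slots;
  void push(int v)        { slots.push_back(v); }
  int  stack(int i) const { return slots[i]; }                       // absolute
  int  peek(int k)  const { return stack((int)slots.size() - 1 - k); }
};

int main() {
  ToyStack s;
  s.push(30 /*ary*/); s.push(20 /*idx*/); s.push(10 /*obj*/);
  assert(s.peek(0) == 10);  // obj == stack(_sp-1)
  assert(s.peek(1) == 20);  // idx == stack(_sp-2)
  assert(s.peek(2) == 30);  // ary == stack(_sp-3)
  return 0;
}
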
--- a/hotspot/src/share/vm/opto/type.hpp	Wed Nov 21 05:57:12 2012 -0800
+++ b/hotspot/src/share/vm/opto/type.hpp	Mon Nov 26 17:25:11 2012 -0800
@@ -242,8 +242,10 @@
   const TypeInt    *isa_int() const;             // Returns NULL if not an Int
   const TypeLong   *is_long() const;
   const TypeLong   *isa_long() const;            // Returns NULL if not a Long
+  const TypeD      *isa_double() const;          // Returns NULL if not a Double{Top,Con,Bot}
   const TypeD      *is_double_constant() const;  // Asserts it is a DoubleCon
   const TypeD      *isa_double_constant() const; // Returns NULL if not a DoubleCon
+  const TypeF      *isa_float() const;           // Returns NULL if not a Float{Top,Con,Bot}
   const TypeF      *is_float_constant() const;   // Asserts it is a FloatCon
   const TypeF      *isa_float_constant() const;  // Returns NULL if not a FloatCon
   const TypeTuple  *is_tuple() const;            // Collection of fields, NOT a pointer
@@ -1320,24 +1322,6 @@
   return ((TypeD*)this)->_d;
 }
 
-inline const TypeF *Type::is_float_constant() const {
-  assert( _base == FloatCon, "Not a Float" );
-  return (TypeF*)this;
-}
-
-inline const TypeF *Type::isa_float_constant() const {
-  return ( _base == FloatCon ? (TypeF*)this : NULL);
-}
-
-inline const TypeD *Type::is_double_constant() const {
-  assert( _base == DoubleCon, "Not a Double" );
-  return (TypeD*)this;
-}
-
-inline const TypeD *Type::isa_double_constant() const {
-  return ( _base == DoubleCon ? (TypeD*)this : NULL);
-}
-
 inline const TypeInt *Type::is_int() const {
   assert( _base == Int, "Not an Int" );
   return (TypeInt*)this;
@@ -1356,6 +1340,36 @@
   return ( _base == Long ? (TypeLong*)this : NULL);
 }
 
+inline const TypeF *Type::isa_float() const {
+  return ((_base == FloatTop ||
+           _base == FloatCon ||
+           _base == FloatBot) ? (TypeF*)this : NULL);
+}
+
+inline const TypeF *Type::is_float_constant() const {
+  assert( _base == FloatCon, "Not a Float" );
+  return (TypeF*)this;
+}
+
+inline const TypeF *Type::isa_float_constant() const {
+  return ( _base == FloatCon ? (TypeF*)this : NULL);
+}
+
+inline const TypeD *Type::isa_double() const {
+  return ((_base == DoubleTop ||
+           _base == DoubleCon ||
+           _base == DoubleBot) ? (TypeD*)this : NULL);
+}
+
+inline const TypeD *Type::is_double_constant() const {
+  assert( _base == DoubleCon, "Not a Double" );
+  return (TypeD*)this;
+}
+
+inline const TypeD *Type::isa_double_constant() const {
+  return ( _base == DoubleCon ? (TypeD*)this : NULL);
+}
+
 inline const TypeTuple *Type::is_tuple() const {
   assert( _base == Tuple, "Not a Tuple" );
   return (TypeTuple*)this;
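
The new isa_float()/isa_double() queries accept any point of the float/double sub-lattice (Top, Con, Bot), whereas the pre-existing *_constant variants match only constants. A usage sketch; the surrounding names (n, _gvn) are assumed caller context, not part of this changeset:

const Type* t = _gvn.type(n);          // n and _gvn: hypothetical context
if (t->isa_double() != NULL) {
  // any double-valued node: DoubleTop, DoubleCon or DoubleBot
  if (const TypeD* td = t->isa_double_constant()) {
    jdouble d = td->_d;                // reading _d is safe only on DoubleCon
  }
}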