hotspot/src/share/vm/oops/cpCacheOop.cpp
changeset 13391 30245956af37
parent 11956 c363dc1ac4b7
comparing 13309:50c604cb0d5f with 13391:30245956af37
    29 #include "memory/universe.inline.hpp"
    29 #include "memory/universe.inline.hpp"
    30 #include "oops/cpCacheOop.hpp"
    30 #include "oops/cpCacheOop.hpp"
    31 #include "oops/objArrayOop.hpp"
    31 #include "oops/objArrayOop.hpp"
    32 #include "oops/oop.inline.hpp"
    32 #include "oops/oop.inline.hpp"
    33 #include "prims/jvmtiRedefineClassesTrace.hpp"
    33 #include "prims/jvmtiRedefineClassesTrace.hpp"
       
    34 #include "prims/methodHandles.hpp"
    34 #include "runtime/handles.inline.hpp"
    35 #include "runtime/handles.inline.hpp"
    35 
    36 
    36 
    37 
    37 // Implementation of ConstantPoolCacheEntry
    38 // Implementation of ConstantPoolCacheEntry
    38 
    39 
    42   assert(constant_pool_index() == index, "");
    43   assert(constant_pool_index() == index, "");
    43 }
    44 }
    44 
    45 
    45 void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
    46 void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
    46   assert(0 <= main_index && main_index < 0x10000, "sanity check");
    47   assert(0 <= main_index && main_index < 0x10000, "sanity check");
    47   _indices = (main_index << 16);
    48   _indices = (main_index << main_cp_index_bits);
    48   assert(main_entry_index() == main_index, "");
    49   assert(main_entry_index() == main_index, "");
    49 }
    50 }
    50 
    51 
    51 int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
    52 int ConstantPoolCacheEntry::make_flags(TosState state,
    52                     bool is_vfinal, bool is_volatile,
    53                                        int option_bits,
    53                     bool is_method_interface, bool is_method) {
    54                                        int field_index_or_method_params) {
    54   int f = state;
    55   assert(state < number_of_states, "Invalid state in make_flags");
    55 
    56   int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
    56   assert( state < number_of_states, "Invalid state in as_flags");
       
    57 
       
    58   f <<= 1;
       
    59   if (is_final) f |= 1;
       
    60   f <<= 1;
       
    61   if (is_vfinal) f |= 1;
       
    62   f <<= 1;
       
    63   if (is_volatile) f |= 1;
       
    64   f <<= 1;
       
    65   if (is_method_interface) f |= 1;
       
    66   f <<= 1;
       
    67   if (is_method) f |= 1;
       
    68   f <<= ConstantPoolCacheEntry::hotSwapBit;
       
    69   // Preserve existing flag bit values
    57   // Preserve existing flag bit values
       
    58   // The low bits are a field offset, or else the method parameter size.
    70 #ifdef ASSERT
    59 #ifdef ASSERT
    71   int old_state = ((_flags >> tosBits) & 0x0F);
    60   TosState old_state = flag_state();
    72   assert(old_state == 0 || old_state == state,
    61   assert(old_state == (TosState)0 || old_state == state,
    73          "inconsistent cpCache flags state");
    62          "inconsistent cpCache flags state");
    74 #endif
    63 #endif
    75   return (_flags | f) ;
    64   return (_flags | f) ;
    76 }
    65 }
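
The new make_flags() builds the whole _flags word in one expression instead of the old shift-and-or chain: the TosState lands at tos_state_shift, the one-bit options (is_final, is_vfinal, is_volatile, and so on) arrive already shifted in option_bits, and the low bits carry either a field index or a method's parameter size. Below is a minimal standalone sketch of that packing; the shift and mask values are assumptions invented for the sketch, since the real constants are declared in cpCacheOop.hpp rather than in this file.

// Illustrative sketch only: models the packing performed by the new
// make_flags().  The shift/mask values are assumed; the real constants live
// in cpCacheOop.hpp.
#include <cassert>
#include <cstdio>

// abbreviated stand-in for HotSpot's TosState, for illustration only
enum TosState { btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos, number_of_states };

const int field_index_bits  = 16;                          // assumed width of the low field
const int field_index_mask  = (1 << field_index_bits) - 1;
const int is_volatile_shift = 21;                          // assumed option-bit positions
const int is_final_shift    = 22;
const int tos_state_shift   = 28;                          // assumed TosState position

static int make_flags(TosState state, int option_bits, int field_index_or_method_params) {
  assert(state < number_of_states && "Invalid state in make_flags");
  // One OR of pre-positioned fields, exactly the shape of the new code above.
  return ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
}

int main() {
  // A final, volatile int field at field index 7, roughly as set_field() would encode it:
  int flags = make_flags(itos,
                         (1 << is_final_shift) | (1 << is_volatile_shift),
                         7 & field_index_mask);
  printf("flags=0x%08x  field_index=%d  tos=%d\n",
         flags, flags & field_index_mask, (flags >> tos_state_shift) & 0x0F);
  return 0;
}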
    77 
    66 
    78 void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
    67 void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
       
    68   assert(!is_secondary_entry(), "must not overwrite main_entry_index");
    79 #ifdef ASSERT
    69 #ifdef ASSERT
    80   // Read once.
    70   // Read once.
    81   volatile Bytecodes::Code c = bytecode_1();
    71   volatile Bytecodes::Code c = bytecode_1();
    82   assert(c == 0 || c == code || code == 0, "update must be consistent");
    72   assert(c == 0 || c == code || code == 0, "update must be consistent");
    83 #endif
    73 #endif
    84   // Need to flush pending stores here before bytecode is written.
    74   // Need to flush pending stores here before bytecode is written.
    85   OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
    75   OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
    86 }
    76 }
    87 
    77 
    88 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
    78 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
       
    79   assert(!is_secondary_entry(), "must not overwrite main_entry_index");
    89 #ifdef ASSERT
    80 #ifdef ASSERT
    90   // Read once.
    81   // Read once.
    91   volatile Bytecodes::Code c = bytecode_2();
    82   volatile Bytecodes::Code c = bytecode_2();
    92   assert(c == 0 || c == code || code == 0, "update must be consistent");
    83   assert(c == 0 || c == code || code == 0, "update must be consistent");
    93 #endif
    84 #endif
    94   // Need to flush pending stores here before bytecode is written.
    85   // Need to flush pending stores here before bytecode is written.
    95   OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
    86   OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
    96 }
    87 }
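
Both setters publish their byte with OrderAccess::release_store_ptr, so every earlier store to the entry (f1, f2, flags) is visible before the resolved bytecode is; a non-zero bytecode is what readers use to decide that the entry is linked. The word they update is laid out as sketched below: the constant pool index (or, for a secondary entry, the main entry index) sits in the low 16 bits and the two bytecodes occupy the next two bytes. The old code hard-coded the shifts 16 and 24; the new code names them, and the constants in this sketch are assumed to match those hard-coded values.

// Illustrative layout of the _indices word that set_bytecode_1/2 update.
// The shift names mirror the new code; the values are assumptions matching
// the old hard-coded 16 and 24.
#include <cstdint>
#include <cstdio>

const int main_cp_index_bits = 16;
const int bytecode_1_shift   = main_cp_index_bits;      // bits 16..23
const int bytecode_2_shift   = main_cp_index_bits + 8;  // bits 24..31

int main() {
  uintptr_t indices = 42;  // constant pool index lives in the low bits
  // Two resolved bytecode values (e.g. 0xb6 invokevirtual, 0xb9 invokeinterface):
  indices |= (uintptr_t)0xb6 << bytecode_1_shift;
  indices |= (uintptr_t)0xb9 << bytecode_2_shift;
  printf("cp index=%u  bytecode_1=0x%02x  bytecode_2=0x%02x\n",
         (unsigned)(indices & ((1u << main_cp_index_bits) - 1)),
         (unsigned)((indices >> bytecode_1_shift) & 0xff),
         (unsigned)((indices >> bytecode_2_shift) & 0xff));
  return 0;
}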
    97 
    88 
    98 // Atomically sets f1 if it is still NULL, otherwise it keeps the
    89 // Sets f1, ordering with previous writes.
    99 // current value.
    90 void ConstantPoolCacheEntry::release_set_f1(oop f1) {
   100 void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
       
   101   // Use barriers as in oop_store
    91   // Use barriers as in oop_store
       
    92   assert(f1 != NULL, "");
   102   oop* f1_addr = (oop*) &_f1;
    93   oop* f1_addr = (oop*) &_f1;
   103   update_barrier_set_pre(f1_addr, f1);
    94   update_barrier_set_pre(f1_addr, f1);
   104   void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
    95   OrderAccess::release_store_ptr((intptr_t*)f1_addr, f1);
   105   bool success = (result == NULL);
    96   update_barrier_set((void*) f1_addr, f1);
   106   if (success) {
    97 }
   107     update_barrier_set((void*) f1_addr, f1);
    98 
   108   }
    99 // Sets flags, but only if the value was previously zero.
       
   100 bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
       
   101   intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
       
   102   return (result == 0);
   109 }
   103 }
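
The replacement protocol splits the old single compare-and-swap on f1 into two pieces: init_flags_atomic() lets exactly one writer claim the entry by compare-exchanging _flags from zero, and release_set_f1() publishes f1 last, with release ordering, once the other fields are written; readers test f1 for non-null before trusting anything else. The sketch below models that hand-off with C++11 atomics purely for illustration; the real code uses HotSpot's Atomic::cmpxchg_ptr and OrderAccess::release_store_ptr plus the GC barrier calls (update_barrier_set_pre / update_barrier_set), which this model omits.

// Standalone model of the claim-then-publish protocol, using standard
// atomics instead of HotSpot's Atomic/OrderAccess wrappers.
#include <atomic>
#include <cassert>
#include <cstdint>

struct EntryModel {
  std::atomic<intptr_t> flags;
  std::atomic<void*>    f1;
  intptr_t              f2;

  EntryModel() : flags(0), f1(NULL), f2(0) {}

  // models init_flags_atomic(): true only for the thread that wins the claim
  bool init_flags_atomic(intptr_t new_flags) {
    intptr_t expected = 0;
    return flags.compare_exchange_strong(expected, new_flags);
  }

  // models release_set_f1(): all prior stores (flags, f2) are ordered before
  // the store that makes f1 non-null
  void release_set_f1(void* value) {
    assert(value != NULL);
    f1.store(value, std::memory_order_release);
  }

  // reader side: a non-null f1, read with acquire, guarantees flags/f2 are visible
  bool is_resolved(void** out_f1) const {
    *out_f1 = f1.load(std::memory_order_acquire);
    return *out_f1 != NULL;
  }
};

int main() {
  static int adapter;                  // stand-in for the oop that f1 would hold
  EntryModel e;
  if (e.init_flags_atomic(0x1234)) {   // the winning writer fills in the rest
    e.f2 = (intptr_t)&adapter;
    e.release_set_f1(&adapter);
  }
  void* seen = NULL;
  assert(e.is_resolved(&seen) && seen == &adapter);
  return 0;
}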
   110 
   104 
   111 #ifdef ASSERT
   105 #ifdef ASSERT
   112 // It is possible to have two different dummy methodOops created
   106 // It is possible to have two different dummy methodOops created
   113 // when the resolve code for invoke interface executes concurrently
   107 // when the resolve code for invoke interface executes concurrently
   133                                        TosState field_type,
   127                                        TosState field_type,
   134                                        bool is_final,
   128                                        bool is_final,
   135                                        bool is_volatile) {
   129                                        bool is_volatile) {
   136   set_f1(field_holder()->java_mirror());
   130   set_f1(field_holder()->java_mirror());
   137   set_f2(field_offset);
   131   set_f2(field_offset);
   138   assert(field_index <= field_index_mask,
   132   assert((field_index & field_index_mask) == field_index,
   139          "field index does not fit in low flag bits");
   133          "field index does not fit in low flag bits");
   140   set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
   134   set_field_flags(field_type,
   141             (field_index & field_index_mask));
   135                   ((is_volatile ? 1 : 0) << is_volatile_shift) |
       
   136                   ((is_final    ? 1 : 0) << is_final_shift),
       
   137                   field_index);
   142   set_bytecode_1(get_code);
   138   set_bytecode_1(get_code);
   143   set_bytecode_2(put_code);
   139   set_bytecode_2(put_code);
   144   NOT_PRODUCT(verify(tty));
   140   NOT_PRODUCT(verify(tty));
   145 }
   141 }
   146 
   142 
   147 int  ConstantPoolCacheEntry::field_index() const {
   143 void ConstantPoolCacheEntry::set_parameter_size(int value) {
   148   return (_flags & field_index_mask);
   144   // This routine is called only in corner cases where the CPCE is not yet initialized.
       
   145   // See AbstractInterpreter::deopt_continue_after_entry.
       
   146   assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
       
   147          err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
       
   148   // Setting the parameter size by itself is only safe if the
       
   149   // current value of _flags is 0, otherwise another thread may have
       
   150   // updated it and we don't want to overwrite that value.  Don't
       
   151   // bother trying to update it once it's nonzero but always make
       
   152   // sure that the final parameter size agrees with what was passed.
       
   153   if (_flags == 0) {
       
   154     Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
       
   155   }
       
   156   guarantee(parameter_size() == value,
       
   157             err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
   149 }
   158 }
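
set_parameter_size() follows the rule spelled out in its comment: only try to install the size while _flags is still zero, never clobber a word another thread may already have published, and afterwards check that whatever is in the low bits agrees with the caller. A tiny standalone model of that idempotent-init-then-verify pattern, with an assumed mask value:

// Model of "initialize the low bits only if _flags is still zero, then verify".
// The mask value and names are assumptions for the sketch.
#include <atomic>
#include <cassert>
#include <cstdint>

const intptr_t parameter_size_mask = 0xFF;   // assumed width of the low field

struct FlagsModel {
  std::atomic<intptr_t> flags;
  FlagsModel() : flags(0) {}

  int parameter_size() const { return (int)(flags.load() & parameter_size_mask); }

  void set_parameter_size(int value) {
    if (flags.load() == 0) {
      intptr_t expected = 0;
      // Losing the race is harmless: we simply keep the word the winner wrote.
      flags.compare_exchange_strong(expected, value & parameter_size_mask);
    }
    // Whoever wrote the word, the sizes must agree (mirrors the guarantee()).
    assert(parameter_size() == value && "size must not change");
  }
};

int main() {
  FlagsModel f;
  f.set_parameter_size(3);   // first caller initializes
  f.set_parameter_size(3);   // later callers must agree with the stored size
  return 0;
}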
   150 
   159 
   151 void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
   160 void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
   152                                         methodHandle method,
   161                                         methodHandle method,
   153                                         int vtable_index) {
   162                                         int vtable_index) {
   154   assert(!is_secondary_entry(), "");
   163   assert(!is_secondary_entry(), "");
   155   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   164   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   156   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
   165   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
   157   bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);
       
   158 
   166 
   159   int byte_no = -1;
   167   int byte_no = -1;
   160   bool needs_vfinal_flag = false;
   168   bool change_to_virtual = false;
       
   169 
   161   switch (invoke_code) {
   170   switch (invoke_code) {
       
   171     case Bytecodes::_invokeinterface:
       
   172       // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
       
   173       // instruction somehow links to a non-interface method (in Object).
       
   174       // In that case, the method has no itable index and must be invoked as a virtual.
       
   175       // Set a flag to keep track of this corner case.
       
   176       change_to_virtual = true;
       
   177 
       
   178       // ...and fall through as if we were handling invokevirtual:
   162     case Bytecodes::_invokevirtual:
   179     case Bytecodes::_invokevirtual:
   163     case Bytecodes::_invokeinterface: {
   180       {
   164         if (method->can_be_statically_bound()) {
   181         if (method->can_be_statically_bound()) {
   165           set_f2((intptr_t)method());
   182           // set_f2_as_vfinal_method checks if is_vfinal flag is true.
   166           needs_vfinal_flag = true;
   183           set_method_flags(as_TosState(method->result_type()),
       
   184                            (                             1      << is_vfinal_shift) |
       
   185                            ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
       
   186                            ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
       
   187                            method()->size_of_parameters());
       
   188           set_f2_as_vfinal_method(method());
   167         } else {
   189         } else {
   168           assert(vtable_index >= 0, "valid index");
   190           assert(vtable_index >= 0, "valid index");
       
   191           assert(!method->is_final_method(), "sanity");
       
   192           set_method_flags(as_TosState(method->result_type()),
       
   193                            ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
       
   194                            method()->size_of_parameters());
   169           set_f2(vtable_index);
   195           set_f2(vtable_index);
   170         }
   196         }
   171         byte_no = 2;
   197         byte_no = 2;
   172         break;
   198         break;
   173     }
       
   174 
       
   175     case Bytecodes::_invokedynamic:  // similar to _invokevirtual
       
   176       if (TraceInvokeDynamic) {
       
   177         tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
       
   178                       (is_secondary_entry() ? " secondary" : ""),
       
   179                       (intptr_t)method(), vtable_index);
       
   180         method->print();
       
   181         this->print(tty, 0);
       
   182       }
   199       }
   183       assert(method->can_be_statically_bound(), "must be a MH invoker method");
       
   184       assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
       
   185       // SystemDictionary::find_method_handle_invoke only caches
       
   186       // methods which signature classes are on the boot classpath,
       
   187       // otherwise the newly created method is returned.  To avoid
       
   188       // races in that case we store the first one coming in into the
       
   189       // cp-cache atomically if it's still unset.
       
   190       set_f1_if_null_atomic(method());
       
   191       needs_vfinal_flag = false;  // _f2 is not an oop
       
   192       assert(!is_vfinal(), "f2 not an oop");
       
   193       byte_no = 1;  // coordinate this with bytecode_number & is_resolved
       
   194       break;
       
   195 
   200 
   196     case Bytecodes::_invokespecial:
   201     case Bytecodes::_invokespecial:
   197       // Preserve the value of the vfinal flag on invokevirtual bytecode
       
   198       // which may be shared with this constant pool cache entry.
       
   199       needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
       
   200       // fall through
       
   201     case Bytecodes::_invokestatic:
   202     case Bytecodes::_invokestatic:
       
   203       // Note:  Read and preserve the value of the is_vfinal flag on any
       
   204       // invokevirtual bytecode shared with this constant pool cache entry.
       
   205       // It is cheap and safe to consult is_vfinal() at all times.
       
   206       // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
       
   207       set_method_flags(as_TosState(method->result_type()),
       
   208                        ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
       
   209                        ((method->is_final_method() ? 1 : 0) << is_final_shift),
       
   210                        method()->size_of_parameters());
   202       set_f1(method());
   211       set_f1(method());
   203       byte_no = 1;
   212       byte_no = 1;
   204       break;
   213       break;
   205     default:
   214     default:
   206       ShouldNotReachHere();
   215       ShouldNotReachHere();
   207       break;
   216       break;
   208   }
   217   }
   209 
   218 
   210   set_flags(as_flags(as_TosState(method->result_type()),
       
   211                      method->is_final_method(),
       
   212                      needs_vfinal_flag,
       
   213                      false,
       
   214                      change_to_virtual,
       
   215                      true)|
       
   216             method()->size_of_parameters());
       
   217 
       
   218   // Note:  byte_no also appears in TemplateTable::resolve.
   219   // Note:  byte_no also appears in TemplateTable::resolve.
   219   if (byte_no == 1) {
   220   if (byte_no == 1) {
       
   221     assert(invoke_code != Bytecodes::_invokevirtual &&
       
   222            invoke_code != Bytecodes::_invokeinterface, "");
   220     set_bytecode_1(invoke_code);
   223     set_bytecode_1(invoke_code);
   221   } else if (byte_no == 2)  {
   224   } else if (byte_no == 2)  {
   222     if (change_to_virtual) {
   225     if (change_to_virtual) {
       
   226       assert(invoke_code == Bytecodes::_invokeinterface, "");
   223       // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
   227       // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
   224       //
   228       //
   225       // Workaround for the case where we encounter an invokeinterface, but we
   229       // Workaround for the case where we encounter an invokeinterface, but we
   226       // should really have an _invokevirtual since the resolved method is a
   230       // should really have an _invokevirtual since the resolved method is a
   227       // virtual method in java.lang.Object. This is a corner case in the spec
   231       // virtual method in java.lang.Object. This is a corner case in the spec
   233       // See also interpreterRuntime.cpp. (8/25/2000)
   237       // See also interpreterRuntime.cpp. (8/25/2000)
   234       // Only set resolved for the invokeinterface case if method is public.
   238       // Only set resolved for the invokeinterface case if method is public.
   235       // Otherwise, the method needs to be reresolved with caller for each
   239       // Otherwise, the method needs to be reresolved with caller for each
   236       // interface call.
   240       // interface call.
   237       if (method->is_public()) set_bytecode_1(invoke_code);
   241       if (method->is_public()) set_bytecode_1(invoke_code);
   238       set_bytecode_2(Bytecodes::_invokevirtual);
       
   239     } else {
   242     } else {
   240       set_bytecode_2(invoke_code);
   243       assert(invoke_code == Bytecodes::_invokevirtual, "");
   241     }
   244     }
       
   245     // set up for invokevirtual, even if linking for invokeinterface also:
       
   246     set_bytecode_2(Bytecodes::_invokevirtual);
   242   } else {
   247   } else {
   243     ShouldNotReachHere();
   248     ShouldNotReachHere();
   244   }
   249   }
   245   NOT_PRODUCT(verify(tty));
   250   NOT_PRODUCT(verify(tty));
   246 }
   251 }
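
After set_method() runs, an invokevirtual entry has one of two shapes: when the target can be statically bound, the is_vfinal bit is set and f2 holds the methodOop itself; otherwise f2 holds a vtable index and the receiver's class picks the target at call time. The same entry also covers the corner case discussed above, where an invokeinterface instruction resolves to a virtual method declared in java.lang.Object and is relinked as invokevirtual with the is_forced_virtual bit recorded. A small standalone sketch of the consumer-side distinction, using made-up stand-in types rather than HotSpot's accessors:

// Illustration only: how a consumer of this entry distinguishes the two
// invokevirtual encodings that set_method() leaves behind.
#include <cstdint>
#include <cstdio>

struct MethodStandIn { const char* name; };

struct EntrySketch {
  bool     vfinal;  // corresponds to the is_vfinal flag bit
  intptr_t f2;      // a method pointer when vfinal, otherwise a vtable index

  void dispatch() const {
    if (vfinal) {
      // Statically bound: f2 already names the one possible target.
      printf("invoke %s directly\n", ((const MethodStandIn*)f2)->name);
    } else {
      // Truly virtual: f2 indexes the receiver's vtable at run time.
      printf("invoke receiver->vtable[%d]\n", (int)f2);
    }
  }
};

int main() {
  MethodStandIn bound_target = { "someFinalMethod" };
  EntrySketch statically_bound = { true,  (intptr_t)&bound_target };
  EntrySketch via_vtable       = { false, 5 };
  statically_bound.dispatch();
  via_vtable.dispatch();
  return 0;
}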
   248 
   253 
   249 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
   254 void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
   250   assert(!is_secondary_entry(), "");
   255   assert(!is_secondary_entry(), "");
   251   klassOop interf = method->method_holder();
   256   klassOop interf = method->method_holder();
   252   assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
   257   assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
       
   258   assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
   253   set_f1(interf);
   259   set_f1(interf);
   254   set_f2(index);
   260   set_f2(index);
   255   set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
   261   set_method_flags(as_TosState(method->result_type()),
       
   262                    0,  // no option bits
       
   263                    method()->size_of_parameters());
   256   set_bytecode_1(Bytecodes::_invokeinterface);
   264   set_bytecode_1(Bytecodes::_invokeinterface);
   257 }
   265 }
   258 
   266 
   259 
   267 
   260 void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
   268 void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix) {
   261   assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
   269   assert(!is_secondary_entry(), "");
   262   assert(_f2 == 0, "initialize once");
   270   set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix);
   263   assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
   271 }
   264   set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
   272 
   265 }
   273 void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix) {
   266 
       
   267 int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
       
   268   assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
       
   269   intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
       
   270   assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
       
   271   return (int) bsm_cache_index;
       
   272 }
       
   273 
       
   274 void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
       
   275   assert(is_secondary_entry(), "");
   274   assert(is_secondary_entry(), "");
   276   // NOTE: it's important that all other values are set before f1 is
   275   set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix);
   277   // set since some users short circuit on f1 being set
   276 }
   278   // (i.e. non-null) and that may result in uninitialized values for
   277 
   279   // other racing threads (e.g. flags).
   278 void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code, methodHandle adapter, Handle appendix) {
   280   int param_size = signature_invoker->size_of_parameters();
   279   // NOTE: This CPCE can be the subject of data races.
   281   assert(param_size >= 1, "method argument size must include MH.this");
   280   // There are three words to update: flags, f2, f1 (in that order).
   282   param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
   281   // Writers must store all other values before f1.
   283   bool is_final = true;
   282   // Readers must test f1 first for non-null before reading other fields.
   284   assert(signature_invoker->is_final_method(), "is_final");
   283   // Competing writers must acquire exclusive access on the first
   285   int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
   284   // write, to flags, using a compare/exchange.
   286   assert(_flags == 0 || _flags == flags, "flags should be the same");
   285   // A losing writer must spin until the winner writes f1,
   287   set_flags(flags);
   286   // so that when he returns, he can use the linked cache entry.
   288   // do not do set_bytecode on a secondary CP cache entry
   287 
   289   //set_bytecode_1(Bytecodes::_invokedynamic);
   288   bool has_appendix = appendix.not_null();
   290   set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
   289   if (!has_appendix) {
   291 }
   290     // The extra argument is not used, but we need a non-null value to signify linkage state.
   292 
   291     // Set it to something benign that will never leak memory.
   293 
   292     appendix = Universe::void_mirror();
   294 methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) {
   293   }
   295   assert(invoke_code > (Bytecodes::Code)0, "bad query");
   294 
       
   295   bool owner =
       
   296     init_method_flags_atomic(as_TosState(adapter->result_type()),
       
   297                    ((has_appendix ?  1 : 0) << has_appendix_shift) |
       
   298                    (                 1      << is_vfinal_shift)    |
       
   299                    (                 1      << is_final_shift),
       
   300                    adapter->size_of_parameters());
       
   301   if (!owner) {
       
   302     while (is_f1_null()) {
       
   303       // Pause momentarily on a low-level lock, to allow racing thread to win.
       
   304       MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag);
       
   305       os::yield();
       
   306     }
       
   307     return;
       
   308   }
       
   309 
       
   310   if (TraceInvokeDynamic) {
       
   311     tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ",
       
   312                   invoke_code,
       
   313                   (intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
       
   314                   (intptr_t)adapter());
       
   315     adapter->print();
       
   316     if (has_appendix)  appendix()->print();
       
   317   }
       
   318 
       
   319   // Method handle invokes and invokedynamic sites use both cp cache words.
       
   320   // f1, if not null, contains a value passed as a trailing argument to the adapter.
       
   321   // In the general case, this could be the call site's MethodType,
       
   322   // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
       
   323   // f2 contains the adapter method which manages the actual call.
       
   324   // In the general case, this is a compiled LambdaForm.
       
   325   // (The Java code is free to optimize these calls by binding other
       
   326   // sorts of methods and appendices to call sites.)
       
   327   // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased.
       
   328   // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
       
   329   // In principle this means that the method (with appendix) could take up to 256 parameter slots.
       
   330   //
       
   331   // This means that given a call site like (List)mh.invoke("foo"),
       
   332   // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
       
   333   // not '(Ljava/lang/String;)Ljava/util/List;'.
       
   334   // The fact that String and List are involved is encoded in the MethodType in f1.
       
   335   // This allows us to create fewer method oops, while keeping type safety.
       
   336   //
       
   337   set_f2_as_vfinal_method(adapter());
       
   338   assert(appendix.not_null(), "needed for linkage state");
       
   339   release_set_f1(appendix());  // This must be the last one to set (see NOTE above)!
       
   340   if (!is_secondary_entry()) {
       
   341     // The interpreter assembly code does not check byte_2,
       
   342     // but it is used by is_resolved, method_if_resolved, etc.
       
   343     set_bytecode_2(invoke_code);
       
   344   }
       
   345   NOT_PRODUCT(verify(tty));
       
   346   if (TraceInvokeDynamic) {
       
   347     this->print(tty, 0);
       
   348   }
       
   349 }
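
The long comment above is the heart of the new invokehandle/invokedynamic linkage: f2 holds the adapter method that is actually invoked (vfinal-style, with an erased signature), while f1 holds the appendix (a MethodType or CallSite) that, when the has_appendix bit is set, is passed as one extra trailing argument and counted in parameter_size. The sketch below only acts out that calling shape with stand-in types; none of it is HotSpot API.

// Illustration of the call shape for a linked invokehandle / invokedynamic
// site: invoke the adapter recorded in f2 and append the oop from f1 as a
// trailing argument.  All types and names here are stand-ins.
#include <cstdio>
#include <vector>

struct OopStandIn { const char* what; };

struct LinkedSiteSketch {
  OopStandIn* appendix;   // would live in f1 (e.g. a MethodType or CallSite)
  const char* adapter;    // would live in f2 (the adapter method, invoked vfinal-style)
  int parameter_size;     // counts the appendix slot when has_appendix is set
};

static void invoke(const LinkedSiteSketch& site, std::vector<OopStandIn*> args) {
  if (site.appendix != NULL) {
    args.push_back(site.appendix);   // the appendix rides along as the last argument
  }
  printf("invoking %s with %d argument slots (parameter_size=%d)\n",
         site.adapter, (int)args.size(), site.parameter_size);
}

int main() {
  // For a source-level call like (List) mh.invoke("foo"), the adapter sees the
  // erased signature (Object, MethodType)Object rather than (String)List.
  OopStandIn receiver    = { "MethodHandle mh" };
  OopStandIn argument    = { "\"foo\"" };
  OopStandIn method_type = { "MethodType of (String)List" };
  LinkedSiteSketch site = { &method_type, "LambdaForm adapter method", 3 };
  std::vector<OopStandIn*> args;
  args.push_back(&receiver);
  args.push_back(&argument);
  invoke(site, args);
  return 0;
}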
       
   350 
       
   351 methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
   296   if (is_secondary_entry()) {
   352   if (is_secondary_entry()) {
   297     return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool);
   353     if (!is_f1_null())
       
   354       return f2_as_vfinal_method();
       
   355     return NULL;
   298   }
   356   }
   299   // Decode the action of set_method and set_interface_call
   357   // Decode the action of set_method and set_interface_call
   300   if (bytecode_1() == invoke_code) {
   358   Bytecodes::Code invoke_code = bytecode_1();
       
   359   if (invoke_code != (Bytecodes::Code)0) {
   301     oop f1 = _f1;
   360     oop f1 = _f1;
   302     if (f1 != NULL) {
   361     if (f1 != NULL) {
   303       switch (invoke_code) {
   362       switch (invoke_code) {
   304       case Bytecodes::_invokeinterface:
   363       case Bytecodes::_invokeinterface:
   305         assert(f1->is_klass(), "");
   364         assert(f1->is_klass(), "");
   306         return klassItable::method_for_itable_index(klassOop(f1), (int) f2());
   365         return klassItable::method_for_itable_index(klassOop(f1), f2_as_index());
   307       case Bytecodes::_invokestatic:
   366       case Bytecodes::_invokestatic:
   308       case Bytecodes::_invokespecial:
   367       case Bytecodes::_invokespecial:
       
   368         assert(!has_appendix(), "");
   309         assert(f1->is_method(), "");
   369         assert(f1->is_method(), "");
   310         return methodOop(f1);
   370         return methodOop(f1);
   311       }
   371       }
   312     }
   372     }
   313   }
   373   }
   314   if (bytecode_2() == invoke_code) {
   374   invoke_code = bytecode_2();
       
   375   if (invoke_code != (Bytecodes::Code)0) {
   315     switch (invoke_code) {
   376     switch (invoke_code) {
   316     case Bytecodes::_invokevirtual:
   377     case Bytecodes::_invokevirtual:
   317       if (is_vfinal()) {
   378       if (is_vfinal()) {
   318         // invokevirtual
   379         // invokevirtual
   319         methodOop m = methodOop((intptr_t) f2());
   380         methodOop m = f2_as_vfinal_method();
   320         assert(m->is_method(), "");
   381         assert(m->is_method(), "");
   321         return m;
   382         return m;
   322       } else {
   383       } else {
   323         int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
   384         int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
   324         if (cpool->tag_at(holder_index).is_klass()) {
   385         if (cpool->tag_at(holder_index).is_klass()) {
   325           klassOop klass = cpool->resolved_klass_at(holder_index);
   386           klassOop klass = cpool->resolved_klass_at(holder_index);
   326           if (!Klass::cast(klass)->oop_is_instance())
   387           if (!Klass::cast(klass)->oop_is_instance())
   327             klass = SystemDictionary::Object_klass();
   388             klass = SystemDictionary::Object_klass();
   328           return instanceKlass::cast(klass)->method_at_vtable((int) f2());
   389           return instanceKlass::cast(klass)->method_at_vtable(f2_as_index());
   329         }
   390         }
   330       }
   391       }
       
   392       break;
       
   393     case Bytecodes::_invokehandle:
       
   394     case Bytecodes::_invokedynamic:
       
   395       return f2_as_vfinal_method();
   331     }
   396     }
   332   }
   397   }
   333   return NULL;
   398   return NULL;
   334 }
   399 }
   335 
       
   336 
   400 
   337 
   401 
   338 class LocalOopClosure: public OopClosure {
   402 class LocalOopClosure: public OopClosure {
   339  private:
   403  private:
   340   void (*_f)(oop*);
   404   void (*_f)(oop*);
   417 // to refer to new_method.
   481 // to refer to new_method.
   418 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
   482 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
   419        methodOop new_method, bool * trace_name_printed) {
   483        methodOop new_method, bool * trace_name_printed) {
   420 
   484 
   421   if (is_vfinal()) {
   485   if (is_vfinal()) {
   422     // virtual and final so f2() contains method ptr instead of vtable index
   486     // virtual and final so _f2 contains method ptr instead of vtable index
   423     if (f2() == (intptr_t)old_method) {
   487     if (f2_as_vfinal_method() == old_method) {
   424       // match old_method so need an update
   488       // match old_method so need an update
       
   489       // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
   425       _f2 = (intptr_t)new_method;
   490       _f2 = (intptr_t)new_method;
   426       if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
   491       if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
   427         if (!(*trace_name_printed)) {
   492         if (!(*trace_name_printed)) {
   428           // RC_TRACE_MESG macro has an embedded ResourceMark
   493           // RC_TRACE_MESG macro has an embedded ResourceMark
   429           RC_TRACE_MESG(("adjust: name=%s",
   494           RC_TRACE_MESG(("adjust: name=%s",
   477   }
   542   }
   478 
   543 
   479   methodOop m = NULL;
   544   methodOop m = NULL;
   480   if (is_vfinal()) {
   545   if (is_vfinal()) {
   481     // virtual and final so _f2 contains method ptr instead of vtable index
   546     // virtual and final so _f2 contains method ptr instead of vtable index
   482     m = (methodOop)_f2;
   547     m = f2_as_vfinal_method();
   483   } else if ((oop)_f1 == NULL) {
   548   } else if (is_f1_null()) {
   484     // NULL _f1 means this is a virtual entry so also not interesting
   549     // NULL _f1 means this is a virtual entry so also not interesting
   485     return false;
   550     return false;
   486   } else {
   551   } else {
   487     if (!((oop)_f1)->is_method()) {
   552     oop f1 = _f1;  // _f1 is volatile
       
   553     if (!f1->is_method()) {
   488       // _f1 can also contain a klassOop for an interface
   554       // _f1 can also contain a klassOop for an interface
   489       return false;
   555       return false;
   490     }
   556     }
   491     m = (methodOop)_f1;
   557     m = f1_as_method();
   492   }
   558   }
   493 
   559 
   494   assert(m != NULL && m->is_method(), "sanity check");
   560   assert(m != NULL && m->is_method(), "sanity check");
   495   if (m == NULL || !m->is_method() || m->method_holder() != k) {
   561   if (m == NULL || !m->is_method() || m->method_holder() != k) {
   496     // robustness for above sanity checks or method is not in
   562     // robustness for above sanity checks or method is not in