hotspot/src/share/vm/oops/cpCache.cpp
changeset 13728 882756847a04
parent 13391 30245956af37
child 13740 ae6179641137
/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCache.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#ifndef SERIALGC
# include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#endif
       
// Implementation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}

int ConstantPoolCacheEntry::make_flags(TosState state,
                                       int option_bits,
                                       int field_index_or_method_params) {
  assert(state < number_of_states, "Invalid state in make_flags");
  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
  // Preserve existing flag bit values
  // The low bits are a field index, or else the method parameter size.
#ifdef ASSERT
  TosState old_state = flag_state();
  assert(old_state == (TosState)0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f);
}

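// Layout note (a summary read off the accessors used here; the exact shift and
// mask constants live in cpCache.hpp): _indices holds the original constant
// pool index in its low 16 bits, with the two resolved-bytecode bytes packed
// above it at bytecode_1_shift and bytecode_2_shift.  _flags holds the
// TosState at tos_state_shift plus per-entry option bits, while its low bits
// carry either the field index (field entries) or the parameter size
// (method entries).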
       
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}

// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
  assert(f1 != NULL, "");
  OrderAccess::release_store_ptr((HeapWord*) &_f1, f1);
}

// Sets flags, but only if the value was previously zero.
bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
  intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
  return (result == 0);
}
       
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile,
                                       Klass* root_klass) {
  set_f1(field_holder());
  set_f2(field_offset);
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

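// Note that a zero get_code or put_code is permitted by the asserts in
// set_bytecode_1/2 above: leaving one of the two bytecodes unresolved simply
// means the interpreter will re-enter the runtime the next time that kind of
// access is executed, as described in the re-resolution comment above.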
       
void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value.  Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
  }
  guarantee(parameter_size() == value,
            err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}
       
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");

  int byte_no = -1;
  bool change_to_virtual = false;

  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
      // instruction somehow links to a non-interface method (in Object).
      // In that case, the method has no itable index and must be invoked as a virtual.
      // Set a flag to keep track of this corner case.
      change_to_virtual = true;

      // ...and fall through as if we were handling invokevirtual:
    case Bytecodes::_invokevirtual:
      {
        if (method->can_be_statically_bound()) {
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}

       
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  Klass* interf = method->method_holder();
  assert(InstanceKlass::cast(interf)->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(interf);
  set_f2(index);
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}

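// Summary of the f1/f2 encodings established by set_method and
// set_interface_call above (this is also what method_if_resolved below decodes):
//   invokestatic/invokespecial:   f1 = target Method*
//   invokevirtual, vfinal:        f2 = target Method*, is_vfinal flag set
//   invokevirtual, non-final:     f2 = vtable index for the receiver's klass
//   invokeinterface:              f1 = interface Klass*, f2 = itable index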
       
void ConstantPoolCacheEntry::set_method_handle(methodHandle adapter, Handle appendix,
                                               objArrayHandle resolved_references) {
  set_method_handle_common(Bytecodes::_invokehandle, adapter, appendix, resolved_references);
}

void ConstantPoolCacheEntry::set_dynamic_call(methodHandle adapter, Handle appendix,
                                              objArrayHandle resolved_references) {
  set_method_handle_common(Bytecodes::_invokedynamic, adapter, appendix, resolved_references);
}
       
void ConstantPoolCacheEntry::set_method_handle_common(Bytecodes::Code invoke_code,
                                                      methodHandle adapter,
                                                      Handle appendix,
                                                      objArrayHandle resolved_references) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, refs[f2], f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access on the first
  // write, to flags, using a compare/exchange.
  // A losing writer to flags must spin until the winner writes f1,
  // so that when he returns, he can use the linked cache entry.

  bool has_appendix = appendix.not_null();

  // Write the flags.
  bool owner =
    init_method_flags_atomic(as_TosState(adapter->result_type()),
                   ((has_appendix ?  1 : 0) << has_appendix_shift) |
                   (                 1      << is_final_shift),
                   adapter->size_of_parameters());
  if (!owner) {
    // Somebody else is working on the same CPCE.  Let them proceed.
    while (is_f1_null()) {
      // Pause momentarily on a low-level lock, to allow racing thread to win.
      MutexLockerEx mu(Patching_lock, Mutex::_no_safepoint_check_flag);
      os::yield();
    }
    return;
  }

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // refs[f2], if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f1 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f1, as if for invokespecial, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // Even with the appendix, the method will never take more than 255 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in refs[f2].
  // This allows us to create fewer method oops, while keeping type safety.
  //

  if (has_appendix) {
    int ref_index = f2_as_index();
    assert(ref_index >= 0 && ref_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(ref_index) == NULL, "init just once");
    resolved_references->obj_at_put(ref_index, appendix());
  }

  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!

  // The interpreter assembly code does not check byte_2,
  // but it is used by is_resolved, method_if_resolved, etc.
  set_bytecode_1(invoke_code);
  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}

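// Reader-side counterpart of the NOTE above: method_if_resolved and
// appendix_if_resolved (below) check the resolved bytecode and f1 for
// non-zero/non-null values before trusting refs[f2] or the flags, matching
// the store order used by this writer.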
       
Method* ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
  // Decode the action of set_method and set_interface_call
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    Metadata* f1 = (Metadata*)_f1;
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        assert(f1->is_klass(), "");
        return klassItable::method_for_itable_index((Klass*)f1, f2_as_index());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(!has_appendix(), "");
      case Bytecodes::_invokehandle:
      case Bytecodes::_invokedynamic:
        assert(f1->is_method(), "");
        return (Method*)f1;
      }
    }
  }
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        Method* m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          Klass* klass = cpool->resolved_klass_at(holder_index);
          if (!Klass::cast(klass)->oop_is_instance())
            klass = SystemDictionary::Object_klass();
          return InstanceKlass::cast(klass)->method_at_vtable(f2_as_index());
        }
      }
      break;
    }
  }
  return NULL;
}

       
oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
  if (is_f1_null() || !has_appendix())
    return NULL;
  int ref_index = f2_as_index();
  objArrayOop resolved_references = cpool->resolved_references();
  return resolved_references->obj_at(ref_index);
}

       
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
bool ConstantPoolCacheEntry::adjust_method_entry(Method* old_method,
       Method* new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if (_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need to change.
    return false;
  }

  if (_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
       
#ifndef PRODUCT
bool ConstantPoolCacheEntry::check_no_old_entries() {
  if (is_vfinal()) {
    Metadata* f2 = (Metadata*)_f2;
    return (f2->is_valid() && f2->is_method() && !((Method*)f2)->is_old());
  } else {
    return (_f1 == NULL || (_f1->is_valid() && _f1->is_method() && !((Method*)_f1)->is_old()));
  }
}
#endif
       
bool ConstantPoolCacheEntry::is_interesting_method_entry(Klass* k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  Method* m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = f2_as_vfinal_method();
  } else if (is_f1_null()) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!(_f1->is_method())) {
      // _f1 can also contain a Klass* for an interface
      return false;
    }
    m = f1_as_method();
  }

  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}
       
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr("                 -------------");
  // print entry
  st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f1);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  st->print_cr("                 -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
       
// Implementation of ConstantPoolCache

ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
  int size = ConstantPoolCache::size(length);

  return new (loader_data, size, false, THREAD) ConstantPoolCache(length);
}

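// The cache itself is metadata: the placement new above allocates it out of
// the given loader_data's metaspace.  ConstantPoolCache::size(length) is the
// allocation size (presumably in words, as with other Metadata types; see
// cpCache.hpp for the definition).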
       
void ConstantPoolCache::initialize(intArray& inverse_index_map, intArray& invokedynamic_references_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }
  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    int cpci = invokedynamic_references_map[ref];
    if (cpci >= 0)
      entry_at(cpci)->initialize_resolved_reference_index(ref);
  }
}

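// About the two maps consumed above (both presumably built by the rewriter;
// see interpreter/rewriter.hpp): inverse_index_map[i] is the original constant
// pool index for cache entry i, and invokedynamic_references_map[ref] names
// the cache entry whose appendix will be stored at slot 'ref' of the
// resolved_references array (negative entries are skipped).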
       
// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void ConstantPoolCache::adjust_method_entries(Method** old_methods, Method** new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  Klass* old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      Method* old_method = old_methods[j];
      Method* new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there is one
        break;
      }
    }
  }
}
       
#ifndef PRODUCT
bool ConstantPoolCache::check_no_old_entries() {
  for (int i = 1; i < length(); i++) {
    if (entry_at(i)->is_interesting_method_entry(NULL) &&
       !entry_at(i)->check_no_old_entries()) {
      return false;
    }
  }
  return true;
}
#endif // PRODUCT

       
// Printing

void ConstantPoolCache::print_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print_cr(internal_name());
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->print(st, i);
}

void ConstantPoolCache::print_value_on(outputStream* st) const {
  assert(is_constantPoolCache(), "obj must be constant pool cache");
  st->print("cache [%d]", length());
  print_address_on(st);
  st->print(" for ");
  constant_pool()->print_value_on(st);
}

       
// Verification

void ConstantPoolCache::verify_on(outputStream* st) {
  guarantee(is_constantPoolCache(), "obj must be constant pool cache");
  // print constant pool cache entries
  for (int i = 0; i < length(); i++) entry_at(i)->verify(st);
}