src/hotspot/share/code/compiledMethod.cpp
       
/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
       
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/mutexLocker.hpp"
       
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _method(method), _mark_for_deoptimization_status(not_marked) {
  init_defaults();
}
       
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}
       
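// Returns true only if return_pc is the return site of a method handle
// invoke, as recorded in this method's PcDescs.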
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}
       
// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}
       
//-----------------------------------------------------------------------------
       
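// A new entry is linked onto the head of the list.  Readers traverse the
// list without holding ExceptionCache_lock, so the entry is fully
// initialized first and only then published with a releasing store.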
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}
       
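// Unlink (and delete) exception cache entries whose exception Klass belongs
// to a class loader that is no longer alive; called as part of nmethod
// unloading (see do_unloading below).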
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}
       
// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}
       
void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

//-------------end of code for ExceptionCache--------------
       
// This private method searches the exception cache directly for an entry
// matching the given exception type.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}
       
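// The two queries below scan the relocation info covering a single
// instruction (the range [pc, pc+1)) for safepoint poll relocations.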
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}
       
bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}
       
void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}
       
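// Reconstruct debugging information (a ScopeDesc) from the PcDesc recorded
// for a pc.  scope_desc_at requires an exact PcDesc match, while
// scope_desc_near accepts an approximate one; both guarantee that a scope
// is present.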
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}
       
void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
  assert_locked_or_safepoint(CompiledIC_lock);

  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  ResourceMark rm;
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    switch (iter.type()) {
      case relocInfo::virtual_call_type:
      case relocInfo::opt_virtual_call_type: {
        CompiledIC *ic = CompiledIC_at(&iter);
        // It is OK to look up references to zombies here.
        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* nm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
            ic->set_to_clean(is_alive());
          }
        }
        break;
      }
      case relocInfo::static_call_type: {
        CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
        CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
        if (cb != NULL && cb->is_compiled()) {
          CompiledMethod* cm = cb->as_compiled_method();
          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
          if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
            csc->set_to_clean();
          }
        }
        break;
      }
      default:
        break;
    }
  }
}
       
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}
       
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by the JIT compiler should be used, if present;
    // the bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
#endif // !SHARK
}
       
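// Returns the Method* that the compiler attached to the call site at
// call_instr via its relocation info, or NULL if no method was attached.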
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}
       
Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}
       
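// Resets every inline cache in this method to the clean state; the assert
// below restricts this to safepoints, since ICs are patched in place.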
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}
       
// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}
       
#ifdef ASSERT

class CheckClass : AllStatic {
  static BoolObjectClosure* _is_alive;

  // Check that the class loader is alive for this bit of metadata.
  static void check_class(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(_is_alive), "must be alive");
  }
 public:
  static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
    assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
    _is_alive = is_alive;
    nm->metadata_do(check_class);
  }
};

// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT
       
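// Clears the given inline cache if the metadata it caches (a Method*, a
// Klass*, or a CompiledICHolder) belongs to a class loader that did not
// survive this round of class unloading.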
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  if (ic->is_icholder_call()) {
    // A CompiledICHolder may still be live: keep the inline cache if both
    // the holder method's and the holder klass's class loaders are alive
    // (checked just below).
    CompiledICHolder* cichk_oop = ic->cached_icholder();

    if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
        cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
      return;
    }
  } else {
    Metadata* ic_oop = ic->cached_metadata();
    if (ic_oop != NULL) {
      if (ic_oop->is_klass()) {
        if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
          return;
        }
      } else if (ic_oop->is_method()) {
        if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}
       
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // _nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}
       
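// The per-nmethod unloading clock is read and written by concurrent GC
// worker threads, hence the releasing store and matching acquiring load.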
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
       
// Processing of oop references should have been sufficient to keep
// all strong references alive.  Any weak references should have been
// cleared as well.  Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // static_stub_Relocations may have dangling references to
    // Method*s so trim them out here.  Otherwise it looks like
    // compiled code is maintaining a link to dead metadata.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
  // Check that the metadata embedded in the nmethod is alive
  CheckClass::do_check_class(is_alive, this);
#endif
}
       
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  // If class unloading occurred we first iterate over all inline caches and
  // clear ICs where the cached oop is referring to an unloaded klass or method.
  // The remaining live cached oops will be traversed in the relocInfo::oop_type
  // iteration below.
  if (unloading_occurred) {
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC *ic = CompiledIC_at(&iter);
        clean_ic_if_metadata_is_dead(ic, is_alive);
      }
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);
}
       
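// Cleans an inline cache or static call that points into another nmethod,
// if that nmethod is no longer valid.  Returns true when cleaning has to be
// postponed because the target nmethod has not yet been processed in the
// current unloading cycle.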
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean();
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}
       
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}
       
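// First pass of parallel nmethod unloading.  Cleans whatever can be cleaned
// immediately and returns true if some calls had to be postponed to the
// second pass (do_unloading_parallel_postponed below).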
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  // The RedefineClasses() API can cause the class unloading invariant
  // to no longer be true. See jvmtiExport.hpp for details.
  // Also, leave a debugging breadcrumb in local flag.
  if (JvmtiExport::has_redefined_a_class()) {
    // This set of the unloading_occurred flag is done before the
    // call to post_compiled_method_unload() so that the unloading
    // of this nmethod is reported.
    unloading_occurred = true;
  }

  // Exception cache
  clean_exception_cache(is_alive);

  bool postponed = false;

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first iterate over all inline caches and
        // clear ICs where the cached oop is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops below
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
    return postponed;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
    return postponed;
  }
#endif

  // Ensure that all metadata is still alive
  verify_metadata_loaders(low_boundary, is_alive);

  return postponed;
}
       
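// Second pass of parallel nmethod unloading: cleans the calls whose cleaning
// was postponed in the first pass.  The return value of
// clean_if_nmethod_is_unloaded is no longer needed here, so it is ignored.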
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  // If the method is not entrant then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (is_not_entrant()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // (See comment above.)
  }

  RelocIterator iter(this, low_boundary);
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
      break;

    default:
      break;
    }
  }
}