src/hotspot/share/code/nmethod.cpp
branch:    datagramsocketimpl-branch
changeset: 58678:9cf78a70fa4f
parent:    55068:f6260463dbe4
child:     58679:9c3209ff7550
--- a/src/hotspot/share/code/nmethod.cpp	(58677:13588c901957)
+++ b/src/hotspot/share/code/nmethod.cpp	(58678:9cf78a70fa4f)
@@ -475,10 +475,11 @@
   if (nm != NULL) {
     // verify nmethod
     debug_only(nm->verify();) // might block

     nm->log_new_nmethod();
+    nm->make_in_use();
   }
   return nm;
 }

 nmethod* nmethod::new_nmethod(const methodHandle& method,
@@ -921,10 +920,11 @@

 void nmethod::print_nmethod(bool printmethod) {
   ttyLocker ttyl;  // keep the following output all in one block
   if (xtty != NULL) {
     xtty->begin_head("print_nmethod");
+    log_identity(xtty);
     xtty->stamp();
     xtty->end_head();
   }
   // Print the header part, then print the requested information.
   // This is both handled in decode2().
@@ -1134,10 +1134,29 @@
   if (mdo == NULL)  return;
   // There is a benign race here.  See comments in methodData.hpp.
   mdo->inc_decompile_count();
 }

+bool nmethod::try_transition(int new_state_int) {
+  signed char new_state = new_state_int;
+#ifdef DEBUG
+  if (new_state != unloaded) {
+    assert_lock_strong(CompiledMethod_lock);
+  }
+#endif
+  for (;;) {
+    signed char old_state = Atomic::load(&_state);
+    if (old_state >= new_state) {
+      // Ensure monotonicity of transitions.
+      return false;
+    }
+    if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
+      return true;
+    }
+  }
+}
+
 void nmethod::make_unloaded() {
   post_compiled_method_unload();

   // This nmethod is being unloaded, make sure that dependencies
   // recorded in instanceKlasses get flushed.
@@ -1157,11 +1176,13 @@
              p2i(this), p2i(_method));
      ls.cr();
   }
   // Unlink the osr method, so we do not look this up again
   if (is_osr_method()) {
-    // Invalidate the osr nmethod only once
+    // Invalidate the osr nmethod only once. Note that with concurrent
+    // code cache unloading, OSR nmethods are invalidated before they
+    // are made unloaded. Therefore, this becomes a no-op then.
     if (is_in_use()) {
       invalidate_osr_method();
     }
 #ifdef ASSERT
     if (method() != NULL) {
@@ -1207,16 +1228,18 @@
   assert(_method == NULL, "Tautology");

   set_osr_link(NULL);
   NMethodSweeper::report_state_change(this);

-  // The release is only needed for compile-time ordering, as accesses
-  // into the nmethod after the store are not safe due to the sweeper
-  // being allowed to free it when the store is observed, during
-  // concurrent nmethod unloading. Therefore, there is no need for
-  // acquire on the loader side.
-  OrderAccess::release_store(&_state, (signed char)unloaded);
+  bool transition_success = try_transition(unloaded);
+
+  // It is an important invariant that there exists no race between
+  // the sweeper and GC thread competing for making the same nmethod
+  // zombie and unloaded respectively. This is ensured by
+  // can_convert_to_zombie() returning false for any is_unloading()
+  // nmethod, informing the sweeper not to step on any GC toes.
+  assert(transition_success, "Invalid nmethod transition to unloaded");

 #if INCLUDE_JVMCI
   // Clear the link between this nmethod and a HotSpotNmethod mirror
   JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
   if (nmethod_data != NULL) {
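
[note] The explicit release store is dropped, not forgotten: a successful
compare-and-swap (which HotSpot performs with conservative, at-least-release
ordering) already publishes every write made before the transition, so the
prior OrderAccess::release_store becomes redundant. A sketch of that reasoning
with C++11 atomics, where a seq_cst compare_exchange is likewise at least as
strong as a release store; names here are illustrative:

    #include <atomic>
    #include <cassert>

    std::atomic<signed char> state{0};
    int dependencies_flushed = 0;

    void unloader() {
      dependencies_flushed = 1;  // work completed before the transition
      signed char expected = 0;
      // The successful CAS publishes the write above to any thread that
      // observes state == 1 with acquire (or stronger) ordering.
      bool ok = state.compare_exchange_strong(expected, 1);
      assert(ok);
    }

    int main() {
      unloader();
      assert(state.load() == 1 && dependencies_flushed == 1);
    }
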
@@ -1268,13 +1291,12 @@
 /**
  * Common functionality for both make_not_entrant and make_zombie
  */
 bool nmethod::make_not_entrant_or_zombie(int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
-  assert(!is_zombie(), "should not already be a zombie");

-  if (_state == state) {
+  if (Atomic::load(&_state) >= state) {
     // Avoid taking the lock if already in required state.
     // This is safe from races because the state is an end-state,
     // which the nmethod cannot back out of once entered.
     // No need for fencing either.
     return false;
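
[note] The fast path changed from an equality test to ">=": since states only
move forward, "already at or beyond the requested state" means there is
nothing left to do and the lock can be skipped. A standalone sketch of this
double-checked pattern (names illustrative, not HotSpot API):

    #include <atomic>
    #include <mutex>

    std::atomic<int> state{0};
    std::mutex state_lock;

    bool make_at_least(int target) {
      // Unlocked fast path: a stale read can only send us to the lock
      // unnecessarily, never let us skip a required transition.
      if (state.load() >= target) {
        return false;  // end-states never revert, so no fencing is needed
      }
      std::lock_guard<std::mutex> guard(state_lock);
      if (state.load() >= target) {
        return false;  // another thread won the race while we blocked
      }
      state.store(target);
      return true;
    }

    int main() {
      make_at_least(2);
      return make_at_least(1) ? 1 : 0;  // lower target refused: returns 0
    }
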
@@ -1282,34 +1304,32 @@

   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
   methodHandle the_method(method());
   // This can be called while the system is already at a safepoint which is ok
-  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
+  NoSafepointVerifier nsv;

   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
   // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This
   // would be prone to deadlocks.
   // This flag is used to remember whether we need to later lock and unregister.
   bool nmethod_needs_unregister = false;

-  // invalidate osr nmethod before acquiring the patching lock since
-  // they both acquire leaf locks and we don't want a deadlock.
-  // This logic is equivalent to the logic below for patching the
-  // verified entry point of regular methods. We check that the
-  // nmethod is in use to ensure that it is invalidated only once.
-  if (is_osr_method() && is_in_use()) {
-    // this effectively makes the osr nmethod not entrant
-    invalidate_osr_method();
-  }
-
   {
     // Enter critical section.  Does not block for safepoint.
-    MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag);

-    if (_state == state) {
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods. We check that the
+    // nmethod is in use to ensure that it is invalidated only once.
+    if (is_osr_method() && is_in_use()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
+    if (Atomic::load(&_state) >= state) {
       // another thread already performed this transition so nothing
       // to do, but return false to indicate this.
       return false;
     }

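
[note] The new MutexLocker line is the "lock unless already owned" idiom:
HotSpot's MutexLocker treats a NULL mutex as a no-op, so passing NULL when the
current thread already holds CompiledMethod_lock lets this function be called
with or without the lock held, without self-deadlock. A small standalone
imitation of the idiom (owned_by_self() is faked with a thread_local flag):

    #include <mutex>

    class MaybeLocker {
      std::mutex* _m;
     public:
      explicit MaybeLocker(std::mutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
      ~MaybeLocker() { if (_m != nullptr) _m->unlock(); }
    };

    std::mutex compiled_method_lock;
    thread_local bool owns_lock = false;  // stand-in for Mutex::owned_by_self()

    void transition_under_lock() {
      // Lock only if this thread does not already own the mutex.
      MaybeLocker guard(owns_lock ? nullptr : &compiled_method_lock);
      // ... perform the state transition while the lock is held ...
    }

    int main() { transition_under_lock(); }
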
@@ -1341,11 +1361,22 @@
       mark_as_seen_on_stack();
       OrderAccess::storestore(); // _stack_traversal_mark and _state
     }

     // Change state
-    _state = state;
+    if (!try_transition(state)) {
+      // If the transition fails, it is due to another thread making the nmethod more
+      // dead. In particular, one thread might be making the nmethod unloaded concurrently.
+      // If so, having patched in the jump in the verified entry unnecessarily is fine.
+      // The nmethod is no longer possible to call by Java threads.
+      // Incrementing the decompile count is also fine as the caller of make_not_entrant()
+      // had a valid reason to deoptimize the nmethod.
+      // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
+      // !is_alive(), and the seen on stack value is only used to convert not_entrant
+      // nmethods to zombie in can_convert_to_zombie().
+      return false;
+    }

     // Log the transition once
     log_state_change();

     // Remove nmethod from method.
@@ -1468,10 +1499,17 @@
 oop nmethod::oop_at(int index) const {
   if (index == 0) {
     return NULL;
   }
   return NativeAccess<AS_NO_KEEPALIVE>::oop_load(oop_addr_at(index));
+}
+
+oop nmethod::oop_at_phantom(int index) const {
+  if (index == 0) {
+    return NULL;
+  }
+  return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(oop_addr_at(index));
 }

 //
 // Notify all classes this nmethod is dependent on that it is no
 // longer dependent. This should only be called in two situations.
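
[note] oop_at_phantom() differs from oop_at() only in the access decorator:
AS_NO_KEEPALIVE reads the slot without keeping the object alive across a
concurrent collection, while ON_PHANTOM_OOP_REF asks the GC to treat the slot
with phantom-reference strength, the variant JVMCI wants when peeking at oops
in an nmethod that may be dying. A schematic, non-HotSpot sketch of
decorator-selected loads:

    #include <cstdio>

    enum Decorator { AS_NO_KEEPALIVE, ON_PHANTOM_OOP_REF };
    using oop = void*;

    template <Decorator D>
    oop oop_load(oop* addr) {
      oop value = *addr;  // the raw load is identical...
      if (D == ON_PHANTOM_OOP_REF) {
        // ...only the GC barrier applied around it differs (elided here).
      }
      return value;
    }

    int main() {
      int dummy;
      oop slot = &dummy;
      std::printf("%p %p\n", oop_load<AS_NO_KEEPALIVE>(&slot),
                             oop_load<ON_PHANTOM_OOP_REF>(&slot));
    }
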
@@ -1755,14 +1793,13 @@
     guarantee(unload_nmethod_caches(unloading_occurred),
               "Should not need transition stubs");
   }
 }

-void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
+void nmethod::oops_do(OopClosure* f, bool allow_dead) {
   // make sure the oops ready to receive visitors
-  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
-  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
+  assert(allow_dead || is_alive(), "should not call follow on dead nmethod");

   // Prevent extra code cache walk for platforms that don't have immediate oops.
   if (relocInfo::mustIterateImmediateOopsInCode()) {
     RelocIterator iter(this, oops_reloc_begin());

@@ -2091,38 +2128,10 @@
   }
   return true;
 }


-address nmethod::continuation_for_implicit_exception(address pc) {
-  // Exception happened outside inline-cache check code => we are inside
-  // an active nmethod => use cpc to determine a return address
-  int exception_offset = pc - code_begin();
-  int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
-#ifdef ASSERT
-  if (cont_offset == 0) {
-    Thread* thread = Thread::current();
-    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
-    HandleMark hm(thread);
-    ResourceMark rm(thread);
-    CodeBlob* cb = CodeCache::find_blob(pc);
-    assert(cb != NULL && cb == this, "");
-    ttyLocker ttyl;
-    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
-    // Print all available nmethod info.
-    print_nmethod(true);
-    method()->print_codes();
-  }
-#endif
-  if (cont_offset == 0) {
-    // Let the normal error handling report the exception
-    return NULL;
-  }
-  return code_begin() + cont_offset;
-}
-
-
 void nmethod_init() {
   // make sure you didn't forget to adjust the filler fields
   assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
 }

@@ -2178,10 +2187,21 @@
     }
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };

+class VerifyMetadataClosure: public MetadataClosure {
+ public:
+  void do_metadata(Metadata* md) {
+    if (md->is_method()) {
+      Method* method = (Method*)md;
+      assert(!method->is_old(), "Should not be installing old methods");
+    }
+  }
+};
+
+
 void nmethod::verify() {

   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
   // seems odd.

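
[note] VerifyMetadataClosure is the usual HotSpot closure (visitor) idiom:
metadata_do() walks the metadata embedded in the nmethod and hands each entry
to a caller-supplied closure, so this verification pass is just one more
visitor. A standalone sketch of the shape of that interaction (simplified
types, not the real Metadata class):

    #include <cassert>
    #include <vector>

    struct Metadata { bool is_method; bool is_old; };

    class MetadataClosure {
     public:
      virtual void do_metadata(Metadata* md) = 0;
      virtual ~MetadataClosure() {}
    };

    class VerifyMetadataClosure : public MetadataClosure {
     public:
      void do_metadata(Metadata* md) override {
        if (md->is_method) {
          // A freshly installed nmethod must not point at redefined methods.
          assert(!md->is_old && "Should not be installing old methods");
        }
      }
    };

    // Stand-in for nmethod::metadata_do(): apply the closure to every entry.
    void metadata_do(std::vector<Metadata*>& all, MetadataClosure* cl) {
      for (Metadata* md : all) cl->do_metadata(md);
    }

    int main() {
      Metadata m{true, false};
      std::vector<Metadata*> mds{&m};
      VerifyMetadataClosure vmc;
      metadata_do(mds, &vmc);
    }
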
@@ -2210,26 +2230,53 @@
   for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
     if (! p->verify(this)) {
       tty->print_cr("\t\tin nmethod at " INTPTR_FORMAT " (pcs)", p2i(this));
     }
   }
+
+#ifdef ASSERT
+#if INCLUDE_JVMCI
+  {
+    // Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
+    ImmutableOopMapSet* oms = oop_maps();
+    ImplicitExceptionTable implicit_table(this);
+    for (uint i = 0; i < implicit_table.len(); i++) {
+      int exec_offset = (int) implicit_table.get_exec_offset(i);
+      if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
+        assert(pc_desc_at(code_begin() + exec_offset) != NULL, "missing PcDesc");
+        bool found = false;
+        for (int i = 0, imax = oms->count(); i < imax; i++) {
+          if (oms->pair_at(i)->pc_offset() == exec_offset) {
+            found = true;
+            break;
+          }
+        }
+        assert(found, "missing oopmap");
+      }
+    }
+  }
+#endif
+#endif

   VerifyOopsClosure voc(this);
   oops_do(&voc);
   assert(voc.ok(), "embedded oops must be OK");
   Universe::heap()->verify_nmethod(this);

   verify_scopes();
+
+  CompiledICLocker nm_verify(this);
+  VerifyMetadataClosure vmc;
+  metadata_do(&vmc);
 }


 void nmethod::verify_interrupt_point(address call_site) {
   // Verify IC only when nmethod installation is finished.
   if (!is_not_installed()) {
     if (CompiledICLocker::is_safe(this)) {
       CompiledIC_at(this, call_site);
-      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
     } else {
       CompiledICLocker ml_verify(this);
       CompiledIC_at(this, call_site);
     }
   }
@@ -3010,37 +3057,62 @@
   // relocations?
   const char* str = reloc_string_for(begin, end);
   if (str != NULL) return true;

   // implicit exceptions?
-  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
+  int cont_offset = ImplicitExceptionTable(this).continuation_offset(begin - code_begin());
   if (cont_offset != 0) return true;

   return false;
 }

 void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
-  // First, find an oopmap in (begin, end].
-  // We use the odd half-closed interval so that oop maps and scope descs
-  // which are tied to the byte after a call are printed with the call itself.
+  ImplicitExceptionTable implicit_table(this);
+  int pc_offset = begin - code_begin();
+  int cont_offset = implicit_table.continuation_offset(pc_offset);
+  bool oop_map_required = false;
+  if (cont_offset != 0) {
+    st->move_to(column, 6, 0);
+    if (pc_offset == cont_offset) {
+      st->print("; implicit exception: deoptimizes");
+      oop_map_required = true;
+    } else {
+      st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
+    }
+  }
+
+  // Find an oopmap in (begin, end].  We use the odd half-closed
+  // interval so that oop maps and scope descs which are tied to the
+  // byte after a call are printed with the call itself.  OopMaps
+  // associated with implicit exceptions are printed with the implicit
+  // instruction.
   address base = code_begin();
   ImmutableOopMapSet* oms = oop_maps();
   if (oms != NULL) {
     for (int i = 0, imax = oms->count(); i < imax; i++) {
       const ImmutableOopMapPair* pair = oms->pair_at(i);
       const ImmutableOopMap* om = pair->get_from(oms);
       address pc = base + pair->pc_offset();
-      if (pc > begin) {
-        if (pc <= end) {
+      if (pc >= begin) {
+#if INCLUDE_JVMCI
+        bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
+#else
+        bool is_implicit_deopt = false;
+#endif
+        if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
           st->move_to(column, 6, 0);
           st->print("; ");
           om->print_on(st);
+          oop_map_required = false;
         }
+      }
+      if (pc > end) {
         break;
       }
     }
   }
+  assert(!oop_map_required, "missed oopmap");

   // Print any debug info present at this pc.
   ScopeDesc* sd  = scope_desc_in(begin, end);
   if (sd != NULL) {
     st->move_to(column, 6, 0);
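
[note] The printing code above leans on the ImplicitExceptionTable contract:
each faulting PC offset maps to a continuation offset, a continuation equal to
the faulting offset itself encodes "deoptimize here" (the JVMCI case), and 0
means "no entry". A standalone sketch of that contract (not the real table,
which stores packed offset pairs):

    #include <cassert>
    #include <map>

    class ImplicitExceptionTableSketch {
      std::map<unsigned, unsigned> _entries;  // exec_offset -> cont_offset
     public:
      void add(unsigned exec, unsigned cont) { _entries[exec] = cont; }
      unsigned continuation_offset(unsigned exec) const {
        auto it = _entries.find(exec);
        return it == _entries.end() ? 0 : it->second;
      }
    };

    int main() {
      ImplicitExceptionTableSketch t;
      t.add(0x10, 0x40);  // dispatch: fault at 0x10 continues at 0x40
      t.add(0x20, 0x20);  // deopt: continuation equals the faulting offset
      assert(t.continuation_offset(0x10) == 0x40);
      assert(t.continuation_offset(0x20) == 0x20);  // prints "deoptimizes"
      assert(t.continuation_offset(0x30) == 0);     // no entry at this pc
    }
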
@@ -3126,16 +3198,10 @@
   if (str != NULL) {
     if (sd != NULL) st->cr();
     st->move_to(column, 6, 0);
     st->print(";   {%s}", str);
   }
-  int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
-  if (cont_offset != 0) {
-    st->move_to(column, 6, 0);
-    st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
-  }
-
 }

 #endif

 class DirectNativeCallWrapper: public NativeCallWrapper {