src/hotspot/share/oops/instanceKlass.cpp
changeset 59247 56bf71d64d51
parent 59070 22ee476cc664
child 59252 623722a6aeb9
comparing 59246:fcad92f425c5 with 59247:56bf71d64d51
@@ -1095,11 +1095,11 @@
   Klass* volatile* k = adr_implementor();
   if (k == NULL) {
     return NULL;
   } else {
     // This load races with inserts, and therefore needs acquire.
-    Klass* kls = OrderAccess::load_acquire(k);
+    Klass* kls = Atomic::load_acquire(k);
     if (kls != NULL && !kls->is_loader_alive()) {
       return NULL;  // don't return unloaded class
     } else {
       return kls;
     }
@@ -1111,11 +1111,11 @@
   assert_locked_or_safepoint(Compile_lock);
   assert(is_interface(), "not interface");
   Klass* volatile* addr = adr_implementor();
   assert(addr != NULL, "null addr");
   if (addr != NULL) {
-    OrderAccess::release_store(addr, k);
+    Atomic::release_store(addr, k);
   }
 }
 
 int  InstanceKlass::nof_implementors() const {
   Klass* k = implementor();
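
Taken together with the previous hunk, these two changes are the two halves of an acquire/release pair: set_implementor() publishes with a release store and implementor() reads with an acquire load. As a rough standalone analogue (illustrative names, std::atomic instead of HotSpot's Atomic class), the pattern looks like this:

#include <atomic>
#include <cassert>

// Hypothetical stand-in for a Klass whose class loader may be unloaded.
struct Klass { bool loader_alive = true; };

// Stand-in for the interface's implementor slot.
std::atomic<Klass*> implementor_slot{nullptr};

// Writer side (cf. set_implementor): the release store publishes a fully
// initialized Klass to lock-free readers.
void set_implementor(Klass* k) {
  implementor_slot.store(k, std::memory_order_release);
}

// Reader side (cf. implementor): the acquire load pairs with the release
// store above, so readers see the Klass' initialized state.
Klass* implementor() {
  Klass* kls = implementor_slot.load(std::memory_order_acquire);
  if (kls != nullptr && !kls->loader_alive) {
    return nullptr;  // don't return an unloaded class
  }
  return kls;
}

int main() {
  Klass k;
  set_implementor(&k);
  assert(implementor() == &k);
  return 0;
}
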
@@ -1368,18 +1368,18 @@
 
 void InstanceKlass::mask_for(const methodHandle& method, int bci,
   InterpreterOopMap* entry_for) {
   // Lazily create the _oop_map_cache at first request
   // Lock-free access requires load_acquire.
-  OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
+  OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
   if (oop_map_cache == NULL) {
     MutexLocker x(OopMapCacheAlloc_lock);
     // Check if _oop_map_cache was allocated while we were waiting for this lock
     if ((oop_map_cache = _oop_map_cache) == NULL) {
       oop_map_cache = new OopMapCache();
       // Ensure _oop_map_cache is stable, since it is examined without a lock
-      OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
+      Atomic::release_store(&_oop_map_cache, oop_map_cache);
     }
   }
   // _oop_map_cache is constant after init; lookup below does its own locking.
   oop_map_cache->lookup(method, bci, entry_for);
 }
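
The mask_for() path above is a double-checked locking idiom: a lock-free acquire load on the fast path, a re-check under OopMapCacheAlloc_lock, and a release store that publishes the newly built cache. A minimal sketch of the same idiom with std::atomic and std::mutex (hypothetical names, not HotSpot code):

#include <atomic>
#include <mutex>

struct OopMapCache {
  // ... per-method oop map entries would live here ...
};

std::atomic<OopMapCache*> cache{nullptr};
std::mutex cache_alloc_lock;

OopMapCache* get_or_create_cache() {
  // Fast path: lock-free readers need an acquire load so they observe a
  // fully constructed cache.
  OopMapCache* c = cache.load(std::memory_order_acquire);
  if (c == nullptr) {
    std::lock_guard<std::mutex> guard(cache_alloc_lock);
    // Re-check: another thread may have installed the cache while we waited.
    c = cache.load(std::memory_order_relaxed);
    if (c == nullptr) {
      c = new OopMapCache();
      // Release store publishes the initialized object to lock-free readers.
      cache.store(c, std::memory_order_release);
    }
  }
  return c;
}

int main() {
  OopMapCache* a = get_or_create_cache();
  OopMapCache* b = get_or_create_cache();
  return (a == b) ? 0 : 1;  // every caller sees the same cache instance
}
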
@@ -2112,11 +2112,11 @@
     id = new_id;
 
     // The jmethodID cache can be read while unlocked so we have to
     // make sure the new jmethodID is complete before installing it
     // in the cache.
-    OrderAccess::release_store(&jmeths[idnum+1], id);
+    Atomic::release_store(&jmeths[idnum+1], id);
   } else {
     *to_dealloc_id_p = new_id; // save new id for later delete
   }
   return id;
 }
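
The comment in this hunk states the invariant: the jmethodID must be complete before the release store makes it visible to readers that scan the cache without a lock. A small illustration of that publish-then-read pattern on a concurrently read array slot (hypothetical types, not the real jmethodID machinery):

#include <atomic>
#include <cstddef>

struct MethodId { int idnum = 0; };

constexpr std::size_t kCacheSlots = 16;

// Hypothetical jmethodID-style cache: slots may be read by threads that do
// not hold the lock used for insertion.
std::atomic<MethodId*> jmeths[kCacheSlots] = {};

MethodId* install(std::size_t idnum, MethodId* new_id) {
  // Fully initialize the entry first ...
  new_id->idnum = static_cast<int>(idnum);
  // ... then publish it. The release store orders the initialization above
  // before the pointer becomes visible to lock-free readers.
  jmeths[idnum + 1].store(new_id, std::memory_order_release);
  return new_id;
}

MethodId* lookup(std::size_t idnum) {
  // Pairs with the release store in install().
  return jmeths[idnum + 1].load(std::memory_order_acquire);
}

int main() {
  MethodId id;
  install(3, &id);
  return lookup(3) == &id ? 0 : 1;
}
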
@@ -2194,11 +2194,11 @@
   assert(is_loader_alive(), "this klass should be live");
   if (is_interface()) {
     assert (ClassUnloading, "only called for ClassUnloading");
     for (;;) {
       // Use load_acquire due to competing with inserts
-      Klass* impl = OrderAccess::load_acquire(adr_implementor());
+      Klass* impl = Atomic::load_acquire(adr_implementor());
       if (impl != NULL && !impl->is_loader_alive()) {
         // NULL this field, might be an unloaded klass or NULL
         Klass* volatile* klass = adr_implementor();
         if (Atomic::cmpxchg((Klass*)NULL, klass, impl) == impl) {
           // Successfully unlinking implementor.
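
The unlink path pairs the acquire load with a compare-and-swap, so the field is cleared only if it still holds the dead implementor that was observed; a failed CAS means an insert won the race and the loop re-evaluates. A standalone sketch of that retry idiom (simplified, using std::atomic::compare_exchange_strong rather than HotSpot's Atomic::cmpxchg):

#include <atomic>

struct Klass { bool loader_alive = false; };

std::atomic<Klass*> implementor_slot{nullptr};

// Clear the slot only if it still holds the dead Klass we observed; if the
// CAS fails, another thread changed the slot and we retry from the top.
void unlink_dead_implementor() {
  for (;;) {
    Klass* impl = implementor_slot.load(std::memory_order_acquire);
    if (impl == nullptr || impl->loader_alive) {
      return;  // nothing to unlink
    }
    Klass* expected = impl;
    if (implementor_slot.compare_exchange_strong(expected, nullptr)) {
      return;  // successfully unlinked the dead implementor
    }
    // expected now holds the current value; loop and re-evaluate it.
  }
}

int main() {
  Klass dead;  // loader_alive == false
  implementor_slot.store(&dead, std::memory_order_release);
  unlink_dead_implementor();
  return implementor_slot.load() == nullptr ? 0 : 1;
}
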