@@ -185,15 +185,15 @@
 }
 
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
     Chunk* next = new Chunk(_head);
-    OrderAccess::release_store(&_head, next);
+    Atomic::release_store(&_head, next);
   }
   oop* handle = &_head->_data[_head->_size];
   NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
-  OrderAccess::release_store(&_head->_size, _head->_size + 1);
+  Atomic::release_store(&_head->_size, _head->_size + 1);
   return handle;
 }
 
 int ClassLoaderData::ChunkedHandleList::count() const {
   int count = 0;
@@ -212,14 +212,14 @@
     }
   }
 }
 
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-  Chunk* head = OrderAccess::load_acquire(&_head);
+  Chunk* head = Atomic::load_acquire(&_head);
   if (head != NULL) {
     // Must be careful when reading size of head
-    oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
+    oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
     for (Chunk* c = head->_next; c != NULL; c = c->_next) {
       oops_do_chunk(f, c, c->_size);
     }
   }
 }
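The two hunks above are the writer and reader halves of a release/acquire publication protocol: add() initializes a handle slot and only then release-stores the new _size, while oops_do() acquire-loads _head and the head's _size so it never visits an uninitialized slot. Below is a minimal sketch of the same shape using standard C++ std::atomic rather than HotSpot's Atomic class; all names are illustrative, and it assumes writers are serialized externally, as HotSpot guarantees by calling add() under a lock.

#include <atomic>
#include <cstdio>

struct Chunk {
  static const int CAPACITY = 32;
  int _data[CAPACITY];
  std::atomic<int> _size;
  Chunk* _next;
  explicit Chunk(Chunk* next) : _size(0), _next(next) {}
};

struct ChunkedList {
  std::atomic<Chunk*> _head{nullptr};

  // Writer (assumed serialized externally): initialize the slot first,
  // then publish it with a release store of the new size.
  void add(int v) {
    Chunk* head = _head.load(std::memory_order_relaxed);
    if (head == nullptr || head->_size.load(std::memory_order_relaxed) == Chunk::CAPACITY) {
      head = new Chunk(head);
      _head.store(head, std::memory_order_release);  // publish the new chunk
    }
    int idx = head->_size.load(std::memory_order_relaxed);
    head->_data[idx] = v;                            // write data before publishing
    head->_size.store(idx + 1, std::memory_order_release);
  }

  // Reader (lock-free): the acquire loads pair with the writer's release
  // stores, so every slot below the observed size is fully initialized.
  // Non-head chunks are full and immutable, so relaxed reads suffice there.
  template <typename F>
  void for_each(F f) const {
    Chunk* head = _head.load(std::memory_order_acquire);
    if (head == nullptr) return;
    int n = head->_size.load(std::memory_order_acquire);
    for (int i = 0; i < n; i++) f(head->_data[i]);
    for (Chunk* c = head->_next; c != nullptr; c = c->_next) {
      int m = c->_size.load(std::memory_order_relaxed);
      for (int i = 0; i < m; i++) f(c->_data[i]);
    }
  }
};

int main() {
  ChunkedList list;
  for (int i = 0; i < 100; i++) list.add(i);
  int sum = 0;
  list.for_each([&](int v) { sum += v; });
  std::printf("sum = %d\n", sum);  // prints 4950
  return 0;
}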
@@ -324,36 +324,36 @@
   _handles.oops_do(f);
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     f(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
   }
 }
 
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
 #ifdef ASSERT
       oop m = k->java_mirror();
       assert(m != NULL, "NULL mirror");
@@ -364,11 +364,11 @@
   }
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   // Lock-free access requires load_acquire
-  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
     assert(k != k->next_link(), "no loops!");
   }
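All of the walkers above share one idiom: a single acquire load of _klasses at the top of the loop, after which the plain next_link() reads are safe, because the writer (the add_klass hunk that follows) fully initializes a Klass and its next link before release-storing it into the list head. A self-contained sketch of that reader side, again with std::atomic standing in for Atomic::load_acquire and purely illustrative names:

#include <atomic>

struct Node {
  int payload;
  Node* next;  // set before the node is published, never changed afterwards
};

std::atomic<Node*> list_head{nullptr};

// Lock-free walk: one acquire load at the head is enough. Every node
// reachable through next was fully written before the release store that
// made it visible, so the plain reads of payload and next are safe.
template <typename F>
void list_do(F f) {
  for (Node* n = list_head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
    f(n->payload);
  }
}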
@@ -463,11 +463,11 @@
     MutexLocker ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
     Klass* old_value = _klasses;
     k->set_next_link(old_value);
     // Link the new item into the list, making sure the linked class is stable
     // since the list can be walked without a lock
-    OrderAccess::release_store(&_klasses, k);
+    Atomic::release_store(&_klasses, k);
     if (k->is_array_klass()) {
       ClassLoaderDataGraph::inc_array_classes(1);
     } else {
       ClassLoaderDataGraph::inc_instance_classes(1);
     }
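This hunk is the writer half that the classes_do() walkers rely on: link the new element to the current head, then release-store the head pointer. The lock serializes writers against each other; the release ordering is still required because readers never take it. A sketch under the same assumptions as the reader above (illustrative names, caller initializes payload before calling):

#include <atomic>
#include <mutex>

struct Node {
  int payload;
  Node* next;
};

std::atomic<Node*> list_head{nullptr};
std::mutex list_lock;  // stands in for metaspace_lock()

// Mirrors the add_klass shape: writers are serialized by a lock, but the
// head is still release-stored because readers walk the list without
// locking. The release makes the node's fields, written before this call
// or just below, visible to any reader that acquire-loads the head.
void list_add(Node* n) {
  std::lock_guard<std::mutex> guard(list_lock);
  n->next = list_head.load(std::memory_order_relaxed);  // old_value
  list_head.store(n, std::memory_order_release);        // publish
}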
@@ -550,21 +550,21 @@
 }
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
   // Lock-free access requires load_acquire.
-  ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
+  ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
   if (modules == NULL) {
     MutexLocker m1(Module_lock);
     // Check if _modules got allocated while we were waiting for this lock.
     if ((modules = _modules) == NULL) {
       modules = new ModuleEntryTable(ModuleEntryTable::_moduletable_entry_size);
 
       {
         MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
         // Ensure _modules is stable, since it is examined without a lock
-        OrderAccess::release_store(&_modules, modules);
+        Atomic::release_store(&_modules, modules);
       }
     }
   }
   return modules;
 }
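modules() is a classic double-checked locking lazy initializer: an acquire-load fast path, a re-check under the lock, and a release store to publish the constructed table. A minimal standard-C++ rendering of the same shape; Table and get_table are illustrative names, not HotSpot's:

#include <atomic>
#include <mutex>

struct Table { /* ... payload ... */ };

std::atomic<Table*> g_table{nullptr};
std::mutex g_table_lock;

Table* get_table() {
  // Fast path: acquire load, so a non-null result implies the Table's
  // construction is visible to this thread.
  Table* t = g_table.load(std::memory_order_acquire);
  if (t == nullptr) {
    std::lock_guard<std::mutex> guard(g_table_lock);
    // Re-check: another thread may have won the race while we waited.
    t = g_table.load(std::memory_order_relaxed);
    if (t == nullptr) {
      t = new Table();
      // Release store: construction happens-before any acquire load
      // that observes the pointer on the fast path.
      g_table.store(t, std::memory_order_release);
    }
  }
  return t;
}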
@@ -750,11 +750,11 @@
   // If the metaspace has not been allocated, create a new one. Might want
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
   // Lock-free access requires load_acquire.
-  ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
+  ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
   if (metaspace == NULL) {
     MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
     // Check if _metaspace got allocated while we were waiting for this lock.
     if ((metaspace = _metaspace) == NULL) {
       if (this == the_null_class_loader_data()) {
@@ -766,11 +766,11 @@
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
       } else {
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
-      OrderAccess::release_store(&_metaspace, metaspace);
+      Atomic::release_store(&_metaspace, metaspace);
     }
   }
   return metaspace;
 }
 
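The metaspace accessor repeats the double-checked shape of modules() above. The detail worth noting in both hunks is that the release store happens even though the writer already holds a lock: the lock only orders writers against each other, while the release/acquire pair is what makes the fully constructed object visible to readers that take the fast path without locking. That is also why the re-read under the lock, ((metaspace = _metaspace)), can be a plain load: it is ordered by the lock itself.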