src/hotspot/share/gc/shared/parallelCleaning.cpp
changeset 51524 9d01ad46daef
child 51537 a5d47d1b2a74
equal deleted inserted replaced
51523:73523d329966 51524:9d01ad46daef
       
     1 /*
       
     2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
       
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
       
     4  *
       
     5  * This code is free software; you can redistribute it and/or modify it
       
     6  * under the terms of the GNU General Public License version 2 only, as
       
     7  * published by the Free Software Foundation.
       
     8  *
       
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
       
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
       
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
       
    12  * version 2 for more details (a copy is included in the LICENSE file that
       
    13  * accompanied this code).
       
    14  *
       
    15  * You should have received a copy of the GNU General Public License version
       
    16  * 2 along with this work; if not, write to the Free Software Foundation,
       
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
       
    18  *
       
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
       
    20  * or visit www.oracle.com if you need additional information or have any
       
    21  * questions.
       
    22  *
       
    23  */
       
    24 
       
    25 #include "precompiled.hpp"
       
    26 #include "classfile/symbolTable.hpp"
       
    27 #include "classfile/stringTable.hpp"
       
    28 #include "code/codeCache.hpp"
       
    29 #include "gc/shared/parallelCleaning.hpp"
       
    30 #include "memory/resourceArea.hpp"
       
    31 #include "logging/log.hpp"
       
    32 
       
// Set up a parallel task that unlinks dead entries from the StringTable
// (when process_strings is true) and, when dedup_closure is non-NULL,
// from the string deduplication tables.
StringCleaningTask::StringCleaningTask(BoolObjectClosure* is_alive, StringDedupUnlinkOrOopsDoClosure* dedup_closure, bool process_strings) :
  AbstractGangTask("String Unlinking"),
  _is_alive(is_alive),
  _dedup_closure(dedup_closure),
  _par_state_string(StringTable::weak_storage()),
  _initial_string_table_size((int) StringTable::the_table()->table_size()),
  _process_strings(process_strings), _strings_processed(0), _strings_removed(0) {

  if (process_strings) {
    // Workers will report dead entries; start the table's dead counter from zero.
    StringTable::reset_dead_counter();
  }
}
       
    45 
       
    46 StringCleaningTask::~StringCleaningTask() {
       
    47   log_info(gc, stringtable)(
       
    48       "Cleaned string table, "
       
    49       "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
       
    50       strings_processed(), strings_removed());
       
    51   if (_process_strings) {
       
    52     StringTable::finish_dead_counter();
       
    53   }
       
    54 }
       
    55 
       
// Per-worker entry point: each worker unlinks a disjoint portion of the
// StringTable and then helps unlink the string deduplication tables.
void StringCleaningTask::work(uint worker_id) {
  int strings_processed = 0;
  int strings_removed = 0;
  if (_process_strings) {
    StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
    // Fold this worker's local counts into the shared task-wide totals.
    Atomic::add(strings_processed, &_strings_processed);
    Atomic::add(strings_removed, &_strings_removed);
  }
  if (_dedup_closure != NULL) {
    StringDedup::parallel_unlink(_dedup_closure, worker_id);
  }
}
       
    68 
       
    69 CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
       
    70       _is_alive(is_alive),
       
    71       _unloading_occurred(unloading_occurred),
       
    72       _num_workers(num_workers),
       
    73       _first_nmethod(NULL),
       
    74       _claimed_nmethod(NULL),
       
    75       _postponed_list(NULL),
       
    76       _num_entered_barrier(0) {
       
    77   CompiledMethod::increase_unloading_clock();
       
    78   // Get first alive nmethod
       
    79   CompiledMethodIterator iter = CompiledMethodIterator();
       
    80   if(iter.next_alive()) {
       
    81     _first_nmethod = iter.method();
       
    82   }
       
    83   _claimed_nmethod = _first_nmethod;
       
    84 }
       
    85 
       
// Runs after both cleaning passes have finished: verify the result of the
// cleaning and clear the code cache's pending-clean state.
CodeCacheUnloadingTask::~CodeCacheUnloadingTask() {
  CodeCache::verify_clean_inline_caches();

  CodeCache::set_needs_cache_clean(false);
  guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");

  CodeCache::verify_icholder_relocations();
}
       
    94 
       
    95 Monitor* CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
       
    96 
       
// Lock-free push of nm onto the intrusive _postponed_list stack.
// Retries until the CAS successfully installs nm as the new head.
void CodeCacheUnloadingTask::add_to_postponed_list(CompiledMethod* nm) {
  CompiledMethod* old;
  do {
    old = _postponed_list;
    nm->set_unloading_next(old);  // Link nm in front of the current head.
  } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
}
       
   104 
       
// First-pass cleaning of a single nmethod. If cleaning had to be deferred
// (see the postponed flag below), queue the nmethod for the second pass.
void CodeCacheUnloadingTask::clean_nmethod(CompiledMethod* nm) {
  bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);

  if (postponed) {
    // This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
    add_to_postponed_list(nm);
  }

  // Mark that this nmethod has been cleaned/unloaded.
  // After this call, it will be safe to ask if this nmethod was unloaded or not.
  nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
}
       
   117 
       
// Second-pass cleaning for an nmethod that was postponed during the first pass.
void CodeCacheUnloadingTask::clean_nmethod_postponed(CompiledMethod* nm) {
  nm->do_unloading_parallel_postponed();
}
       
   121 
       
// Claim up to MaxClaimNmethods consecutive alive nmethods for the calling
// worker. On return, *num_claimed_nmethods holds the batch size (0 once the
// code cache is exhausted). The CAS advances the shared _claimed_nmethod
// cursor atomically; if another worker raced us, the batch is re-scanned
// from the updated cursor.
void CodeCacheUnloadingTask::claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
  CompiledMethod* first;
  CompiledMethodIterator last;

  do {
    *num_claimed_nmethods = 0;

    // Snapshot the cursor and iterate forward from it.
    first = _claimed_nmethod;
    last = CompiledMethodIterator(first);

    if (first != NULL) {

      for (int i = 0; i < MaxClaimNmethods; i++) {
        if (!last.next_alive()) {
          break;
        }
        claimed_nmethods[i] = last.method();
        (*num_claimed_nmethods)++;
      }
    }

  } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
}
       
   145 
       
// Lock-free pop from the _postponed_list stack; returns NULL once the list
// is empty. NOTE(review): there is no ABA protection here — presumably safe
// because pushes (first pass) and pops (second pass) are separated by the
// barrier in ParallelCleaningTask::work; confirm against callers.
CompiledMethod* CodeCacheUnloadingTask::claim_postponed_nmethod() {
  CompiledMethod* claim;
  CompiledMethod* next;

  do {
    claim = _postponed_list;
    if (claim == NULL) {
      return NULL;
    }

    next = claim->unloading_next();

  } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);

  return claim;
}
       
   162 
       
   163 void CodeCacheUnloadingTask::barrier_mark(uint worker_id) {
       
   164   MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
       
   165   _num_entered_barrier++;
       
   166   if (_num_entered_barrier == _num_workers) {
       
   167     ml.notify_all();
       
   168   }
       
   169 }
       
   170 
       
   171 void CodeCacheUnloadingTask::barrier_wait(uint worker_id) {
       
   172   if (_num_entered_barrier < _num_workers) {
       
   173     MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
       
   174     while (_num_entered_barrier < _num_workers) {
       
   175         ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
       
   176     }
       
   177   }
       
   178 }
       
   179 
       
   180 void CodeCacheUnloadingTask::work_first_pass(uint worker_id) {
       
   181   // The first nmethods is claimed by the first worker.
       
   182   if (worker_id == 0 && _first_nmethod != NULL) {
       
   183     clean_nmethod(_first_nmethod);
       
   184     _first_nmethod = NULL;
       
   185   }
       
   186 
       
   187   int num_claimed_nmethods;
       
   188   CompiledMethod* claimed_nmethods[MaxClaimNmethods];
       
   189 
       
   190   while (true) {
       
   191     claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
       
   192 
       
   193     if (num_claimed_nmethods == 0) {
       
   194       break;
       
   195     }
       
   196 
       
   197     for (int i = 0; i < num_claimed_nmethods; i++) {
       
   198       clean_nmethod(claimed_nmethods[i]);
       
   199     }
       
   200   }
       
   201 }
       
   202 
       
   203 void CodeCacheUnloadingTask::work_second_pass(uint worker_id) {
       
   204   CompiledMethod* nm;
       
   205   // Take care of postponed nmethods.
       
   206   while ((nm = claim_postponed_nmethod()) != NULL) {
       
   207     clean_nmethod_postponed(nm);
       
   208   }
       
   209 }
       
   210 
       
// The subklass-tree claim flag starts unclaimed (0); the iterator hands out
// klasses to workers one at a time in claim_next_klass().
KlassCleaningTask::KlassCleaningTask() :
  _clean_klass_tree_claimed(0),
  _klass_iterator() {
}
       
   215 
       
// Returns true for exactly one worker, which then cleans the subklass/
// sibling tree; every other caller gets false.
bool KlassCleaningTask::claim_clean_klass_tree_task() {
  if (_clean_klass_tree_claimed) {
    // Fast path: already claimed, no need to CAS.
    return false;
  }

  // Only the worker whose CAS flips 0 -> 1 wins the claim.
  return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0;
}
       
   223 
       
   224 InstanceKlass* KlassCleaningTask::claim_next_klass() {
       
   225   Klass* klass;
       
   226   do {
       
   227     klass =_klass_iterator.next_klass();
       
   228   } while (klass != NULL && !klass->is_instance_klass());
       
   229 
       
   230   // this can be null so don't call InstanceKlass::cast
       
   231   return static_cast<InstanceKlass*>(klass);
       
   232 }
       
   233 
       
   234 void KlassCleaningTask::work() {
       
   235   ResourceMark rm;
       
   236 
       
   237   // One worker will clean the subklass/sibling klass tree.
       
   238   if (claim_clean_klass_tree_task()) {
       
   239     Klass::clean_subklass_tree();
       
   240   }
       
   241 
       
   242   // All workers will help cleaning the classes,
       
   243   InstanceKlass* klass;
       
   244   while ((klass = claim_next_klass()) != NULL) {
       
   245     clean_klass(klass);
       
   246   }
       
   247 }
       
   248 
       
// Compose the three sub-tasks. String processing is always enabled here;
// the dedup closure is only passed through when string deduplication is
// enabled, otherwise NULL suppresses the dedup work in StringCleaningTask.
ParallelCleaningTask::ParallelCleaningTask(BoolObjectClosure* is_alive,
  StringDedupUnlinkOrOopsDoClosure* dedup_closure, uint num_workers, bool unloading_occurred) :
  AbstractGangTask("Parallel Cleaning"),
  _unloading_occurred(unloading_occurred),
  _string_task(is_alive, StringDedup::is_enabled() ? dedup_closure : NULL, true),
  _code_cache_task(num_workers, is_alive, unloading_occurred),
  _klass_cleaning_task() {
}
       
   257 
       
   258 // The parallel work done by all worker threads.
       
   259 void ParallelCleaningTask::work(uint worker_id) {
       
   260     // Do first pass of code cache cleaning.
       
   261     _code_cache_task.work_first_pass(worker_id);
       
   262 
       
   263     // Let the threads mark that the first pass is done.
       
   264     _code_cache_task.barrier_mark(worker_id);
       
   265 
       
   266     // Clean the Strings and Symbols.
       
   267     _string_task.work(worker_id);
       
   268 
       
   269     // Wait for all workers to finish the first code cache cleaning pass.
       
   270     _code_cache_task.barrier_wait(worker_id);
       
   271 
       
   272     // Do the second code cache cleaning work, which realize on
       
   273     // the liveness information gathered during the first pass.
       
   274     _code_cache_task.work_second_pass(worker_id);
       
   275 
       
   276   // Clean all klasses that were not unloaded.
       
   277   // The weak metadata in klass doesn't need to be
       
   278   // processed if there was no unloading.
       
   279   if (_unloading_occurred) {
       
   280     _klass_cleaning_task.work();
       
   281   }
       
   282 }