hotspot/src/share/vm/interpreter/oopMapCache.cpp
changeset 46996 9cbcd7082efe
parent 46727 6e4a84748e2c
comparing 46992:95000145dd81 with 46996:9cbcd7082efe

This change makes the OopMapCache lock-free: the MutexLocker-guarded table of
by-value entries becomes an array of C-heap entry pointers published with
compare-and-swap, and an entry displaced on a collision is deferred to a cleanup
list instead of being freed while GC threads may still be reading it.
@@ -23,21 +23,25 @@
  */
 
 #include "precompiled.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
 
 class OopMapCacheEntry: private InterpreterOopMap {
   friend class InterpreterOopMap;
   friend class OopMapForCacheEntry;
   friend class OopMapCache;
   friend class VerifyClosure;
+
+ private:
+  OopMapCacheEntry* _next;
 
  protected:
   // Initialization
   void fill(const methodHandle& method, int bci);
   // fills the bit mask for native calls
@@ -52,12 +56,13 @@
   void deallocate_bit_mask(); // deallocates the bit mask if it was allocated on the C heap
   bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);
 
  public:
   OopMapCacheEntry() : InterpreterOopMap() {
+    _next = NULL;
 #ifdef ASSERT
-     _resource_allocate_bit_mask = false;
+    _resource_allocate_bit_mask = false;
 #endif
   }
 };
 
 
@@ -261,27 +266,30 @@
   iterate_oop(&blk);
   if (blk.failed()) return false;
 
   // Check if map is generated correctly
   // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
-  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);
+  Log(interpreter, oopmap) logv;
+  LogStream st(logv.trace());
 
+  st.print("Locals (%d): ", max_locals);
   for(int i = 0; i < max_locals; i++) {
     bool v1 = is_oop(i)               ? true : false;
     bool v2 = vars[i].is_reference()  ? true : false;
     assert(v1 == v2, "locals oop mask generation error");
-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+    st.print("%d", v1 ? 1 : 0);
   }
+  st.cr();
 
-  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
+  st.print("Stack (%d): ", stack_top);
   for(int j = 0; j < stack_top; j++) {
     bool v1 = is_oop(max_locals + j)  ? true : false;
     bool v2 = stack[j].is_reference() ? true : false;
     assert(v1 == v2, "stack oop mask generation error");
-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+    st.print("%d", v1 ? 1 : 0);
   }
-  if (TraceOopMapGeneration && Verbose) tty->cr();
+  st.cr();
   return true;
 }
 
 void OopMapCacheEntry::allocate_bit_mask() {
   if (mask_size() > small_mask_limit) {
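The hunk above retires the develop flags TraceOopMapGeneration and Verbose in favor of
unified logging: verify_mask() now writes through a LogStream bound to the
(interpreter, oopmap) tag set at trace level, so the prints cost almost nothing unless
something like -Xlog:interpreter+oopmap=trace is active. A minimal standalone sketch of
that guard-by-construction pattern; TagSetLogger is a hypothetical stand-in, not
HotSpot's real Log/LogStream API:

    #include <cstdarg>
    #include <cstdio>

    // Stand-in for Log(interpreter, oopmap) + LogStream: the stream checks once
    // whether its tag set is enabled and every print is a no-op otherwise.
    class TagSetLogger {
      bool _enabled;
     public:
      explicit TagSetLogger(bool enabled) : _enabled(enabled) {}
      void print(const char* fmt, ...) {
        if (!_enabled) return;
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
      }
      void cr() { if (_enabled) fputc('\n', stderr); }
    };

    int main() {
      TagSetLogger st(true);  // imagine: -Xlog:interpreter+oopmap=trace
      st.print("Locals (%d): ", 3);
      for (int i = 0; i < 3; i++) st.print("%d", i % 2);
      st.cr();
      return 0;
    }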
@@ -371,31 +379,19 @@
   // make sure last word is stored
   bit_mask()[word_index] = value;
 
   // verify bit mask
   assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
-
-
 }
 
 void OopMapCacheEntry::flush() {
   deallocate_bit_mask();
   initialize();
 }
 
 
 // Implementation of OopMapCache
-
-#ifndef PRODUCT
-
-static long _total_memory_usage = 0;
-
-long OopMapCache::memory_usage() {
-  return _total_memory_usage;
-}
-
-#endif
 
 void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
   assert(_resource_allocate_bit_mask,
     "Should not resource allocate the _bit_mask");
 
@@ -433,26 +429,21 @@
          ^ ((unsigned int) method->max_locals()         << 2)
          ^ ((unsigned int) method->code_size()          << 4)
          ^ ((unsigned int) method->size_of_parameters() << 6);
 }
 
+OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;
 
-OopMapCache::OopMapCache() :
-  _mut(Mutex::leaf, "An OopMapCache lock", true)
-{
-  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
-  // Cannot call flush for initialization, since flush
-  // will check if memory should be deallocated
-  for(int i = 0; i < _size; i++) _array[i].initialize();
-  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
+OopMapCache::OopMapCache() {
+  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
+  for(int i = 0; i < _size; i++) _array[i] = NULL;
 }
 
 
 OopMapCache::~OopMapCache() {
   assert(_array != NULL, "sanity check");
   // Deallocate oop maps that are allocated out-of-line
   flush();
   // Deallocate array
-  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
-  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
+  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
 }
 
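The constructor rewrite above is the structural heart of the change: _array used to be
_size OopMapCacheEntry objects held by value behind the _mut lock, and it is now _size
pointer slots that start out NULL, with every live entry an individually allocated
C-heap object that can be swapped in and out of a slot and freed on its own. A
standalone sketch contrasting the two layouts; Entry and the sizes are illustrative
only:

    #include <cstdlib>

    struct Entry { int bci; };

    int main() {
      const int size = 32;
      // old layout: entries by value, constructed and flushed in place
      Entry* by_value = static_cast<Entry*>(calloc(size, sizeof(Entry)));
      // new layout: pointer slots, NULL until an entry is published;
      // each live entry is its own heap object with its own lifetime
      Entry** slots = static_cast<Entry**>(calloc(size, sizeof(Entry*)));
      slots[7] = new Entry{42};   // publish (the real code uses a CAS)
      delete slots[7];            // retire just this one entry
      free(by_value);
      free(slots);
      return 0;
    }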
@@ -459,7 +450,18 @@
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return &_array[i % _size];
+  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
+}
+
+bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
+  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
 }
 
 void OopMapCache::flush() {
-  for (int i = 0; i < _size; i++) _array[i].flush();
+  for (int i = 0; i < _size; i++) {
+    OopMapCacheEntry* entry = _array[i];
+    if (entry != NULL) {
+      _array[i] = NULL;  // no barrier, only called in OopMapCache destructor
+      entry->flush();
+      FREE_C_HEAP_OBJ(entry);
+    }
+  }
 }
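The entry_at()/put_at() pair above carries the synchronization that the deleted
MutexLocker used to provide: readers acquire-load the slot so the fields of a published
entry are visible, and writers claim a slot with a single compare-and-swap that fails
if another thread got there first. A standalone analogue using std::atomic in place of
HotSpot's OrderAccess/Atomic wrappers; Entry, SIZE, and the main() driver are
illustrative only:

    #include <atomic>
    #include <cstdio>

    struct Entry { int bci; };

    static const int SIZE = 32;
    static std::atomic<Entry*> table[SIZE];

    // Readers: acquire load, like load_ptr_acquire, so a published
    // entry's contents are visible to the reading thread.
    Entry* entry_at(int i) {
      return table[i % SIZE].load(std::memory_order_acquire);
    }

    // Writers: one CAS, like Atomic::cmpxchg_ptr, that succeeds only if
    // the slot still holds 'old', so two racing fillers cannot both win.
    bool put_at(int i, Entry* e, Entry* old) {
      return table[i % SIZE].compare_exchange_strong(
          old, e, std::memory_order_release, std::memory_order_relaxed);
    }

    int main() {
      Entry e = {42};
      if (put_at(7, &e, NULL)) {
        printf("claimed slot, bci=%d\n", entry_at(7)->bci);
      }
      return 0;
    }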
@@ -466,16 +468,23 @@
 
 void OopMapCache::flush_obsolete_entries() {
-  for (int i = 0; i < _size; i++)
-    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
+  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
+  for (int i = 0; i < _size; i++) {
+    OopMapCacheEntry* entry = _array[i];
+    if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
       // Cache entry is occupied by an old redefined method and we don't want
       // to pin it down so flush the entry.
       if (log_is_enabled(Debug, redefine, class, oopmap)) {
         ResourceMark rm;
-        log_debug(redefine, class, oopmap)
+        log_debug(redefine, class, interpreter, oopmap)
           ("flush: %s(%s): cached entry @%d",
-           _array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
+           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
       }
-      _array[i].flush();
-    }
-}
+      _array[i] = NULL;
+      entry->flush();
+      FREE_C_HEAP_OBJ(entry);
+    }
+  }
+}
 
+// Called by GC for thread root scan during a safepoint only.  The other interpreted frame oopmaps
+// are generated locally and not cached.
@@ -482,84 +491,107 @@
 void OopMapCache::lookup(const methodHandle& method,
                          int bci,
-                         InterpreterOopMap* entry_for) const {
-  MutexLocker x(&_mut);
-
+                         InterpreterOopMap* entry_for) {
+  assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
+  int probe = hash_value_for(method, bci);
+  int i;
   OopMapCacheEntry* entry = NULL;
-  int probe = hash_value_for(method, bci);
+
+  if (log_is_enabled(Debug, interpreter, oopmap)) {
+    static int count = 0;
+    ResourceMark rm;
+    log_debug(interpreter, oopmap)
+          ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
+           method()->name_and_sig_as_C_string(), probe);
+  }
 
   // Search hashtable for match
-  int i;
   for(i = 0; i < _probe_depth; i++) {
     entry = entry_at(probe + i);
-    if (entry->match(method, bci)) {
+    if (entry != NULL && !entry->is_empty() && entry->match(method, bci)) {
       entry_for->resource_copy(entry);
       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+      log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
       return;
     }
   }
 
-  if (TraceOopMapGeneration) {
-    static int count = 0;
-    ResourceMark rm;
-    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
-    method->print_value(); tty->cr();
-  }
-
   // Entry is not in hashtable.
-  // Compute entry and return it
+  // Compute entry
+
+  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
+  tmp->initialize();
+  tmp->fill(method, bci);
+  entry_for->resource_copy(tmp);
 
   if (method->should_not_be_cached()) {
     // It is either not safe or not a good idea to cache this Method*
     // at this time. We give the caller of lookup() a copy of the
     // interesting info via parameter entry_for, but we don't add it to
     // the cache. See the gory details in Method*.cpp.
-    compute_one_oop_map(method, bci, entry_for);
+    FREE_C_HEAP_OBJ(tmp);
     return;
   }
 
   // First search for an empty slot
   for(i = 0; i < _probe_depth; i++) {
-    entry  = entry_at(probe + i);
-    if (entry->is_empty()) {
-      entry->fill(method, bci);
-      entry_for->resource_copy(entry);
-      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-      return;
+    entry = entry_at(probe + i);
+    if (entry == NULL) {
+      if (put_at(probe + i, tmp, NULL)) {
+        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+        return;
+      }
     }
   }
 
-  if (TraceOopMapGeneration) {
-    ResourceMark rm;
-    tty->print_cr("*** collision in oopmap cache - flushing item ***");
-  }
-
-  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
-  //entry_at(probe + _probe_depth - 1)->flush();
-  //for(i = _probe_depth - 1; i > 0; i--) {
-  //  // Coping entry[i] = entry[i-1];
-  //  OopMapCacheEntry *to   = entry_at(probe + i);
-  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
-  //  to->copy(from);
-  // }
-
-  assert(method->is_method(), "gaga");
-
-  entry = entry_at(probe + 0);
-  entry->fill(method, bci);
-
-  // Copy the  newly cached entry to input parameter
-  entry_for->resource_copy(entry);
-
-  if (TraceOopMapGeneration) {
-    ResourceMark rm;
-    tty->print("Done with ");
-    method->print_value(); tty->cr();
-  }
-  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-
-  return;
-}
+  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");
+
+  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
+  // where the first entry in the collision array is replaced with the new one.
+  OopMapCacheEntry* old = entry_at(probe + 0);
+  if (put_at(probe + 0, tmp, old)) {
+    enqueue_for_cleanup(old);
+  } else {
+    enqueue_for_cleanup(tmp);
+  }
+
+  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+  return;
+}
+
+void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
+  bool success = false;
+  OopMapCacheEntry* head;
+  do {
+    head = _old_entries;
+    entry->_next = head;
+    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
+  } while (!success);
+
+  if (log_is_enabled(Debug, interpreter, oopmap)) {
+    ResourceMark rm;
+    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
+                          entry->method()->name_and_sig_as_C_string(), entry->bci());
+  }
+}
+
+// This is called after GC threads are done and nothing is accessing the old_entries
+// list, so no synchronization needed.
+void OopMapCache::cleanup_old_entries() {
+  OopMapCacheEntry* entry = _old_entries;
+  _old_entries = NULL;
+  while (entry != NULL) {
+    if (log_is_enabled(Debug, interpreter, oopmap)) {
+      ResourceMark rm;
+      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
+                          entry->method()->name_and_sig_as_C_string(), entry->bci());
+    }
+    OopMapCacheEntry* next = entry->_next;
+    entry->flush();
+    FREE_C_HEAP_OBJ(entry);
+    entry = next;
+  }
+}
 
 void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
   // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
   OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
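enqueue_for_cleanup() above is a classic lock-free stack push over the intrusive _next
link added to OopMapCacheEntry: point the new node at the current head, then try to
swing _old_entries with a CAS, retrying if another thread won the race.
cleanup_old_entries() can then drain the list with no synchronization because it runs
after the GC threads are done. A standalone analogue using std::atomic; Node and the
main() driver are illustrative only:

    #include <atomic>
    #include <cstdio>

    struct Node {
      Node* _next;   // intrusive link, like OopMapCacheEntry::_next
    };

    static std::atomic<Node*> old_entries{nullptr};

    // Safe against concurrent pushers: if another thread moved the head
    // between our read and our CAS, compare_exchange_weak reloads 'head'
    // and we relink and retry.
    void enqueue_for_cleanup(Node* entry) {
      Node* head = old_entries.load(std::memory_order_relaxed);
      do {
        entry->_next = head;
      } while (!old_entries.compare_exchange_weak(head, entry,
                                                  std::memory_order_release,
                                                  std::memory_order_relaxed));
    }

    // Single-threaded drain, as in cleanup_old_entries(): detach the whole
    // list first, then walk it without further synchronization.
    void cleanup_old_entries() {
      Node* entry = old_entries.exchange(nullptr, std::memory_order_acquire);
      while (entry != NULL) {
        Node* next = entry->_next;
        printf("cleaning %p\n", (void*)entry);
        entry = next;
      }
    }

    int main() {
      Node a = {NULL}, b = {NULL};
      enqueue_for_cleanup(&a);
      enqueue_for_cleanup(&b);
      cleanup_old_entries();
      return 0;
    }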