hotspot/src/share/vm/services/mallocSiteTable.cpp
changeset 25946 1572c9f03fb9
       
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "services/mallocSiteTable.hpp"

/*
 * Early os::malloc() calls come from initialization of static variables, long before any
 * VM code is entered. When the first os::malloc() call arrives, the malloc site hashtable
 * has to be initialized, along with the allocation site for the hashtable entries.
 * To ensure that the malloc site hashtable can be initialized without triggering any
 * additional os::malloc() call, the hashtable bucket array and the hashtable entry
 * allocation site have to be static.
 * This is not a problem for the hashtable buckets, since they form an array of pointers:
 * the C runtime just reserves a block of memory for the array and zeroes it.
 * But for the hashtable entry allocation site object, things get tricky. The C runtime not
 * only allocates memory for it, but also runs its constructor at some later time. If we
 * initialized the allocation site at the first os::malloc() call, the object would be
 * reinitialized when the C runtime runs its constructor.
 * To work around this issue, we declare a static size_t array sized to hold a
 * MallocSiteHashtableEntry; that memory is used to instantiate the MallocSiteHashtableEntry
 * for the hashtable entry allocation site.
 * Since it is a primitive-type array, the C runtime does nothing beyond reserving the memory
 * block for the variable, which is exactly what we want.
 * The same trick is applied to create the NativeCallStack object used for
 * MallocSiteHashtableEntry memory allocations.
 *
 * Note: C++ objects are usually aligned to a particular boundary, depending on the compiler
 * implementation; declaring the memory as size_t arrays ensures it is aligned to the native
 * machine word.
 */
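
// The pattern in outline (illustrative sketch only; 'T' stands in for the
// concrete types declared below):
//   static size_t _storage[CALC_OBJ_SIZE_IN_TYPE(T, size_t)]; // raw words, zeroed by the C runtime
//   T* obj = ::new ((void*)_storage) T(args);                 // construct in place, no os::malloc()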
       
// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects
size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];

// Malloc site hashtable buckets
MallocSiteHashtableEntry*  MallocSiteTable::_table[MallocSiteTable::table_size];

// concurrent access counter
volatile int MallocSiteTable::_access_count = 0;

// Tracking hashtable contention
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)

/*
 * Initialize the malloc site table.
 * The hashtable entries are themselves malloc'd, so allocating them could
 * recurse infinitely through os::malloc(). To avoid that, we pre-initialize
 * a hash entry for this allocation site.
 * This method is called during C runtime static variable initialization,
 * which is single-threaded from the JVM's perspective.
 */
bool MallocSiteTable::initialize() {
  assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check");
  assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry),
    "Sanity Check");
  assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow");

  // Fake the call stack for hashtable entry allocation
  assert(NMT_TrackingStackDepth > 1, "At least one tracking stack");

  // Create pseudo call stack for hashtable entry allocation
  address pc[3];
  if (NMT_TrackingStackDepth >= 3) {
    pc[2] = (address)MallocSiteTable::allocation_at;
  }
  if (NMT_TrackingStackDepth >= 2) {
    pc[1] = (address)MallocSiteTable::lookup_or_add;
  }
  pc[0] = (address)MallocSiteTable::new_entry;

  // Instantiate the NativeCallStack object; we have to use the placement new operator (see comments above).
  NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack)
    NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
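  // Note: the frame count passed to the constructor above is clamped to the
  // smaller of the pc[] capacity (3) and NMT_TrackingStackDepth, so any pc[]
  // slots left uninitialized above are never read.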
       
  // Instantiate hash entry for hashtable entry allocation callsite
  MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site)
    MallocSiteHashtableEntry(*stack);

  // Add the allocation site to hashtable.
  int index = hash_to_index(stack->hash());
  _table[index] = entry;

  return true;
}

// Walks entries in the hashtable.
// The walk stops early if the walker returns false.
bool MallocSiteTable::walk(MallocSiteWalker* walker) {
  MallocSiteHashtableEntry* head;
  for (int index = 0; index < table_size; index ++) {
    head = _table[index];
    while (head != NULL) {
      if (!walker->do_malloc_site(head->peek())) {
        return false;
      }
      head = (MallocSiteHashtableEntry*)head->next();
    }
  }
  return true;
}

/*
 *  The hashtable has no deletion policy for individual entries,
 *  and each linked-list node is inserted via compare-and-swap,
 *  so each linked list is stable; contention can only occur
 *  at the tail of a linked list.
 *  This method should not return NULL under normal circumstances.
 *  If NULL is returned, it indicates one of:
 *    1. Out of memory: a new hash entry could not be allocated.
 *    2. Bucket overflow: the bucket chain has reached MAX_BUCKET_LENGTH.
 *  In either case, the caller should handle the situation.
 */
MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
  size_t* pos_idx) {
  int index = hash_to_index(key.hash());
  assert(index >= 0, "Negative index");
  *bucket_idx = (size_t)index;
  *pos_idx = 0;

  // First entry for this hash bucket
  if (_table[index] == NULL) {
    MallocSiteHashtableEntry* entry = new_entry(key);
    // OOM check
    if (entry == NULL) return NULL;

    // swap in the head
    if (Atomic::cmpxchg_ptr((void*)entry, (volatile void *)&_table[index], NULL) == NULL) {
      return entry->data();
    }

    delete entry;
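    // Another thread won the race to install the bucket head; discard our
    // entry and fall through to search the chain the winner installed.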
       
  }

  MallocSiteHashtableEntry* head = _table[index];
  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
    MallocSite* site = head->data();
    if (site->equals(key)) {
      // found matched entry
      return head->data();
    }

    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
      MallocSiteHashtableEntry* entry = new_entry(key);
      // OOM check
      if (entry == NULL) return NULL;
      if (head->atomic_insert(entry)) {
        (*pos_idx) ++;
        return entry->data();
      }
      // contended, other thread won
      delete entry;
    }
    head = (MallocSiteHashtableEntry*)head->next();
    (*pos_idx) ++;
  }
  return NULL;
}

// Access malloc site
MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
  assert(bucket_idx < table_size, "Invalid bucket index");
  MallocSiteHashtableEntry* head = _table[bucket_idx];
  for (size_t index = 0; index < pos_idx && head != NULL;
    index ++, head = (MallocSiteHashtableEntry*)head->next());
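  // (The loop body above is intentionally empty: the loop only advances
  // 'head' pos_idx links down the bucket chain.)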
       
  assert(head != NULL, "Invalid position index");
  return head->data();
}

// Allocates a MallocSiteHashtableEntry object. The special, pre-installed
// allocation site has to be used as the call stack to avoid infinite
// recursion.
MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key) {
  void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT,
    *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL);
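  // If the allocation failed, p is NULL; placement operator new is declared
  // non-throwing, so the new-expression below then yields NULL without
  // running the constructor, which is what callers check for on OOM.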
       
  return ::new (p) MallocSiteHashtableEntry(key);
}

void MallocSiteTable::reset() {
  for (int index = 0; index < table_size; index ++) {
    MallocSiteHashtableEntry* head = _table[index];
    _table[index] = NULL;
    delete_linked_list(head);
  }
}

void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
  MallocSiteHashtableEntry* p;
  while (head != NULL) {
    p = head;
    head = (MallocSiteHashtableEntry*)head->next();
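    // The entry for the hashtable-entry allocation site lives in static
    // memory (_hash_entry_allocation_site), not on the C heap, so it must
    // never be deleted.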
       
    if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) {
      delete p;
    }
  }
}

void MallocSiteTable::shutdown() {
  AccessLock locker(&_access_count);
  locker.exclusiveLock();
  reset();
}

bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
  assert(walker != NULL, "NULL walker");
  AccessLock locker(&_access_count);
  if (locker.sharedLock()) {
    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
    return walk(walker);
  }
  return false;
}

void MallocSiteTable::AccessLock::exclusiveLock() {
  jint target;
  jint val;

  assert(_lock_state != ExclusiveLock, "Can only call once");
  assert(*_lock >= 0, "Can not contend exclusive lock");

  // make counter negative to block out shared locks
  do {
    val = *_lock;
    target = _MAGIC_ + *_lock;
  } while (Atomic::cmpxchg(target, _lock, val) != val);
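  // The counter is now _MAGIC_ + <reader count>; each reader releases its
  // share on exit, so the counter drains back to exactly _MAGIC_ once all
  // readers holding the shared lock are gone.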
       
  // wait for all readers to exit
  while (*_lock != _MAGIC_) {
#ifdef _WINDOWS
    os::naked_short_sleep(1);
#else
    os::naked_yield();
#endif
  }
  _lock_state = ExclusiveLock;
}