hotspot/src/share/vm/services/memSnapshot.hpp
/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"

// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // multiple commit/uncommit operations on the same memory region are
    // expected, so they do not count as duplicates
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }

  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);

      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos++;
      return true;
    }
    return false;
  }
#endif

  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
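
// Illustrative usage sketch (not part of the original header): locate()
// advances the iterator to the first record at or above 'addr', so a caller
// can insert a new record in sorted order. 'malloc_array' and 'rec' are
// hypothetical names supplied by the caller.
//
//   MemPointerIterator itr(malloc_array);
//   MemPointer* hit = itr.locate(rec->addr());
//   if (hit == NULL || hit->addr() > rec->addr()) {
//     itr.insert(rec);   // keeps the array sorted by address
//   }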
       
class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
      MemPointerIterator(arr) {
  }

  // Locate an existing reserved memory region that contains the specified
  // address, or the reserved region just above this address, where the
  // incoming reserved region should be inserted.
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }

  // The following methods update virtual memory in the context of the
  // 'current' position, which callers must establish beforehand via the
  // locate() method.
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // split a reserved region to create a new memory region with specified base and size
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
 private:
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);

  bool insert_reserved_region(MemPointerRecord* rec);

  // reset current position
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  // check integrity of records on the current reserved memory region
  bool check_reserved_region() {
    VMMemRegion* reserved_region = (VMMemRegion*)current();
    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
          "Sanity check");
    // all committed regions that follow the current reserved region
    // should belong to it
    VMMemRegion* next_region = (VMMemRegion*)next();
    for (; next_region != NULL && next_region->is_committed_region();
         next_region = (VMMemRegion*)next()) {
      if (!reserved_region->contains_region(next_region)) {
        return false;
      }
    }
    return true;
  }

  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // multiple commit/uncommit operations on the same memory region are
    // expected, so they do not count as duplicates
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
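
// Illustrative usage sketch (not part of the original header): callers first
// position the iterator with locate(), then apply the update in the context
// of the 'current' record. 'vm_array' and 'reserve_rec' are hypothetical.
//
//   VMMemPointerIterator itr(vm_array);
//   itr.locate(reserve_rec->addr());    // 'current' is now the containing
//                                       // region, or the one just above
//   if (!itr.add_reserved_region(reserve_rec)) {
//     // out of memory while recording the reservation
//   }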
       
class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }
  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // an arena memory record is a special case: its sequence number has to
    // be compared against that of its associated arena record.
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // if there is an associated arena record, it has to be the previous
      // record because of the sorting order (by address) - NMT generates a
      // pseudo address for an arena's size record by offsetting the arena's
      // address, which guarantees the ordering of an arena record and its
      // size record.
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
        next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          //   - if the arena record is an allocation record, this early
          //     size record must be left over from a previous arena,
          //     and the last size record should have size = 0.
          //   - if the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, an arena always resets
          //     its size before it goes away (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
            "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
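
// Illustrative usage sketch (not part of the original header): a plain
// forward walk; next() transparently skips stale arena size records.
// 'staging_malloc_data' is a hypothetical MemPointerArray.
//
//   MallocRecordIterator walker(staging_malloc_data);
//   for (MemPointerRecord* rec = (MemPointerRecord*)walker.current();
//        rec != NULL;
//        rec = (MemPointerRecord*)walker.next()) {
//     // process one live malloc record per address
//   }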
       
// Collapse duplicated records. Eliminating duplicated records here is much
// cheaper than doing so during the promotion phase. However, it has a
// limitation: it can only eliminate duplicated records within a generation,
// so there is still a chance of seeing duplicated records during promotion.
// We want to keep the record with the higher sequence number, because it has
// the more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get the next record, skipping duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
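
// Illustrative sketch (not part of the original header): given two staged
// records for the same region, written here in hypothetical pseudo-notation,
//
//   { addr = 0x1000, size = 4K, flags = reserve, seq = 10 }
//   { addr = 0x1000, size = 4K, flags = reserve, seq = 42 }
//
// next() returns only the record with seq = 42, since the later record
// carries the more accurate callsite pc.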
       
class StagingArea VALUE_OBJ_CLASS_SPEC {
 private:
  MemPointerArray*   _malloc_data;
  MemPointerArray*   _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data()     { return _vm_data; }
};
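
// Illustrative usage sketch (not part of the original header): the staging
// area hands out one walker per record kind, which the promotion step can
// then drain into the live pointer arrays. 'staging' is a hypothetical
// instance.
//
//   MallocRecordIterator malloc_itr = staging.malloc_record_walker();
//   VMRecordIterator     vm_itr     = staging.virtual_memory_record_walker();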
       
class MemBaseline;
class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // the following two arrays contain records of all known live memory blocks
  // live malloc-ed memory pointers
  MemPointerArray*      _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray*      _vm_ptrs;

  StagingArea           _staging_area;

  // the lock to protect this snapshot
  Monitor*              _lock;

  // the number of instance classes
  int                   _number_of_classes;

  NOT_PRODUCT(size_t    _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // check whether we have run out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
      _staging_area.malloc_data() == NULL ||
      _staging_area.vm_data() == NULL ||
      _vm_ptrs == NULL || _lock == NULL ||
      _alloc_ptrs->out_of_memory() ||
      _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into the staging area
  bool merge(MemRecorder* rec);
  // promote staged data to snapshot
  bool promote(int number_of_classes);

  int  number_of_classes() const { return _number_of_classes; }

  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy a sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to a non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
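
// Illustrative lifecycle sketch (not part of the original header): per-thread
// recorders are merged into the staging area, then staged data is promoted
// into the snapshot's live pointer arrays. 'recorder' and 'num_classes' are
// hypothetical.
//
//   MemSnapshot* snapshot = new MemSnapshot();
//   if (!snapshot->out_of_memory()) {
//     snapshot->merge(recorder);        // stage one thread's records
//     snapshot->promote(num_classes);   // fold staged data into the snapshot
//   }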
       
#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP