hotspot/src/share/vm/services/memSnapshot.cpp
changeset 25957 100a882dcffa
parent 25944 c8aa7a0bf7d0
parent 25956 99be217ac88d
child 25960 729cd80956ae
/*
 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef ASSERT

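// Debug-only helper: print a malloc or virtual memory pointer record with its
// address range, size, memory type and operation; with call-site tracking on,
// also print the symbolized caller PC.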
void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_memory_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

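// Debug-only helper: print a reserved or committed virtual memory region and,
// with call-site tracking on, its symbolized caller PC.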
void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

#endif

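// Wrap an incoming allocation/commit record in a VMMemRegion(Ex) and insert
// it at the iterator's current position (insert_record) or right after it
// (insert_record_after).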
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be categorized
// as different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* reserved_region = (VMMemRegion*)current();

  // we don't have anything yet
  if (reserved_region == NULL) {
    return insert_record(rec);
  }

  assert(reserved_region->is_reserved_region(), "Sanity check");
  // duplicated records
  if (reserved_region->is_same_region(rec)) {
    return true;
  }
  // Overlapping stack regions indicate that a JNI thread failed to
  // detach from the VM before exiting. This leaks the JavaThread object.
  if (CheckJNICalls) {
    guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
              !reserved_region->overlaps_region(rec),
              "Attached JNI thread exited without being detached");
  }
  // otherwise, we should not have overlapping reserved regions
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    reserved_region->base() > rec->addr(), "Just check: locate()");
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    !reserved_region->overlaps_region(rec), "overlapping reserved regions");

  return insert_record(rec);
}

// We do consolidate committed regions.
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // thread's native stack is always marked as "committed", ignore
  // the "commit" operation for creating stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // walk the committed regions (if any) within this reserved region
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      // overlaps front part
      if (rec->addr() < committed_rgn->addr()) {
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps tail part
        address committed_rgn_end = committed_rgn->addr() +
              committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
             "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the regions are adjacent to each other, merge them
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can also consolidate the next committed region
      if (next_reg != NULL && next_reg->is_committed_region() &&
        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
          committed_rgn->expand_region(next_reg->base(), next_reg->size());
          // delete merged region
          remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

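// Remove an uncommitted range from the committed regions under the reserved
// region at the current position. The range may match a committed region
// exactly, trim it at either end, or split it in two.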
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // thread's native stack is always marked as "committed", ignore
  // the "uncommit" operation for removing stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // region already uncommitted, must be due to duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of current memory region.
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
             ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find a committed record, due to duplicated records
  return true;
}

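// Handle a release record for the reserved region at the current position.
// Releasing the whole region also removes its committed regions; releasing
// one end trims the region; releasing the middle splits it and inserts a
// new reserved region for the remainder.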
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  if (rec->is_same_region(cur)) {
    // In the snapshot, the virtual memory records are sorted in the following order:
    // 1. virtual memory's base address
    // 2. virtual memory reservation record, followed by the commit records within
    //    this reservation. The commit records are also in base address order.
    // When a reserved region is released, we want to remove the reservation record
    // and all commit records following it.
#ifdef ASSERT
    address low_addr = cur->addr();
    address high_addr = low_addr + cur->size();
#endif
    // remove virtual memory reservation record
    remove();
    // remove committed regions within above reservation
    VMMemRegion* next_region = (VMMemRegion*)current();
    while (next_region != NULL && next_region->is_committed_region()) {
      assert(next_region->addr() >= low_addr &&
             next_region->addr() + next_region->size() <= high_addr,
            "Range check");
      remove();
      next_region = (VMMemRegion*)current();
    }
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
    assert(check_reserved_region(), "Integrity check");
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    }
  }
  return true;
}

bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with the previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

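// Carve [new_rgn_addr, new_rgn_addr + new_rgn_size) out of an existing
// reserved region as a reserved region of its own. Depending on where the
// range falls, the original region is split into two or three regions.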
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the original region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes the 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // the remainder becomes the next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

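// qsort-style comparator: order records by their sequence numbers, so that
// memory operations are replayed in the order they were recorded.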
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

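// Allocate the staging arrays for malloc and virtual memory records; on
// failure, release whatever was allocated and leave both pointers NULL.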
bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}

VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into sequence number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}

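// A snapshot keeps malloc records and virtual memory regions in two separate
// arrays. The 'Ex' record variants, which also carry the caller's PC, are
// used when call-site tracking is enabled.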
MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
  _number_of_classes = 0;
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

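// Copy a record between two sequenced records, preserving the sequence number.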
void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");
  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");

  if (MemTracker::track_callsite()) {
    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
  } else {
    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
  }
}

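// Copy a record into an unsequenced destination record, casting away the
// source's sequence number.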
void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(src != NULL && dest != NULL, "Just check");
  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  }
}

// Merge a recorder into the staging area.
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord* incoming_rec = (MemPointerRecord*)itr.current();
  MemPointerRecord* matched_rec;

  while (incoming_rec != NULL) {
    if (incoming_rec->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(incoming_rec)) {
        return false;
      }
    } else {
      // locate the matching record and/or position the iterator at the proper
      // location for this incoming record.
      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
      // we have not seen this memory block in this generation,
      // so just add to staging area
      if (matched_rec == NULL) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else if (incoming_rec->addr() == matched_rec->addr()) {
        // whoever has the higher sequence number wins
        if (incoming_rec->seq() > matched_rec->seq()) {
          copy_seq_pointer(matched_rec, incoming_rec);
        }
      } else if (incoming_rec->addr() < matched_rec->addr()) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
        ShouldNotReachHere();
      }
    }
    incoming_rec = (MemPointerRecord*)itr.next();
  }
  DEBUG_ONLY(check_staging_data();)
  return true;
}

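// Typical flow (a sketch; the driving code lives in MemTracker, not here):
//   snapshot->merge(recorder);             // per-thread recorder -> staging area
//   ...
//   snapshot->promote(number_of_classes);  // staging area -> snapshot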
// Promote data in the staging area to the next generation (the snapshot).
bool MemSnapshot::promote(int number_of_classes) {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  _number_of_classes = number_of_classes;
  return promoted;
}

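// Fold staged malloc records into the snapshot: update matched live records,
// remove records for freed blocks (and any trailing arena size record), and
// insert records for newly seen blocks.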
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record()) {
        assign_pointer(matched_rec, new_rec);
      } else if (new_rec->is_arena_memory_record()) {
        if (new_rec->size() == 0) {
          // remove size record once size drops to 0
          malloc_snapshot_itr.remove();
        } else {
          assign_pointer(matched_rec, new_rec);
        }
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record, we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_memory_record() &&
              next->is_memory_record_of_arena(matched_rec)) {
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // don't insert size 0 record
      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
        new_rec = NULL;
      }

      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

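// Replay staged virtual memory records against the snapshot: reserve, commit,
// uncommit, release and type-tagging operations are applied to the reserved
// region that contains each record's address range.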
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or
    // the nearest reserved region whose base address is just above the
    // specified address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // we only deal with splitting a bigger reserved region into
          // smaller regions. So far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type; a range
        // cannot be re-tagged with a different type
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records.
       * The likely scenario is that some virtual memory operations do not go
       * through the os::xxxx_memory() API and have to be tracked manually
       * (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

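// Debug-only: dump every virtual memory region in the snapshot; committed
// regions are indented under their enclosing reserved region.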
void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->cr();
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT