author     iignatyev
date       Mon, 07 Sep 2015 20:03:56 +0200
changeset  32626   5d1d9327219c
parent     32625   054d452e4e06 (current diff)
parent     32623   390a27af5657 (diff)
child      32627   a48ad2501e47
child      32734   7029a3eb3008
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -2989,7 +2989,7 @@
   assert(task_size > CardTableModRefBS::card_size_in_words &&
          (task_size % CardTableModRefBS::card_size_in_words == 0),
          "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _gen->reserved();
+  MemRegion span = _old_gen->reserved();
   if (low != NULL) {
     if (span.contains(low)) {
       // Align low down to a card boundary so that
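The hunk above relies on task_size being a positive multiple of CardTableModRefBS::card_size_in_words so that the span can later be split into card-aligned chunks. A minimal standalone C++ sketch of that invariant and of aligning an offset down to a card boundary; the card size here is an assumed stand-in, not HotSpot's constant:

    #include <cassert>
    #include <cstddef>

    static const size_t card_size_in_words = 64;  // assumed, illustrative only

    // Align an offset (in words) down to the start of its card, as the
    // code following the assert does for 'low'.
    static size_t align_down_to_card(size_t offset_in_words) {
      return offset_in_words - (offset_in_words % card_size_in_words);
    }

    int main() {
      size_t task_size = 4 * card_size_in_words;      // a card multiple
      assert(task_size > card_size_in_words &&
             (task_size % card_size_in_words == 0));  // same check as the hunk
      assert(align_down_to_card(130) == 128);         // 130 falls in card [128, 192)
      return 0;
    }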
--- a/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/compactibleFreeListSpace.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -99,7 +99,7 @@
   BlockOffsetArrayNonContigSpace _bt;
 
   CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _gen;
+  ConcurrentMarkSweepGeneration* _old_gen;
 
   // Data structures for free blocks (used during allocation/sweeping)
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -212,7 +212,7 @@
                       use_adaptive_freelists,
                       dictionaryChoice);
   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  _cmsSpace->_gen = this;
+  _cmsSpace->_old_gen = this;
 
   _gc_stats = new CMSGCStats();
 
@@ -359,13 +359,13 @@
     (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
-    // for the next minor collection.  Use the padded average as
+    // for the next young collection.  Use the padded average as
     // a safety factor.
     cms_free -= expected_promotion;
 
     // Adjust by the safety factor.
     double cms_free_dbl = (double)cms_free;
-    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
     // Apply a further correction factor which tries to adjust
     // for recent occurance of concurrent mode failures.
     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
@@ -531,7 +531,7 @@
   if (CMSConcurrentMTEnabled) {
     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
     }
     if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
@@ -592,7 +592,7 @@
   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 
   // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
+  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
 
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
@@ -613,7 +613,7 @@
   _end_addr = gch->end_addr();
   assert(_young_gen != NULL, "no _young_gen");
   _eden_chunk_index = 0;
-  _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
+  _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
   _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 }
 
@@ -795,29 +795,22 @@
       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
-      gclog_or_tty->print_cr("  Desired free fraction %f",
-              desired_free_percentage);
-      gclog_or_tty->print_cr("  Maximum free fraction %f",
-              maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity()/1000);
-      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT,
-              desired_capacity/1000);
+      gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
+      gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
+      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
+      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
       size_t young_size = gch->young_gen()->capacity();
       gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
-      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT,
-              unsafe_max_alloc_nogc()/1000);
-      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT,
-              contiguous_available()/1000);
-      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)",
-              expand_bytes);
+      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
+      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
+      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
     }
     // safe if expansion fails
     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
     if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("  Expanded free fraction %f",
-        ((double) free()) / capacity());
+      gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
     }
   } else {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
@@ -834,16 +827,14 @@
   return cmsSpace()->freelistLock();
 }
 
-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
-                                                  bool tlab) {
+HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
   CMSSynchronousYieldRequest yr;
-  MutexLockerEx x(freelistLock(),
-                  Mutex::_no_safepoint_check_flag);
+  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
   return have_lock_and_allocate(size, tlab);
 }
 
 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
-                                                  bool tlab /* ignored */) {
+                                                                bool tlab /* ignored */) {
   assert_lock_strong(freelistLock());
   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
   HeapWord* res = cmsSpace()->allocate(adjustedSize);
@@ -2426,7 +2417,7 @@
 
   gch->gen_process_roots(&srs,
                          GenCollectedHeap::OldGen,
-                         true,   // younger gens are roots
+                         true,   // young gen as roots
                          GenCollectedHeap::ScanningOption(roots_scanning_options()),
                          should_unload_classes(),
                          &notOlder,
@@ -2498,7 +2489,7 @@
 
   gch->gen_process_roots(&srs,
                          GenCollectedHeap::OldGen,
-                         true,   // younger gens are roots
+                         true,   // young gen as roots
                          GenCollectedHeap::ScanningOption(roots_scanning_options()),
                          should_unload_classes(),
                          &notOlder,
@@ -2952,12 +2943,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
-  // If there has not been a GC[n-1] since last GC[n] cycle completed,
-  // precede our marking with a collection of all
-  // younger generations to keep floating garbage to a minimum.
-  // XXX: we won't do this for now -- it's an optimization to be done later.
-
-  // already have locks
+  // Already have locks.
   assert_lock_strong(bitMapLock());
 
   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
@@ -3027,7 +3013,7 @@
 
   gch->gen_process_roots(&srs,
                          GenCollectedHeap::OldGen,
-                         true,   // younger gens are roots
+                         true,   // young gen as roots
                          GenCollectedHeap::ScanningOption(roots_scanning_options()),
                          should_unload_classes(),
                          &notOlder,
@@ -3037,7 +3023,7 @@
   }
 
   // Clear mod-union table; it will be dirtied in the prologue of
-  // CMS generation per each younger generation collection.
+  // CMS generation per each young generation collection.
 
   assert(_modUnionTable.isAllClear(),
          "Was cleared in most recent final checkpoint phase"
@@ -3057,7 +3043,7 @@
   // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a younger generation
+  // a safepoint is indeed in progress as a young generation
   // stop-the-world GC happens even as we mark in this generation.
   assert(_collectorState == Marking, "inconsistent state?");
   check_correct_thread_executing();
@@ -3065,7 +3051,7 @@
 
   // Weak ref discovery note: We may be discovering weak
   // refs in this generation concurrent (but interleaved) with
-  // weak ref discovery by a younger generation collector.
+  // weak ref discovery by the young generation collector.
 
   CMSTokenSyncWithLocks ts(true, bitMapLock());
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
@@ -3095,7 +3081,7 @@
   // Note that when we do a marking step we need to hold the
   // bit map lock -- recall that direct allocation (by mutators)
-  // and promotion (by younger generation collectors) is also
+  // and promotion (by the young generation collector) is also
   // marking the bit map. [the so-called allocate live policy.]
   // Because the implementation of bit map marking is not
   // robust wrt simultaneous marking of bits in the same word,
@@ -4049,7 +4035,7 @@
 // one of these methods, please check the other method too.
 
 size_t CMSCollector::preclean_mod_union_table(
-  ConcurrentMarkSweepGeneration* gen,
+  ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4064,10 +4050,10 @@
   // generation, but we might potentially miss cards when the
   // generation is rapidly expanding while we are in the midst
   // of precleaning.
-  HeapWord* startAddr = gen->reserved().start();
-  HeapWord* endAddr   = gen->reserved().end();
-
-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  HeapWord* startAddr = old_gen->reserved().start();
+  HeapWord* endAddr   = old_gen->reserved().end();
+
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
 
   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *nextAddr, *lastAddr;
@@ -4109,7 +4095,7 @@
       HeapWord* stop_point = NULL;
       stopTimer();
       // Potential yield point
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
                                bitMapLock());
       startTimer();
       {
@@ -4117,7 +4103,7 @@
         verify_overflow_empty();
         sample_eden();
         stop_point =
-          gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
       if (stop_point != NULL) {
         // The careful iteration stopped early either because it found an
@@ -4152,15 +4138,15 @@
 // below are largely identical; if you need to modify
 // one of these methods, please check the other method too.
 
-size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
   ScanMarkedObjectsAgainCarefullyClosure* cl) {
   // strategy: it's similar to precleanModUnionTable above, in that
   // we accumulate contiguous ranges of dirty cards, mark these cards
   // precleaned, then scan the region covered by these cards.
-  HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
-  HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
-
-  cl->setFreelistLock(gen->freelistLock());   // needed for yielding
+  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
+  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
+
+  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
 
   size_t numDirtyCards, cumNumDirtyCards;
   HeapWord *lastAddr, *nextAddr;
@@ -4197,13 +4183,13 @@
 
     if (!dirtyRegion.is_empty()) {
       stopTimer();
-      CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
+      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
       startTimer();
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
       HeapWord* stop_point =
-        gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
+        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
                "Should only be AbortablePreclean.");
@@ -5086,7 +5072,7 @@
     // preclean phase did of eden, plus the [two] tasks of
     // scanning the [two] survivor spaces. Further fine-grain
     // parallelization of the scanning of the survivor spaces
-    // themselves, and of precleaning of the younger gen itself
+    // themselves, and of precleaning of the young gen itself
     // is deferred to the future.
     initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
 
@@ -5177,7 +5163,7 @@
 
   gch->gen_process_roots(&srs,
                          GenCollectedHeap::OldGen,
-                         true,  // younger gens as roots
+                         true,  // young gen as roots
                          GenCollectedHeap::ScanningOption(roots_scanning_options()),
                          should_unload_classes(),
                          &mrias_cl,
@@ -5661,7 +5647,7 @@
   }
 }
 
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
@@ -5690,26 +5676,26 @@
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
-  assert_lock_strong(gen->freelistLock());
+  assert_lock_strong(old_gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
   assert(_intra_sweep_timer.is_active(),  "Was switched on in an outer context");
-  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
-                                      _inter_sweep_estimate.padded_average(),
-                                      _intra_sweep_estimate.padded_average());
-  gen->setNearLargestChunk();
+  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+                                          _inter_sweep_estimate.padded_average(),
+                                          _intra_sweep_estimate.padded_average());
+  old_gen->setNearLargestChunk();
   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
-    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
+    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
+    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
     // destructor; so, do not remove this scope, else the
     // end-of-sweep-census below will be off by a little bit.
   }
-  gen->cmsSpace()->sweep_completed();
-  gen->cmsSpace()->endSweepFLCensus(sweep_count());
+  old_gen->cmsSpace()->sweep_completed();
+  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
   if (should_unload_classes()) {                // unloaded classes this cycle,
     _concurrent_cycles_since_last_unload = 0;   // ... reset count
   } else {                                      // did not unload classes,
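The hunk at line 359 above decides whether to start a concurrent cycle from the free space left after reserving room for the next young collection's expected promotion, scaled by a safety factor. A standalone sketch of that arithmetic (not HotSpot code; the recent-failure correction, cms_free_adjustment_factor, is omitted, and safety_factor_pct stands in for CMSIncrementalSafetyFactor):

    #include <cstddef>

    static double adjusted_cms_free(size_t cms_free,
                                    size_t expected_promotion,
                                    double safety_factor_pct) {
      if (cms_free <= expected_promotion) {
        return 0.0;  // no headroom: the caller would start a CMS cycle
      }
      cms_free -= expected_promotion;  // keep room for the next young GC
      double cms_adjustment = (100.0 - safety_factor_pct) / 100.0;
      return (double)cms_free * cms_adjustment;
    }

    // e.g. 100 MB free, 20 MB expected promotion, and a 10% safety factor
    // leave (100 - 20) * 0.9 = 72 MB of usable headroom.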
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -723,7 +723,7 @@
 
  private:
   // Support for parallelizing young gen rescan in CMS remark phase
-  ParNewGeneration* _young_gen;  // the younger gen
+  ParNewGeneration* _young_gen;
 
   HeapWord** _top_addr;    // ... Top of Eden
   HeapWord** _end_addr;    // ... End of Eden
@@ -772,9 +772,9 @@
 
  private:
   // Concurrent precleaning work
-  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
-  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
+  size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
                              ScanMarkedObjectsAgainCarefullyClosure* cl);
   // Does precleaning work, returning a quantity indicative of
   // the amount of "useful work" done.
@@ -797,7 +797,7 @@
   void refProcessingWork();
 
   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen);
+  void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
 
   // (Concurrent) resetting of support data structures
   void reset(bool concurrent);
@@ -1120,10 +1120,8 @@
   MemRegion used_region_at_save_marks() const;
 
   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that the second conjunct is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well?
+  virtual bool full_collects_young_generation() const {
     return !ScavengeBeforeFullGC;
   }
 
@@ -1153,9 +1151,8 @@
 
   virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
 
-  // Inform this (non-young) generation that a promotion failure was
-  // encountered during a collection of a younger generation that
-  // promotes into this generation.
+  // Inform this (old) generation that a promotion failure was
+  // encountered during a collection of the young generation.
   virtual void promotion_failure_occurred();
 
   bool should_collect(bool full, size_t size, bool tlab);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -295,7 +295,7 @@
     promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
   }
 
-  // If the younger gen collections were skipped, then the
+  // If the young gen collection was skipped, then the
   // number of promoted bytes will be 0 and adding it to the
   // average will incorrectly lessen the average. It is, however,
   // also possible that no promotion was needed.
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepThread.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -39,23 +39,17 @@
 
 // ======= Concurrent Mark Sweep Thread ========
 
-// The CMS thread is created when Concurrent Mark Sweep is used in the
-// older of two generations in a generational memory system.
+ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
+CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
+bool ConcurrentMarkSweepThread::_should_terminate = false;
+int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
 
-ConcurrentMarkSweepThread*
-     ConcurrentMarkSweepThread::_cmst     = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector = NULL;
-bool ConcurrentMarkSweepThread::_should_terminate = false;
-int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
+volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
 
-volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
-
-SurrogateLockerThread*
-     ConcurrentMarkSweepThread::_slt = NULL;
+SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL;
 SurrogateLockerThread::SLT_msg_type
      ConcurrentMarkSweepThread::_sltBuffer = SurrogateLockerThread::empty;
-Monitor*
-     ConcurrentMarkSweepThread::_sltMonitor = NULL;
+Monitor* ConcurrentMarkSweepThread::_sltMonitor = NULL;
 
 ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
   : ConcurrentGCThread() {
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -69,20 +69,28 @@
                                        Stack<oop, mtGC>* overflow_stacks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
-  _to_space(to_space_), _old_gen(old_gen_), _young_gen(young_gen_), _thread_num(thread_num_),
-  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
+  _to_space(to_space_),
+  _old_gen(old_gen_),
+  _young_gen(young_gen_),
+  _thread_num(thread_num_),
+  _work_queue(work_queue_set_->queue(thread_num_)),
+  _to_space_full(false),
   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(young_gen_, this), _old_gen_closure(young_gen_, this),
-  _to_space_root_closure(young_gen_, this), _old_gen_root_closure(young_gen_, this),
+  _to_space_closure(young_gen_, this),
+  _old_gen_closure(young_gen_, this),
+  _to_space_root_closure(young_gen_, this),
+  _old_gen_root_closure(young_gen_, this),
   _older_gen_closure(young_gen_, this),
   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                       work_queue_set_, &term_),
-  _is_alive_closure(young_gen_), _scan_weak_ref_closure(young_gen_, this),
+  _is_alive_closure(young_gen_),
+  _scan_weak_ref_closure(young_gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
-  _strong_roots_time(0.0), _term_time(0.0)
+  _strong_roots_time(0.0),
+  _term_time(0.0)
 {
   #if TASKQUEUE_STATS
   _term_attempts = 0;
@@ -90,8 +98,7 @@
   _overflow_refill_objs = 0;
   #endif // TASKQUEUE_STATS
 
-  _survivor_chunk_array =
-    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
+  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
   _hash_seed = 17;  // Might want to take time-based random value.
   _start = os::elapsedTime();
   _old_gen_closure.set_generation(old_gen_);
@@ -154,7 +161,6 @@
   }
 }
 
-
 void ParScanThreadState::trim_queues(int max_size) {
   ObjToScanQueue* queue = work_queue();
   do {
@@ -222,15 +228,12 @@
 }
 
 HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
-
-  // Otherwise, if the object is small enough, try to reallocate the
-  // buffer.
+  // If the object is small enough, try to reallocate the buffer.
   HeapWord* obj = NULL;
   if (!_to_space_full) {
     PLAB* const plab = to_space_alloc_buffer();
-    Space*            const sp   = to_space();
-    if (word_sz * 100 <
-        ParallelGCBufferWastePct * plab->word_sz()) {
+    Space* const sp  = to_space();
+    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
       // Is small enough; abandon this buffer and start a new one.
       plab->retire();
       size_t buf_size = plab->word_sz();
@@ -241,8 +244,7 @@
       size_t free_bytes = sp->free();
       while(buf_space == NULL && free_bytes >= min_bytes) {
         buf_size = free_bytes >> LogHeapWordSize;
-        assert(buf_size == (size_t)align_object_size(buf_size),
-               "Invariant");
+        assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
         buf_space  = sp->par_allocate(buf_size);
         free_bytes = sp->free();
       }
@@ -262,7 +264,6 @@
       // We're used up.
       _to_space_full = true;
     }
-
   } else {
     // Too large; allocate the object individually.
     obj = sp->par_allocate(word_sz);
@@ -271,7 +272,6 @@
   return obj;
 }
 
-
 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
   to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 }
@@ -288,7 +288,7 @@
   // Initializes states for the specified number of threads;
   ParScanThreadStateSet(int                     num_threads,
                         Space&                  to_space,
-                        ParNewGeneration&       gen,
+                        ParNewGeneration&       young_gen,
                         Generation&             old_gen,
                         ObjToScanQueueSet&      queue_set,
                         Stack<oop, mtGC>*       overflow_stacks_,
@@ -315,21 +315,25 @@
 
 private:
   ParallelTaskTerminator& _term;
-  ParNewGeneration&       _gen;
+  ParNewGeneration&       _young_gen;
   Generation&             _old_gen;
  public:
   bool is_valid(int id) const { return id < length(); }
   ParallelTaskTerminator* terminator() { return &_term; }
 };
 
-
-ParScanThreadStateSet::ParScanThreadStateSet(
-  int num_threads, Space& to_space, ParNewGeneration& gen,
-  Generation& old_gen, ObjToScanQueueSet& queue_set,
-  Stack<oop, mtGC>* overflow_stacks,
-  size_t desired_plab_sz, ParallelTaskTerminator& term)
+ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
+                                             Space& to_space,
+                                             ParNewGeneration& young_gen,
+                                             Generation& old_gen,
+                                             ObjToScanQueueSet& queue_set,
+                                             Stack<oop, mtGC>* overflow_stacks,
+                                             size_t desired_plab_sz,
+                                             ParallelTaskTerminator& term)
   : ResourceArray(sizeof(ParScanThreadState), num_threads),
-    _gen(gen), _old_gen(old_gen), _term(term)
+    _young_gen(young_gen),
+    _old_gen(old_gen),
+    _term(term)
 {
   assert(num_threads > 0, "sanity check!");
   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
@@ -337,13 +341,12 @@
   // Initialize states.
   for (int i = 0; i < num_threads; ++i) {
     new ((ParScanThreadState*)_data + i)
-        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
+        ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                            overflow_stacks, desired_plab_sz, term);
   }
 }
 
-inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
-{
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
   assert(i >= 0 && i < length(), "sanity check!");
   return ((ParScanThreadState*)_data)[i];
 }
@@ -357,8 +360,7 @@
   }
 }
 
-void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed)
-{
+void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
   _term.reset_for_reuse(active_threads);
   if (promotion_failed) {
     for (int i = 0; i < length(); ++i) {
@@ -368,36 +370,27 @@
 }
 
 #if TASKQUEUE_STATS
-void
-ParScanThreadState::reset_stats()
-{
+void ParScanThreadState::reset_stats() {
   taskqueue_stats().reset();
   _term_attempts = 0;
   _overflow_refills = 0;
   _overflow_refill_objs = 0;
 }
 
-void ParScanThreadStateSet::reset_stats()
-{
+void ParScanThreadStateSet::reset_stats() {
   for (int i = 0; i < length(); ++i) {
     thread_state(i).reset_stats();
   }
 }
 
-void
-ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- "
-                   "-------termination-------");
-  st->print_raw_cr("thr     ms        ms       % "
-                   "    ms       %   attempts");
-  st->print_raw_cr("--- --------- --------- ------ "
-                   "--------- ------ --------");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
+  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
 }
 
-void ParScanThreadStateSet::print_termination_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {
   print_termination_stats_hdr(st);
 
   for (int i = 0; i < length(); ++i) {
@@ -405,23 +398,20 @@
     const double elapsed_ms = pss.elapsed_time() * 1000.0;
     const double s_roots_ms = pss.strong_roots_time() * 1000.0;
     const double term_ms = pss.term_time() * 1000.0;
-    st->print_cr("%3d %9.2f %9.2f %6.2f "
-                 "%9.2f %6.2f " SIZE_FORMAT_W(8),
+    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                  i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                  term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
   }
 }
 
 // Print stats related to work queue activity.
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Task Stats");
   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st)
-{
+void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {
   print_taskqueue_stats_hdr(st);
 
   TaskQueueStats totals;
@@ -443,8 +433,7 @@
 }
 #endif // TASKQUEUE_STATS
 
-void ParScanThreadStateSet::flush()
-{
+void ParScanThreadStateSet::flush() {
   // Work in this loop should be kept as lightweight as
   // possible since this might otherwise become a bottleneck
   // to scaling. Should we add heavy-weight work into this
@@ -454,12 +443,12 @@
 
     // Flush stats related to To-space PLAB activity and
     // retire the last buffer.
-    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_gen.plab_stats());
+    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
 
     // Every thread has its own age table.  We need to merge
    // them all into one.
     ageTable *local_table = par_scan_state.age_table();
-    _gen.age_table()->merge(local_table);
+    _young_gen.age_table()->merge(local_table);
 
     // Inform old gen that we're done.
     _old_gen.par_promote_alloc_done(i);
@@ -478,8 +467,7 @@
 
 ParScanClosure::ParScanClosure(ParNewGeneration* g,
                                ParScanThreadState* par_scan_state) :
-  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g)
-{
+  OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
   _boundary = _g->reserved().end();
 }
 
@@ -531,24 +519,23 @@
   ObjToScanQueue* work_q = par_scan_state()->work_queue();
 
   while (true) {
-
     // Scan to-space and old-gen objs until we run out of both.
     oop obj_to_scan;
     par_scan_state()->trim_queues(0);
 
     // We have no local work, attempt to steal from other threads.
 
-    // attempt to steal work from promoted.
+    // Attempt to steal work from promoted.
     if (task_queues()->steal(par_scan_state()->thread_num(),
                              par_scan_state()->hash_seed(),
                              obj_to_scan)) {
       bool res = work_q->push(obj_to_scan);
       assert(res, "Empty queue should have room for a push.");
 
-      // if successful, goto Start.
+      // If successful, goto Start.
       continue;
 
-      // try global overflow list.
+      // Try global overflow list.
     } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
       continue;
     }
@@ -564,15 +551,17 @@
   par_scan_state()->end_term_time();
 }
 
-ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen, Generation* old_gen,
-                             HeapWord* young_old_boundary, ParScanThreadStateSet* state_set,
+ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
+                             Generation* old_gen,
+                             HeapWord* young_old_boundary,
+                             ParScanThreadStateSet* state_set,
                              StrongRootsScope* strong_roots_scope) :
     AbstractGangTask("ParNewGeneration collection"),
     _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
     _state_set(state_set),
     _strong_roots_scope(strong_roots_scope)
-  {}
+{}
 
 void ParNewGenTask::work(uint worker_id) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
@@ -595,8 +584,7 @@
   par_scan_state.start_strong_roots();
   gch->gen_process_roots(_strong_roots_scope,
                          GenCollectedHeap::YoungGen,
-                         true,  // Process younger gens, if any,
-                                // as strong roots.
+                         true,  // Process younger gens, if any, as strong roots.
                          GenCollectedHeap::SO_ScavengeCodeCache,
                          GenCollectedHeap::StrongAndWeakRoots,
                          &par_scan_state.to_space_root_closure(),
@@ -613,8 +601,7 @@
 #pragma warning( push )
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif
-ParNewGeneration::
-ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
+ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
   _overflow_list(NULL),
   _is_alive_closure(this),
@@ -625,20 +612,19 @@
   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 
-  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
+  for (uint i = 0; i < ParallelGCThreads; i++) {
     ObjToScanQueue *q = new ObjToScanQueue();
     guarantee(q != NULL, "work_queue Allocation failure.");
-    _task_queues->register_queue(i1, q);
+    _task_queues->register_queue(i, q);
   }
-  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
-    _task_queues->queue(i2)->initialize();
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    _task_queues->queue(i)->initialize();
+  }
 
   _overflow_stacks = NULL;
   if (ParGCUseLocalOverflow) {
-
-    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal
-    // with ','
+    // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal with ','
     typedef Stack<oop, mtGC> GCOopStack;
 
     _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
@@ -742,7 +728,7 @@
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
 public:
   ParNewRefProcTaskProxy(ProcessTask& task,
-                         ParNewGeneration& gen,
+                         ParNewGeneration& young_gen,
                          Generation& old_gen,
                          HeapWord* young_old_boundary,
                          ParScanThreadStateSet& state_set);
@@ -768,11 +754,9 @@
     _old_gen(old_gen),
     _young_old_boundary(young_old_boundary),
     _state_set(state_set)
-{
-}
+{ }
 
-void ParNewRefProcTaskProxy::work(uint worker_id)
-{
+void ParNewRefProcTaskProxy::work(uint worker_id) {
   ResourceMark rm;
   HandleMark hm;
   ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
@@ -792,15 +776,12 @@
       _task(task)
   { }
 
-  virtual void work(uint worker_id)
-  {
+  virtual void work(uint worker_id) {
     _task.work(worker_id);
   }
 };
 
-
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
@@ -812,8 +793,7 @@
                        _young_gen.promotion_failed());
 }
 
-void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
-{
+void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
@@ -821,8 +801,7 @@
   workers->run_task(&enq_task);
 }
 
-void ParNewRefProcTaskExecutor::set_single_threaded_mode()
-{
+void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   gch->save_marks();
@@ -830,7 +809,8 @@
 
 ScanClosureWithParBarrier::
 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
-  ScanClosure(g, gc_barrier) {}
+  ScanClosure(g, gc_barrier)
+{ }
 
 EvacuateFollowersClosureGeneral::
 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
@@ -838,7 +818,7 @@
                                 OopsInGenClosure* older) :
   _gch(gch),
   _scan_cur_or_nonheap(cur), _scan_older(older)
-{}
+{ }
 
 void EvacuateFollowersClosureGeneral::do_void() {
   do {
@@ -850,7 +830,6 @@
   } while (!_gch->no_allocs_since_save_marks());
 }
 
-
 // A Generation that does parallel young-gen collection.
 
 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
@@ -996,9 +975,9 @@
   if (ZapUnusedHeapArea) {
     // This is now done here because of the piece-meal mangling which
     // can check for valid mangling at intermediate points in the
-    // collection(s).  When a minor collection fails to collect
+    // collection(s).  When a young collection fails to collect
     // sufficient space resizing of the young generation can occur
-    // an redistribute the spaces in the young generation.  Mangle
+    // and redistribute the spaces in the young generation.  Mangle
     // here so that unzapped regions don't get distributed to
     // other spaces.
     to()->mangle_unused_area();
@@ -1113,8 +1092,10 @@
 // thus avoiding the need to undo the copy as in
 // copy_to_survivor_space_avoiding_with_undo.
 
-oop ParNewGeneration::copy_to_survivor_space(
-        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
+oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
+                                             oop old,
+                                             size_t sz,
+                                             markOop m) {
   // In the sequential version, this assert also says that the object is
   // not forwarded.  That might not be the case here.  It is the case that
   // the caller observed it to be not forwarded at some time in the past.
@@ -1141,8 +1122,7 @@
   }
 
   if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
+    // Either to-space is full or we decided to promote try allocating obj tenured
 
     // Attempt to install a null forwarding pointer (atomically),
     // to claim the right to install the real forwarding pointer.
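The alloc_in_to_space_slow hunk above retires the current PLAB and starts a new one only when the object that failed to fit is small relative to the buffer; otherwise the object is allocated individually so the remaining buffer space is not wasted. An illustrative sketch of that decision (names are assumptions, not the real flag plumbing):

    #include <cstddef>

    // Retiring the buffer for this object is allowed only if word_sz is
    // under waste_pct percent of the PLAB size; larger objects are
    // allocated individually instead. Same integer comparison as the diff.
    static bool small_enough_to_refill(size_t word_sz,
                                       size_t plab_word_sz,
                                       size_t waste_pct /* ParallelGCBufferWastePct */) {
      return word_sz * 100 < waste_pct * plab_word_sz;
    }

    // e.g. with waste_pct = 10 and a 1024-word PLAB, objects up to
    // 102 words may trigger a refill; anything larger goes directly
    // to the space.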
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -71,11 +71,7 @@
   ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
   ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
   ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // One of these two will be passed to process_roots, which will
-  // set its generation.  The first is for two-gen configs where the
-  // old gen collects the perm gen; the second is for arbitrary configs.
-  // The second isn't used right now (it used to be used for the train, an
-  // incremental collector) but the declaration has been left as a reminder.
+  // Will be passed to process_roots to set its generation.
   ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
   // This closure will always be bound to the old gen; it will be used
   // in evacuate_followers.
@@ -85,7 +81,6 @@
   ParScanWeakRefClosure                _scan_weak_ref_closure;
   ParKeepAliveClosure                  _keep_alive_closure;
 
-
   Space* _to_space;
   Space* to_space() { return _to_space; }
--- a/hotspot/src/share/vm/gc/g1/heapRegionType.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/g1/heapRegionType.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -35,7 +35,7 @@
   // We encode the value of the heap region type so the generation can be
   // determined quickly. The tag is split into two parts:
   //
-  //   major type (young, humongous)                         : top N-1 bits
+  //   major type (young, old, humongous, archive)           : top N-1 bits
   //   minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
   //
   // If there's need to increase the number of minor types in the
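An illustrative sketch of the tag split that comment describes: the bottom bit selects the minor variant and the upper bits the major type. The values are made up for the example, not G1's actual constants:

    #include <cassert>

    enum {
      YoungMajor  = 1 << 1,          // a major type lives in the top bits
      EdenTag     = YoungMajor | 0,  // minor variant 0
      SurvivorTag = YoungMajor | 1   // minor variant 1
    };

    int main() {
      assert((EdenTag >> 1) == (SurvivorTag >> 1)); // same major type
      assert((EdenTag & 1) != (SurvivorTag & 1));   // differ in the minor bit
      return 0;
    }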
--- a/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.inline.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/parallelScavengeHeap.inline.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -30,26 +30,22 @@
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
-inline size_t ParallelScavengeHeap::total_invocations()
-{
+inline size_t ParallelScavengeHeap::total_invocations() {
   return UseParallelOldGC ?
     PSParallelCompact::total_invocations() :
     PSMarkSweep::total_invocations();
 }
 
-inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
-{
+inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
   const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
   return size < eden_size / 2;
 }
 
-inline void ParallelScavengeHeap::invoke_scavenge()
-{
+inline void ParallelScavengeHeap::invoke_scavenge() {
   PSScavenge::invoke();
 }
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
   // Assumes the the old gen address range is lower than that of the young gen.
-  const void* loc = (void*) p;
   bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
   assert(result == young_gen()->is_in_reserved(p),
          err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -299,7 +299,7 @@
   // subtracted out.
   size_t eden_limit = max_eden_size;
 
-  const double gc_cost_limit = GCTimeLimit/100.0;
+  const double gc_cost_limit = GCTimeLimit / 100.0;
 
   // Which way should we go?
   // if pause requirement is not met
--- a/hotspot/src/share/vm/gc/parallel/psOldGen.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psOldGen.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -486,12 +486,12 @@
   object_space()->verify();
 }
 
 class VerifyObjectStartArrayClosure : public ObjectClosure {
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
   ObjectStartArray* _start_array;
 
  public:
-  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
-    _gen(gen), _start_array(start_array) { }
+  VerifyObjectStartArrayClosure(PSOldGen* old_gen, ObjectStartArray* start_array) :
+    _old_gen(old_gen), _start_array(start_array) { }
 
   virtual void do_object(oop obj) {
     HeapWord* test_addr = (HeapWord*)obj + 1;
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -958,7 +958,7 @@
 {
   // Update the from & to space pointers in space_info, since they are swapped
   // at each young gen gc.  Do the update unconditionally (even though a
-  // promotion failure does not swap spaces) because an unknown number of minor
+  // promotion failure does not swap spaces) because an unknown number of young
   // collections will have swapped the spaces an unknown number of times.
   GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -303,7 +303,7 @@
   // completed(), which is desirable since a region must be claimed before it
   // can be completed.
   bool available() const { return _dc_and_los < dc_one; }
-  bool claimed() const { return _dc_and_los >= dc_claimed; }
+  bool claimed()   const { return _dc_and_los >= dc_claimed; }
   bool completed() const { return _dc_and_los >= dc_completed; }
 
   // These are not atomic.
@@ -979,7 +979,6 @@
   static bool _dwl_initialized;
 #endif // #ifdef ASSERT
 
-
  public:
   static ParallelOldTracer* gc_tracer() { return &_gc_tracer; }
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -597,9 +597,9 @@
     // to allow resizes that may have been inhibited by the
     // relative location of the "to" and "from" spaces.
 
-    // Resizing the old gen at minor collects can cause increases
+    // Resizing the old gen at young collections can cause increases
     // that don't feed back to the generation sizing policy until
-    // a major collection.  Don't resize the old gen here.
+    // a full collection.  Don't resize the old gen here.
 
     heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                            size_policy->calculated_survivor_size_in_bytes());
--- a/hotspot/src/share/vm/gc/parallel/psTasks.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psTasks.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -172,10 +172,10 @@
 
 void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
   // There are not old-to-young pointers if the old gen is empty.
-  assert(!_gen->object_space()->is_empty(),
+  assert(!_old_gen->object_space()->is_empty(),
          "Should not be called is there is no work");
-  assert(_gen != NULL, "Sanity");
-  assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
+  assert(_old_gen != NULL, "Sanity");
+  assert(_old_gen->object_space()->contains(_gen_top) || _gen_top == _old_gen->object_space()->top(), "Sanity");
   assert(_stripe_number < ParallelGCThreads, "Sanity");
 
   {
@@ -183,8 +183,8 @@
     CardTableExtension* card_table =
       barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
 
-    card_table->scavenge_contents_parallel(_gen->start_array(),
-                                           _gen->object_space(),
+    card_table->scavenge_contents_parallel(_old_gen->start_array(),
+                                           _old_gen->object_space(),
                                            _gen_top,
                                            pm,
                                            _stripe_number,
--- a/hotspot/src/share/vm/gc/parallel/psTasks.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/parallel/psTasks.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -160,17 +160,17 @@
 
 class OldToYoungRootsTask : public GCTask {
  private:
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
   HeapWord* _gen_top;
   uint _stripe_number;
   uint _stripe_total;
 
 public:
-  OldToYoungRootsTask(PSOldGen *gen,
+  OldToYoungRootsTask(PSOldGen *old_gen,
                       HeapWord* gen_top,
                       uint stripe_number,
                       uint stripe_total) :
-    _gen(gen),
+    _old_gen(old_gen),
     _gen_top(gen_top),
     _stripe_number(stripe_number),
     _stripe_total(stripe_total) { }
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -106,14 +106,14 @@
   _gch(gch), _scan_cur_or_nonheap(cur), _scan_older(older)
 {
   assert(_gch->young_gen()->kind() == Generation::DefNew, "Generation should be DefNew");
-  _gen = (DefNewGeneration*)_gch->young_gen();
+  _young_gen = (DefNewGeneration*)_gch->young_gen();
 }
 
 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
   do {
     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen, _scan_cur_or_nonheap, _scan_older);
   } while (!_gch->no_allocs_since_save_marks());
-  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
+  guarantee(_young_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
@@ -200,8 +200,9 @@
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
+  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
     vm_exit_during_initialization("Could not allocate a new gen space");
+  }
 
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
@@ -655,7 +656,7 @@
   if (ZapUnusedHeapArea) {
     // This is now done here because of the piece-meal mangling which
     // can check for valid mangling at intermediate points in the
-    // collection(s).  When a minor collection fails to collect
+    // collection(s).  When a young collection fails to collect
     // sufficient space resizing of the young generation can occur
     // an redistribute the spaces in the young generation.  Mangle
     // here so that unzapped regions don't get distributed to
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -193,7 +193,7 @@
 
   class FastEvacuateFollowersClosure: public VoidClosure {
     GenCollectedHeap* _gch;
-    DefNewGeneration* _gen;
+    DefNewGeneration* _young_gen;
     FastScanClosure* _scan_cur_or_nonheap;
     FastScanClosure* _scan_older;
   public:
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.inline.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.inline.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -57,8 +57,8 @@
   // each generation, allowing them in turn to examine the modified
   // field.
   //
-  // We could check that p is also in an older generation, but
-  // dirty cards in the youngest gen are never scanned, so the
+  // We could check that p is also in the old generation, but
+  // dirty cards in the young gen are never scanned, so the
   // extra check probably isn't worthwhile.
   if (GenCollectedHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -108,7 +108,7 @@
               free());
     }
   }
-  // If we had to expand to accommodate promotions from younger generations
+  // If we had to expand to accommodate promotions from the young generation
   if (!result && _capacity_at_prologue < capacity()) {
     result = true;
     if (PrintGC && Verbose) {
@@ -140,11 +140,11 @@
   // that are of interest at this point.
   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
-    // Calculate size of data promoted from the younger generations
+    // Calculate size of data promoted from the young generation
     // before doing the collection.
     size_t used_before_gc = used();
 
-    // If the younger gen collections were skipped, then the
+    // If the young gen collection was skipped, then the
     // number of promoted bytes will be 0 and adding it to the
     // average will incorrectly lessen the average.  It is, however,
     // also possible that no promotion was needed.
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -42,10 +42,10 @@
   friend class VM_PopulateDumpSharedSpace;
 
  protected:
-  ContiguousSpace*  _the_space;       // Actual space holding objects
+  ContiguousSpace* _the_space;        // Actual space holding objects
 
-  GenerationCounters*   _gen_counters;
-  CSpaceCounters*       _space_counters;
+  GenerationCounters* _gen_counters;
+  CSpaceCounters* _space_counters;
 
   // Allocation failure
   virtual bool expand(size_t bytes, size_t expand_bytes);
@@ -54,6 +54,7 @@
   ContiguousSpace* space() const { return _the_space; }
 
   void assert_correct_size_change_locking();
+
 public:
   TenuredGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
@@ -66,10 +67,9 @@
   const char* short_name() const { return "Tenured"; }
 
   // Does a "full" (forced) collection invoked on this generation collect
-  // all younger generations as well? Note that this is a
-  // hack to allow the collection of the younger gen first if the flag is
-  // set.
-  virtual bool full_collects_younger_generations() const {
+  // the young generation as well? Note that this is a hack to allow the
+  // collection of the young gen first if the flag is set.
+  virtual bool full_collects_young_generation() const {
    return !ScavengeBeforeFullGC;
   }
 
@@ -99,15 +99,16 @@
                        bool clear_all_soft_refs,
                        size_t size,
                        bool is_tlab);
+
   HeapWord* expand_and_allocate(size_t size,
                                 bool is_tlab,
                                 bool parallel = false);
 
   virtual void prepare_for_verify();
 
-
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
+
   bool should_collect(bool full, size_t word_size, bool is_tlab);
--- a/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/adaptiveSizePolicy.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -266,22 +266,22 @@
   }
 
   // The policy does not have enough data until at least some
-  // minor collections have been done.
+  // young collections have been done.
   _young_gen_policy_is_ready =
     (_avg_minor_gc_cost->count() >= AdaptiveSizePolicyReadyThreshold);
 
   // Calculate variables used to estimate pause time vs. gen sizes
-  double eden_size_in_mbytes = ((double)_eden_size)/((double)M);
+  double eden_size_in_mbytes = ((double)_eden_size) / ((double)M);
   update_minor_pause_young_estimator(minor_pause_in_ms);
   update_minor_pause_old_estimator(minor_pause_in_ms);
 
   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print("AdaptiveSizePolicy::minor_collection_end: "
-      "minor gc cost: %f  average: %f", collection_cost,
-      _avg_minor_gc_cost->average());
+                        "minor gc cost: %f  average: %f", collection_cost,
+                        _avg_minor_gc_cost->average());
     gclog_or_tty->print_cr("  minor pause: %f minor period %f",
-      minor_pause_in_ms,
-      _latest_minor_mutator_interval_seconds * MILLIUNITS);
+                           minor_pause_in_ms,
+                           _latest_minor_mutator_interval_seconds * MILLIUNITS);
   }
 
   // Calculate variable used to estimate collection cost vs. gen sizes
@@ -295,8 +295,7 @@
   _minor_timer.start();
 }
 
-size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden,
-                                          uint percent_change) {
+size_t AdaptiveSizePolicy::eden_increment(size_t cur_eden, uint percent_change) {
   size_t eden_heap_delta;
   eden_heap_delta = cur_eden / 100 * percent_change;
   return eden_heap_delta;
@@ -312,8 +311,7 @@
   return eden_heap_delta;
 }
 
-size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo,
-                                           uint percent_change) {
+size_t AdaptiveSizePolicy::promo_increment(size_t cur_promo, uint percent_change) {
   size_t promo_heap_delta;
   promo_heap_delta = cur_promo / 100 * percent_change;
   return promo_heap_delta;
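The increment helpers reformatted above compute a percentage of the current generation size in integer arithmetic. A minimal sketch of that arithmetic and its ordering (standalone, not HotSpot code):

    #include <cstddef>

    // Dividing before multiplying keeps the intermediate value from
    // overflowing for very large generations, at the cost of first
    // truncating cur_eden down to a multiple of 100.
    static size_t eden_increment(size_t cur_eden, unsigned percent_change) {
      return cur_eden / 100 * percent_change;
    }

    // e.g. eden_increment(1024u * 1024 * 1024, 20) yields roughly a 20%
    // (~205 MB) increase for a 1 GB eden.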
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -80,7 +80,9 @@
         break;
       }
     }
-    if (!seen) return v;
+    if (!seen) {
+      return v;
+    }
   }
   ShouldNotReachHere();
   return 0;
@@ -502,7 +504,7 @@
 //
 // The main point below is that the parallel card scanning code
 // deals correctly with these stale card values. There are two main
-// cases to consider where we have a stale "younger gen" value and a
+// cases to consider where we have a stale "young gen" value and a
 // "derivative" case to consider, where we have a stale
 // "cur_younger_gen_and_prev_non_clean" value, as will become
 // apparent in the case analysis below.
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -160,16 +160,20 @@
 
 // Memory state functions.
 
-CollectedHeap::CollectedHeap() {
+CollectedHeap::CollectedHeap() :
+  _barrier_set(NULL),
+  _is_gc_active(false),
+  _total_collections(0),
+  _total_full_collections(0),
+  _gc_cause(GCCause::_no_gc),
+  _gc_lastcause(GCCause::_no_gc),
+  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
+{
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
   _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                              max_len / elements_per_word);
 
-  _barrier_set = NULL;
-  _is_gc_active = false;
-  _total_collections = _total_full_collections = 0;
-  _gc_cause = _gc_lastcause = GCCause::_no_gc;
   NOT_PRODUCT(_promotion_failure_alot_count = 0;)
   NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)
 
@@ -184,7 +188,7 @@
     PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                              80, GCCause::to_string(_gc_lastcause), CHECK);
   }
-  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
+
   // Create the ring log
   if (LogEvents) {
     _gc_heap_log = new GCHeapLog();
@@ -570,8 +574,8 @@
 void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
     GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
-    // We are doing a "major" collection and a heap dump before
-    // major collection has been requested.
+    // We are doing a full collection and a heap dump before
+    // full collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
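The CollectedHeap hunk above moves scalar field setup from assignments in the constructor body into the member initializer list. A sketch of the pattern (the class and fields here are illustrative, not HotSpot's):

    // Members are initialized in declaration order regardless of the order
    // they appear in the list, so keeping the list in declaration order
    // avoids -Wreorder warnings and surprises.
    class ExampleHeap {
      void*    _barrier_set;
      bool     _is_gc_active;
      unsigned _total_collections;
     public:
      ExampleHeap() :
        _barrier_set(0),
        _is_gc_active(false),
        _total_collections(0) {}
    };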
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp  Mon Sep 07 20:03:56 2015 +0200
@@ -464,7 +464,7 @@
   bool prepared_for_verification = false;
   bool collected_old = false;
   bool old_collects_young = complete &&
-                            _old_gen->full_collects_younger_generations();
+                            _old_gen->full_collects_young_generation();
   if (!old_collects_young &&
       _young_gen->should_collect(full, size, is_tlab)) {
     if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
@@ -521,7 +521,7 @@
   // a whole heap collection.
   complete = complete || collected_old;
 
-  if (complete) { // We did a "major" collection
+  if (complete) { // We did a full collection
     // FIXME: See comment at pre_full_gc_dump call
     post_full_gc_dump(NULL);   // do any post full gc dumps
   }
@@ -668,13 +668,13 @@
 
 void GenCollectedHeap::gen_process_roots(StrongRootsScope* scope,
                                          GenerationType type,
-                                         bool younger_gens_as_roots,
+                                         bool young_gen_as_roots,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopsInGenClosure* not_older_gens,
                                          OopsInGenClosure* older_gens,
                                          CLDClosure* cld_closure) {
-  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;
+  const bool is_adjust_phase = !only_strong_roots && !young_gen_as_roots;
 
   bool is_moving_collection = false;
   if (type == YoungGen || is_adjust_phase) {
@@ -691,7 +691,7 @@
                 cld_closure, weak_cld_closure,
                 &mark_code_closure);
 
-  if (younger_gens_as_roots) {
+  if (young_gen_as_roots) {
     if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
       if (type == OldGen) {
         not_older_gens->set_generation(_young_gen);
@@ -763,25 +763,25 @@
 void GenCollectedHeap::collect(GCCause::Cause cause) {
   if (should_do_concurrent_full_gc(cause)) {
 #if INCLUDE_ALL_GCS
-    // mostly concurrent full collection
+    // Mostly concurrent full collection.
     collect_mostly_concurrent(cause);
 #else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
   } else if (cause == GCCause::_wb_young_gc) {
-    // minor collection for WhiteBox API
+    // Young collection for the WhiteBox API.
     collect(cause, YoungGen);
   } else {
 #ifdef ASSERT
     if (cause == GCCause::_scavenge_alot) {
-      // minor collection only
+      // Young collection only.
      collect(cause, YoungGen);
     } else {
-      // Stop-the-world full collection
+      // Stop-the-world full collection.
      collect(cause, OldGen);
     }
 #else
-    // Stop-the-world full collection
+    // Stop-the-world full collection.
     collect(cause, OldGen);
 #endif
   }
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -173,8 +173,7 @@
 
   size_t max_capacity() const;
 
-  HeapWord* mem_allocate(size_t size,
-                         bool* gc_overhead_limit_was_exceeded);
+  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
 
   // We may support a shared contiguous allocation area, if the youngest
   // generation does.
@@ -403,7 +402,7 @@
 
   void gen_process_roots(StrongRootsScope* scope,
                          GenerationType type,
-                         bool younger_gens_as_roots,
+                         bool young_gen_as_roots,
                          ScanningOption so,
                          bool only_strong_roots,
                          OopsInGenClosure* not_older_gens,
--- a/hotspot/src/share/vm/gc/shared/genRemSet.hpp  Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/gc/shared/genRemSet.hpp  Mon Sep 07 20:03:56 2015 +0200
@@ -110,13 +110,11 @@
   virtual void print() {}
 
   // Informs the RS that the given memregion contains no references to
-  // younger generations.
+  // the young generation.
   virtual void clear(MemRegion mr) = 0;
 
-  // Informs the RS that there are no references to generations
-  // younger than gen from generations gen and older.
-  // The parameter clear_perm indicates if the perm_gen's
-  // remembered set should also be processed/cleared.
+  // Informs the RS that there are no references to the young generation
+  // from old_gen.
   virtual void clear_into_younger(Generation* old_gen) = 0;
 
   // Informs the RS that refs in the given "mr" may have changed
--- a/hotspot/src/share/vm/gc/shared/generation.hpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/gc/shared/generation.hpp Mon Sep 07 20:03:56 2015 +0200 @@ -80,7 +80,6 @@ // first two fields are word-sized.) }; - class Generation: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -299,8 +298,7 @@ // word of "obj" may have been overwritten with a forwarding pointer, and // also taking care to copy the klass pointer *last*. Returns the new // object if successful, or else NULL. - virtual oop par_promote(int thread_num, - oop obj, markOop m, size_t word_sz); + virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz); // Informs the current generation that all par_promote_alloc's in the // collection have been completed; any supporting data structures can be @@ -315,7 +313,7 @@ // This generation will collect all younger generations // during a full collection. - virtual bool full_collects_younger_generations() const { return false; } + virtual bool full_collects_young_generation() const { return false; } // This generation does in-place marking, meaning that mark words // are mutated during the marking phase and presumably reinitialized @@ -370,18 +368,18 @@ // Some generations may require some cleanup or preparation actions before // allowing a collection. The default is to do nothing. - virtual void gc_prologue(bool full) {}; + virtual void gc_prologue(bool full) {} // Some generations may require some cleanup actions after a collection. // The default is to do nothing. - virtual void gc_epilogue(bool full) {}; + virtual void gc_epilogue(bool full) {} // Save the high water marks for the used space in a generation. - virtual void record_spaces_top() {}; + virtual void record_spaces_top() {} // Some generations may need to be "fixed-up" after some allocation // activity to make them parsable again. The default is to do nothing. - virtual void ensure_parsability() {}; + virtual void ensure_parsability() {} // Time (in ms) when we were last collected or now if a collection is // in progress. @@ -417,7 +415,7 @@ virtual void adjust_pointers(); // Mark sweep support phase4 virtual void compact(); - virtual void post_compact() {ShouldNotReachHere();} + virtual void post_compact() { ShouldNotReachHere(); } // Support for CMS's rescan. In this general form we return a pointer // to an abstract object that can be used, based on specific previously @@ -432,7 +430,7 @@ // Some generations may require some cleanup actions before allowing // a verification. - virtual void prepare_for_verify() {}; + virtual void prepare_for_verify() {} // Accessing "marks". @@ -483,7 +481,7 @@ // Give each generation an opportunity to do clean up for any // contributed scratch. - virtual void reset_scratch() {}; + virtual void reset_scratch() {} // When an older generation has been collected, and perhaps resized, // this method will be invoked on all younger generations (from older to
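The renamed predicate reads naturally in a two-generation heap, where there is exactly one young generation to ask about. A toy sketch of how a subclass would override it; ToyGeneration and ToyMarkSweepGeneration are invented names, and the caller side is the old_collects_young test in the genCollectedHeap.cpp hunk above.

    struct ToyGeneration {
      // Default: a full collection of this generation does not also
      // collect the young generation.
      virtual bool full_collects_young_generation() const { return false; }
      virtual ~ToyGeneration() {}
    };

    // An old generation whose full collection also covers the young
    // generation reports it, letting the heap skip a separate young
    // collection (the old_collects_young test).
    struct ToyMarkSweepGeneration : ToyGeneration {
      bool full_collects_young_generation() const override { return true; }
    };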
--- a/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/gc/shared/referenceProcessor.cpp Mon Sep 07 20:03:56 2015 +0200 @@ -1065,7 +1065,7 @@ // can mark through them now, rather than delaying that // to the reference-processing phase. Since all current // time-stamp policies advance the soft-ref clock only - // at a major collection cycle, this is always currently + // at a full collection cycle, this is always currently // accurate. if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { return false;
--- a/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/gc/shared/vmGCOperations.hpp Mon Sep 07 20:03:56 2015 +0200 @@ -213,15 +213,18 @@ size_t _size; // size of object to be allocated Metaspace::MetadataType _mdtype; ClassLoaderData* _loader_data; + public: VM_CollectForMetadataAllocation(ClassLoaderData* loader_data, - size_t size, Metaspace::MetadataType mdtype, + size_t size, + Metaspace::MetadataType mdtype, uint gc_count_before, uint full_gc_count_before, GCCause::Cause gc_cause) : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true), _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) { } + virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; } virtual void doit(); MetaWord* result() const { return _result; }
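For context on the reformatted VM_CollectForMetadataAllocation constructor, a hypothetical call site could read as follows. Only the parameter order comes from the declaration above; the locals (loader_data, word_size, and the two counts) are placeholders, not code from this patch.

    // Hypothetical call site; argument order taken from the declaration.
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       Metaspace::NonClassType,
                                       gc_count_before,
                                       full_gc_count_before,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);
    MetaWord* result = op.result();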
--- a/hotspot/src/share/vm/runtime/globals.hpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/runtime/globals.hpp Mon Sep 07 20:03:56 2015 +0200 @@ -816,7 +816,7 @@ \ product(bool, UseSHA1Intrinsics, false, \ "Use intrinsics for SHA-1 crypto hash function. " \ - "Requires that UseSHA is enabled.") \ + "Requires that UseSHA is enabled.") \ \ product(bool, UseSHA256Intrinsics, false, \ "Use intrinsics for SHA-224 and SHA-256 crypto hash functions. " \ @@ -1596,7 +1596,7 @@ "(ParallelGC only)") \ \ product(bool, ScavengeBeforeFullGC, true, \ - "Scavenge youngest generation before each full GC.") \ + "Scavenge young generation before each full GC.") \ \ develop(bool, ScavengeWithObjectsInToSpace, false, \ "Allow scavenges to occur when to-space contains objects") \ @@ -2094,11 +2094,11 @@ "promotion failure") \ \ notproduct(bool, PromotionFailureALot, false, \ - "Use promotion failure handling on every youngest generation " \ + "Use promotion failure handling on every young generation " \ "collection") \ \ develop(uintx, PromotionFailureALotCount, 1000, \ - "Number of promotion failures occurring at PLAB " \ + "Number of promotion failures occurring at PLAB " \ "refill attempts (ParNew) or promotion attempts " \ "(other young collectors)") \ \
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/runtime/synchronizer.cpp Mon Sep 07 20:03:56 2015 +0200 @@ -1360,7 +1360,7 @@ } // We've successfully installed INFLATING (0) into the mark-word. - // This is the only case where 0 will appear in a mark-work. + // This is the only case where 0 will appear in a mark-word. // Only the singular thread that successfully swings the mark-word // to 0 can perform (or more precisely, complete) inflation. //
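The corrected comment documents the invariant that 0 (INFLATING) appears in a mark-word only while one thread is completing inflation. A standalone sketch of that single step, using std::atomic in place of HotSpot's Atomic::cmpxchg; markword_t and try_begin_inflation are invented names.

    #include <atomic>
    #include <cstdint>

    typedef intptr_t markword_t;     // invented alias for the mark-word
    const markword_t INFLATING = 0;  // 0 never otherwise appears in a mark-word

    // Exactly one thread's CAS from `observed` to 0 succeeds; that thread
    // completes inflation while the others see 0 and spin or retry.
    bool try_begin_inflation(std::atomic<markword_t>& mark, markword_t observed) {
      return mark.compare_exchange_strong(observed, INFLATING);
    }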
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Sep 07 20:03:56 2015 +0200 @@ -283,17 +283,17 @@ volatile_nonstatic_field(ArrayKlass, _higher_dimension, Klass*) \ volatile_nonstatic_field(ArrayKlass, _lower_dimension, Klass*) \ nonstatic_field(ArrayKlass, _vtable_len, int) \ - nonstatic_field(CompiledICHolder, _holder_method, Method*) \ - nonstatic_field(CompiledICHolder, _holder_klass, Klass*) \ - nonstatic_field(ConstantPool, _tags, Array<u1>*) \ - nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \ - nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \ - nonstatic_field(ConstantPool, _operands, Array<u2>*) \ - nonstatic_field(ConstantPool, _length, int) \ - nonstatic_field(ConstantPool, _resolved_references, jobject) \ - nonstatic_field(ConstantPool, _reference_map, Array<u2>*) \ - nonstatic_field(ConstantPoolCache, _length, int) \ - nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \ + nonstatic_field(CompiledICHolder, _holder_method, Method*) \ + nonstatic_field(CompiledICHolder, _holder_klass, Klass*) \ + nonstatic_field(ConstantPool, _tags, Array<u1>*) \ + nonstatic_field(ConstantPool, _cache, ConstantPoolCache*) \ + nonstatic_field(ConstantPool, _pool_holder, InstanceKlass*) \ + nonstatic_field(ConstantPool, _operands, Array<u2>*) \ + nonstatic_field(ConstantPool, _length, int) \ + nonstatic_field(ConstantPool, _resolved_references, jobject) \ + nonstatic_field(ConstantPool, _reference_map, Array<u2>*) \ + nonstatic_field(ConstantPoolCache, _length, int) \ + nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \ nonstatic_field(InstanceKlass, _array_klasses, Klass*) \ nonstatic_field(InstanceKlass, _methods, Array<Method*>*) \ nonstatic_field(InstanceKlass, _default_methods, Array<Method*>*) \ @@ -303,12 +303,12 @@ nonstatic_field(InstanceKlass, _java_fields_count, u2) \ nonstatic_field(InstanceKlass, _constants, ConstantPool*) \ nonstatic_field(InstanceKlass, _class_loader_data, ClassLoaderData*) \ - nonstatic_field(InstanceKlass, _source_file_name_index, u2) \ + nonstatic_field(InstanceKlass, _source_file_name_index, u2) \ nonstatic_field(InstanceKlass, _source_debug_extension, char*) \ - nonstatic_field(InstanceKlass, _inner_classes, Array<jushort>*) \ + nonstatic_field(InstanceKlass, _inner_classes, Array<jushort>*) \ nonstatic_field(InstanceKlass, _nonstatic_field_size, int) \ nonstatic_field(InstanceKlass, _static_field_size, int) \ - nonstatic_field(InstanceKlass, _static_oop_field_count, u2) \ + nonstatic_field(InstanceKlass, _static_oop_field_count, u2) \ nonstatic_field(InstanceKlass, _nonstatic_oop_map_size, int) \ nonstatic_field(InstanceKlass, _is_marked_dependent, bool) \ nonstatic_field(InstanceKlass, _minor_version, u2) \ @@ -346,62 +346,62 @@ nonstatic_field(Klass, _prototype_header, markOop) \ nonstatic_field(Klass, _next_sibling, Klass*) \ nonstatic_field(vtableEntry, _method, Method*) \ - nonstatic_field(MethodData, _size, int) \ - nonstatic_field(MethodData, _method, Method*) \ - nonstatic_field(MethodData, _data_size, int) \ - nonstatic_field(MethodData, _data[0], intptr_t) \ - nonstatic_field(MethodData, _parameters_type_data_di, int) \ - nonstatic_field(MethodData, _nof_decompiles, uint) \ - nonstatic_field(MethodData, _nof_overflow_recompiles, uint) \ - nonstatic_field(MethodData, _nof_overflow_traps, uint) \ - nonstatic_field(MethodData, _trap_hist._array[0], u1) \ - nonstatic_field(MethodData, _eflags, intx) \ - 
nonstatic_field(MethodData, _arg_local, intx) \ - nonstatic_field(MethodData, _arg_stack, intx) \ - nonstatic_field(MethodData, _arg_returned, intx) \ - nonstatic_field(MethodData, _tenure_traps, uint) \ - nonstatic_field(MethodData, _invoke_mask, int) \ - nonstatic_field(MethodData, _backedge_mask, int) \ - nonstatic_field(DataLayout, _header._struct._tag, u1) \ - nonstatic_field(DataLayout, _header._struct._flags, u1) \ - nonstatic_field(DataLayout, _header._struct._bci, u2) \ - nonstatic_field(DataLayout, _cells[0], intptr_t) \ - nonstatic_field(MethodCounters, _nmethod_age, int) \ - nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \ - nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \ - nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \ - nonstatic_field(MethodCounters, _invoke_mask, int) \ - nonstatic_field(MethodCounters, _backedge_mask, int) \ - nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \ - nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ - nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \ - nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \ - nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \ - nonstatic_field(Method, _constMethod, ConstMethod*) \ - nonstatic_field(Method, _method_data, MethodData*) \ - nonstatic_field(Method, _method_counters, MethodCounters*) \ - nonstatic_field(Method, _access_flags, AccessFlags) \ - nonstatic_field(Method, _vtable_index, int) \ - nonstatic_field(Method, _method_size, u2) \ - nonstatic_field(Method, _intrinsic_id, u1) \ - nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \ - volatile_nonstatic_field(Method, _code, nmethod*) \ - nonstatic_field(Method, _i2i_entry, address) \ - nonstatic_field(Method, _adapter, AdapterHandlerEntry*) \ - volatile_nonstatic_field(Method, _from_compiled_entry, address) \ - volatile_nonstatic_field(Method, _from_interpreted_entry, address) \ - volatile_nonstatic_field(ConstMethod, _fingerprint, uint64_t) \ - nonstatic_field(ConstMethod, _constants, ConstantPool*) \ - nonstatic_field(ConstMethod, _stackmap_data, Array<u1>*) \ - nonstatic_field(ConstMethod, _constMethod_size, int) \ - nonstatic_field(ConstMethod, _flags, u2) \ - nonstatic_field(ConstMethod, _code_size, u2) \ - nonstatic_field(ConstMethod, _name_index, u2) \ - nonstatic_field(ConstMethod, _signature_index, u2) \ - nonstatic_field(ConstMethod, _method_idnum, u2) \ - nonstatic_field(ConstMethod, _max_stack, u2) \ - nonstatic_field(ConstMethod, _max_locals, u2) \ - nonstatic_field(ConstMethod, _size_of_parameters, u2) \ + nonstatic_field(MethodData, _size, int) \ + nonstatic_field(MethodData, _method, Method*) \ + nonstatic_field(MethodData, _data_size, int) \ + nonstatic_field(MethodData, _data[0], intptr_t) \ + nonstatic_field(MethodData, _parameters_type_data_di, int) \ + nonstatic_field(MethodData, _nof_decompiles, uint) \ + nonstatic_field(MethodData, _nof_overflow_recompiles, uint) \ + nonstatic_field(MethodData, _nof_overflow_traps, uint) \ + nonstatic_field(MethodData, _trap_hist._array[0], u1) \ + nonstatic_field(MethodData, _eflags, intx) \ + nonstatic_field(MethodData, _arg_local, intx) \ + nonstatic_field(MethodData, _arg_stack, intx) \ + nonstatic_field(MethodData, _arg_returned, intx) \ + nonstatic_field(MethodData, _tenure_traps, uint) \ + nonstatic_field(MethodData, _invoke_mask, int) \ + nonstatic_field(MethodData, _backedge_mask, int) \ + nonstatic_field(DataLayout, 
_header._struct._tag, u1) \ + nonstatic_field(DataLayout, _header._struct._flags, u1) \ + nonstatic_field(DataLayout, _header._struct._bci, u2) \ + nonstatic_field(DataLayout, _cells[0], intptr_t) \ + nonstatic_field(MethodCounters, _nmethod_age, int) \ + nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \ + nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \ + nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \ + nonstatic_field(MethodCounters, _invoke_mask, int) \ + nonstatic_field(MethodCounters, _backedge_mask, int) \ + nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \ + nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ + nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \ + nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \ + nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \ + nonstatic_field(Method, _constMethod, ConstMethod*) \ + nonstatic_field(Method, _method_data, MethodData*) \ + nonstatic_field(Method, _method_counters, MethodCounters*) \ + nonstatic_field(Method, _access_flags, AccessFlags) \ + nonstatic_field(Method, _vtable_index, int) \ + nonstatic_field(Method, _method_size, u2) \ + nonstatic_field(Method, _intrinsic_id, u1) \ + nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \ + volatile_nonstatic_field(Method, _code, nmethod*) \ + nonstatic_field(Method, _i2i_entry, address) \ + nonstatic_field(Method, _adapter, AdapterHandlerEntry*) \ + volatile_nonstatic_field(Method, _from_compiled_entry, address) \ + volatile_nonstatic_field(Method, _from_interpreted_entry, address) \ + volatile_nonstatic_field(ConstMethod, _fingerprint, uint64_t) \ + nonstatic_field(ConstMethod, _constants, ConstantPool*) \ + nonstatic_field(ConstMethod, _stackmap_data, Array<u1>*) \ + nonstatic_field(ConstMethod, _constMethod_size, int) \ + nonstatic_field(ConstMethod, _flags, u2) \ + nonstatic_field(ConstMethod, _code_size, u2) \ + nonstatic_field(ConstMethod, _name_index, u2) \ + nonstatic_field(ConstMethod, _signature_index, u2) \ + nonstatic_field(ConstMethod, _method_idnum, u2) \ + nonstatic_field(ConstMethod, _max_stack, u2) \ + nonstatic_field(ConstMethod, _max_locals, u2) \ + nonstatic_field(ConstMethod, _size_of_parameters, u2) \ nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \ nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \ volatile_nonstatic_field(Symbol, _refcount, short) \ @@ -414,10 +414,10 @@ /* Constant Pool Cache */ \ /***********************/ \ \ - volatile_nonstatic_field(ConstantPoolCacheEntry, _indices, intx) \ - nonstatic_field(ConstantPoolCacheEntry, _f1, volatile Metadata*) \ - volatile_nonstatic_field(ConstantPoolCacheEntry, _f2, intx) \ - volatile_nonstatic_field(ConstantPoolCacheEntry, _flags, intx) \ + volatile_nonstatic_field(ConstantPoolCacheEntry, _indices, intx) \ + nonstatic_field(ConstantPoolCacheEntry, _f1, volatile Metadata*) \ + volatile_nonstatic_field(ConstantPoolCacheEntry, _f2, intx) \ + volatile_nonstatic_field(ConstantPoolCacheEntry, _flags, intx) \ \ /********************************/ \ /* MethodOop-related structures */ \ @@ -631,83 +631,83 @@ /* SymbolTable */ \ /***************/ \ \ - static_field(SymbolTable, _the_table, SymbolTable*) \ - static_field(SymbolTable, _shared_table, SymbolCompactHashTable) \ + static_field(SymbolTable, _the_table, SymbolTable*) \ + static_field(SymbolTable, _shared_table, SymbolCompactHashTable) \ \ /***************/ \ /* StringTable */ \ 
/***************/ \ \ - static_field(StringTable, _the_table, StringTable*) \ + static_field(StringTable, _the_table, StringTable*) \ \ /********************/ \ /* CompactHashTable */ \ /********************/ \ \ - nonstatic_field(SymbolCompactHashTable, _base_address, uintx) \ - nonstatic_field(SymbolCompactHashTable, _entry_count, juint) \ - nonstatic_field(SymbolCompactHashTable, _bucket_count, juint) \ - nonstatic_field(SymbolCompactHashTable, _table_end_offset, juint) \ - nonstatic_field(SymbolCompactHashTable, _buckets, juint*) \ + nonstatic_field(SymbolCompactHashTable, _base_address, uintx) \ + nonstatic_field(SymbolCompactHashTable, _entry_count, juint) \ + nonstatic_field(SymbolCompactHashTable, _bucket_count, juint) \ + nonstatic_field(SymbolCompactHashTable, _table_end_offset, juint) \ + nonstatic_field(SymbolCompactHashTable, _buckets, juint*) \ \ /********************/ \ /* SystemDictionary */ \ /********************/ \ \ - static_field(SystemDictionary, _dictionary, Dictionary*) \ - static_field(SystemDictionary, _placeholders, PlaceholderTable*) \ - static_field(SystemDictionary, _shared_dictionary, Dictionary*) \ - static_field(SystemDictionary, _system_loader_lock_obj, oop) \ - static_field(SystemDictionary, _loader_constraints, LoaderConstraintTable*) \ - static_field(SystemDictionary, WK_KLASS(Object_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(String_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Class_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Cloneable_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ClassLoader_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Serializable_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(System_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Throwable_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ThreadDeath_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Error_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Exception_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(RuntimeException_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ClassNotFoundException_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(NoClassDefFoundError_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(LinkageError_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ClassCastException_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ArrayStoreException_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(VirtualMachineError_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(OutOfMemoryError_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(StackOverflowError_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ProtectionDomain_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(AccessControlContext_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(SecureClassLoader_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Reference_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(SoftReference_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(WeakReference_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(FinalReference_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(PhantomReference_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Cleaner_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Finalizer_klass), Klass*) \ - static_field(SystemDictionary, 
WK_KLASS(Thread_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(ThreadGroup_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(Properties_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(StringBuffer_klass), Klass*) \ - static_field(SystemDictionary, WK_KLASS(MethodHandle_klass), Klass*) \ - static_field(SystemDictionary, _box_klasses[0], Klass*) \ - static_field(SystemDictionary, _java_system_loader, oop) \ + static_field(SystemDictionary, _dictionary, Dictionary*) \ + static_field(SystemDictionary, _placeholders, PlaceholderTable*) \ + static_field(SystemDictionary, _shared_dictionary, Dictionary*) \ + static_field(SystemDictionary, _system_loader_lock_obj, oop) \ + static_field(SystemDictionary, _loader_constraints, LoaderConstraintTable*) \ + static_field(SystemDictionary, WK_KLASS(Object_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(String_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Class_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Cloneable_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ClassLoader_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Serializable_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(System_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Throwable_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ThreadDeath_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Error_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Exception_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(RuntimeException_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ClassNotFoundException_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(NoClassDefFoundError_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(LinkageError_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ClassCastException_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ArrayStoreException_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(VirtualMachineError_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(OutOfMemoryError_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(StackOverflowError_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ProtectionDomain_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(AccessControlContext_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(SecureClassLoader_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Reference_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(SoftReference_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(WeakReference_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(FinalReference_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(PhantomReference_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Cleaner_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Finalizer_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Thread_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(ThreadGroup_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(Properties_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(StringBuffer_klass), Klass*) \ + static_field(SystemDictionary, WK_KLASS(MethodHandle_klass), Klass*) \ + static_field(SystemDictionary, _box_klasses[0], Klass*) \ + static_field(SystemDictionary, _java_system_loader, oop) \ \ /*************/ \ /* vmSymbols */ \ /*************/ \ \ - 
static_field(vmSymbols, _symbols[0], Symbol*) \ + static_field(vmSymbols, _symbols[0], Symbol*) \ \ /*******************/ \ /* HashtableBucket */ \ /*******************/ \ \ - nonstatic_field(HashtableBucket<mtInternal>, _entry, BasicHashtableEntry<mtInternal>*) \ + nonstatic_field(HashtableBucket<mtInternal>, _entry, BasicHashtableEntry<mtInternal>*) \ \ /******************/ \ /* HashtableEntry */ \ @@ -721,12 +721,12 @@ /* Hashtable */ \ /*************/ \ \ - nonstatic_field(BasicHashtable<mtInternal>, _table_size, int) \ - nonstatic_field(BasicHashtable<mtInternal>, _buckets, HashtableBucket<mtInternal>*) \ - nonstatic_field(BasicHashtable<mtInternal>, _free_list, BasicHashtableEntry<mtInternal>*) \ - nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry, char*) \ - nonstatic_field(BasicHashtable<mtInternal>, _end_block, char*) \ - nonstatic_field(BasicHashtable<mtInternal>, _entry_size, int) \ + nonstatic_field(BasicHashtable<mtInternal>, _table_size, int) \ + nonstatic_field(BasicHashtable<mtInternal>, _buckets, HashtableBucket<mtInternal>*) \ + nonstatic_field(BasicHashtable<mtInternal>, _free_list, BasicHashtableEntry<mtInternal>*) \ + nonstatic_field(BasicHashtable<mtInternal>, _first_free_entry, char*) \ + nonstatic_field(BasicHashtable<mtInternal>, _end_block, char*) \ + nonstatic_field(BasicHashtable<mtInternal>, _entry_size, int) \ \ /*******************/ \ /* DictionaryEntry */ \ @@ -764,7 +764,7 @@ nonstatic_field(ClassLoaderData, _class_loader, oop) \ nonstatic_field(ClassLoaderData, _next, ClassLoaderData*) \ \ - static_field(ClassLoaderDataGraph, _head, ClassLoaderData*) \ + static_field(ClassLoaderDataGraph, _head, ClassLoaderData*) \ \ /**********/ \ /* Arrays */ \ @@ -786,8 +786,8 @@ /* CodeCache (NOTE: incomplete) */ \ /********************************/ \ \ - static_field(CodeCache, _heaps, GrowableArray<CodeHeap*>*) \ - static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \ + static_field(CodeCache, _heaps, GrowableArray<CodeHeap*>*) \ + static_field(CodeCache, _scavenge_root_nmethods, nmethod*) \ \ /*******************************/ \ /* CodeHeap (NOTE: incomplete) */ \ @@ -873,37 +873,37 @@ /* NMethods (NOTE: incomplete, but only a little) */ \ /**************************************************/ \ \ - nonstatic_field(nmethod, _method, Method*) \ - nonstatic_field(nmethod, _entry_bci, int) \ - nonstatic_field(nmethod, _osr_link, nmethod*) \ - nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \ - nonstatic_field(nmethod, _scavenge_root_state, jbyte) \ - nonstatic_field(nmethod, _state, volatile unsigned char) \ - nonstatic_field(nmethod, _exception_offset, int) \ - nonstatic_field(nmethod, _deoptimize_offset, int) \ - nonstatic_field(nmethod, _deoptimize_mh_offset, int) \ - nonstatic_field(nmethod, _orig_pc_offset, int) \ - nonstatic_field(nmethod, _stub_offset, int) \ - nonstatic_field(nmethod, _consts_offset, int) \ - nonstatic_field(nmethod, _oops_offset, int) \ - nonstatic_field(nmethod, _metadata_offset, int) \ - nonstatic_field(nmethod, _scopes_data_offset, int) \ - nonstatic_field(nmethod, _scopes_pcs_offset, int) \ - nonstatic_field(nmethod, _dependencies_offset, int) \ - nonstatic_field(nmethod, _handler_table_offset, int) \ - nonstatic_field(nmethod, _nul_chk_table_offset, int) \ - nonstatic_field(nmethod, _nmethod_end_offset, int) \ - nonstatic_field(nmethod, _entry_point, address) \ - nonstatic_field(nmethod, _verified_entry_point, address) \ - nonstatic_field(nmethod, _osr_entry_point, address) \ - volatile_nonstatic_field(nmethod, 
_lock_count, jint) \ - nonstatic_field(nmethod, _stack_traversal_mark, long) \ - nonstatic_field(nmethod, _compile_id, int) \ - nonstatic_field(nmethod, _comp_level, int) \ - nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \ - nonstatic_field(nmethod, _marked_for_deoptimization, bool) \ - \ - unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \ + nonstatic_field(nmethod, _method, Method*) \ + nonstatic_field(nmethod, _entry_bci, int) \ + nonstatic_field(nmethod, _osr_link, nmethod*) \ + nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \ + nonstatic_field(nmethod, _scavenge_root_state, jbyte) \ + nonstatic_field(nmethod, _state, volatile unsigned char) \ + nonstatic_field(nmethod, _exception_offset, int) \ + nonstatic_field(nmethod, _deoptimize_offset, int) \ + nonstatic_field(nmethod, _deoptimize_mh_offset, int) \ + nonstatic_field(nmethod, _orig_pc_offset, int) \ + nonstatic_field(nmethod, _stub_offset, int) \ + nonstatic_field(nmethod, _consts_offset, int) \ + nonstatic_field(nmethod, _oops_offset, int) \ + nonstatic_field(nmethod, _metadata_offset, int) \ + nonstatic_field(nmethod, _scopes_data_offset, int) \ + nonstatic_field(nmethod, _scopes_pcs_offset, int) \ + nonstatic_field(nmethod, _dependencies_offset, int) \ + nonstatic_field(nmethod, _handler_table_offset, int) \ + nonstatic_field(nmethod, _nul_chk_table_offset, int) \ + nonstatic_field(nmethod, _nmethod_end_offset, int) \ + nonstatic_field(nmethod, _entry_point, address) \ + nonstatic_field(nmethod, _verified_entry_point, address) \ + nonstatic_field(nmethod, _osr_entry_point, address) \ + volatile_nonstatic_field(nmethod, _lock_count, jint) \ + nonstatic_field(nmethod, _stack_traversal_mark, long) \ + nonstatic_field(nmethod, _compile_id, int) \ + nonstatic_field(nmethod, _comp_level, int) \ + nonstatic_field(nmethod, _exception_cache, ExceptionCache*) \ + nonstatic_field(nmethod, _marked_for_deoptimization, bool) \ + \ + unchecked_c2_static_field(Deoptimization, _trap_reason_name, void*) \ \ /********************************/ \ /* JavaCalls (NOTE: incomplete) */ \ @@ -928,7 +928,7 @@ nonstatic_field(ThreadShadow, _pending_exception, oop) \ nonstatic_field(ThreadShadow, _exception_file, const char*) \ nonstatic_field(ThreadShadow, _exception_line, int) \ - volatile_nonstatic_field(Thread, _suspend_flags, uint32_t) \ + volatile_nonstatic_field(Thread, _suspend_flags, uint32_t) \ nonstatic_field(Thread, _active_handles, JNIHandleBlock*) \ nonstatic_field(Thread, _tlab, ThreadLocalAllocBuffer) \ nonstatic_field(Thread, _allocated_bytes, jlong) \ @@ -948,7 +948,7 @@ volatile_nonstatic_field(JavaThread, _is_method_handle_return, int) \ nonstatic_field(JavaThread, _special_runtime_exit_condition, JavaThread::AsyncRequests) \ nonstatic_field(JavaThread, _saved_exception_pc, address) \ - volatile_nonstatic_field(JavaThread, _thread_state, JavaThreadState) \ + volatile_nonstatic_field(JavaThread, _thread_state, JavaThreadState) \ nonstatic_field(JavaThread, _osthread, OSThread*) \ nonstatic_field(JavaThread, _stack_base, address) \ nonstatic_field(JavaThread, _stack_size, size_t) \ @@ -991,7 +991,7 @@ static_field(JNIHandles, _weak_global_handles, JNIHandleBlock*) \ static_field(JNIHandles, _deleted_handle, oop) \ \ - unchecked_nonstatic_field(JNIHandleBlock, _handles, JNIHandleBlock::block_size_in_oops * sizeof(Oop)) /* Note: no type */ \ + unchecked_nonstatic_field(JNIHandleBlock, _handles, JNIHandleBlock::block_size_in_oops * sizeof(Oop)) /* Note: no type */ \ nonstatic_field(JNIHandleBlock, 
_top, int) \ nonstatic_field(JNIHandleBlock, _next, JNIHandleBlock*) \ \ @@ -1019,80 +1019,80 @@ /* allocation */ \ /**************/ \ \ - nonstatic_field(Chunk, _next, Chunk*) \ - nonstatic_field(Chunk, _len, const size_t) \ + nonstatic_field(Chunk, _next, Chunk*) \ + nonstatic_field(Chunk, _len, const size_t) \ \ - nonstatic_field(Arena, _first, Chunk*) \ - nonstatic_field(Arena, _chunk, Chunk*) \ - nonstatic_field(Arena, _hwm, char*) \ - nonstatic_field(Arena, _max, char*) \ + nonstatic_field(Arena, _first, Chunk*) \ + nonstatic_field(Arena, _chunk, Chunk*) \ + nonstatic_field(Arena, _hwm, char*) \ + nonstatic_field(Arena, _max, char*) \ \ /************/ \ /* CI */ \ /************/ \ \ - nonstatic_field(ciEnv, _system_dictionary_modification_counter, int) \ - nonstatic_field(ciEnv, _compiler_data, void*) \ - nonstatic_field(ciEnv, _failure_reason, const char*) \ - nonstatic_field(ciEnv, _factory, ciObjectFactory*) \ - nonstatic_field(ciEnv, _dependencies, Dependencies*) \ - nonstatic_field(ciEnv, _task, CompileTask*) \ - nonstatic_field(ciEnv, _arena, Arena*) \ + nonstatic_field(ciEnv, _system_dictionary_modification_counter, int) \ + nonstatic_field(ciEnv, _compiler_data, void*) \ + nonstatic_field(ciEnv, _failure_reason, const char*) \ + nonstatic_field(ciEnv, _factory, ciObjectFactory*) \ + nonstatic_field(ciEnv, _dependencies, Dependencies*) \ + nonstatic_field(ciEnv, _task, CompileTask*) \ + nonstatic_field(ciEnv, _arena, Arena*) \ \ - nonstatic_field(ciBaseObject, _ident, uint) \ + nonstatic_field(ciBaseObject, _ident, uint) \ \ - nonstatic_field(ciObject, _handle, jobject) \ - nonstatic_field(ciObject, _klass, ciKlass*) \ + nonstatic_field(ciObject, _handle, jobject) \ + nonstatic_field(ciObject, _klass, ciKlass*) \ \ - nonstatic_field(ciMetadata, _metadata, Metadata*) \ + nonstatic_field(ciMetadata, _metadata, Metadata*) \ \ - nonstatic_field(ciSymbol, _symbol, Symbol*) \ + nonstatic_field(ciSymbol, _symbol, Symbol*) \ \ - nonstatic_field(ciType, _basic_type, BasicType) \ + nonstatic_field(ciType, _basic_type, BasicType) \ \ - nonstatic_field(ciKlass, _name, ciSymbol*) \ + nonstatic_field(ciKlass, _name, ciSymbol*) \ \ - nonstatic_field(ciArrayKlass, _dimension, jint) \ + nonstatic_field(ciArrayKlass, _dimension, jint) \ \ - nonstatic_field(ciObjArrayKlass, _element_klass, ciKlass*) \ - nonstatic_field(ciObjArrayKlass, _base_element_klass, ciKlass*) \ + nonstatic_field(ciObjArrayKlass, _element_klass, ciKlass*) \ + nonstatic_field(ciObjArrayKlass, _base_element_klass, ciKlass*) \ \ - nonstatic_field(ciInstanceKlass, _init_state, InstanceKlass::ClassState) \ - nonstatic_field(ciInstanceKlass, _is_shared, bool) \ + nonstatic_field(ciInstanceKlass, _init_state, InstanceKlass::ClassState) \ + nonstatic_field(ciInstanceKlass, _is_shared, bool) \ \ - nonstatic_field(ciMethod, _interpreter_invocation_count, int) \ - nonstatic_field(ciMethod, _interpreter_throwout_count, int) \ - nonstatic_field(ciMethod, _instructions_size, int) \ + nonstatic_field(ciMethod, _interpreter_invocation_count, int) \ + nonstatic_field(ciMethod, _interpreter_throwout_count, int) \ + nonstatic_field(ciMethod, _instructions_size, int) \ \ - nonstatic_field(ciMethodData, _data_size, int) \ - nonstatic_field(ciMethodData, _state, u_char) \ - nonstatic_field(ciMethodData, _extra_data_size, int) \ - nonstatic_field(ciMethodData, _data, intptr_t*) \ - nonstatic_field(ciMethodData, _hint_di, int) \ - nonstatic_field(ciMethodData, _eflags, intx) \ - nonstatic_field(ciMethodData, _arg_local, intx) \ - 
nonstatic_field(ciMethodData, _arg_stack, intx) \ - nonstatic_field(ciMethodData, _arg_returned, intx) \ - nonstatic_field(ciMethodData, _current_mileage, int) \ - nonstatic_field(ciMethodData, _orig, MethodData) \ + nonstatic_field(ciMethodData, _data_size, int) \ + nonstatic_field(ciMethodData, _state, u_char) \ + nonstatic_field(ciMethodData, _extra_data_size, int) \ + nonstatic_field(ciMethodData, _data, intptr_t*) \ + nonstatic_field(ciMethodData, _hint_di, int) \ + nonstatic_field(ciMethodData, _eflags, intx) \ + nonstatic_field(ciMethodData, _arg_local, intx) \ + nonstatic_field(ciMethodData, _arg_stack, intx) \ + nonstatic_field(ciMethodData, _arg_returned, intx) \ + nonstatic_field(ciMethodData, _current_mileage, int) \ + nonstatic_field(ciMethodData, _orig, MethodData) \ \ - nonstatic_field(ciField, _holder, ciInstanceKlass*) \ - nonstatic_field(ciField, _name, ciSymbol*) \ - nonstatic_field(ciField, _signature, ciSymbol*) \ - nonstatic_field(ciField, _offset, int) \ - nonstatic_field(ciField, _is_constant, bool) \ - nonstatic_field(ciField, _constant_value, ciConstant) \ + nonstatic_field(ciField, _holder, ciInstanceKlass*) \ + nonstatic_field(ciField, _name, ciSymbol*) \ + nonstatic_field(ciField, _signature, ciSymbol*) \ + nonstatic_field(ciField, _offset, int) \ + nonstatic_field(ciField, _is_constant, bool) \ + nonstatic_field(ciField, _constant_value, ciConstant) \ \ - nonstatic_field(ciObjectFactory, _ci_metadata, GrowableArray<ciMetadata*>*) \ - nonstatic_field(ciObjectFactory, _symbols, GrowableArray<ciSymbol*>*) \ - nonstatic_field(ciObjectFactory, _unloaded_methods, GrowableArray<ciMethod*>*) \ + nonstatic_field(ciObjectFactory, _ci_metadata, GrowableArray<ciMetadata*>*) \ + nonstatic_field(ciObjectFactory, _symbols, GrowableArray<ciSymbol*>*) \ + nonstatic_field(ciObjectFactory, _unloaded_methods, GrowableArray<ciMethod*>*) \ \ - nonstatic_field(ciConstant, _type, BasicType) \ - nonstatic_field(ciConstant, _value._int, jint) \ - nonstatic_field(ciConstant, _value._long, jlong) \ - nonstatic_field(ciConstant, _value._float, jfloat) \ - nonstatic_field(ciConstant, _value._double, jdouble) \ - nonstatic_field(ciConstant, _value._object, ciObject*) \ + nonstatic_field(ciConstant, _type, BasicType) \ + nonstatic_field(ciConstant, _value._int, jint) \ + nonstatic_field(ciConstant, _value._long, jlong) \ + nonstatic_field(ciConstant, _value._float, jfloat) \ + nonstatic_field(ciConstant, _value._double, jdouble) \ + nonstatic_field(ciConstant, _value._object, ciObject*) \ \ /************/ \ /* Monitors */ \ @@ -1108,7 +1108,7 @@ volatile_nonstatic_field(BasicLock, _displaced_header, markOop) \ nonstatic_field(BasicObjectLock, _lock, BasicLock) \ nonstatic_field(BasicObjectLock, _obj, oop) \ - static_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \ + static_field(ObjectSynchronizer, gBlockList, ObjectMonitor*) \ \ /*********************/ \ /* Matcher (C2 only) */ \ @@ -1116,111 +1116,111 @@ \ unchecked_c2_static_field(Matcher, _regEncode, sizeof(Matcher::_regEncode)) /* NOTE: no type */ \ \ - c2_nonstatic_field(Node, _in, Node**) \ - c2_nonstatic_field(Node, _out, Node**) \ - c2_nonstatic_field(Node, _cnt, node_idx_t) \ - c2_nonstatic_field(Node, _max, node_idx_t) \ - c2_nonstatic_field(Node, _outcnt, node_idx_t) \ - c2_nonstatic_field(Node, _outmax, node_idx_t) \ - c2_nonstatic_field(Node, _idx, const node_idx_t) \ - c2_nonstatic_field(Node, _class_id, jushort) \ - c2_nonstatic_field(Node, _flags, jushort) \ + c2_nonstatic_field(Node, _in, Node**) \ + 
c2_nonstatic_field(Node, _out, Node**) \ + c2_nonstatic_field(Node, _cnt, node_idx_t) \ + c2_nonstatic_field(Node, _max, node_idx_t) \ + c2_nonstatic_field(Node, _outcnt, node_idx_t) \ + c2_nonstatic_field(Node, _outmax, node_idx_t) \ + c2_nonstatic_field(Node, _idx, const node_idx_t) \ + c2_nonstatic_field(Node, _class_id, jushort) \ + c2_nonstatic_field(Node, _flags, jushort) \ \ - c2_nonstatic_field(Compile, _root, RootNode*) \ - c2_nonstatic_field(Compile, _unique, uint) \ - c2_nonstatic_field(Compile, _entry_bci, int) \ - c2_nonstatic_field(Compile, _top, Node*) \ - c2_nonstatic_field(Compile, _cfg, PhaseCFG*) \ - c2_nonstatic_field(Compile, _regalloc, PhaseRegAlloc*) \ - c2_nonstatic_field(Compile, _method, ciMethod*) \ - c2_nonstatic_field(Compile, _compile_id, const int) \ - c2_nonstatic_field(Compile, _save_argument_registers, const bool) \ - c2_nonstatic_field(Compile, _subsume_loads, const bool) \ - c2_nonstatic_field(Compile, _do_escape_analysis, const bool) \ - c2_nonstatic_field(Compile, _eliminate_boxing, const bool) \ - c2_nonstatic_field(Compile, _ilt, InlineTree*) \ + c2_nonstatic_field(Compile, _root, RootNode*) \ + c2_nonstatic_field(Compile, _unique, uint) \ + c2_nonstatic_field(Compile, _entry_bci, int) \ + c2_nonstatic_field(Compile, _top, Node*) \ + c2_nonstatic_field(Compile, _cfg, PhaseCFG*) \ + c2_nonstatic_field(Compile, _regalloc, PhaseRegAlloc*) \ + c2_nonstatic_field(Compile, _method, ciMethod*) \ + c2_nonstatic_field(Compile, _compile_id, const int) \ + c2_nonstatic_field(Compile, _save_argument_registers, const bool) \ + c2_nonstatic_field(Compile, _subsume_loads, const bool) \ + c2_nonstatic_field(Compile, _do_escape_analysis, const bool) \ + c2_nonstatic_field(Compile, _eliminate_boxing, const bool) \ + c2_nonstatic_field(Compile, _ilt, InlineTree*) \ \ - c2_nonstatic_field(InlineTree, _caller_jvms, JVMState*) \ - c2_nonstatic_field(InlineTree, _method, ciMethod*) \ - c2_nonstatic_field(InlineTree, _caller_tree, InlineTree*) \ - c2_nonstatic_field(InlineTree, _subtrees, GrowableArray<InlineTree*>) \ + c2_nonstatic_field(InlineTree, _caller_jvms, JVMState*) \ + c2_nonstatic_field(InlineTree, _method, ciMethod*) \ + c2_nonstatic_field(InlineTree, _caller_tree, InlineTree*) \ + c2_nonstatic_field(InlineTree, _subtrees, GrowableArray<InlineTree*>) \ \ - c2_nonstatic_field(OptoRegPair, _first, short) \ - c2_nonstatic_field(OptoRegPair, _second, short) \ + c2_nonstatic_field(OptoRegPair, _first, short) \ + c2_nonstatic_field(OptoRegPair, _second, short) \ \ - c2_nonstatic_field(JVMState, _caller, JVMState*) \ - c2_nonstatic_field(JVMState, _depth, uint) \ - c2_nonstatic_field(JVMState, _locoff, uint) \ - c2_nonstatic_field(JVMState, _stkoff, uint) \ - c2_nonstatic_field(JVMState, _monoff, uint) \ - c2_nonstatic_field(JVMState, _scloff, uint) \ - c2_nonstatic_field(JVMState, _endoff, uint) \ - c2_nonstatic_field(JVMState, _sp, uint) \ - c2_nonstatic_field(JVMState, _bci, int) \ - c2_nonstatic_field(JVMState, _method, ciMethod*) \ - c2_nonstatic_field(JVMState, _map, SafePointNode*) \ + c2_nonstatic_field(JVMState, _caller, JVMState*) \ + c2_nonstatic_field(JVMState, _depth, uint) \ + c2_nonstatic_field(JVMState, _locoff, uint) \ + c2_nonstatic_field(JVMState, _stkoff, uint) \ + c2_nonstatic_field(JVMState, _monoff, uint) \ + c2_nonstatic_field(JVMState, _scloff, uint) \ + c2_nonstatic_field(JVMState, _endoff, uint) \ + c2_nonstatic_field(JVMState, _sp, uint) \ + c2_nonstatic_field(JVMState, _bci, int) \ + c2_nonstatic_field(JVMState, _method, ciMethod*) \ + 
c2_nonstatic_field(JVMState, _map, SafePointNode*) \ \ - c2_nonstatic_field(SafePointNode, _jvms, JVMState* const) \ + c2_nonstatic_field(SafePointNode, _jvms, JVMState* const) \ \ - c2_nonstatic_field(MachSafePointNode, _jvms, JVMState*) \ - c2_nonstatic_field(MachSafePointNode, _jvmadj, uint) \ + c2_nonstatic_field(MachSafePointNode, _jvms, JVMState*) \ + c2_nonstatic_field(MachSafePointNode, _jvmadj, uint) \ \ - c2_nonstatic_field(MachIfNode, _prob, jfloat) \ - c2_nonstatic_field(MachIfNode, _fcnt, jfloat) \ + c2_nonstatic_field(MachIfNode, _prob, jfloat) \ + c2_nonstatic_field(MachIfNode, _fcnt, jfloat) \ \ - c2_nonstatic_field(CallNode, _entry_point, address) \ + c2_nonstatic_field(CallNode, _entry_point, address) \ \ - c2_nonstatic_field(CallJavaNode, _method, ciMethod*) \ + c2_nonstatic_field(CallJavaNode, _method, ciMethod*) \ \ - c2_nonstatic_field(CallRuntimeNode, _name, const char*) \ + c2_nonstatic_field(CallRuntimeNode, _name, const char*) \ \ - c2_nonstatic_field(CallStaticJavaNode, _name, const char*) \ + c2_nonstatic_field(CallStaticJavaNode, _name, const char*) \ \ - c2_nonstatic_field(MachCallJavaNode, _method, ciMethod*) \ - c2_nonstatic_field(MachCallJavaNode, _bci, int) \ + c2_nonstatic_field(MachCallJavaNode, _method, ciMethod*) \ + c2_nonstatic_field(MachCallJavaNode, _bci, int) \ \ - c2_nonstatic_field(MachCallStaticJavaNode, _name, const char*) \ + c2_nonstatic_field(MachCallStaticJavaNode, _name, const char*) \ \ - c2_nonstatic_field(MachCallRuntimeNode, _name, const char*) \ + c2_nonstatic_field(MachCallRuntimeNode, _name, const char*) \ \ - c2_nonstatic_field(PhaseCFG, _number_of_blocks, uint) \ - c2_nonstatic_field(PhaseCFG, _blocks, Block_List) \ - c2_nonstatic_field(PhaseCFG, _node_to_block_mapping, Block_Array) \ - c2_nonstatic_field(PhaseCFG, _root_block, Block*) \ + c2_nonstatic_field(PhaseCFG, _number_of_blocks, uint) \ + c2_nonstatic_field(PhaseCFG, _blocks, Block_List) \ + c2_nonstatic_field(PhaseCFG, _node_to_block_mapping, Block_Array) \ + c2_nonstatic_field(PhaseCFG, _root_block, Block*) \ \ - c2_nonstatic_field(PhaseRegAlloc, _node_regs, OptoRegPair*) \ - c2_nonstatic_field(PhaseRegAlloc, _node_regs_max_index, uint) \ - c2_nonstatic_field(PhaseRegAlloc, _framesize, uint) \ - c2_nonstatic_field(PhaseRegAlloc, _max_reg, OptoReg::Name) \ + c2_nonstatic_field(PhaseRegAlloc, _node_regs, OptoRegPair*) \ + c2_nonstatic_field(PhaseRegAlloc, _node_regs_max_index, uint) \ + c2_nonstatic_field(PhaseRegAlloc, _framesize, uint) \ + c2_nonstatic_field(PhaseRegAlloc, _max_reg, OptoReg::Name) \ \ - c2_nonstatic_field(PhaseChaitin, _trip_cnt, int) \ - c2_nonstatic_field(PhaseChaitin, _alternate, int) \ - c2_nonstatic_field(PhaseChaitin, _lo_degree, uint) \ - c2_nonstatic_field(PhaseChaitin, _lo_stk_degree, uint) \ - c2_nonstatic_field(PhaseChaitin, _hi_degree, uint) \ - c2_nonstatic_field(PhaseChaitin, _simplified, uint) \ + c2_nonstatic_field(PhaseChaitin, _trip_cnt, int) \ + c2_nonstatic_field(PhaseChaitin, _alternate, int) \ + c2_nonstatic_field(PhaseChaitin, _lo_degree, uint) \ + c2_nonstatic_field(PhaseChaitin, _lo_stk_degree, uint) \ + c2_nonstatic_field(PhaseChaitin, _hi_degree, uint) \ + c2_nonstatic_field(PhaseChaitin, _simplified, uint) \ \ - c2_nonstatic_field(Block, _nodes, Node_List) \ - c2_nonstatic_field(Block, _succs, Block_Array) \ - c2_nonstatic_field(Block, _num_succs, uint) \ - c2_nonstatic_field(Block, _pre_order, uint) \ - c2_nonstatic_field(Block, _dom_depth, uint) \ - c2_nonstatic_field(Block, _idom, Block*) \ - c2_nonstatic_field(Block, 
_freq, jdouble) \ + c2_nonstatic_field(Block, _nodes, Node_List) \ + c2_nonstatic_field(Block, _succs, Block_Array) \ + c2_nonstatic_field(Block, _num_succs, uint) \ + c2_nonstatic_field(Block, _pre_order, uint) \ + c2_nonstatic_field(Block, _dom_depth, uint) \ + c2_nonstatic_field(Block, _idom, Block*) \ + c2_nonstatic_field(Block, _freq, jdouble) \ \ - c2_nonstatic_field(CFGElement, _freq, jdouble) \ + c2_nonstatic_field(CFGElement, _freq, jdouble) \ \ - c2_nonstatic_field(Block_List, _cnt, uint) \ + c2_nonstatic_field(Block_List, _cnt, uint) \ \ - c2_nonstatic_field(Block_Array, _size, uint) \ - c2_nonstatic_field(Block_Array, _blocks, Block**) \ - c2_nonstatic_field(Block_Array, _arena, Arena*) \ + c2_nonstatic_field(Block_Array, _size, uint) \ + c2_nonstatic_field(Block_Array, _blocks, Block**) \ + c2_nonstatic_field(Block_Array, _arena, Arena*) \ \ - c2_nonstatic_field(Node_List, _cnt, uint) \ + c2_nonstatic_field(Node_List, _cnt, uint) \ \ - c2_nonstatic_field(Node_Array, _max, uint) \ - c2_nonstatic_field(Node_Array, _nodes, Node**) \ - c2_nonstatic_field(Node_Array, _a, Arena*) \ + c2_nonstatic_field(Node_Array, _max, uint) \ + c2_nonstatic_field(Node_Array, _nodes, Node**) \ + c2_nonstatic_field(Node_Array, _a, Arena*) \ \ \ /*********************/ \ @@ -1231,22 +1231,22 @@ nonstatic_field(Flag, _name, const char*) \ unchecked_nonstatic_field(Flag, _addr, sizeof(void*)) /* NOTE: no type */ \ nonstatic_field(Flag, _flags, Flag::Flags) \ - static_field(Flag, flags, Flag*) \ - static_field(Flag, numFlags, size_t) \ + static_field(Flag, flags, Flag*) \ + static_field(Flag, numFlags, size_t) \ \ /*************************/ \ /* JDK / VM version info */ \ /*************************/ \ \ - static_field(Abstract_VM_Version, _s_vm_release, const char*) \ - static_field(Abstract_VM_Version, _s_internal_vm_info_string, const char*) \ - static_field(Abstract_VM_Version, _vm_major_version, int) \ - static_field(Abstract_VM_Version, _vm_minor_version, int) \ - static_field(Abstract_VM_Version, _vm_micro_version, int) \ - static_field(Abstract_VM_Version, _vm_build_number, int) \ - static_field(Abstract_VM_Version, _reserve_for_allocation_prefetch, int) \ + static_field(Abstract_VM_Version, _s_vm_release, const char*) \ + static_field(Abstract_VM_Version, _s_internal_vm_info_string, const char*) \ + static_field(Abstract_VM_Version, _vm_major_version, int) \ + static_field(Abstract_VM_Version, _vm_minor_version, int) \ + static_field(Abstract_VM_Version, _vm_micro_version, int) \ + static_field(Abstract_VM_Version, _vm_build_number, int) \ + static_field(Abstract_VM_Version, _reserve_for_allocation_prefetch, int) \ \ - static_field(JDK_Version, _current, JDK_Version) \ + static_field(JDK_Version, _current, JDK_Version) \ nonstatic_field(JDK_Version, _partially_initialized, bool) \ nonstatic_field(JDK_Version, _major, unsigned char) \ \ @@ -1260,65 +1260,65 @@ /* Arguments */ \ /*************/ \ \ - static_field(Arguments, _jvm_flags_array, char**) \ - static_field(Arguments, _num_jvm_flags, int) \ - static_field(Arguments, _jvm_args_array, char**) \ - static_field(Arguments, _num_jvm_args, int) \ - static_field(Arguments, _java_command, char*) \ + static_field(Arguments, _jvm_flags_array, char**) \ + static_field(Arguments, _num_jvm_flags, int) \ + static_field(Arguments, _jvm_args_array, char**) \ + static_field(Arguments, _num_jvm_args, int) \ + static_field(Arguments, _java_command, char*) \ \ /************/ \ /* Array<T> */ \ /************/ \ \ - nonstatic_field(Array<int>, _length, int) \ 
- unchecked_nonstatic_field(Array<int>, _data, sizeof(int)) \ - unchecked_nonstatic_field(Array<u1>, _data, sizeof(u1)) \ - unchecked_nonstatic_field(Array<u2>, _data, sizeof(u2)) \ - unchecked_nonstatic_field(Array<Method*>, _data, sizeof(Method*)) \ - unchecked_nonstatic_field(Array<Klass*>, _data, sizeof(Klass*)) \ + nonstatic_field(Array<int>, _length, int) \ + unchecked_nonstatic_field(Array<int>, _data, sizeof(int)) \ + unchecked_nonstatic_field(Array<u1>, _data, sizeof(u1)) \ + unchecked_nonstatic_field(Array<u2>, _data, sizeof(u2)) \ + unchecked_nonstatic_field(Array<Method*>, _data, sizeof(Method*)) \ + unchecked_nonstatic_field(Array<Klass*>, _data, sizeof(Klass*)) \ \ /*********************************/ \ /* java_lang_Class fields */ \ /*********************************/ \ \ - static_field(java_lang_Class, _klass_offset, int) \ - static_field(java_lang_Class, _array_klass_offset, int) \ - static_field(java_lang_Class, _oop_size_offset, int) \ - static_field(java_lang_Class, _static_oop_field_count_offset, int) \ + static_field(java_lang_Class, _klass_offset, int) \ + static_field(java_lang_Class, _array_klass_offset, int) \ + static_field(java_lang_Class, _oop_size_offset, int) \ + static_field(java_lang_Class, _static_oop_field_count_offset, int) \ \ /************************/ \ /* Miscellaneous fields */ \ /************************/ \ \ - nonstatic_field(CompileTask, _method, Method*) \ - nonstatic_field(CompileTask, _osr_bci, int) \ - nonstatic_field(CompileTask, _comp_level, int) \ - nonstatic_field(CompileTask, _compile_id, uint) \ - nonstatic_field(CompileTask, _next, CompileTask*) \ - nonstatic_field(CompileTask, _prev, CompileTask*) \ + nonstatic_field(CompileTask, _method, Method*) \ + nonstatic_field(CompileTask, _osr_bci, int) \ + nonstatic_field(CompileTask, _comp_level, int) \ + nonstatic_field(CompileTask, _compile_id, uint) \ + nonstatic_field(CompileTask, _next, CompileTask*) \ + nonstatic_field(CompileTask, _prev, CompileTask*) \ \ - nonstatic_field(vframeArray, _next, vframeArray*) \ - nonstatic_field(vframeArray, _original, frame) \ - nonstatic_field(vframeArray, _caller, frame) \ - nonstatic_field(vframeArray, _frames, int) \ + nonstatic_field(vframeArray, _next, vframeArray*) \ + nonstatic_field(vframeArray, _original, frame) \ + nonstatic_field(vframeArray, _caller, frame) \ + nonstatic_field(vframeArray, _frames, int) \ \ - nonstatic_field(vframeArrayElement, _frame, frame) \ - nonstatic_field(vframeArrayElement, _bci, int) \ - nonstatic_field(vframeArrayElement, _method, Method*) \ + nonstatic_field(vframeArrayElement, _frame, frame) \ + nonstatic_field(vframeArrayElement, _bci, int) \ + nonstatic_field(vframeArrayElement, _method, Method*) \ \ - nonstatic_field(PtrQueue, _active, bool) \ - nonstatic_field(PtrQueue, _buf, void**) \ - nonstatic_field(PtrQueue, _index, size_t) \ + nonstatic_field(PtrQueue, _active, bool) \ + nonstatic_field(PtrQueue, _buf, void**) \ + nonstatic_field(PtrQueue, _index, size_t) \ \ - nonstatic_field(AccessFlags, _flags, jint) \ - nonstatic_field(elapsedTimer, _counter, jlong) \ - nonstatic_field(elapsedTimer, _active, bool) \ - nonstatic_field(InvocationCounter, _counter, unsigned int) \ - volatile_nonstatic_field(FreeChunk, _size, size_t) \ - nonstatic_field(FreeChunk, _next, FreeChunk*) \ - nonstatic_field(FreeChunk, _prev, FreeChunk*) \ - nonstatic_field(AdaptiveFreeList<FreeChunk>, _size, size_t) \ - nonstatic_field(AdaptiveFreeList<FreeChunk>, _count, ssize_t) + nonstatic_field(AccessFlags, _flags, jint) \ + 
nonstatic_field(elapsedTimer, _counter, jlong) \ + nonstatic_field(elapsedTimer, _active, bool) \ + nonstatic_field(InvocationCounter, _counter, unsigned int) \ + volatile_nonstatic_field(FreeChunk, _size, size_t) \ + nonstatic_field(FreeChunk, _next, FreeChunk*) \ + nonstatic_field(FreeChunk, _prev, FreeChunk*) \ + nonstatic_field(AdaptiveFreeList<FreeChunk>, _size, size_t) \ + nonstatic_field(AdaptiveFreeList<FreeChunk>, _count, ssize_t) //--------------------------------------------------------------------------------
--- a/hotspot/src/share/vm/services/memoryPool.cpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/services/memoryPool.cpp Mon Sep 07 20:03:56 2015 +0200 @@ -204,21 +204,21 @@ return MemoryUsage(initial_size(), used, committed, maxSize); } -SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* gen, +SurvivorContiguousSpacePool::SurvivorContiguousSpacePool(DefNewGeneration* young_gen, const char* name, PoolType type, size_t max_size, bool support_usage_threshold) : - CollectedMemoryPool(name, type, gen->from()->capacity(), max_size, - support_usage_threshold), _gen(gen) { + CollectedMemoryPool(name, type, young_gen->from()->capacity(), max_size, + support_usage_threshold), _young_gen(young_gen) { } size_t SurvivorContiguousSpacePool::used_in_bytes() { - return _gen->from()->used(); + return _young_gen->from()->used(); } size_t SurvivorContiguousSpacePool::committed_in_bytes() { - return _gen->from()->capacity(); + return _young_gen->from()->capacity(); } MemoryUsage SurvivorContiguousSpacePool::get_memory_usage() {
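The _gen to _young_gen rename above leaves the delegation pattern unchanged: the survivor pool reports whatever the current from-space holds. A toy model under invented names; ToySpace, ToyYoungGen, and ToySurvivorPool stand in for ContiguousSpace, DefNewGeneration, and SurvivorContiguousSpacePool.

    #include <cstddef>

    struct ToySpace { size_t used_bytes; size_t capacity_bytes; };

    struct ToyYoungGen {
      ToySpace from_space;
      const ToySpace* from() const { return &from_space; }
    };

    struct ToySurvivorPool {
      const ToyYoungGen* _young_gen;  // renamed from _gen, as in the patch

      explicit ToySurvivorPool(const ToyYoungGen* young_gen)
          : _young_gen(young_gen) {}
      size_t used_in_bytes() const { return _young_gen->from()->used_bytes; }
      size_t committed_in_bytes() const { return _young_gen->from()->capacity_bytes; }
    };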
--- a/hotspot/src/share/vm/services/memoryPool.hpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/services/memoryPool.hpp Mon Sep 07 20:03:56 2015 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -163,10 +163,10 @@ class SurvivorContiguousSpacePool : public CollectedMemoryPool { private: - DefNewGeneration* _gen; + DefNewGeneration* _young_gen; public: - SurvivorContiguousSpacePool(DefNewGeneration* gen, + SurvivorContiguousSpacePool(DefNewGeneration* young_gen, const char* name, PoolType type, size_t max_size,
--- a/hotspot/src/share/vm/services/memoryService.cpp Tue Sep 01 21:38:07 2015 +0300 +++ b/hotspot/src/share/vm/services/memoryService.cpp Mon Sep 07 20:03:56 2015 +0200 @@ -212,13 +212,13 @@ return (MemoryPool*) pool; } -MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* gen, +MemoryPool* MemoryService::add_survivor_spaces(DefNewGeneration* young_gen, const char* name, bool is_heap, size_t max_size, bool support_usage_threshold) { MemoryPool::PoolType type = (is_heap ? MemoryPool::Heap : MemoryPool::NonHeap); - SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(gen, name, type, max_size, support_usage_threshold); + SurvivorContiguousSpacePool* pool = new SurvivorContiguousSpacePool(young_gen, name, type, max_size, support_usage_threshold); _pools_list->append(pool); return (MemoryPool*) pool; @@ -328,18 +328,18 @@ #if INCLUDE_ALL_GCS -void MemoryService::add_psYoung_memory_pool(PSYoungGen* gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) { +void MemoryService::add_psYoung_memory_pool(PSYoungGen* young_gen, MemoryManager* major_mgr, MemoryManager* minor_mgr) { assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers"); // Add a memory pool for each space and young gen doesn't // support low memory detection as it is expected to get filled up. - EdenMutableSpacePool* eden = new EdenMutableSpacePool(gen, - gen->eden_space(), + EdenMutableSpacePool* eden = new EdenMutableSpacePool(young_gen, + young_gen->eden_space(), "PS Eden Space", MemoryPool::Heap, false /* support_usage_threshold */); - SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(gen, + SurvivorMutableSpacePool* survivor = new SurvivorMutableSpacePool(young_gen, "PS Survivor Space", MemoryPool::Heap, false /* support_usage_threshold */); @@ -352,13 +352,13 @@ _pools_list->append(survivor); } -void MemoryService::add_psOld_memory_pool(PSOldGen* gen, MemoryManager* mgr) { - PSGenerationPool* old_gen = new PSGenerationPool(gen, - "PS Old Gen", - MemoryPool::Heap, - true /* support_usage_threshold */); - mgr->add_pool(old_gen); - _pools_list->append(old_gen); +void MemoryService::add_psOld_memory_pool(PSOldGen* old_gen, MemoryManager* mgr) { + PSGenerationPool* old_gen_pool = new PSGenerationPool(old_gen, + "PS Old Gen", + MemoryPool::Heap, + true /* support_usage_threshold */); + mgr->add_pool(old_gen_pool); + _pools_list->append(old_gen_pool); } void MemoryService::add_g1YoungGen_memory_pool(G1CollectedHeap* g1h, @@ -548,7 +548,7 @@ } // // GC manager type depends on the type of Generation. Depending on the space -// availablity and vm options the gc uses major gc manager or minor gc +// availability and vm options the gc uses major gc manager or minor gc // manager or both. The type of gc manager depends on the generation kind. // For DefNew and ParNew generation doing scavenge gc uses minor gc manager (so // _fullGC is set to false ) and for other generation kinds doing
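The "availability" comment fixed above describes which manager records a collection. A reduced illustration of the rule as the comment states it; the enum and helper are invented, and HotSpot derives this from the generation kind at runtime rather than from such a table.

    enum class ToyGenKind { DefNew, ParNew, MarkSweepCompact, ConcurrentMarkSweep };

    // Scavenges of DefNew/ParNew generations are recorded by the minor GC
    // manager (_fullGC == false); other generation kinds report to the
    // major GC manager, per the comment above.
    bool uses_minor_gc_manager(ToyGenKind kind) {
      return kind == ToyGenKind::DefNew || kind == ToyGenKind::ParNew;
    }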
--- a/hotspot/src/share/vm/services/memoryService.hpp	Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/services/memoryService.hpp	Mon Sep 07 20:03:56 2015 +0200
@@ -80,10 +80,10 @@
 
   }
 
-  static void add_psYoung_memory_pool(PSYoungGen* gen,
+  static void add_psYoung_memory_pool(PSYoungGen* young_gen,
                                       MemoryManager* major_mgr,
                                       MemoryManager* minor_mgr);
-  static void add_psOld_memory_pool(PSOldGen* gen,
+  static void add_psOld_memory_pool(PSOldGen* old_gen,
                                     MemoryManager* mgr);
 
   static void add_g1YoungGen_memory_pool(G1CollectedHeap* g1h,
@@ -97,7 +97,7 @@
                                       bool is_heap,
                                       size_t max_size,
                                       bool support_usage_threshold);
-  static MemoryPool* add_survivor_spaces(DefNewGeneration* gen,
+  static MemoryPool* add_survivor_spaces(DefNewGeneration* young_gen,
                                          const char* name,
                                          bool is_heap,
                                          size_t max_size,
@@ -162,7 +162,6 @@
                              bool recordGCEndTime, bool countCollection,
                              GCCause::Cause cause);
 
-  static void oops_do(OopClosure* f);
 
   static bool get_verbose() { return PrintGC; }
 
--- a/hotspot/src/share/vm/services/psMemoryPool.cpp	Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/services/psMemoryPool.cpp	Mon Sep 07 20:03:56 2015 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,18 +33,18 @@
 #include "services/memoryManager.hpp"
 #include "services/psMemoryPool.hpp"
 
-PSGenerationPool::PSGenerationPool(PSOldGen* gen,
+PSGenerationPool::PSGenerationPool(PSOldGen* old_gen,
                                    const char* name,
                                    PoolType type,
                                    bool support_usage_threshold) :
-  CollectedMemoryPool(name, type, gen->capacity_in_bytes(),
-                      gen->reserved().byte_size(), support_usage_threshold), _gen(gen) {
+  CollectedMemoryPool(name, type, old_gen->capacity_in_bytes(),
+                      old_gen->reserved().byte_size(), support_usage_threshold), _old_gen(old_gen) {
 }
 
 MemoryUsage PSGenerationPool::get_memory_usage() {
   size_t maxSize   = (available_for_allocation() ? max_size() : 0);
   size_t used      = used_in_bytes();
-  size_t committed = _gen->capacity_in_bytes();
+  size_t committed = _old_gen->capacity_in_bytes();
 
   return MemoryUsage(initial_size(), used, committed, maxSize);
 }
@@ -55,15 +55,16 @@
 // Max size of PS eden space is changing due to ergonomic.
 // PSYoungGen, PSOldGen, Eden, Survivor spaces are all resizable.
 //
-EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* gen,
+EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* young_gen,
                                            MutableSpace* space,
                                            const char* name,
                                            PoolType type,
                                            bool support_usage_threshold) :
   CollectedMemoryPool(name, type, space->capacity_in_bytes(),
-                      (gen->max_size() - gen->from_space()->capacity_in_bytes() - gen->to_space()->capacity_in_bytes()),
+                      (young_gen->max_size() - young_gen->from_space()->capacity_in_bytes() - young_gen->to_space()->capacity_in_bytes()),
                       support_usage_threshold),
-  _gen(gen), _space(space) {
+  _young_gen(young_gen),
+  _space(space) {
 }
 
 MemoryUsage EdenMutableSpacePool::get_memory_usage() {
@@ -79,13 +80,13 @@
 //
 // PS from and to survivor spaces could have different sizes.
 //
-SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* gen,
+SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* young_gen,
                                                    const char* name,
                                                    PoolType type,
                                                    bool support_usage_threshold) :
-  CollectedMemoryPool(name, type, gen->from_space()->capacity_in_bytes(),
-                      gen->from_space()->capacity_in_bytes(),
-                      support_usage_threshold), _gen(gen) {
+  CollectedMemoryPool(name, type, young_gen->from_space()->capacity_in_bytes(),
+                      young_gen->from_space()->capacity_in_bytes(),
+                      support_usage_threshold), _young_gen(young_gen) {
 }
 
 MemoryUsage SurvivorMutableSpacePool::get_memory_usage() {
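
The EdenMutableSpacePool constructor above computes the pool's max size as the young generation's max size minus the currently committed from- and to-space sizes, so the reported eden maximum moves as ergonomics resizes the survivor spaces. A toy recomputation of that formula (not HotSpot code; the sizes below are illustrative):

#include <cassert>
#include <cstddef>

// Eden's max = young gen max - committed from-space - committed to-space,
// mirroring the expression passed to CollectedMemoryPool above.
size_t eden_max_size(size_t young_max, size_t from_committed, size_t to_committed) {
  return young_max - from_committed - to_committed;
}

int main() {
  const size_t M = 1024 * 1024;
  // e.g. a 64M young gen with two 8M committed survivor spaces
  assert(eden_max_size(64 * M, 8 * M, 8 * M) == 48 * M);
  // shrinking the survivors to 4M each raises eden's reported max
  assert(eden_max_size(64 * M, 4 * M, 4 * M) == 56 * M);
  return 0;
}
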
--- a/hotspot/src/share/vm/services/psMemoryPool.hpp	Tue Sep 01 21:38:07 2015 +0300
+++ b/hotspot/src/share/vm/services/psMemoryPool.hpp	Mon Sep 07 20:03:56 2015 +0200
@@ -39,23 +39,23 @@
 
 class PSGenerationPool : public CollectedMemoryPool {
 private:
-  PSOldGen* _gen;
+  PSOldGen* _old_gen;
 
 public:
   PSGenerationPool(PSOldGen* pool, const char* name, PoolType type, bool support_usage_threshold);
 
   MemoryUsage get_memory_usage();
-  size_t used_in_bytes() { return _gen->used_in_bytes(); }
-  size_t max_size() const { return _gen->reserved().byte_size(); }
+  size_t used_in_bytes() { return _old_gen->used_in_bytes(); }
+  size_t max_size() const { return _old_gen->reserved().byte_size(); }
 };
 
 class EdenMutableSpacePool : public CollectedMemoryPool {
 private:
-  PSYoungGen* _gen;
+  PSYoungGen* _young_gen;
   MutableSpace* _space;
 
 public:
-  EdenMutableSpacePool(PSYoungGen* gen,
+  EdenMutableSpacePool(PSYoungGen* young_gen,
                        MutableSpace* space,
                        const char* name,
                        PoolType type,
@@ -66,16 +66,16 @@
   size_t used_in_bytes() { return space()->used_in_bytes(); }
   size_t max_size() const {
     // Eden's max_size = max_size of Young Gen - the current committed size of survivor spaces
-    return _gen->max_size() - _gen->from_space()->capacity_in_bytes() - _gen->to_space()->capacity_in_bytes();
+    return _young_gen->max_size() - _young_gen->from_space()->capacity_in_bytes() - _young_gen->to_space()->capacity_in_bytes();
   }
 };
 
 class SurvivorMutableSpacePool : public CollectedMemoryPool {
 private:
-  PSYoungGen* _gen;
+  PSYoungGen* _young_gen;
 
 public:
-  SurvivorMutableSpacePool(PSYoungGen* gen,
+  SurvivorMutableSpacePool(PSYoungGen* young_gen,
                            const char* name,
                            PoolType type,
                            bool support_usage_threshold);
@@ -83,14 +83,14 @@
   MemoryUsage get_memory_usage();
 
   size_t used_in_bytes() {
-    return _gen->from_space()->used_in_bytes();
+    return _young_gen->from_space()->used_in_bytes();
   }
   size_t committed_in_bytes() {
-    return _gen->from_space()->capacity_in_bytes();
+    return _young_gen->from_space()->capacity_in_bytes();
   }
   size_t max_size() const {
     // Return current committed size of the from-space
-    return _gen->from_space()->capacity_in_bytes();
+    return _young_gen->from_space()->capacity_in_bytes();
   }
 };
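
SurvivorMutableSpacePool's accessors above report only the from-space: because the PS from- and to-spaces can have different sizes, both committed_in_bytes() and max_size() track the current from-space capacity rather than a fixed limit. A minimal model of that sizing behavior (not HotSpot code; names and values are illustrative):

#include <cassert>
#include <cstddef>

struct SpaceModel { size_t used; size_t capacity; };

// Models the survivor pool: every query is answered from the from-space,
// and max == committed because the capacity itself is what can resize.
struct SurvivorPoolModel {
  SpaceModel from;
  size_t used_in_bytes() const      { return from.used; }
  size_t committed_in_bytes() const { return from.capacity; }
  size_t max_size() const           { return from.capacity; }
};

int main() {
  SurvivorPoolModel pool{{512 * 1024, 4 * 1024 * 1024}};
  assert(pool.committed_in_bytes() == pool.max_size());
  assert(pool.used_in_bytes() <= pool.committed_in_bytes());
  return 0;
}
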