# HG changeset patch # User amurillo # Date 1418367974 28800 # Node ID 62648789b8ba25c46f50cfb1a35ebf1ac9db256c # Parent 6494b13f88a867026ee316b444d9a4fa589dd6bd# Parent a4c96111465590a541220044d171ccd4ed9feb76 Merge diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp --- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -675,7 +675,7 @@ case handle_exception_nofpu_id: case handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id); + oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); break; case handle_exception_from_callee_id: { // At this point all registers except exception oop (RAX) and @@ -748,7 +748,7 @@ case handle_exception_nofpu_id: case handle_exception_id: // Restore the registers that were saved at the beginning. - restore_live_registers(sasm, id == handle_exception_nofpu_id); + restore_live_registers(sasm, id != handle_exception_nofpu_id); break; case handle_exception_from_callee_id: // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/os/windows/vm/os_windows.cpp --- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/os/windows/vm/os_windows.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -3074,7 +3074,7 @@ char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { assert((size_t)addr % os::vm_allocation_granularity() == 0, "reserve alignment"); - assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size"); + assert(bytes % os::vm_page_size() == 0, "reserve page size"); char* res; // note that if UseLargePages is on, all the areas that require interleaving // will go thru reserve_memory_special rather than thru here. diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/classfile/classFileParser.cpp --- a/hotspot/src/share/vm/classfile/classFileParser.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -3108,21 +3108,39 @@ } } -// Transfer ownership of metadata allocated to the InstanceKlass. -void ClassFileParser::apply_parsed_class_metadata( - instanceKlassHandle this_klass, - int java_fields_count, TRAPS) { - // Assign annotations if needed - if (_annotations != NULL || _type_annotations != NULL || - _fields_annotations != NULL || _fields_type_annotations != NULL) { +// Create the Annotations object that will +// hold the annotations array for the Klass. +void ClassFileParser::create_combined_annotations(TRAPS) { + if (_annotations == NULL && + _type_annotations == NULL && + _fields_annotations == NULL && + _fields_type_annotations == NULL) { + // Don't create the Annotations object unnecessarily. + return; + } + Annotations* annotations = Annotations::allocate(_loader_data, CHECK); annotations->set_class_annotations(_annotations); annotations->set_class_type_annotations(_type_annotations); annotations->set_fields_annotations(_fields_annotations); annotations->set_fields_type_annotations(_fields_type_annotations); - this_klass->set_annotations(annotations); - } - + + // This is the Annotations object that will be + // assigned to InstanceKlass being constructed. 
+ _combined_annotations = annotations; + + // The annotations arrays below has been transfered the + // _combined_annotations so these fields can now be cleared. + _annotations = NULL; + _type_annotations = NULL; + _fields_annotations = NULL; + _fields_type_annotations = NULL; +} + +// Transfer ownership of metadata allocated to the InstanceKlass. +void ClassFileParser::apply_parsed_class_metadata( + instanceKlassHandle this_klass, + int java_fields_count, TRAPS) { _cp->set_pool_holder(this_klass()); this_klass->set_constants(_cp); this_klass->set_fields(_fields, java_fields_count); @@ -3130,6 +3148,7 @@ this_klass->set_inner_classes(_inner_classes); this_klass->set_local_interfaces(_local_interfaces); this_klass->set_transitive_interfaces(_transitive_interfaces); + this_klass->set_annotations(_combined_annotations); // Clear out these fields so they don't get deallocated by the destructor clear_class_metadata(); @@ -4002,6 +4021,10 @@ ClassAnnotationCollector parsed_annotations; parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle)); + // Finalize the Annotations metadata object, + // now that all annotation arrays have been created. + create_combined_annotations(CHECK_(nullHandle)); + // Make sure this is the end of class file stream guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle)); @@ -4302,10 +4325,27 @@ InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(), _local_interfaces, _transitive_interfaces); - MetadataFactory::free_array(_loader_data, _annotations); - MetadataFactory::free_array(_loader_data, _type_annotations); - Annotations::free_contents(_loader_data, _fields_annotations); - Annotations::free_contents(_loader_data, _fields_type_annotations); + if (_combined_annotations != NULL) { + // After all annotations arrays have been created, they are installed into the + // Annotations object that will be assigned to the InstanceKlass being created. + + // Deallocate the Annotations object and the installed annotations arrays. + _combined_annotations->deallocate_contents(_loader_data); + + // If the _combined_annotations pointer is non-NULL, + // then the other annotations fields should have been cleared. + assert(_annotations == NULL, "Should have been cleared"); + assert(_type_annotations == NULL, "Should have been cleared"); + assert(_fields_annotations == NULL, "Should have been cleared"); + assert(_fields_type_annotations == NULL, "Should have been cleared"); + } else { + // If the annotations arrays were not installed into the Annotations object, + // then they have to be deallocated explicitly. 
+ MetadataFactory::free_array(_loader_data, _annotations); + MetadataFactory::free_array(_loader_data, _type_annotations); + Annotations::free_contents(_loader_data, _fields_annotations); + Annotations::free_contents(_loader_data, _fields_type_annotations); + } clear_class_metadata(); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/classfile/classFileParser.hpp --- a/hotspot/src/share/vm/classfile/classFileParser.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -75,6 +75,7 @@ Array* _inner_classes; Array* _local_interfaces; Array* _transitive_interfaces; + Annotations* _combined_annotations; AnnotationArray* _annotations; AnnotationArray* _type_annotations; Array* _fields_annotations; @@ -86,6 +87,8 @@ void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; } void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; } + void create_combined_annotations(TRAPS); + void init_parsed_class_attributes(ClassLoaderData* loader_data) { _loader_data = loader_data; _synthetic_flag = false; @@ -110,6 +113,7 @@ _inner_classes = NULL; _local_interfaces = NULL; _transitive_interfaces = NULL; + _combined_annotations = NULL; _annotations = _type_annotations = NULL; _fields_annotations = _fields_type_annotations = NULL; } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -793,11 +793,6 @@ } } -CompactibleSpace* -ConcurrentMarkSweepGeneration::first_compaction_space() const { - return _cmsSpace; -} - void ConcurrentMarkSweepGeneration::reset_after_compaction() { // Clear the promotion information. These pointers can be adjusted // along with all the other pointers into the heap but @@ -808,10 +803,6 @@ } } -void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) { - blk->do_space(_cmsSpace); -} - void ConcurrentMarkSweepGeneration::compute_new_size() { assert_locked_or_safepoint(Heap_lock); @@ -882,7 +873,7 @@ expand_bytes); } // safe if expansion fails - expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); + expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity()); @@ -1048,8 +1039,7 @@ if (res == NULL) { // expand and retry size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords - expand(s*HeapWordSize, MinHeapDeltaBytes, - CMSExpansionCause::_satisfy_promotion); + expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion); // Since there's currently no next generation, we don't try to promote // into a more senior generation. 
assert(next_gen() == NULL, "assumption, based upon which no attempt " @@ -2625,13 +2615,6 @@ ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) void -ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { - cl->set_generation(this); - younger_refs_in_space_iterate(_cmsSpace, cl); - cl->reset_generation(); -} - -void ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) { if (freelistLock()->owned_by_self()) { Generation::oop_iterate(cl); @@ -2803,23 +2786,17 @@ CMSSynchronousYieldRequest yr; assert(!tlab, "Can't deal with TLAB allocation"); MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); - expand(word_size*HeapWordSize, MinHeapDeltaBytes, - CMSExpansionCause::_satisfy_allocation); + expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation); if (GCExpandToAllocateDelayMillis > 0) { os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); } return have_lock_and_allocate(word_size, tlab); } -// YSR: All of this generation expansion/shrinking stuff is an exact copy of -// TenuredGeneration, which makes me wonder if we should move this -// to CardGeneration and share it... -bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) { - return CardGeneration::expand(bytes, expand_bytes); -} - -void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes, - CMSExpansionCause::Cause cause) +void ConcurrentMarkSweepGeneration::expand_for_gc_cause( + size_t bytes, + size_t expand_bytes, + CMSExpansionCause::Cause cause) { bool success = expand(bytes, expand_bytes); @@ -2848,8 +2825,7 @@ return NULL; } // Otherwise, we try expansion. - expand(word_sz*HeapWordSize, MinHeapDeltaBytes, - CMSExpansionCause::_allocate_par_lab); + expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab); // Now go around the loop and try alloc again; // A competing par_promote might beat us to the expansion space, // so we may go around the loop again if promotion fails again. @@ -2876,8 +2852,7 @@ return false; } // Otherwise, we try expansion. - expand(refill_size_bytes, MinHeapDeltaBytes, - CMSExpansionCause::_allocate_par_spooling_space); + expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space); // Now go around the loop and try alloc again; // A competing allocation might beat us to the expansion space, // so we may go around the loop again if allocation fails again. 
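The ConcurrentMarkSweepGeneration hunks above rename the three-argument expand() overload to expand_for_gc_cause() and switch the _satisfy_free_ratio, _satisfy_promotion, _satisfy_allocation, _allocate_par_lab and _allocate_par_spooling_space call sites over to it, freeing the plain two-argument expand() to move into the CardGeneration base class introduced later in this changeset. The tail of the new method lies outside the visible context lines; a minimal sketch of the pattern, assuming the remainder simply records the cause on success through the set_expansion_cause() accessor shown in the concurrentMarkSweepGeneration.hpp hunk further down, would be:

  // Sketch only; not part of the changeset (the method tail is outside the hunk).
  void ConcurrentMarkSweepGeneration::expand_for_gc_cause(size_t bytes,
                                                          size_t expand_bytes,
                                                          CMSExpansionCause::Cause cause) {
    bool success = expand(bytes, expand_bytes);  // delegates to the shared CardGeneration::expand
    if (success) {
      set_expansion_cause(cause);                // assumption: cause is remembered for CMS cycle heuristics
    }
  }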
@@ -2887,77 +2862,16 @@ } } - -void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) { - assert_locked_or_safepoint(ExpandHeap_lock); - // Shrink committed space - _virtual_space.shrink_by(bytes); - // Shrink space; this also shrinks the space's BOT - _cmsSpace->set_end((HeapWord*) _virtual_space.high()); - size_t new_word_size = heap_word_size(_cmsSpace->capacity()); - // Shrink the shared block offset array - _bts->resize(new_word_size); - MemRegion mr(_cmsSpace->bottom(), new_word_size); - // Shrink the card table - Universe::heap()->barrier_set()->resize_covered_region(mr); - - if (Verbose && PrintGC) { - size_t new_mem_size = _virtual_space.committed_size(); - size_t old_mem_size = new_mem_size + bytes; - gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K", - name(), old_mem_size/K, new_mem_size/K); - } -} - void ConcurrentMarkSweepGeneration::shrink(size_t bytes) { - assert_locked_or_safepoint(Heap_lock); - size_t size = ReservedSpace::page_align_size_down(bytes); // Only shrink if a compaction was done so that all the free space // in the generation is in a contiguous block at the end. - if (size > 0 && did_compact()) { - shrink_by(size); - } -} - -bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) { + if (did_compact()) { + CardGeneration::shrink(bytes); + } +} + +void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() { assert_locked_or_safepoint(Heap_lock); - bool result = _virtual_space.expand_by(bytes); - if (result) { - size_t new_word_size = - heap_word_size(_virtual_space.committed_size()); - MemRegion mr(_cmsSpace->bottom(), new_word_size); - _bts->resize(new_word_size); // resize the block offset shared array - Universe::heap()->barrier_set()->resize_covered_region(mr); - // Hmmmm... why doesn't CFLS::set_end verify locking? - // This is quite ugly; FIX ME XXX - _cmsSpace->assert_locked(freelistLock()); - _cmsSpace->set_end((HeapWord*)_virtual_space.high()); - - // update the space and generation capacity counters - if (UsePerfData) { - _space_counters->update_capacity(); - _gen_counters->update_all(); - } - - if (Verbose && PrintGC) { - size_t new_mem_size = _virtual_space.committed_size(); - size_t old_mem_size = new_mem_size - bytes; - gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K", - name(), old_mem_size/K, bytes/K, new_mem_size/K); - } - } - return result; -} - -bool ConcurrentMarkSweepGeneration::grow_to_reserved() { - assert_locked_or_safepoint(Heap_lock); - bool success = true; - const size_t remaining_bytes = _virtual_space.uncommitted_size(); - if (remaining_bytes > 0) { - success = grow_by(remaining_bytes); - DEBUG_ONLY(if (!success) warning("grow to reserved failed");) - } - return success; } void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,9 +30,10 @@ #include "gc_implementation/shared/gcStats.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/generationCounters.hpp" +#include "memory/cardGeneration.hpp" #include "memory/freeBlockDictionary.hpp" -#include "memory/generation.hpp" #include "memory/iterator.hpp" +#include "memory/space.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/virtualspace.hpp" #include "services/memoryService.hpp" @@ -171,9 +172,7 @@ // Represents a marking stack used by the CMS collector. // Ideally this should be GrowableArray<> just like MSC's marking stack(s). class CMSMarkStack: public CHeapObj { - // friend class CMSCollector; // To get at expansion stats further below. - // VirtualSpace _virtual_space; // Space for the stack oop* _base; // Bottom of stack @@ -1031,6 +1030,9 @@ void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;} CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; } + // Accessing spaces + CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; } + private: // For parallel young-gen GC support. CMSParGCThreadState** _par_gc_thread_states; @@ -1064,6 +1066,10 @@ double initiating_occupancy() const { return _initiating_occupancy; } void init_initiating_occupancy(intx io, uintx tr); + void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause); + + void assert_correct_size_change_locking(); + public: ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, int level, CardTableRS* ct, @@ -1100,23 +1106,14 @@ // Override virtual void ref_processor_init(); - // Grow generation by specified size (returns false if unable to grow) - bool grow_by(size_t bytes); - // Grow generation to reserved size. - bool grow_to_reserved(); - void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; } // Space enquiries - size_t capacity() const; - size_t used() const; - size_t free() const; double occupancy() const { return ((double)used())/((double)capacity()); } size_t contiguous_available() const; size_t unsafe_max_alloc_nogc() const; // over-rides - MemRegion used_region() const; MemRegion used_region_at_save_marks() const; // Does a "full" (forced) collection invoked on this generation collect @@ -1127,10 +1124,6 @@ return !ScavengeBeforeFullGC; } - void space_iterate(SpaceClosure* blk, bool usedOnly = false); - - // Support for compaction - CompactibleSpace* first_compaction_space() const; // Adjust quantities in the generation affected by // the compaction. 
void reset_after_compaction(); @@ -1190,18 +1183,13 @@ } // Allocation failure - void expand(size_t bytes, size_t expand_bytes, - CMSExpansionCause::Cause cause); - virtual bool expand(size_t bytes, size_t expand_bytes); void shrink(size_t bytes); - void shrink_by(size_t bytes); HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); bool expand_and_ensure_spooling_space(PromotionInfo* promo); // Iteration support and related enquiries void save_marks(); bool no_allocs_since_save_marks(); - void younger_refs_iterate(OopsInGenClosure* cl); // Iteration support specific to CMS generations void save_sweep_limit(); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -369,22 +369,6 @@ cmsSpace()->save_sweep_limit(); } -inline size_t ConcurrentMarkSweepGeneration::capacity() const { - return _cmsSpace->capacity(); -} - -inline size_t ConcurrentMarkSweepGeneration::used() const { - return _cmsSpace->used(); -} - -inline size_t ConcurrentMarkSweepGeneration::free() const { - return _cmsSpace->free(); -} - -inline MemRegion ConcurrentMarkSweepGeneration::used_region() const { - return _cmsSpace->used_region(); -} - inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const { return _cmsSpace->used_region_at_save_marks(); } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -352,7 +352,7 @@ } void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) { - OtherRegionsTable::invalidate(start_idx, num_regions); + HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions); } void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) { diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -407,20 +407,8 @@ } } -void OtherRegionsTable::initialize(uint max_regions) { - FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions); -} - -void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) { - FromCardCache::invalidate(start_idx, num_regions); -} - -void OtherRegionsTable::print_from_card_cache() { - FromCardCache::print(); -} - void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) { - uint cur_hrm_ind = hr()->hrm_index(); + uint cur_hrm_ind = _hr->hrm_index(); if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" 
PTR_FORMAT ").", @@ -434,7 +422,7 @@ if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)", - hr()->bottom(), from_card, + _hr->bottom(), from_card, FromCardCache::at(tid, cur_hrm_ind)); } @@ -477,13 +465,13 @@ if (G1HRRSUseSparseTable && _sparse_table.add_card(from_hrm_ind, card_index)) { if (G1RecordHRRSOops) { - HeapRegionRemSet::record(hr(), from); + HeapRegionRemSet::record(_hr, from); if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print(" Added card " PTR_FORMAT " to region " "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", align_size_down(uintptr_t(from), CardTableModRefBS::card_size), - hr()->bottom(), from); + _hr->bottom(), from); } } if (G1TraceHeapRegionRememberedSet) { @@ -539,13 +527,13 @@ prt->add_reference(from); if (G1RecordHRRSOops) { - HeapRegionRemSet::record(hr(), from); + HeapRegionRemSet::record(_hr, from); if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print("Added card " PTR_FORMAT " to region " "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n", align_size_down(uintptr_t(from), CardTableModRefBS::card_size), - hr()->bottom(), from); + _hr->bottom(), from); } } assert(contains_reference(from), "We just added it!"); @@ -614,7 +602,7 @@ if (G1TraceHeapRegionRememberedSet) { gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " "for region [" PTR_FORMAT "...] (" SIZE_FORMAT " coarse entries).\n", - hr()->bottom(), + _hr->bottom(), max->hr()->bottom(), _n_coarse_entries); } @@ -627,13 +615,11 @@ return max; } - -// At present, this must be called stop-world single-threaded. void OtherRegionsTable::scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm) { // First eliminated garbage regions from the coarse map. if (G1RSScrubVerbose) { - gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index()); + gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index()); } assert(_coarse_map.size() == region_bm->size(), "Precondition"); @@ -752,7 +738,7 @@ } void OtherRegionsTable::clear_fcc() { - FromCardCache::clear(hr()->hrm_index()); + FromCardCache::clear(_hr->hrm_index()); } void OtherRegionsTable::clear() { @@ -774,27 +760,6 @@ clear_fcc(); } -bool OtherRegionsTable::del_single_region_table(size_t ind, - HeapRegion* hr) { - assert(0 <= ind && ind < _max_fine_entries, "Preconditions."); - PerRegionTable** prev_addr = &_fine_grain_regions[ind]; - PerRegionTable* prt = *prev_addr; - while (prt != NULL && prt->hr() != hr) { - prev_addr = prt->collision_list_next_addr(); - prt = prt->collision_list_next(); - } - if (prt != NULL) { - assert(prt->hr() == hr, "Loop postcondition."); - *prev_addr = prt->collision_list_next(); - unlink_from_all(prt); - PerRegionTable::free(prt); - _n_fine_entries--; - return true; - } else { - return false; - } -} - bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const { // Cast away const in this case. MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag); @@ -975,7 +940,7 @@ _hrrs(hrrs), _g1h(G1CollectedHeap::heap()), _coarse_map(&hrrs->_other_regions._coarse_map), - _bosa(hrrs->bosa()), + _bosa(hrrs->_bosa), _is(Sparse), // Set these values so that we increment to the first region. 
_coarse_cur_region_index(-1), diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -162,32 +162,36 @@ // to hold _m, and the fine-grain table to be full. PerRegionTable* delete_region_table(); - // If a PRT for "hr" is in the bucket list indicated by "ind" (which must - // be the correct index for "hr"), delete it and return true; else return - // false. - bool del_single_region_table(size_t ind, HeapRegion* hr); - // link/add the given fine grain remembered set into the "all" list void link_to_all(PerRegionTable * prt); // unlink/remove the given fine grain remembered set into the "all" list void unlink_from_all(PerRegionTable * prt); + bool contains_reference_locked(OopOrNarrowOopStar from) const; + + // Clear the from_card_cache entries for this region. + void clear_fcc(); public: + // Create a new remembered set for the given heap region. The given mutex should + // be used to ensure consistency. OtherRegionsTable(HeapRegion* hr, Mutex* m); - HeapRegion* hr() const { return _hr; } - // For now. Could "expand" some tables in the future, so that this made // sense. void add_reference(OopOrNarrowOopStar from, uint tid); + // Returns whether the remembered set contains the given reference. + bool contains_reference(OopOrNarrowOopStar from) const; + // Removes any entries shown by the given bitmaps to contain only dead - // objects. + // objects. Not thread safe. + // Set bits in the bitmaps indicate that the given region or card is live. void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); - // Returns whether this remembered set (and all sub-sets) contain no entries. + // Returns whether this remembered set (and all sub-sets) does not contain any entry. bool is_empty() const; + // Returns the number of cards contained in this remembered set. size_t occupied() const; size_t occ_fine() const; size_t occ_coarse() const; @@ -195,31 +199,17 @@ static jint n_coarsenings() { return _n_coarsenings; } - // Returns size in bytes. - // Not const because it takes a lock. + // Returns size of the actual remembered set containers in bytes. size_t mem_size() const; + // Returns the size of static data in bytes. static size_t static_mem_size(); + // Returns the size of the free list content in bytes. static size_t fl_mem_size(); - bool contains_reference(OopOrNarrowOopStar from) const; - bool contains_reference_locked(OopOrNarrowOopStar from) const; - + // Clear the entire contents of this remembered set. void clear(); - // Specifically clear the from_card_cache. - void clear_fcc(); - void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task); - - // Declare the heap size (in # of regions) to the OtherRegionsTable. - // (Uses it to initialize from_card_cache). - static void initialize(uint max_regions); - - // Declares that regions between start_idx <= i < start_idx + num_regions are - // not in use. Make sure that any entries for these regions are invalid. - static void invalidate(uint start_idx, size_t num_regions); - - static void print_from_card_cache(); }; class HeapRegionRemSet : public CHeapObj { @@ -233,7 +223,6 @@ private: G1BlockOffsetSharedArray* _bosa; - G1BlockOffsetSharedArray* bosa() const { return _bosa; } // A set of code blobs (nmethods) whose code contains pointers into // the region that owns this RSet. 
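The heapRegionRemSet.cpp hunks above and the heapRegionRemSet.hpp hunk that follows remove a layer of static forwarding: OtherRegionsTable::initialize, ::invalidate and ::print_from_card_cache are deleted, the hr() accessor is replaced by direct _hr field accesses, and HeapRegionRemSet now talks to FromCardCache directly. Using only names that appear in this changeset, the resulting call path looks like this:

  // g1CollectedHeap.cpp (hunk above): the listener calls the new HeapRegionRemSet entry point.
  void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
    HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
  }

  // heapRegionRemSet.hpp (hunk below): one-line forwarders replace the removed
  // OtherRegionsTable wrappers and go straight to FromCardCache.
  static void init_heap(uint max_regions) {
    FromCardCache::initialize(num_par_rem_sets(), max_regions);
  }
  static void invalidate_from_card_cache(uint start_idx, size_t num_regions) {
    FromCardCache::invalidate(start_idx, num_regions);
  }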
@@ -268,10 +257,6 @@ static uint num_par_rem_sets(); static void setup_remset_size(); - HeapRegion* hr() const { - return _other_regions.hr(); - } - bool is_empty() const { return (strong_code_roots_list_length() == 0) && _other_regions.is_empty(); } @@ -305,8 +290,9 @@ _other_regions.add_reference(from, tid); } - // Removes any entries shown by the given bitmaps to contain only dead - // objects. + // Removes any entries in the remembered set shown by the given bitmaps to + // contain only dead objects. Not thread safe. + // One bits in the bitmaps indicate that the given region or card is live. void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); // The region is being reclaimed; clear its remset, and any mention of @@ -397,16 +383,16 @@ // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). // (Uses it to initialize from_card_cache). static void init_heap(uint max_regions) { - OtherRegionsTable::initialize(max_regions); + FromCardCache::initialize(num_par_rem_sets(), max_regions); } - static void invalidate(uint start_idx, uint num_regions) { - OtherRegionsTable::invalidate(start_idx, num_regions); + static void invalidate_from_card_cache(uint start_idx, size_t num_regions) { + FromCardCache::invalidate(start_idx, num_regions); } #ifndef PRODUCT static void print_from_card_cache() { - OtherRegionsTable::print_from_card_cache(); + FromCardCache::print(); } #endif diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp --- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -172,6 +172,27 @@ _tenuring_threshold = tenuring_threshold; } +bool YoungGCTracer::should_report_promotion_in_new_plab_event() const { + return should_send_promotion_in_new_plab_event(); +} + +bool YoungGCTracer::should_report_promotion_outside_plab_event() const { + return should_send_promotion_outside_plab_event(); +} + +void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const { + assert_set_gc_id(); + send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size); +} + +void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const { + assert_set_gc_id(); + send_promotion_outside_plab_event(klass, obj_size, age, tenured); +} + void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { assert_set_gc_id(); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp --- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -156,9 +156,38 @@ void report_promotion_failed(const PromotionFailedInfo& pf_info); void report_tenuring_threshold(const uint tenuring_threshold); + /* + * Methods for reporting Promotion in new or outside PLAB Events. + * + * The object age is always required as it is not certain that the mark word + * of the oop can be trusted at this stage. + * + * obj_size is the size of the promoted object in bytes. + * + * tenured should be true if the object has been promoted to the old + * space during this GC, if the object is copied to survivor space + * from young space or survivor space (aging) tenured should be false. 
+ * + * plab_size is the size of the newly allocated PLAB in bytes. + */ + bool should_report_promotion_in_new_plab_event() const; + bool should_report_promotion_outside_plab_event() const; + void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const; + void report_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const; + private: void send_young_gc_event() const; void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const; + bool should_send_promotion_in_new_plab_event() const; + bool should_send_promotion_outside_plab_event() const; + void send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const; + void send_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const; }; class OldGCTracer : public GCTracer { diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp --- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -111,6 +111,44 @@ } } +bool YoungGCTracer::should_send_promotion_in_new_plab_event() const { + return EventPromoteObjectInNewPLAB::is_enabled(); +} + +bool YoungGCTracer::should_send_promotion_outside_plab_event() const { + return EventPromoteObjectOutsidePLAB::is_enabled(); +} + +void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured, + size_t plab_size) const { + + EventPromoteObjectInNewPLAB event; + if (event.should_commit()) { + event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_class(klass); + event.set_objectSize(obj_size); + event.set_tenured(tenured); + event.set_tenuringAge(age); + event.set_plabSize(plab_size); + event.commit(); + } +} + +void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size, + uint age, bool tenured) const { + + EventPromoteObjectOutsidePLAB event; + if (event.should_commit()) { + event.set_gcId(_shared_gc_info.gc_id().id()); + event.set_class(klass); + event.set_objectSize(obj_size); + event.set_tenured(tenured); + event.set_tenuringAge(age); + event.commit(); + } +} + void OldGCTracer::send_old_gc_event() const { EventGCOldGarbageCollection e(UNTIMED); if (e.should_commit()) { diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/interpreter/interpreterRuntime.cpp --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -385,6 +385,18 @@ int handler_bci; int current_bci = bci(thread); + if (thread->frames_to_pop_failed_realloc() > 0) { + // Allocation of scalar replaced object used in this frame + // failed. Unconditionally pop the frame. + thread->dec_frames_to_pop_failed_realloc(); + thread->set_vm_result(h_exception()); + // If the method is synchronized we already unlocked the monitor + // during deoptimization so the interpreter needs to skip it when + // the frame is popped. 
+ thread->set_do_not_unlock_if_synchronized(true); + return Interpreter::remove_activation_entry(); + } + // Need to do this check first since when _do_not_unlock_if_synchronized // is set, we don't want to trigger any classloading which may make calls // into java, or surprisingly find a matching exception handler for bci 0 diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/cardGeneration.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/memory/cardGeneration.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "memory/blockOffsetTable.inline.hpp" +#include "memory/cardGeneration.inline.hpp" +#include "memory/gcLocker.hpp" +#include "memory/generationSpec.hpp" +#include "memory/genOopClosures.inline.hpp" +#include "memory/genRemSet.hpp" +#include "memory/iterator.hpp" +#include "memory/memRegion.hpp" +#include "memory/space.inline.hpp" +#include "runtime/java.hpp" + +CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size, + int level, + GenRemSet* remset) : + Generation(rs, initial_byte_size, level), _rs(remset), + _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), + _used_at_prologue() +{ + HeapWord* start = (HeapWord*)rs.base(); + size_t reserved_byte_size = rs.size(); + assert((uintptr_t(start) & 3) == 0, "bad alignment"); + assert((reserved_byte_size & 3) == 0, "bad alignment"); + MemRegion reserved_mr(start, heap_word_size(reserved_byte_size)); + _bts = new BlockOffsetSharedArray(reserved_mr, + heap_word_size(initial_byte_size)); + MemRegion committed_mr(start, heap_word_size(initial_byte_size)); + _rs->resize_covered_region(committed_mr); + if (_bts == NULL) { + vm_exit_during_initialization("Could not allocate a BlockOffsetArray"); + } + + // Verify that the start and end of this generation is the start of a card. + // If this wasn't true, a single card could span more than on generation, + // which would cause problems when we commit/uncommit memory, and when we + // clear and dirty cards. + guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned"); + if (reserved_mr.end() != Universe::heap()->reserved_region().end()) { + // Don't check at the very end of the heap as we'll assert that we're probing off + // the end if we try. 
+ guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned"); + } + _min_heap_delta_bytes = MinHeapDeltaBytes; + _capacity_at_prologue = initial_byte_size; + _used_at_prologue = 0; +} + +bool CardGeneration::grow_by(size_t bytes) { + assert_correct_size_change_locking(); + bool result = _virtual_space.expand_by(bytes); + if (result) { + size_t new_word_size = + heap_word_size(_virtual_space.committed_size()); + MemRegion mr(space()->bottom(), new_word_size); + // Expand card table + Universe::heap()->barrier_set()->resize_covered_region(mr); + // Expand shared block offset array + _bts->resize(new_word_size); + + // Fix for bug #4668531 + if (ZapUnusedHeapArea) { + MemRegion mangle_region(space()->end(), + (HeapWord*)_virtual_space.high()); + SpaceMangler::mangle_region(mangle_region); + } + + // Expand space -- also expands space's BOT + // (which uses (part of) shared array above) + space()->set_end((HeapWord*)_virtual_space.high()); + + // update the space and generation capacity counters + update_counters(); + + if (Verbose && PrintGC) { + size_t new_mem_size = _virtual_space.committed_size(); + size_t old_mem_size = new_mem_size - bytes; + gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " + SIZE_FORMAT "K to " SIZE_FORMAT "K", + name(), old_mem_size/K, bytes/K, new_mem_size/K); + } + } + return result; +} + +bool CardGeneration::expand(size_t bytes, size_t expand_bytes) { + assert_locked_or_safepoint(Heap_lock); + if (bytes == 0) { + return true; // That's what grow_by(0) would return + } + size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); + if (aligned_bytes == 0){ + // The alignment caused the number of bytes to wrap. An expand_by(0) will + // return true with the implication that an expansion was done when it + // was not. A call to expand implies a best effort to expand by "bytes" + // but not a guarantee. Align down to give a best effort. This is likely + // the most that the generation can expand since it has some capacity to + // start with. 
+ aligned_bytes = ReservedSpace::page_align_size_down(bytes); + } + size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); + bool success = false; + if (aligned_expand_bytes > aligned_bytes) { + success = grow_by(aligned_expand_bytes); + } + if (!success) { + success = grow_by(aligned_bytes); + } + if (!success) { + success = grow_to_reserved(); + } + if (PrintGC && Verbose) { + if (success && GC_locker::is_active_and_needs_gc()) { + gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); + } + } + + return success; +} + +bool CardGeneration::grow_to_reserved() { + assert_correct_size_change_locking(); + bool success = true; + const size_t remaining_bytes = _virtual_space.uncommitted_size(); + if (remaining_bytes > 0) { + success = grow_by(remaining_bytes); + DEBUG_ONLY(if (!success) warning("grow to reserved failed");) + } + return success; +} + +void CardGeneration::shrink(size_t bytes) { + assert_correct_size_change_locking(); + + size_t size = ReservedSpace::page_align_size_down(bytes); + if (size == 0) { + return; + } + + // Shrink committed space + _virtual_space.shrink_by(size); + // Shrink space; this also shrinks the space's BOT + space()->set_end((HeapWord*) _virtual_space.high()); + size_t new_word_size = heap_word_size(space()->capacity()); + // Shrink the shared block offset array + _bts->resize(new_word_size); + MemRegion mr(space()->bottom(), new_word_size); + // Shrink the card table + Universe::heap()->barrier_set()->resize_covered_region(mr); + + if (Verbose && PrintGC) { + size_t new_mem_size = _virtual_space.committed_size(); + size_t old_mem_size = new_mem_size + size; + gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K", + name(), old_mem_size/K, new_mem_size/K); + } +} + +// No young generation references, clear this generation's cards. +void CardGeneration::clear_remembered_set() { + _rs->clear(reserved()); +} + +// Objects in this generation may have moved, invalidate this +// generation's cards. +void CardGeneration::invalidate_remembered_set() { + _rs->invalidate(used_region()); +} + +void CardGeneration::compute_new_size() { + assert(_shrink_factor <= 100, "invalid shrink factor"); + size_t current_shrink_factor = _shrink_factor; + _shrink_factor = 0; + + // We don't have floating point command-line arguments + // Note: argument processing ensures that MinHeapFreeRatio < 100. + const double minimum_free_percentage = MinHeapFreeRatio / 100.0; + const double maximum_used_percentage = 1.0 - minimum_free_percentage; + + // Compute some numbers about the state of the heap. 
+ const size_t used_after_gc = used(); + const size_t capacity_after_gc = capacity(); + + const double min_tmp = used_after_gc / maximum_used_percentage; + size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx)); + // Don't shrink less than the initial generation size + minimum_desired_capacity = MAX2(minimum_desired_capacity, + spec()->init_size()); + assert(used_after_gc <= minimum_desired_capacity, "sanity check"); + + if (PrintGC && Verbose) { + const size_t free_after_gc = free(); + const double free_percentage = ((double)free_after_gc) / capacity_after_gc; + gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: "); + gclog_or_tty->print_cr(" " + " minimum_free_percentage: %6.2f" + " maximum_used_percentage: %6.2f", + minimum_free_percentage, + maximum_used_percentage); + gclog_or_tty->print_cr(" " + " free_after_gc : %6.1fK" + " used_after_gc : %6.1fK" + " capacity_after_gc : %6.1fK", + free_after_gc / (double) K, + used_after_gc / (double) K, + capacity_after_gc / (double) K); + gclog_or_tty->print_cr(" " + " free_percentage: %6.2f", + free_percentage); + } + + if (capacity_after_gc < minimum_desired_capacity) { + // If we have less free space than we want then expand + size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; + // Don't expand unless it's significant + if (expand_bytes >= _min_heap_delta_bytes) { + expand(expand_bytes, 0); // safe if expansion fails + } + if (PrintGC && Verbose) { + gclog_or_tty->print_cr(" expanding:" + " minimum_desired_capacity: %6.1fK" + " expand_bytes: %6.1fK" + " _min_heap_delta_bytes: %6.1fK", + minimum_desired_capacity / (double) K, + expand_bytes / (double) K, + _min_heap_delta_bytes / (double) K); + } + return; + } + + // No expansion, now see if we want to shrink + size_t shrink_bytes = 0; + // We would never want to shrink more than this + size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity; + + if (MaxHeapFreeRatio < 100) { + const double maximum_free_percentage = MaxHeapFreeRatio / 100.0; + const double minimum_used_percentage = 1.0 - maximum_free_percentage; + const double max_tmp = used_after_gc / minimum_used_percentage; + size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); + maximum_desired_capacity = MAX2(maximum_desired_capacity, + spec()->init_size()); + if (PrintGC && Verbose) { + gclog_or_tty->print_cr(" " + " maximum_free_percentage: %6.2f" + " minimum_used_percentage: %6.2f", + maximum_free_percentage, + minimum_used_percentage); + gclog_or_tty->print_cr(" " + " _capacity_at_prologue: %6.1fK" + " minimum_desired_capacity: %6.1fK" + " maximum_desired_capacity: %6.1fK", + _capacity_at_prologue / (double) K, + minimum_desired_capacity / (double) K, + maximum_desired_capacity / (double) K); + } + assert(minimum_desired_capacity <= maximum_desired_capacity, + "sanity check"); + + if (capacity_after_gc > maximum_desired_capacity) { + // Capacity too large, compute shrinking size + shrink_bytes = capacity_after_gc - maximum_desired_capacity; + // We don't want shrink all the way back to initSize if people call + // System.gc(), because some programs do that between "phases" and then + // we'd just have to grow the heap up again for the next phase. So we + // damp the shrinking: 0% on the first call, 10% on the second call, 40% + // on the third call, and 100% by the fourth call. But if we recompute + // size without shrinking, it goes back to 0%. 
+ shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); + if (current_shrink_factor == 0) { + _shrink_factor = 10; + } else { + _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100); + } + if (PrintGC && Verbose) { + gclog_or_tty->print_cr(" " + " shrinking:" + " initSize: %.1fK" + " maximum_desired_capacity: %.1fK", + spec()->init_size() / (double) K, + maximum_desired_capacity / (double) K); + gclog_or_tty->print_cr(" " + " shrink_bytes: %.1fK" + " current_shrink_factor: " SIZE_FORMAT + " new shrink factor: " SIZE_FORMAT + " _min_heap_delta_bytes: %.1fK", + shrink_bytes / (double) K, + current_shrink_factor, + _shrink_factor, + _min_heap_delta_bytes / (double) K); + } + } + } + + if (capacity_after_gc > _capacity_at_prologue) { + // We might have expanded for promotions, in which case we might want to + // take back that expansion if there's room after GC. That keeps us from + // stretching the heap with promotions when there's plenty of room. + size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue; + expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes); + // We have two shrinking computations, take the largest + shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion); + assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); + if (PrintGC && Verbose) { + gclog_or_tty->print_cr(" " + " aggressive shrinking:" + " _capacity_at_prologue: %.1fK" + " capacity_after_gc: %.1fK" + " expansion_for_promotion: %.1fK" + " shrink_bytes: %.1fK", + capacity_after_gc / (double) K, + _capacity_at_prologue / (double) K, + expansion_for_promotion / (double) K, + shrink_bytes / (double) K); + } + } + // Don't shrink unless it's significant + if (shrink_bytes >= _min_heap_delta_bytes) { + shrink(shrink_bytes); + } +} + +// Currently nothing to do. +void CardGeneration::prepare_for_verify() {} + +void CardGeneration::space_iterate(SpaceClosure* blk, + bool usedOnly) { + blk->do_space(space()); +} + +void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) { + blk->set_generation(this); + younger_refs_in_space_iterate(space(), blk); + blk->reset_generation(); +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/cardGeneration.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/memory/cardGeneration.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_MEMORY_CARDGENERATION_HPP +#define SHARE_VM_MEMORY_CARDGENERATION_HPP + +// Class CardGeneration is a generation that is covered by a card table, +// and uses a card-size block-offset array to implement block_start. + +#include "memory/generation.hpp" + +class BlockOffsetSharedArray; +class CompactibleSpace; + +class CardGeneration: public Generation { + friend class VMStructs; + protected: + // This is shared with other generations. + GenRemSet* _rs; + // This is local to this generation. + BlockOffsetSharedArray* _bts; + + // Current shrinking effect: this damps shrinking when the heap gets empty. + size_t _shrink_factor; + + size_t _min_heap_delta_bytes; // Minimum amount to expand. + + // Some statistics from before gc started. + // These are gathered in the gc_prologue (and should_collect) + // to control growing/shrinking policy in spite of promotions. + size_t _capacity_at_prologue; + size_t _used_at_prologue; + + CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, + GenRemSet* remset); + + virtual void assert_correct_size_change_locking() = 0; + + virtual CompactibleSpace* space() const = 0; + + public: + + // Attempt to expand the generation by "bytes". Expand by at a + // minimum "expand_bytes". Return true if some amount (not + // necessarily the full "bytes") was done. + virtual bool expand(size_t bytes, size_t expand_bytes); + + // Shrink generation with specified size + virtual void shrink(size_t bytes); + + virtual void compute_new_size(); + + virtual void clear_remembered_set(); + + virtual void invalidate_remembered_set(); + + virtual void prepare_for_verify(); + + // Grow generation with specified size (returns false if unable to grow) + bool grow_by(size_t bytes); + // Grow generation to reserved size. + bool grow_to_reserved(); + + size_t capacity() const; + size_t used() const; + size_t free() const; + MemRegion used_region() const; + + void space_iterate(SpaceClosure* blk, bool usedOnly = false); + + void younger_refs_iterate(OopsInGenClosure* blk); + + bool is_in(const void* p) const; + + CompactibleSpace* first_compaction_space() const; +}; + +#endif // SHARE_VM_MEMORY_CARDGENERATION_HPP diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/cardGeneration.inline.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/memory/cardGeneration.inline.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP +#define SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP + +#include "memory/cardGeneration.hpp" +#include "memory/space.hpp" + +inline size_t CardGeneration::capacity() const { + return space()->capacity(); +} + +inline size_t CardGeneration::used() const { + return space()->used(); +} + +inline size_t CardGeneration::free() const { + return space()->free(); +} + +inline MemRegion CardGeneration::used_region() const { + return space()->used_region(); +} + +inline bool CardGeneration::is_in(const void* p) const { + return space()->is_in(p); +} + +inline CompactibleSpace* CardGeneration::first_compaction_space() const { + return space(); +} + +#endif // SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/cardTableModRefBS.cpp --- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -275,29 +275,26 @@ // the new_end_aligned does not intrude onto the committed // space of another region. int ri = 0; - for (ri = 0; ri < _cur_covered_regions; ri++) { - if (ri != ind) { - if (_committed[ri].contains(new_end_aligned)) { - // The prior check included in the assert - // (new_end_aligned >= _committed[ri].start()) - // is redundant with the "contains" test. - // Any region containing the new end - // should start at or beyond the region found (ind) - // for the new end (committed regions are not expected to - // be proper subsets of other committed regions). - assert(_committed[ri].start() >= _committed[ind].start(), - "New end of committed region is inconsistent"); - new_end_aligned = _committed[ri].start(); - // new_end_aligned can be equal to the start of its - // committed region (i.e., of "ind") if a second - // region following "ind" also start at the same location - // as "ind". - assert(new_end_aligned >= _committed[ind].start(), - "New end of committed region is before start"); - debug_only(collided = true;) - // Should only collide with 1 region - break; - } + for (ri = ind + 1; ri < _cur_covered_regions; ri++) { + if (new_end_aligned > _committed[ri].start()) { + assert(new_end_aligned <= _committed[ri].end(), + "An earlier committed region can't cover a later committed region"); + // Any region containing the new end + // should start at or beyond the region found (ind) + // for the new end (committed regions are not expected to + // be proper subsets of other committed regions). + assert(_committed[ri].start() >= _committed[ind].start(), + "New end of committed region is inconsistent"); + new_end_aligned = _committed[ri].start(); + // new_end_aligned can be equal to the start of its + // committed region (i.e., of "ind") if a second + // region following "ind" also start at the same location + // as "ind". 
+ assert(new_end_aligned >= _committed[ind].start(), + "New end of committed region is before start"); + debug_only(collided = true;) + // Should only collide with 1 region + break; } } #ifdef ASSERT diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/filemap.cpp --- a/hotspot/src/share/vm/memory/filemap.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/filemap.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -98,11 +98,11 @@ tty->print_cr("UseSharedSpaces: %s", msg); } } + UseSharedSpaces = false; + assert(current_info() != NULL, "singleton must be registered"); + current_info()->close(); } va_end(ap); - UseSharedSpaces = false; - assert(current_info() != NULL, "singleton must be registered"); - current_info()->close(); } // Fill in the fileMapInfo structure with data about this VM instance. diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/generation.cpp --- a/hotspot/src/share/vm/memory/generation.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/generation.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -361,244 +361,3 @@ sp = sp->next_compaction_space(); } } - -CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size, - int level, - GenRemSet* remset) : - Generation(rs, initial_byte_size, level), _rs(remset), - _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), - _used_at_prologue() -{ - HeapWord* start = (HeapWord*)rs.base(); - size_t reserved_byte_size = rs.size(); - assert((uintptr_t(start) & 3) == 0, "bad alignment"); - assert((reserved_byte_size & 3) == 0, "bad alignment"); - MemRegion reserved_mr(start, heap_word_size(reserved_byte_size)); - _bts = new BlockOffsetSharedArray(reserved_mr, - heap_word_size(initial_byte_size)); - MemRegion committed_mr(start, heap_word_size(initial_byte_size)); - _rs->resize_covered_region(committed_mr); - if (_bts == NULL) - vm_exit_during_initialization("Could not allocate a BlockOffsetArray"); - - // Verify that the start and end of this generation is the start of a card. - // If this wasn't true, a single card could span more than on generation, - // which would cause problems when we commit/uncommit memory, and when we - // clear and dirty cards. - guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned"); - if (reserved_mr.end() != Universe::heap()->reserved_region().end()) { - // Don't check at the very end of the heap as we'll assert that we're probing off - // the end if we try. - guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned"); - } - _min_heap_delta_bytes = MinHeapDeltaBytes; - _capacity_at_prologue = initial_byte_size; - _used_at_prologue = 0; -} - -bool CardGeneration::expand(size_t bytes, size_t expand_bytes) { - assert_locked_or_safepoint(Heap_lock); - if (bytes == 0) { - return true; // That's what grow_by(0) would return - } - size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); - if (aligned_bytes == 0){ - // The alignment caused the number of bytes to wrap. An expand_by(0) will - // return true with the implication that an expansion was done when it - // was not. A call to expand implies a best effort to expand by "bytes" - // but not a guarantee. Align down to give a best effort. This is likely - // the most that the generation can expand since it has some capacity to - // start with. 
- aligned_bytes = ReservedSpace::page_align_size_down(bytes); - } - size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); - bool success = false; - if (aligned_expand_bytes > aligned_bytes) { - success = grow_by(aligned_expand_bytes); - } - if (!success) { - success = grow_by(aligned_bytes); - } - if (!success) { - success = grow_to_reserved(); - } - if (PrintGC && Verbose) { - if (success && GC_locker::is_active_and_needs_gc()) { - gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); - } - } - - return success; -} - - -// No young generation references, clear this generation's cards. -void CardGeneration::clear_remembered_set() { - _rs->clear(reserved()); -} - - -// Objects in this generation may have moved, invalidate this -// generation's cards. -void CardGeneration::invalidate_remembered_set() { - _rs->invalidate(used_region()); -} - - -void CardGeneration::compute_new_size() { - assert(_shrink_factor <= 100, "invalid shrink factor"); - size_t current_shrink_factor = _shrink_factor; - _shrink_factor = 0; - - // We don't have floating point command-line arguments - // Note: argument processing ensures that MinHeapFreeRatio < 100. - const double minimum_free_percentage = MinHeapFreeRatio / 100.0; - const double maximum_used_percentage = 1.0 - minimum_free_percentage; - - // Compute some numbers about the state of the heap. - const size_t used_after_gc = used(); - const size_t capacity_after_gc = capacity(); - - const double min_tmp = used_after_gc / maximum_used_percentage; - size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx)); - // Don't shrink less than the initial generation size - minimum_desired_capacity = MAX2(minimum_desired_capacity, - spec()->init_size()); - assert(used_after_gc <= minimum_desired_capacity, "sanity check"); - - if (PrintGC && Verbose) { - const size_t free_after_gc = free(); - const double free_percentage = ((double)free_after_gc) / capacity_after_gc; - gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: "); - gclog_or_tty->print_cr(" " - " minimum_free_percentage: %6.2f" - " maximum_used_percentage: %6.2f", - minimum_free_percentage, - maximum_used_percentage); - gclog_or_tty->print_cr(" " - " free_after_gc : %6.1fK" - " used_after_gc : %6.1fK" - " capacity_after_gc : %6.1fK", - free_after_gc / (double) K, - used_after_gc / (double) K, - capacity_after_gc / (double) K); - gclog_or_tty->print_cr(" " - " free_percentage: %6.2f", - free_percentage); - } - - if (capacity_after_gc < minimum_desired_capacity) { - // If we have less free space than we want then expand - size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; - // Don't expand unless it's significant - if (expand_bytes >= _min_heap_delta_bytes) { - expand(expand_bytes, 0); // safe if expansion fails - } - if (PrintGC && Verbose) { - gclog_or_tty->print_cr(" expanding:" - " minimum_desired_capacity: %6.1fK" - " expand_bytes: %6.1fK" - " _min_heap_delta_bytes: %6.1fK", - minimum_desired_capacity / (double) K, - expand_bytes / (double) K, - _min_heap_delta_bytes / (double) K); - } - return; - } - - // No expansion, now see if we want to shrink - size_t shrink_bytes = 0; - // We would never want to shrink more than this - size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity; - - if (MaxHeapFreeRatio < 100) { - const double maximum_free_percentage = MaxHeapFreeRatio / 100.0; - const double minimum_used_percentage = 1.0 - maximum_free_percentage; - const double max_tmp = used_after_gc / 
minimum_used_percentage; - size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); - maximum_desired_capacity = MAX2(maximum_desired_capacity, - spec()->init_size()); - if (PrintGC && Verbose) { - gclog_or_tty->print_cr(" " - " maximum_free_percentage: %6.2f" - " minimum_used_percentage: %6.2f", - maximum_free_percentage, - minimum_used_percentage); - gclog_or_tty->print_cr(" " - " _capacity_at_prologue: %6.1fK" - " minimum_desired_capacity: %6.1fK" - " maximum_desired_capacity: %6.1fK", - _capacity_at_prologue / (double) K, - minimum_desired_capacity / (double) K, - maximum_desired_capacity / (double) K); - } - assert(minimum_desired_capacity <= maximum_desired_capacity, - "sanity check"); - - if (capacity_after_gc > maximum_desired_capacity) { - // Capacity too large, compute shrinking size - shrink_bytes = capacity_after_gc - maximum_desired_capacity; - // We don't want shrink all the way back to initSize if people call - // System.gc(), because some programs do that between "phases" and then - // we'd just have to grow the heap up again for the next phase. So we - // damp the shrinking: 0% on the first call, 10% on the second call, 40% - // on the third call, and 100% by the fourth call. But if we recompute - // size without shrinking, it goes back to 0%. - shrink_bytes = shrink_bytes / 100 * current_shrink_factor; - assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); - if (current_shrink_factor == 0) { - _shrink_factor = 10; - } else { - _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100); - } - if (PrintGC && Verbose) { - gclog_or_tty->print_cr(" " - " shrinking:" - " initSize: %.1fK" - " maximum_desired_capacity: %.1fK", - spec()->init_size() / (double) K, - maximum_desired_capacity / (double) K); - gclog_or_tty->print_cr(" " - " shrink_bytes: %.1fK" - " current_shrink_factor: " SIZE_FORMAT - " new shrink factor: " SIZE_FORMAT - " _min_heap_delta_bytes: %.1fK", - shrink_bytes / (double) K, - current_shrink_factor, - _shrink_factor, - _min_heap_delta_bytes / (double) K); - } - } - } - - if (capacity_after_gc > _capacity_at_prologue) { - // We might have expanded for promotions, in which case we might want to - // take back that expansion if there's room after GC. That keeps us from - // stretching the heap with promotions when there's plenty of room. - size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue; - expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes); - // We have two shrinking computations, take the largest - shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion); - assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); - if (PrintGC && Verbose) { - gclog_or_tty->print_cr(" " - " aggressive shrinking:" - " _capacity_at_prologue: %.1fK" - " capacity_after_gc: %.1fK" - " expansion_for_promotion: %.1fK" - " shrink_bytes: %.1fK", - capacity_after_gc / (double) K, - _capacity_at_prologue / (double) K, - expansion_for_promotion / (double) K, - shrink_bytes / (double) K); - } - } - // Don't shrink unless it's significant - if (shrink_bytes >= _min_heap_delta_bytes) { - shrink(shrink_bytes); - } -} - -// Currently nothing to do. 
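The shrink damping in the compute_new_size() code moved out of generation.cpp above releases 0% of the candidate shrink on the first call, then 10%, 40%, and 100% on later calls, because the factor is quadrupled and capped at 100. A standalone sketch of that progression with an illustrative 100 MB candidate:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t candidate_shrink = 100 * 1024 * 1024;  // bytes we could give back
  size_t shrink_factor = 0;                           // starts at 0, like _shrink_factor

  for (int call = 1; call <= 4; call++) {
    size_t current = shrink_factor;
    // Apply the damping the same way the code above does.
    size_t shrink_bytes = candidate_shrink / 100 * current;
    // Factor progression across successive calls: 0 -> 10 -> 40 -> 100.
    shrink_factor = (current == 0) ? 10 : std::min(current * 4, (size_t)100);
    printf("call %d: shrink %zuM of %zuM (factor %zu%%)\n",
           call, shrink_bytes / (1024 * 1024), candidate_shrink / (1024 * 1024), current);
  }
  return 0;
}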
-void CardGeneration::prepare_for_verify() {} - diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/generation.hpp --- a/hotspot/src/share/vm/memory/generation.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/generation.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -584,57 +584,4 @@ virtual CollectorCounters* counters() { return _gc_counters; } }; -// Class CardGeneration is a generation that is covered by a card table, -// and uses a card-size block-offset array to implement block_start. - -// class BlockOffsetArray; -// class BlockOffsetArrayContigSpace; -class BlockOffsetSharedArray; - -class CardGeneration: public Generation { - friend class VMStructs; - protected: - // This is shared with other generations. - GenRemSet* _rs; - // This is local to this generation. - BlockOffsetSharedArray* _bts; - - // current shrinking effect: this damps shrinking when the heap gets empty. - size_t _shrink_factor; - - size_t _min_heap_delta_bytes; // Minimum amount to expand. - - // Some statistics from before gc started. - // These are gathered in the gc_prologue (and should_collect) - // to control growing/shrinking policy in spite of promotions. - size_t _capacity_at_prologue; - size_t _used_at_prologue; - - CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, - GenRemSet* remset); - - public: - - // Attempt to expand the generation by "bytes". Expand by at a - // minimum "expand_bytes". Return true if some amount (not - // necessarily the full "bytes") was done. - virtual bool expand(size_t bytes, size_t expand_bytes); - - // Shrink generation with specified size (returns false if unable to shrink) - virtual void shrink(size_t bytes) = 0; - - virtual void compute_new_size(); - - virtual void clear_remembered_set(); - - virtual void invalidate_remembered_set(); - - virtual void prepare_for_verify(); - - // Grow generation with specified size (returns false if unable to grow) - virtual bool grow_by(size_t bytes) = 0; - // Grow generation to reserved size. - virtual bool grow_to_reserved() = 0; -}; - #endif // SHARE_VM_MEMORY_GENERATION_HPP diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/metaspace.cpp --- a/hotspot/src/share/vm/memory/metaspace.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/metaspace.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -3158,7 +3158,25 @@ SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); - // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods() + // make sure SharedReadOnlySize and SharedReadWriteSize are not less than + // the minimum values. 
+ if (SharedReadOnlySize < MetaspaceShared::min_ro_size){ + report_out_of_shared_space(SharedReadOnly); + } + + if (SharedReadWriteSize < MetaspaceShared::min_rw_size){ + report_out_of_shared_space(SharedReadWrite); + } + + // the min_misc_data_size and min_misc_code_size estimates are based on + // MetaspaceShared::generate_vtable_methods() + uint min_misc_data_size = align_size_up( + MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment); + + if (SharedMiscDataSize < min_misc_data_size) { + report_out_of_shared_space(SharedMiscData); + } + uintx min_misc_code_size = align_size_up( (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) * (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size, diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/metaspaceShared.cpp --- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -969,7 +969,7 @@ #endif // If -Xshare:on is specified, print out the error message and exit VM, // otherwise, set UseSharedSpaces to false and continue. - if (RequireSharedSpaces) { + if (RequireSharedSpaces || PrintSharedArchiveAndExit) { vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on."); } else { FLAG_SET_DEFAULT(UseSharedSpaces, false); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/metaspaceShared.hpp --- a/hotspot/src/share/vm/memory/metaspaceShared.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -70,6 +70,11 @@ }; enum { + min_ro_size = NOT_LP64(8*M) LP64_ONLY(9*M), // minimum ro and rw regions sizes based on dumping + min_rw_size = NOT_LP64(7*M) LP64_ONLY(12*M) // of a shared archive using the default classlist + }; + + enum { ro = 0, // read-only shared space in the heap rw = 1, // read-write shared space in the heap md = 2, // miscellaneous data for initializing tables, etc. diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/tenuredGeneration.cpp --- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -235,34 +235,6 @@ return CardGeneration::expand(bytes, expand_bytes); } - -void TenuredGeneration::shrink(size_t bytes) { - assert_locked_or_safepoint(ExpandHeap_lock); - size_t size = ReservedSpace::page_align_size_down(bytes); - if (size > 0) { - shrink_by(size); - } -} - - -size_t TenuredGeneration::capacity() const { - return _the_space->capacity(); -} - - -size_t TenuredGeneration::used() const { - return _the_space->used(); -} - - -size_t TenuredGeneration::free() const { - return _the_space->free(); -} - -MemRegion TenuredGeneration::used_region() const { - return the_space()->used_region(); -} - size_t TenuredGeneration::unsafe_max_alloc_nogc() const { return _the_space->free(); } @@ -271,74 +243,8 @@ return _the_space->free() + _virtual_space.uncommitted_size(); } -bool TenuredGeneration::grow_by(size_t bytes) { +void TenuredGeneration::assert_correct_size_change_locking() { assert_locked_or_safepoint(ExpandHeap_lock); - bool result = _virtual_space.expand_by(bytes); - if (result) { - size_t new_word_size = - heap_word_size(_virtual_space.committed_size()); - MemRegion mr(_the_space->bottom(), new_word_size); - // Expand card table - Universe::heap()->barrier_set()->resize_covered_region(mr); - // Expand shared block offset array - _bts->resize(new_word_size); - - // Fix for bug #4668531 - if (ZapUnusedHeapArea) { - MemRegion mangle_region(_the_space->end(), - (HeapWord*)_virtual_space.high()); - SpaceMangler::mangle_region(mangle_region); - } - - // Expand space -- also expands space's BOT - // (which uses (part of) shared array above) - _the_space->set_end((HeapWord*)_virtual_space.high()); - - // update the space and generation capacity counters - update_counters(); - - if (Verbose && PrintGC) { - size_t new_mem_size = _virtual_space.committed_size(); - size_t old_mem_size = new_mem_size - bytes; - gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " - SIZE_FORMAT "K to " SIZE_FORMAT "K", - name(), old_mem_size/K, bytes/K, new_mem_size/K); - } - } - return result; -} - - -bool TenuredGeneration::grow_to_reserved() { - assert_locked_or_safepoint(ExpandHeap_lock); - bool success = true; - const size_t remaining_bytes = _virtual_space.uncommitted_size(); - if (remaining_bytes > 0) { - success = grow_by(remaining_bytes); - DEBUG_ONLY(if (!success) warning("grow to reserved failed");) - } - return success; -} - -void TenuredGeneration::shrink_by(size_t bytes) { - assert_locked_or_safepoint(ExpandHeap_lock); - // Shrink committed space - _virtual_space.shrink_by(bytes); - // Shrink space; this also shrinks the space's BOT - _the_space->set_end((HeapWord*) _virtual_space.high()); - size_t new_word_size = heap_word_size(_the_space->capacity()); - // Shrink the shared block offset array - _bts->resize(new_word_size); - MemRegion mr(_the_space->bottom(), new_word_size); - // Shrink the card table - Universe::heap()->barrier_set()->resize_covered_region(mr); - - if (Verbose && PrintGC) { - size_t new_mem_size = _virtual_space.committed_size(); - size_t old_mem_size = new_mem_size + bytes; - gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K", - name(), old_mem_size/K, new_mem_size/K); - } } // Currently nothing to do. 
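The tenuredGeneration.cpp hunk above deletes the grow/shrink plumbing and keeps only a locking-assertion hook; together with the space() accessor added in the header further below, the shared bookkeeping now sits in the CardGeneration base class. A minimal standalone sketch of that shape, using stub types rather than the real HotSpot classes:

#include <cstddef>
#include <cstdio>

// Illustrative stubs, not the real HotSpot types.
struct Space {
  size_t committed;
  Space() : committed(0) {}
  size_t capacity() const { return committed; }
};

class CardGenerationSketch {
 protected:
  virtual Space* space() const = 0;
  virtual void assert_correct_size_change_locking() = 0;
 public:
  virtual ~CardGenerationSketch() {}
  // The shared grow/shrink bookkeeping lives in the base class...
  bool grow_by(size_t bytes) {
    assert_correct_size_change_locking();
    space()->committed += bytes;   // stand-in for growing the virtual space,
    return true;                   // resizing the card table, BOT, counters, ...
  }
  size_t capacity() const { return space()->capacity(); }
};

// ...while the concrete generation only supplies its space and the locking hook.
class TenuredGenerationSketch : public CardGenerationSketch {
  Space* _the_space;
 protected:
  virtual Space* space() const { return _the_space; }
  virtual void assert_correct_size_change_locking() {
    // stand-in for assert_locked_or_safepoint(ExpandHeap_lock)
  }
 public:
  TenuredGenerationSketch() : _the_space(new Space()) {}
  virtual ~TenuredGenerationSketch() { delete _the_space; }
};

int main() {
  TenuredGenerationSketch gen;
  gen.grow_by(64 * 1024);
  printf("capacity after grow_by: %zu bytes\n", gen.capacity());
  return 0;
}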
@@ -348,27 +254,14 @@ _the_space->object_iterate(blk); } -void TenuredGeneration::space_iterate(SpaceClosure* blk, - bool usedOnly) { - blk->do_space(_the_space); -} - -void TenuredGeneration::younger_refs_iterate(OopsInGenClosure* blk) { - blk->set_generation(this); - younger_refs_in_space_iterate(_the_space, blk); - blk->reset_generation(); -} - void TenuredGeneration::save_marks() { _the_space->set_saved_mark(); } - void TenuredGeneration::reset_saved_marks() { _the_space->reset_saved_mark(); } - bool TenuredGeneration::no_allocs_since_save_marks() { return _the_space->saved_mark_at_top(); } @@ -387,28 +280,25 @@ #undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN - void TenuredGeneration::gc_epilogue(bool full) { - _last_gc = WaterMark(the_space(), the_space()->top()); - // update the generation and space performance counters update_counters(); if (ZapUnusedHeapArea) { - the_space()->check_mangled_unused_area_complete(); + _the_space->check_mangled_unused_area_complete(); } } void TenuredGeneration::record_spaces_top() { assert(ZapUnusedHeapArea, "Not mangling unused space"); - the_space()->set_top_for_allocations(); + _the_space->set_top_for_allocations(); } void TenuredGeneration::verify() { - the_space()->verify(); + _the_space->verify(); } void TenuredGeneration::print_on(outputStream* st) const { Generation::print_on(st); st->print(" the"); - the_space()->print_on(st); + _the_space->print_on(st); } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/tenuredGeneration.hpp --- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "gc_implementation/shared/cSpaceCounters.hpp" #include "gc_implementation/shared/gcStats.hpp" #include "gc_implementation/shared/generationCounters.hpp" -#include "memory/generation.hpp" +#include "memory/cardGeneration.hpp" #include "utilities/macros.hpp" // TenuredGeneration models the heap containing old (promoted/tenured) objects @@ -42,27 +42,18 @@ friend class VM_PopulateDumpSharedSpace; protected: - ContiguousSpace* _the_space; // actual space holding objects - WaterMark _last_gc; // watermark between objects allocated before - // and after last GC. + ContiguousSpace* _the_space; // Actual space holding objects GenerationCounters* _gen_counters; CSpaceCounters* _space_counters; - // Grow generation with specified size (returns false if unable to grow) - virtual bool grow_by(size_t bytes); - // Grow generation to reserved size. 
- virtual bool grow_to_reserved(); - // Shrink generation with specified size (returns false if unable to shrink) - void shrink_by(size_t bytes); - // Allocation failure virtual bool expand(size_t bytes, size_t expand_bytes); - void shrink(size_t bytes); // Accessing spaces - ContiguousSpace* the_space() const { return _the_space; } + ContiguousSpace* space() const { return _the_space; } + void assert_correct_size_change_locking(); public: TenuredGeneration(ReservedSpace rs, size_t initial_byte_size, int level, GenRemSet* remset); @@ -81,33 +72,15 @@ return !ScavengeBeforeFullGC; } - inline bool is_in(const void* p) const; - - // Space enquiries - size_t capacity() const; - size_t used() const; - size_t free() const; - - MemRegion used_region() const; - size_t unsafe_max_alloc_nogc() const; size_t contiguous_available() const; // Iteration void object_iterate(ObjectClosure* blk); - void space_iterate(SpaceClosure* blk, bool usedOnly = false); - - void younger_refs_iterate(OopsInGenClosure* blk); - - inline CompactibleSpace* first_compaction_space() const; virtual inline HeapWord* allocate(size_t word_size, bool is_tlab); virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab); - // Accessing marks - inline WaterMark top_mark(); - inline WaterMark bottom_mark(); - #define TenuredGen_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); TenuredGen_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v) diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp --- a/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -22,53 +22,35 @@ * */ -#ifndef SHARE_VM_MEMORY_GENERATION_INLINE_HPP -#define SHARE_VM_MEMORY_GENERATION_INLINE_HPP +#ifndef SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP +#define SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP -#include "memory/genCollectedHeap.hpp" #include "memory/space.hpp" #include "memory/tenuredGeneration.hpp" -bool TenuredGeneration::is_in(const void* p) const { - return the_space()->is_in(p); -} - - -WaterMark TenuredGeneration::top_mark() { - return the_space()->top_mark(); -} - -CompactibleSpace* -TenuredGeneration::first_compaction_space() const { - return the_space(); -} - HeapWord* TenuredGeneration::allocate(size_t word_size, bool is_tlab) { assert(!is_tlab, "TenuredGeneration does not support TLAB allocation"); - return the_space()->allocate(word_size); + return _the_space->allocate(word_size); } HeapWord* TenuredGeneration::par_allocate(size_t word_size, bool is_tlab) { assert(!is_tlab, "TenuredGeneration does not support TLAB allocation"); - return the_space()->par_allocate(word_size); -} - -WaterMark TenuredGeneration::bottom_mark() { - return the_space()->bottom_mark(); + return _the_space->par_allocate(word_size); } size_t TenuredGeneration::block_size(const HeapWord* addr) const { - if (addr < the_space()->top()) return oop(addr)->size(); - else { - assert(addr == the_space()->top(), "non-block head arg to block_size"); - return the_space()->end() - the_space()->top(); + if (addr < _the_space->top()) { + return oop(addr)->size(); + } else { + assert(addr == _the_space->top(), "non-block head arg to block_size"); + return _the_space->end() - _the_space->top(); } } bool TenuredGeneration::block_is_obj(const HeapWord* addr) const { - return addr < the_space()->top(); + return addr < _the_space ->top(); } -#endif // SHARE_VM_MEMORY_GENERATION_INLINE_HPP +#endif // SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/universe.cpp --- a/hotspot/src/share/vm/memory/universe.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/universe.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -120,6 +120,7 @@ oop Universe::_out_of_memory_error_class_metaspace = NULL; oop Universe::_out_of_memory_error_array_size = NULL; oop Universe::_out_of_memory_error_gc_overhead_limit = NULL; +oop Universe::_out_of_memory_error_realloc_objects = NULL; objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL; volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0; bool Universe::_verify_in_progress = false; @@ -191,6 +192,7 @@ f->do_oop((oop*)&_out_of_memory_error_class_metaspace); f->do_oop((oop*)&_out_of_memory_error_array_size); f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit); + f->do_oop((oop*)&_out_of_memory_error_realloc_objects); f->do_oop((oop*)&_preallocated_out_of_memory_error_array); f->do_oop((oop*)&_null_ptr_exception_instance); f->do_oop((oop*)&_arithmetic_exception_instance); @@ -575,7 +577,8 @@ (throwable() != Universe::_out_of_memory_error_metaspace) && (throwable() != Universe::_out_of_memory_error_class_metaspace) && (throwable() != Universe::_out_of_memory_error_array_size) && - (throwable() != Universe::_out_of_memory_error_gc_overhead_limit)); + (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) && + (throwable() != Universe::_out_of_memory_error_realloc_objects)); } @@ -1039,6 +1042,7 @@ Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false); 
Universe::_out_of_memory_error_gc_overhead_limit = k_h->allocate_instance(CHECK_false); + Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false); // Setup preallocated NullPointerException // (this is currently used for a cheap & dirty solution in compiler exception handling) @@ -1078,6 +1082,9 @@ msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg()); + msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false); + java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg()); + msg = java_lang_String::create_from_str("/ by zero", CHECK_false); java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg()); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/memory/universe.hpp --- a/hotspot/src/share/vm/memory/universe.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/memory/universe.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -157,6 +157,7 @@ static oop _out_of_memory_error_class_metaspace; static oop _out_of_memory_error_array_size; static oop _out_of_memory_error_gc_overhead_limit; + static oop _out_of_memory_error_realloc_objects; static Array* _the_empty_int_array; // Canonicalized int array static Array* _the_empty_short_array; // Canonicalized short array @@ -328,6 +329,7 @@ static oop out_of_memory_error_class_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace); } static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); } static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); } + static oop out_of_memory_error_realloc_objects() { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects); } // Accessors needed for fast allocation static Klass** boolArrayKlassObj_addr() { return &_boolArrayKlassObj; } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/oops/cpCache.cpp --- a/hotspot/src/share/vm/oops/cpCache.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/oops/cpCache.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -287,9 +287,13 @@ // the lock, so that when the losing writer returns, he can use the linked // cache entry. - // Use the lock from the metaspace for this, which cannot stop for safepoint. - Mutex* metaspace_lock = cpool->pool_holder()->class_loader_data()->metaspace_lock(); - MutexLockerEx ml(metaspace_lock, Mutex::_no_safepoint_check_flag); + objArrayHandle resolved_references = cpool->resolved_references(); + // Use the resolved_references() lock for this cpCache entry. + // resolved_references are created for all classes with Invokedynamic, MethodHandle + // or MethodType constant pool cache entries. + assert(resolved_references() != NULL, + "a resolved_references array should have been created for this class"); + ObjectLocker ol(resolved_references, Thread::current()); if (!is_f1_null()) { return; } @@ -336,7 +340,6 @@ // This allows us to create fewer Methods, while keeping type safety. // - objArrayHandle resolved_references = cpool->resolved_references(); // Store appendix, if any. 
if (has_appendix) { const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset; diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/opto/castnode.cpp --- a/hotspot/src/share/vm/opto/castnode.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/opto/castnode.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -104,7 +104,8 @@ // Try to improve the type of the CastII if we recognize a CmpI/If // pattern. if (_carry_dependency) { - if (in(0) != NULL && (in(0)->is_IfFalse() || in(0)->is_IfTrue())) { + if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) { + assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj"); Node* proj = in(0); if (proj->in(0)->in(1)->is_Bool()) { Node* b = proj->in(0)->in(1); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/opto/ifnode.cpp --- a/hotspot/src/share/vm/opto/ifnode.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/opto/ifnode.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -820,6 +820,11 @@ static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff); +struct RangeCheck { + Node* ctl; + jint off; +}; + //------------------------------Ideal------------------------------------------ // Return a node which is more "ideal" than the current node. Strip out // control copies @@ -861,83 +866,141 @@ jint offset1; int flip1 = is_range_check(range1, index1, offset1); if( flip1 ) { - Node *first_prev_dom = NULL; - // Try to remove extra range checks. All 'up_one_dom' gives up at merges // so all checks we inspect post-dominate the top-most check we find. // If we are going to fail the current check and we reach the top check // then we are guaranteed to fail, so just start interpreting there. - // We 'expand' the top 2 range checks to include all post-dominating + // We 'expand' the top 3 range checks to include all post-dominating // checks. - // The top 2 range checks seen - Node *prev_chk1 = NULL; - Node *prev_chk2 = NULL; + // The top 3 range checks seen + const int NRC =3; + RangeCheck prev_checks[NRC]; + int nb_checks = 0; + // Low and high offsets seen so far jint off_lo = offset1; jint off_hi = offset1; - // Scan for the top 2 checks and collect range of offsets - for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit - if( dom->Opcode() == Op_If && // Not same opcode? - prev_dom->in(0) == dom ) { // One path of test does dominate? - if( dom == this ) return NULL; // dead loop + bool found_immediate_dominator = false; + + // Scan for the top checks and collect range of offsets + for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit + if (dom->Opcode() == Op_If && // Not same opcode? + prev_dom->in(0) == dom) { // One path of test does dominate? + if (dom == this) return NULL; // dead loop // See if this is a range check Node *index2, *range2; jint offset2; int flip2 = dom->as_If()->is_range_check(range2, index2, offset2); // See if this is a _matching_ range check, checking against // the same array bounds. - if( flip2 == flip1 && range2 == range1 && index2 == index1 && - dom->outcnt() == 2 ) { + if (flip2 == flip1 && range2 == range1 && index2 == index1 && + dom->outcnt() == 2) { + if (nb_checks == 0 && dom->in(1) == in(1)) { + // Found an immediately dominating test at the same offset. + // This kind of back-to-back test can be eliminated locally, + // and there is no need to search further for dominating tests. 
+ assert(offset2 == offset1, "Same test but different offsets"); + found_immediate_dominator = true; + break; + } // Gather expanded bounds off_lo = MIN2(off_lo,offset2); off_hi = MAX2(off_hi,offset2); - // Record top 2 range checks - prev_chk2 = prev_chk1; - prev_chk1 = prev_dom; - // If we match the test exactly, then the top test covers - // both our lower and upper bounds. - if( dom->in(1) == in(1) ) - prev_chk2 = prev_chk1; + // Record top NRC range checks + prev_checks[nb_checks%NRC].ctl = prev_dom; + prev_checks[nb_checks%NRC].off = offset2; + nb_checks++; } } prev_dom = dom; - dom = up_one_dom( dom ); - if( !dom ) break; + dom = up_one_dom(dom); + if (!dom) break; } - - // Attempt to widen the dominating range check to cover some later - // ones. Since range checks "fail" by uncommon-trapping to the - // interpreter, widening a check can make us speculative enter the - // interpreter. If we see range-check deopt's, do not widen! - if (!phase->C->allow_range_check_smearing()) return NULL; + if (!found_immediate_dominator) { + // Attempt to widen the dominating range check to cover some later + // ones. Since range checks "fail" by uncommon-trapping to the + // interpreter, widening a check can make us speculatively enter + // the interpreter. If we see range-check deopt's, do not widen! + if (!phase->C->allow_range_check_smearing()) return NULL; - // Constant indices only need to check the upper bound. - // Non-constance indices must check both low and high. - if( index1 ) { - // Didn't find 2 prior covering checks, so cannot remove anything. - if( !prev_chk2 ) return NULL; - // 'Widen' the offsets of the 1st and 2nd covering check - adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn ); - // Do not call adjust_check twice on the same projection - // as the first call may have transformed the BoolNode to a ConI - if( prev_chk1 != prev_chk2 ) { - adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn ); + // Didn't find prior covering check, so cannot remove anything. + if (nb_checks == 0) { + return NULL; } - // Test is now covered by prior checks, dominate it out - prev_dom = prev_chk2; - } else { - // Didn't find prior covering check, so cannot remove anything. - if( !prev_chk1 ) return NULL; - // 'Widen' the offset of the 1st and only covering check - adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn ); - // Test is now covered by prior checks, dominate it out - prev_dom = prev_chk1; + // Constant indices only need to check the upper bound. + // Non-constant indices must check both low and high. + int chk0 = (nb_checks - 1) % NRC; + if (index1) { + if (nb_checks == 1) { + return NULL; + } else { + // If the top range check's constant is the min or max of + // all constants we widen the next one to cover the whole + // range of constants. + RangeCheck rc0 = prev_checks[chk0]; + int chk1 = (nb_checks - 2) % NRC; + RangeCheck rc1 = prev_checks[chk1]; + if (rc0.off == off_lo) { + adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn); + prev_dom = rc1.ctl; + } else if (rc0.off == off_hi) { + adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn); + prev_dom = rc1.ctl; + } else { + // If the top test's constant is not the min or max of all + // constants, we need 3 range checks. We must leave the + // top test unchanged because widening it would allow the + // accesses it protects to successfully read/write out of + // bounds. 
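The interval arithmetic spelled out in the comments just below can be checked exhaustively for small values: once the surviving checks are widened to the minimum and maximum of all constants, any index they admit also passes every original check in between. A standalone sketch with an illustrative length of 10 and offsets {2, 5, 7}:

#include <cassert>
#include <cstdio>

// A range check "off + i" passes when 0 <= i + off < length.
static bool passes(int i, int off, int length) {
  int idx = i + off;
  return idx >= 0 && idx < length;
}

int main() {
  const int length = 10;
  const int offs[] = {2, 5, 7};        // illustrative constants
  const int off_lo = 2, off_hi = 7;    // min and max of all offsets

  // If i survives the checks widened to off_lo and off_hi, it survives every
  // original check in between, so those checks can be dominated out.
  for (int i = -20; i <= 20; i++) {
    if (passes(i, off_lo, length) && passes(i, off_hi, length)) {
      for (int k = 0; k < 3; k++) {
        assert(passes(i, offs[k], length));
      }
    }
  }
  printf("widened checks at offsets %d and %d cover offsets {2, 5, 7}\n", off_lo, off_hi);
  return 0;
}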
+ if (nb_checks == 2) { + return NULL; + } + int chk2 = (nb_checks - 3) % NRC; + RangeCheck rc2 = prev_checks[chk2]; + // The top range check a+i covers interval: -a <= i < length-a + // The second range check b+i covers interval: -b <= i < length-b + if (rc1.off <= rc0.off) { + // if b <= a, we change the second range check to: + // -min_of_all_constants <= i < length-min_of_all_constants + // Together top and second range checks now cover: + // -min_of_all_constants <= i < length-a + // which is more restrictive than -b <= i < length-b: + // -b <= -min_of_all_constants <= i < length-a <= length-b + // The third check is then changed to: + // -max_of_all_constants <= i < length-max_of_all_constants + // so 2nd and 3rd checks restrict allowed values of i to: + // -min_of_all_constants <= i < length-max_of_all_constants + adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn); + adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn); + } else { + // if b > a, we change the second range check to: + // -max_of_all_constants <= i < length-max_of_all_constants + // Together top and second range checks now cover: + // -a <= i < length-max_of_all_constants + // which is more restrictive than -b <= i < length-b: + // -b < -a <= i < length-max_of_all_constants <= length-b + // The third check is then changed to: + // -max_of_all_constants <= i < length-max_of_all_constants + // so 2nd and 3rd checks restrict allowed values of i to: + // -min_of_all_constants <= i < length-max_of_all_constants + adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn); + adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn); + } + prev_dom = rc2.ctl; + } + } + } else { + RangeCheck rc0 = prev_checks[chk0]; + // 'Widen' the offset of the 1st and only covering check + adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn); + // Test is now covered by prior checks, dominate it out + prev_dom = rc0.ctl; + } } - } else { // Scan for an equivalent test Node *cmp; @@ -1019,7 +1082,7 @@ // for lower and upper bounds. ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) - prev_dom = idom; + prev_dom = idom; // Now walk the current IfNode's projections. // Loop ends when 'this' has no more uses. diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/opto/loopopts.cpp --- a/hotspot/src/share/vm/opto/loopopts.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/opto/loopopts.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -241,8 +241,13 @@ ProjNode* dp_proj = dp->as_Proj(); ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj(); if (exclude_loop_predicate && - unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate)) + (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) { + // If this is a range check (IfNode::is_range_check), do not + // reorder because Compile::allow_range_check_smearing might have + // changed the check. return; // Let IGVN transformation change control dependence. + } IdealLoopTree *old_loop = get_loop(dp); @@ -898,23 +903,23 @@ int n_op = n->Opcode(); // Check for an IF being dominated by another IF same test - if( n_op == Op_If ) { + if (n_op == Op_If) { Node *bol = n->in(1); uint max = bol->outcnt(); // Check for same test used more than once? 
- if( n_op == Op_If && max > 1 && bol->is_Bool() ) { + if (max > 1 && bol->is_Bool()) { // Search up IDOMs to see if this IF is dominated. Node *cutoff = get_ctrl(bol); // Now search up IDOMs till cutoff, looking for a dominating test Node *prevdom = n; Node *dom = idom(prevdom); - while( dom != cutoff ) { - if( dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom ) { + while (dom != cutoff) { + if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) { // Replace the dominated test with an obvious true or false. // Place it on the IGVN worklist for later cleanup. C->set_major_progress(); - dominated_by( prevdom, n, false, true ); + dominated_by(prevdom, n, false, true); #ifndef PRODUCT if( VerifyLoopOptimizations ) verify(); #endif diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/opto/macro.cpp --- a/hotspot/src/share/vm/opto/macro.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/opto/macro.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -971,7 +971,11 @@ } bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { - if (!EliminateAllocations || !alloc->_is_non_escaping) { + // Don't do scalar replacement if the frame can be popped by JVMTI: + // if reallocation fails during deoptimization we'll pop all + // interpreter frames for this compiled frame and that won't play + // nice with JVMTI popframe. + if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) { return false; } Node* klass = alloc->in(AllocateNode::KlassNode); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/prims/whitebox.hpp --- a/hotspot/src/share/vm/prims/whitebox.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/prims/whitebox.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -74,7 +74,7 @@ static JavaThread* create_sweeper_thread(TRAPS); static int get_blob_type(const CodeBlob* code); static CodeHeap* get_code_heap(int blob_type); - static CodeBlob* allocate_code_blob(int blob_type, int size); + static CodeBlob* allocate_code_blob(int size, int blob_type); static int array_bytes_to_length(size_t bytes); static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread, JNINativeMethod* method_array, int method_count); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/arguments.cpp --- a/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -125,8 +125,8 @@ char* Arguments::_meta_index_dir = NULL; char* Arguments::_ext_dirs = NULL; -// Check if head of 'option' matches 'name', and sets 'tail' remaining part of option string - +// Check if head of 'option' matches 'name', and sets 'tail' to the remaining +// part of the option string. static bool match_option(const JavaVMOption *option, const char* name, const char** tail) { int len = (int)strlen(name); @@ -138,6 +138,32 @@ } } +// Check if 'option' matches 'name'. No "tail" is allowed. +static bool match_option(const JavaVMOption *option, const char* name) { + const char* tail = NULL; + bool result = match_option(option, name, &tail); + if (tail != NULL && *tail == '\0') { + return result; + } else { + return false; + } +} + +// Return true if any of the strings in null-terminated array 'names' matches. +// If tail_allowed is true, then the tail must begin with a colon; otherwise, +// the option must match exactly. 
+static bool match_option(const JavaVMOption* option, const char** names, const char** tail, + bool tail_allowed) { + for (/* empty */; *names != NULL; ++names) { + if (match_option(option, *names, tail)) { + if (**tail == '\0' || tail_allowed && **tail == ':') { + return true; + } + } + } + return false; +} + static void logOption(const char* opt) { if (PrintVMOptions) { jio_fprintf(defaultStream::output_stream(), "VM option '%s'\n", opt); @@ -2526,21 +2552,6 @@ "-dsa", "-esa", "-disablesystemassertions", "-enablesystemassertions", 0 }; -// Return true if any of the strings in null-terminated array 'names' matches. -// If tail_allowed is true, then the tail must begin with a colon; otherwise, -// the option must match exactly. -static bool match_option(const JavaVMOption* option, const char** names, const char** tail, - bool tail_allowed) { - for (/* empty */; *names != NULL; ++names) { - if (match_option(option, *names, tail)) { - if (**tail == '\0' || tail_allowed && **tail == ':') { - return true; - } - } - } - return false; -} - bool Arguments::parse_uintx(const char* value, uintx* uintx_arg, uintx min_size) { @@ -2782,16 +2793,16 @@ } #endif // !INCLUDE_JVMTI // -Xnoclassgc - } else if (match_option(option, "-Xnoclassgc", &tail)) { + } else if (match_option(option, "-Xnoclassgc")) { FLAG_SET_CMDLINE(bool, ClassUnloading, false); // -Xconcgc - } else if (match_option(option, "-Xconcgc", &tail)) { + } else if (match_option(option, "-Xconcgc")) { FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true); // -Xnoconcgc - } else if (match_option(option, "-Xnoconcgc", &tail)) { + } else if (match_option(option, "-Xnoconcgc")) { FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false); // -Xbatch - } else if (match_option(option, "-Xbatch", &tail)) { + } else if (match_option(option, "-Xbatch")) { FLAG_SET_CMDLINE(bool, BackgroundCompilation, false); // -Xmn for compatibility with other JVM vendors } else if (match_option(option, "-Xmn", &tail)) { @@ -2936,28 +2947,28 @@ } FLAG_SET_CMDLINE(uintx, IncreaseFirstTierCompileThresholdAt, (uintx)uint_IncreaseFirstTierCompileThresholdAt); // -green - } else if (match_option(option, "-green", &tail)) { + } else if (match_option(option, "-green")) { jio_fprintf(defaultStream::error_stream(), "Green threads support not available\n"); return JNI_EINVAL; // -native - } else if (match_option(option, "-native", &tail)) { + } else if (match_option(option, "-native")) { // HotSpot always uses native threads, ignore silently for compatibility // -Xsqnopause - } else if (match_option(option, "-Xsqnopause", &tail)) { + } else if (match_option(option, "-Xsqnopause")) { // EVM option, ignore silently for compatibility // -Xrs - } else if (match_option(option, "-Xrs", &tail)) { + } else if (match_option(option, "-Xrs")) { // Classic/EVM option, new functionality FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true); - } else if (match_option(option, "-Xusealtsigs", &tail)) { + } else if (match_option(option, "-Xusealtsigs")) { // change default internal VM signals used - lower case for back compat FLAG_SET_CMDLINE(bool, UseAltSigs, true); // -Xoptimize - } else if (match_option(option, "-Xoptimize", &tail)) { + } else if (match_option(option, "-Xoptimize")) { // EVM option, ignore silently for compatibility // -Xprof - } else if (match_option(option, "-Xprof", &tail)) { + } else if (match_option(option, "-Xprof")) { #if INCLUDE_FPROF _has_profile = true; #else // INCLUDE_FPROF @@ -2966,7 +2977,7 @@ return JNI_ERR; #endif // INCLUDE_FPROF // -Xconcurrentio - } else if 
(match_option(option, "-Xconcurrentio", &tail)) { + } else if (match_option(option, "-Xconcurrentio")) { FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true); FLAG_SET_CMDLINE(bool, BackgroundCompilation, false); FLAG_SET_CMDLINE(intx, DeferThrSuspendLoopCount, 1); @@ -2974,13 +2985,13 @@ FLAG_SET_CMDLINE(uintx, NewSizeThreadIncrease, 16 * K); // 20Kb per thread added to new generation // -Xinternalversion - } else if (match_option(option, "-Xinternalversion", &tail)) { + } else if (match_option(option, "-Xinternalversion")) { jio_fprintf(defaultStream::output_stream(), "%s\n", VM_Version::internal_vm_info_string()); vm_exit(0); #ifndef PRODUCT // -Xprintflags - } else if (match_option(option, "-Xprintflags", &tail)) { + } else if (match_option(option, "-Xprintflags")) { CommandLineFlags::printFlags(tty, false); vm_exit(0); #endif @@ -3014,29 +3025,29 @@ #endif } // -Xint - } else if (match_option(option, "-Xint", &tail)) { + } else if (match_option(option, "-Xint")) { set_mode_flags(_int); // -Xmixed - } else if (match_option(option, "-Xmixed", &tail)) { + } else if (match_option(option, "-Xmixed")) { set_mode_flags(_mixed); // -Xcomp - } else if (match_option(option, "-Xcomp", &tail)) { + } else if (match_option(option, "-Xcomp")) { // for testing the compiler; turn off all flags that inhibit compilation set_mode_flags(_comp); // -Xshare:dump - } else if (match_option(option, "-Xshare:dump", &tail)) { + } else if (match_option(option, "-Xshare:dump")) { FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true); set_mode_flags(_int); // Prevent compilation, which creates objects // -Xshare:on - } else if (match_option(option, "-Xshare:on", &tail)) { + } else if (match_option(option, "-Xshare:on")) { FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true); // -Xshare:auto - } else if (match_option(option, "-Xshare:auto", &tail)) { + } else if (match_option(option, "-Xshare:auto")) { FLAG_SET_CMDLINE(bool, UseSharedSpaces, true); FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false); // -Xshare:off - } else if (match_option(option, "-Xshare:off", &tail)) { + } else if (match_option(option, "-Xshare:off")) { FLAG_SET_CMDLINE(bool, UseSharedSpaces, false); FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false); // -Xverify @@ -3054,13 +3065,13 @@ return JNI_EINVAL; } // -Xdebug - } else if (match_option(option, "-Xdebug", &tail)) { + } else if (match_option(option, "-Xdebug")) { // note this flag has been used, then ignore set_xdebug_mode(true); // -Xnoagent - } else if (match_option(option, "-Xnoagent", &tail)) { + } else if (match_option(option, "-Xnoagent")) { // For compatibility with classic. HotSpot refuses to load the old style agent.dll. 
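All of the call sites converted in this hunk rely on the new exact-match overload of match_option(), which reuses the prefix matcher and then insists that nothing follows the name. A standalone sketch of that behavior, with a simplified stand-in for JavaVMOption:

#include <cassert>
#include <cstring>
#include <cstdio>

// Simplified stand-in for the JVM's JavaVMOption.
struct Option { const char* optionString; };

// Prefix match: on success, 'tail' points at whatever follows 'name'.
static bool match_option(const Option* option, const char* name, const char** tail) {
  size_t len = strlen(name);
  if (strncmp(option->optionString, name, len) == 0) {
    *tail = option->optionString + len;
    return true;
  }
  return false;
}

// Exact match: the prefix must match and nothing may follow it.
static bool match_option(const Option* option, const char* name) {
  const char* tail = NULL;
  return match_option(option, name, &tail) && tail != NULL && *tail == '\0';
}

int main() {
  Option xint   = { "-Xint" };
  Option xintxx = { "-Xinternalversion" };

  assert(match_option(&xint, "-Xint"));      // exact match accepted
  assert(!match_option(&xintxx, "-Xint"));   // trailing characters rejected
  printf("exact-match overload behaves as expected\n");
  return 0;
}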
- } else if (match_option(option, "-Xboundthreads", &tail)) { + } else if (match_option(option, "-Xboundthreads")) { // Bind user level threads to kernel threads (Solaris only) FLAG_SET_CMDLINE(bool, UseBoundThreads, true); } else if (match_option(option, "-Xloggc:", &tail)) { @@ -3090,14 +3101,14 @@ "check")) { return JNI_EINVAL; } - } else if (match_option(option, "vfprintf", &tail)) { + } else if (match_option(option, "vfprintf")) { _vfprintf_hook = CAST_TO_FN_PTR(vfprintf_hook_t, option->extraInfo); - } else if (match_option(option, "exit", &tail)) { + } else if (match_option(option, "exit")) { _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo); - } else if (match_option(option, "abort", &tail)) { + } else if (match_option(option, "abort")) { _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo); // -XX:+AggressiveHeap - } else if (match_option(option, "-XX:+AggressiveHeap", &tail)) { + } else if (match_option(option, "-XX:+AggressiveHeap")) { // This option inspects the machine and attempts to set various // parameters to be optimal for long-running, memory allocation @@ -3188,11 +3199,11 @@ // Need to keep consistency of MaxTenuringThreshold and AlwaysTenure/NeverTenure; // and the last option wins. - } else if (match_option(option, "-XX:+NeverTenure", &tail)) { + } else if (match_option(option, "-XX:+NeverTenure")) { FLAG_SET_CMDLINE(bool, NeverTenure, true); FLAG_SET_CMDLINE(bool, AlwaysTenure, false); FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1); - } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) { + } else if (match_option(option, "-XX:+AlwaysTenure")) { FLAG_SET_CMDLINE(bool, NeverTenure, false); FLAG_SET_CMDLINE(bool, AlwaysTenure, true); FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0); @@ -3211,17 +3222,17 @@ FLAG_SET_CMDLINE(bool, NeverTenure, false); FLAG_SET_CMDLINE(bool, AlwaysTenure, false); } - } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled", &tail) || - match_option(option, "-XX:-CMSPermGenSweepingEnabled", &tail)) { + } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled") || + match_option(option, "-XX:-CMSPermGenSweepingEnabled")) { jio_fprintf(defaultStream::error_stream(), "Please use CMSClassUnloadingEnabled in place of " "CMSPermGenSweepingEnabled in the future\n"); - } else if (match_option(option, "-XX:+UseGCTimeLimit", &tail)) { + } else if (match_option(option, "-XX:+UseGCTimeLimit")) { FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true); jio_fprintf(defaultStream::error_stream(), "Please use -XX:+UseGCOverheadLimit in place of " "-XX:+UseGCTimeLimit in the future\n"); - } else if (match_option(option, "-XX:-UseGCTimeLimit", &tail)) { + } else if (match_option(option, "-XX:-UseGCTimeLimit")) { FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, false); jio_fprintf(defaultStream::error_stream(), "Please use -XX:-UseGCOverheadLimit in place of " @@ -3231,13 +3242,13 @@ // are not to be documented. } else if (match_option(option, "-XX:MaxTLERatio=", &tail)) { // No longer used. 
- } else if (match_option(option, "-XX:+ResizeTLE", &tail)) { + } else if (match_option(option, "-XX:+ResizeTLE")) { FLAG_SET_CMDLINE(bool, ResizeTLAB, true); - } else if (match_option(option, "-XX:-ResizeTLE", &tail)) { + } else if (match_option(option, "-XX:-ResizeTLE")) { FLAG_SET_CMDLINE(bool, ResizeTLAB, false); - } else if (match_option(option, "-XX:+PrintTLE", &tail)) { + } else if (match_option(option, "-XX:+PrintTLE")) { FLAG_SET_CMDLINE(bool, PrintTLAB, true); - } else if (match_option(option, "-XX:-PrintTLE", &tail)) { + } else if (match_option(option, "-XX:-PrintTLE")) { FLAG_SET_CMDLINE(bool, PrintTLAB, false); } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) { // No longer used. @@ -3253,17 +3264,17 @@ FLAG_SET_CMDLINE(uintx, TLABSize, long_tlab_size); } else if (match_option(option, "-XX:TLEThreadRatio=", &tail)) { // No longer used. - } else if (match_option(option, "-XX:+UseTLE", &tail)) { + } else if (match_option(option, "-XX:+UseTLE")) { FLAG_SET_CMDLINE(bool, UseTLAB, true); - } else if (match_option(option, "-XX:-UseTLE", &tail)) { + } else if (match_option(option, "-XX:-UseTLE")) { FLAG_SET_CMDLINE(bool, UseTLAB, false); - } else if (match_option(option, "-XX:+DisplayVMOutputToStderr", &tail)) { + } else if (match_option(option, "-XX:+DisplayVMOutputToStderr")) { FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false); FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true); - } else if (match_option(option, "-XX:+DisplayVMOutputToStdout", &tail)) { + } else if (match_option(option, "-XX:+DisplayVMOutputToStdout")) { FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, false); FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, true); - } else if (match_option(option, "-XX:+ExtendedDTraceProbes", &tail)) { + } else if (match_option(option, "-XX:+ExtendedDTraceProbes")) { #if defined(DTRACE_ENABLED) FLAG_SET_CMDLINE(bool, ExtendedDTraceProbes, true); FLAG_SET_CMDLINE(bool, DTraceMethodProbes, true); @@ -3275,7 +3286,7 @@ return JNI_EINVAL; #endif // defined(DTRACE_ENABLED) #ifdef ASSERT - } else if (match_option(option, "-XX:+FullGCALot", &tail)) { + } else if (match_option(option, "-XX:+FullGCALot")) { FLAG_SET_CMDLINE(bool, FullGCALot, true); // disable scavenge before parallel mark-compact FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false); @@ -3361,7 +3372,7 @@ } FLAG_SET_CMDLINE(uintx, MaxDirectMemorySize, max_direct_memory_size); #if !INCLUDE_MANAGEMENT - } else if (match_option(option, "-XX:+ManagementServer", &tail)) { + } else if (match_option(option, "-XX:+ManagementServer")) { jio_fprintf(defaultStream::error_stream(), "ManagementServer is not supported in this VM.\n"); return JNI_ERR; @@ -3796,23 +3807,23 @@ settings_file_specified = true; continue; } - if (match_option(option, "-XX:+PrintVMOptions", &tail)) { + if (match_option(option, "-XX:+PrintVMOptions")) { PrintVMOptions = true; continue; } - if (match_option(option, "-XX:-PrintVMOptions", &tail)) { + if (match_option(option, "-XX:-PrintVMOptions")) { PrintVMOptions = false; continue; } - if (match_option(option, "-XX:+IgnoreUnrecognizedVMOptions", &tail)) { + if (match_option(option, "-XX:+IgnoreUnrecognizedVMOptions")) { IgnoreUnrecognizedVMOptions = true; continue; } - if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions", &tail)) { + if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions")) { IgnoreUnrecognizedVMOptions = false; continue; } - if (match_option(option, "-XX:+PrintFlagsInitial", &tail)) { + if (match_option(option, "-XX:+PrintFlagsInitial")) { 
CommandLineFlags::printFlags(tty, false); vm_exit(0); } @@ -3838,7 +3849,7 @@ #ifndef PRODUCT - if (match_option(option, "-XX:+PrintFlagsWithComments", &tail)) { + if (match_option(option, "-XX:+PrintFlagsWithComments")) { CommandLineFlags::printFlags(tty, true); vm_exit(0); } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/deoptimization.cpp --- a/hotspot/src/share/vm/runtime/deoptimization.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/deoptimization.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -176,6 +176,8 @@ assert(vf->is_compiled_frame(), "Wrong frame type"); chunk->push(compiledVFrame::cast(vf)); + bool realloc_failures = false; + #ifdef COMPILER2 // Reallocate the non-escaping objects and restore their fields. Then // relock objects if synchronization on them was eliminated. @@ -206,19 +208,16 @@ tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread); } } - bool reallocated = false; if (objects != NULL) { JRT_BLOCK - reallocated = realloc_objects(thread, &deoptee, objects, THREAD); + realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD); JRT_END - } - if (reallocated) { - reassign_fields(&deoptee, &map, objects); + reassign_fields(&deoptee, &map, objects, realloc_failures); #ifndef PRODUCT if (TraceDeoptimization) { ttyLocker ttyl; tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread); - print_objects(objects); + print_objects(objects, realloc_failures); } #endif } @@ -236,7 +235,7 @@ assert (cvf->scope() != NULL,"expect only compiled java frames"); GrowableArray* monitors = cvf->monitors(); if (monitors->is_nonempty()) { - relock_objects(monitors, thread); + relock_objects(monitors, thread, realloc_failures); #ifndef PRODUCT if (TraceDeoptimization) { ttyLocker ttyl; @@ -247,7 +246,12 @@ first = false; tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread); } - tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner()); + if (mi->owner_is_scalar_replaced()) { + Klass* k = java_lang_Class::as_Klass(mi->owner_klass()); + tty->print_cr(" failed reallocation for klass %s", k->external_name()); + } else { + tty->print_cr(" object <" INTPTR_FORMAT "> locked", (void *)mi->owner()); + } } } } @@ -262,9 +266,14 @@ // out the java state residing in the vframeArray will be missed. 
No_Safepoint_Verifier no_safepoint; - vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk); + vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures); +#ifdef COMPILER2 + if (realloc_failures) { + pop_frames_failed_reallocs(thread, array); + } +#endif - assert(thread->vframe_array_head() == NULL, "Pending deopt!");; + assert(thread->vframe_array_head() == NULL, "Pending deopt!"); thread->set_vframe_array_head(array); // Now that the vframeArray has been created if we have any deferred local writes @@ -718,6 +727,8 @@ int exception_line = thread->exception_line(); thread->clear_pending_exception(); + bool failures = false; + for (int i = 0; i < objects->length(); i++) { assert(objects->at(i)->is_object(), "invalid debug information"); ObjectValue* sv = (ObjectValue*) objects->at(i); @@ -727,27 +738,34 @@ if (k->oop_is_instance()) { InstanceKlass* ik = InstanceKlass::cast(k()); - obj = ik->allocate_instance(CHECK_(false)); + obj = ik->allocate_instance(THREAD); } else if (k->oop_is_typeArray()) { TypeArrayKlass* ak = TypeArrayKlass::cast(k()); assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); int len = sv->field_size() / type2size[ak->element_type()]; - obj = ak->allocate(len, CHECK_(false)); + obj = ak->allocate(len, THREAD); } else if (k->oop_is_objArray()) { ObjArrayKlass* ak = ObjArrayKlass::cast(k()); - obj = ak->allocate(sv->field_size(), CHECK_(false)); + obj = ak->allocate(sv->field_size(), THREAD); } - assert(obj != NULL, "allocation failed"); + if (obj == NULL) { + failures = true; + } + assert(sv->value().is_null(), "redundant reallocation"); + assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception"); + CLEAR_PENDING_EXCEPTION; sv->set_value(obj); } - if (pending_exception.not_null()) { + if (failures) { + THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures); + } else if (pending_exception.not_null()) { thread->set_pending_exception(pending_exception(), exception_file, exception_line); } - return true; + return failures; } // This assumes that the fields are stored in ObjectValue in the same order @@ -885,12 +903,15 @@ // restore fields of all eliminated objects and arrays -void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects) { +void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures) { for (int i = 0; i < objects->length(); i++) { ObjectValue* sv = (ObjectValue*) objects->at(i); KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()())); Handle obj = sv->value(); - assert(obj.not_null(), "reallocation was missed"); + assert(obj.not_null() || realloc_failures, "reallocation was missed"); + if (obj.is_null()) { + continue; + } if (k->oop_is_instance()) { InstanceKlass* ik = InstanceKlass::cast(k()); @@ -907,34 +928,36 @@ // relock objects for which synchronization was eliminated -void Deoptimization::relock_objects(GrowableArray* monitors, JavaThread* thread) { +void Deoptimization::relock_objects(GrowableArray* monitors, JavaThread* thread, bool realloc_failures) { for (int i = 0; i < monitors->length(); i++) { MonitorInfo* mon_info = monitors->at(i); if (mon_info->eliminated()) { - assert(mon_info->owner() != NULL, "reallocation was missed"); - Handle obj = Handle(mon_info->owner()); - markOop mark = obj->mark(); - if (UseBiasedLocking && mark->has_bias_pattern()) { - // New allocated objects may 
have the mark set to anonymously biased. - // Also the deoptimized method may called methods with synchronization - // where the thread-local object is bias locked to the current thread. - assert(mark->is_biased_anonymously() || - mark->biased_locker() == thread, "should be locked to current thread"); - // Reset mark word to unbiased prototype. - markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); - obj->set_mark(unbiased_prototype); + assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed"); + if (!mon_info->owner_is_scalar_replaced()) { + Handle obj = Handle(mon_info->owner()); + markOop mark = obj->mark(); + if (UseBiasedLocking && mark->has_bias_pattern()) { + // New allocated objects may have the mark set to anonymously biased. + // Also the deoptimized method may called methods with synchronization + // where the thread-local object is bias locked to the current thread. + assert(mark->is_biased_anonymously() || + mark->biased_locker() == thread, "should be locked to current thread"); + // Reset mark word to unbiased prototype. + markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); + obj->set_mark(unbiased_prototype); + } + BasicLock* lock = mon_info->lock(); + ObjectSynchronizer::slow_enter(obj, lock, thread); + assert(mon_info->owner()->is_locked(), "object must be locked now"); } - BasicLock* lock = mon_info->lock(); - ObjectSynchronizer::slow_enter(obj, lock, thread); } - assert(mon_info->owner()->is_locked(), "object must be locked now"); } } #ifndef PRODUCT // print information about reallocated objects -void Deoptimization::print_objects(GrowableArray* objects) { +void Deoptimization::print_objects(GrowableArray* objects, bool realloc_failures) { fieldDescriptor fd; for (int i = 0; i < objects->length(); i++) { @@ -944,10 +967,15 @@ tty->print(" object <" INTPTR_FORMAT "> of type ", (void *)sv->value()()); k->print_value(); - tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); + assert(obj.not_null() || realloc_failures, "reallocation was missed"); + if (obj.is_null()) { + tty->print(" allocation failed"); + } else { + tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); + } tty->cr(); - if (Verbose) { + if (Verbose && !obj.is_null()) { k->oop_print_on(obj(), tty); } } @@ -955,7 +983,7 @@ #endif #endif // COMPILER2 -vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray* chunk) { +vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray* chunk, bool realloc_failures) { Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp()); #ifndef PRODUCT @@ -998,7 +1026,7 @@ // Since the Java thread being deoptimized will eventually adjust it's own stack, // the vframeArray containing the unpacking information is allocated in the C heap. // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames(). 
- vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr); + vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures); // Compare the vframeArray to the collected vframes assert(array->structural_compare(thread, chunk), "just checking"); @@ -1013,6 +1041,33 @@ return array; } +#ifdef COMPILER2 +void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) { + // Reallocation of some scalar replaced objects failed. Record + // that we need to pop all the interpreter frames for the + // deoptimized compiled frame. + assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?"); + thread->set_frames_to_pop_failed_realloc(array->frames()); + // Unlock all monitors here otherwise the interpreter will see a + // mix of locked and unlocked monitors (because of failed + // reallocations of synchronized objects) and be confused. + for (int i = 0; i < array->frames(); i++) { + MonitorChunk* monitors = array->element(i)->monitors(); + if (monitors != NULL) { + for (int j = 0; j < monitors->number_of_monitors(); j++) { + BasicObjectLock* src = monitors->at(j); + if (src->obj() != NULL) { + ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread); + } + } + array->element(i)->free_monitors(thread); +#ifdef ASSERT + array->element(i)->set_removed_monitors(); +#endif + } + } +} +#endif static void collect_monitors(compiledVFrame* cvf, GrowableArray* objects_to_revoke) { GrowableArray* monitors = cvf->monitors(); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/deoptimization.hpp --- a/hotspot/src/share/vm/runtime/deoptimization.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/deoptimization.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -125,13 +125,14 @@ static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray* objects, TRAPS); static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type); static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj); - static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects); - static void relock_objects(GrowableArray* monitors, JavaThread* thread); - NOT_PRODUCT(static void print_objects(GrowableArray* objects);) + static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray* objects, bool realloc_failures); + static void relock_objects(GrowableArray* monitors, JavaThread* thread, bool realloc_failures); + static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array); + NOT_PRODUCT(static void print_objects(GrowableArray* objects, bool realloc_failures);) #endif // COMPILER2 public: - static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray* chunk); + static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray* chunk, bool realloc_failures); // Interface used for unpacking deoptimized frames diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/sharedRuntime.cpp --- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -456,6 +456,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { assert(frame::verify_return_pc(return_address), 
err_msg("must be a return address: " INTPTR_FORMAT, return_address)); + assert(thread->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?"); // Reset method handle flag. thread->set_is_method_handle_return(false); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/thread.cpp --- a/hotspot/src/share/vm/runtime/thread.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/thread.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -1448,6 +1448,7 @@ _popframe_condition = popframe_inactive; _popframe_preserved_args = NULL; _popframe_preserved_args_size = 0; + _frames_to_pop_failed_realloc = 0; pd_initialize(); } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/thread.hpp --- a/hotspot/src/share/vm/runtime/thread.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/thread.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -908,6 +908,12 @@ // This is set to popframe_pending to signal that top Java frame should be popped immediately int _popframe_condition; + // If reallocation of scalar replaced objects fails, we throw OOM + // and during exception propagation, pop the top + // _frames_to_pop_failed_realloc frames, the ones that reference + // failed reallocations. + int _frames_to_pop_failed_realloc; + #ifndef PRODUCT int _jmp_ring_index; struct { @@ -1567,6 +1573,10 @@ void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } #endif + int frames_to_pop_failed_realloc() const { return _frames_to_pop_failed_realloc; } + void set_frames_to_pop_failed_realloc(int nb) { _frames_to_pop_failed_realloc = nb; } + void dec_frames_to_pop_failed_realloc() { _frames_to_pop_failed_realloc--; } + private: // Saved incoming arguments to popped frame. // Used only when popped interpreted frame returns to deoptimized frame. 
diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/vframeArray.cpp --- a/hotspot/src/share/vm/runtime/vframeArray.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/vframeArray.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -57,7 +57,7 @@ } } -void vframeArrayElement::fill_in(compiledVFrame* vf) { +void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { // Copy the information from the compiled vframe to the // interpreter frame we will be creating to replace vf @@ -65,6 +65,9 @@ _method = vf->method(); _bci = vf->raw_bci(); _reexecute = vf->should_reexecute(); +#ifdef ASSERT + _removed_monitors = false; +#endif int index; @@ -82,11 +85,15 @@ // Migrate the BasicLocks from the stack to the monitor chunk for (index = 0; index < list->length(); index++) { MonitorInfo* monitor = list->at(index); - assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already"); - assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased"); + assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); BasicObjectLock* dest = _monitors->at(index); - dest->set_obj(monitor->owner()); - monitor->lock()->move_to(monitor->owner(), dest->lock()); + if (monitor->owner_is_scalar_replaced()) { + dest->set_obj(NULL); + } else { + assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased"); + dest->set_obj(monitor->owner()); + monitor->lock()->move_to(monitor->owner(), dest->lock()); + } } } @@ -111,7 +118,7 @@ StackValue* value = locs->at(index); switch(value->type()) { case T_OBJECT: - assert(!value->obj_is_scalar_replaced(), "object should be reallocated already"); + assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); // preserve object type _locals->add( new StackValue(cast_from_oop((value->get_obj()())), T_OBJECT )); break; @@ -136,7 +143,7 @@ StackValue* value = exprs->at(index); switch(value->type()) { case T_OBJECT: - assert(!value->obj_is_scalar_replaced(), "object should be reallocated already"); + assert(!value->obj_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); // preserve object type _expressions->add( new StackValue(cast_from_oop((value->get_obj()())), T_OBJECT )); break; @@ -287,7 +294,7 @@ _frame.patch_pc(thread, pc); - assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors"); + assert (!method()->is_synchronized() || locks > 0 || _removed_monitors, "synchronized methods must have monitors"); BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin(); for (int index = 0; index < locks; index++) { @@ -439,7 +446,8 @@ vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray* chunk, - RegisterMap *reg_map, frame sender, frame caller, frame self) { + RegisterMap *reg_map, frame sender, frame caller, frame self, + bool realloc_failures) { // Allocate the vframeArray vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part @@ -451,19 +459,20 @@ result->_caller = caller; result->_original = self; result->set_unroll_block(NULL); // initialize it - result->fill_in(thread, frame_size, chunk, reg_map); + result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures); return result; } void 
vframeArray::fill_in(JavaThread* thread, int frame_size, GrowableArray* chunk, - const RegisterMap *reg_map) { + const RegisterMap *reg_map, + bool realloc_failures) { // Set owner first, it is used when adding monitor chunks _frame_size = frame_size; for(int i = 0; i < chunk->length(); i++) { - element(i)->fill_in(chunk->at(i)); + element(i)->fill_in(chunk->at(i), realloc_failures); } // Copy registers for callee-saved registers diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/vframeArray.hpp --- a/hotspot/src/share/vm/runtime/vframeArray.hpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/vframeArray.hpp Thu Dec 11 23:06:14 2014 -0800 @@ -58,6 +58,9 @@ MonitorChunk* _monitors; // active monitors for this vframe StackValueCollection* _locals; StackValueCollection* _expressions; +#ifdef ASSERT + bool _removed_monitors; +#endif public: @@ -78,7 +81,7 @@ StackValueCollection* expressions(void) const { return _expressions; } - void fill_in(compiledVFrame* vf); + void fill_in(compiledVFrame* vf, bool realloc_failures); // Formerly part of deoptimizedVFrame @@ -99,6 +102,12 @@ bool is_bottom_frame, int exec_mode); +#ifdef ASSERT + void set_removed_monitors() { + _removed_monitors = true; + } +#endif + #ifndef PRODUCT void print(outputStream* st); #endif /* PRODUCT */ @@ -160,13 +169,14 @@ int frames() const { return _frames; } static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray* chunk, - RegisterMap* reg_map, frame sender, frame caller, frame self); + RegisterMap* reg_map, frame sender, frame caller, frame self, + bool realloc_failures); vframeArrayElement* element(int index) { assert(is_within_bounds(index), "Bad index"); return &_elements[index]; } // Allocates a new vframe in the array and fills the array with vframe information in chunk - void fill_in(JavaThread* thread, int frame_size, GrowableArray* chunk, const RegisterMap *reg_map); + void fill_in(JavaThread* thread, int frame_size, GrowableArray* chunk, const RegisterMap *reg_map, bool realloc_failures); // Returns the owner of this vframeArray JavaThread* owner_thread() const { return _owner_thread; } diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/runtime/vmStructs.cpp --- a/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Dec 11 23:06:14 2014 -0800 @@ -556,9 +556,6 @@ \ nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \ nonstatic_field(TenuredGeneration, _the_space, ContiguousSpace*) \ - nonstatic_field(TenuredGeneration, _last_gc, WaterMark) \ - \ - \ \ nonstatic_field(Space, _bottom, HeapWord*) \ nonstatic_field(Space, _end, HeapWord*) \ diff -r 6494b13f88a8 -r 62648789b8ba hotspot/src/share/vm/trace/trace.xml --- a/hotspot/src/share/vm/trace/trace.xml Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/src/share/vm/trace/trace.xml Thu Dec 11 23:06:14 2014 -0800 @@ -314,6 +314,28 @@ + + + + + + + + + + + + + + + + + + diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/Makefile --- a/hotspot/test/Makefile Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/test/Makefile Thu Dec 11 23:06:14 2014 -0800 @@ -275,6 +275,9 @@ # Ignore tests are not run and completely silent about it JTREG_IGNORE_OPTION = -ignore:quiet JTREG_BASIC_OPTIONS += $(JTREG_IGNORE_OPTION) +# Multiply by 4 the timeout factor +JTREG_TIMEOUT_OPTION = -timeoutFactor:4 +JTREG_BASIC_OPTIONS += $(JTREG_TIMEOUT_OPTION) # Add any extra options JTREG_BASIC_OPTIONS += $(EXTRA_JTREG_OPTIONS) # Set other vm and test options 
diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java --- a/hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/test/compiler/dependencies/MonomorphicObjectCall/TestMonomorphicObjectCall.java Thu Dec 11 23:06:14 2014 -0800 @@ -61,6 +61,7 @@ String[] vmOpts = new String[] { "-Xbootclasspath/p:" + testClasses, "-Xcomp", + "-XX:+IgnoreUnrecognizedVMOptions", "-XX:-VerifyDependencies", "-XX:CompileOnly=TestMonomorphicObjectCall::callFinalize", "-XX:CompileOnly=Object::finalizeObject", diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/compiler/exceptions/SumTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/exceptions/SumTest.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8066900 + * @summary FP registers are not properly restored by C1 when handling exceptions + * @run main/othervm -Xbatch SumTest + * + */ +public class SumTest { + private static class Sum { + + double[] sums; + + /** + * Construct empty Sum + */ + public Sum() { + sums = new double[0]; + } + + /** + * Return the sum of all numbers added to this Sum + * + * @return the sum + */ + final public double getSum() { + double sum = 0; + for (final double s : sums) { + sum += s; + } + + return sum; + } + + /** + * Add a new number to this Sum + * + * @param a number to be added. + */ + final public void add(double a) { + try { + sums[sums.length] = -1; // Cause IndexOutOfBoundsException + } catch (final IndexOutOfBoundsException e) { + final double[] oldSums = sums; + sums = new double[oldSums.length + 1]; // Extend sums + System.arraycopy(oldSums, 0, sums, 0, oldSums.length); + sums[oldSums.length] = a; // Append a + } + } + } + + public static void main(String[] args) throws Exception { + final Sum sum = new Sum(); + for (int i = 1; i <= 10000; ++i) { + sum.add(1); + double ii = sum.getSum(); + if (i != ii) { + throw new Exception("Failure: computed = " + ii + ", expected = " + i); + } + } + } + +} + diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/compiler/rangechecks/TestRangeCheckSmearing.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/rangechecks/TestRangeCheckSmearing.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8066103 + * @summary C2's range check smearing allows out of bound array accesses + * @library /testlibrary /testlibrary/whitebox /compiler/whitebox /testlibrary/com/oracle/java/testlibrary + * @build TestRangeCheckSmearing + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform + * @run main/othervm -ea -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI + * -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearing + * + */ + +import java.lang.annotation.*; +import java.lang.reflect.*; +import java.util.*; +import sun.hotspot.WhiteBox; +import sun.hotspot.code.NMethod; +import com.oracle.java.testlibrary.Platform; + +public class TestRangeCheckSmearing { + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + + @Retention(RetentionPolicy.RUNTIME) + @interface Args { int[] value(); } + + // first range check is i + max of all constants + @Args({0, 8}) + static int m1(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+9]; + if (allaccesses) { + res += array[i+8]; + res += array[i+7]; + res += array[i+6]; + res += array[i+5]; + res += array[i+4]; + res += array[i+3]; + res += array[i+2]; + res += array[i+1]; + } + return res; + } + + // first range check is i + min of all constants + @Args({0, -9}) + static int m2(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+1]; + if (allaccesses) { + res += array[i+2]; + res += array[i+3]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + // first range check is not i + min/max of all constants + @Args({0, 8}) + static int m3(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i+2]; + res += array[i+1]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, -9}) + static int m4(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i+4]; + res += array[i+1]; + res += array[i+2]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, -3}) + static int m5(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+2]; + if (allaccesses) { + 
res += array[i+1]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, 6}) + static int m6(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+4]; + if (allaccesses) { + res += array[i+2]; + res += array[i+1]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, 6}) + static int m7(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+2]; + res += array[i+4]; + if (allaccesses) { + res += array[i+1]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({0, -3}) + static int m8(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+4]; + res += array[i+2]; + if (allaccesses) { + res += array[i+1]; + res += array[i+5]; + res += array[i+6]; + res += array[i+7]; + res += array[i+8]; + res += array[i+9]; + } + return res; + } + + @Args({6, 15}) + static int m9(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i-2]; + res += array[i-1]; + res += array[i-4]; + res += array[i-5]; + res += array[i-6]; + } + return res; + } + + @Args({3, 12}) + static int m10(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + if (allaccesses) { + res += array[i-2]; + res += array[i-1]; + res += array[i-3]; + res += array[i+4]; + res += array[i+5]; + res += array[i+6]; + } + return res; + } + + @Args({3, -3}) + static int m11(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i-2]; + if (allaccesses) { + res += array[i+5]; + res += array[i+6]; + } + return res; + } + + @Args({3, 6}) + static int m12(int[] array, int i, boolean allaccesses) { + int res = 0; + res += array[i+3]; + res += array[i+6]; + if (allaccesses) { + res += array[i-2]; + res += array[i-3]; + } + return res; + } + + // check that identical range check is replaced by dominating one + // only when correct + @Args({0}) + static int m13(int[] array, int i, boolean ignore) { + int res = 0; + res += array[i+3]; + res += array[i+3]; + return res; + } + + @Args({2, 0}) + static int m14(int[] array, int i, boolean ignore) { + int res = 0; + + res += array[i]; + res += array[i-2]; + res += array[i]; // If range check below were to be removed first this cannot be considered identical to first range check + res += array[i-1]; // range check removed so i-1 array access depends on previous check + + return res; + } + + static int[] m15_dummy = new int[10]; + @Args({2, 0}) + static int m15(int[] array, int i, boolean ignore) { + int res = 0; + res += array[i]; + + // When the loop is optimized out we don't want the + // array[i-1] access which is dependent on array[i]'s + // range check to become dependent on the identical range + // check above. 
+ + int[] array2 = m15_dummy; + int j = 0; + for (; j < 10; j++); + if (j == 10) { + array2 = array; + } + + res += array2[i-2]; + res += array2[i]; + res += array2[i-1]; // range check removed so i-1 array access depends on previous check + + return res; + } + + @Args({2, 0}) + static int m16(int[] array, int i, boolean ignore) { + int res = 0; + + res += array[i]; + res += array[i-1]; + res += array[i-1]; + res += array[i-2]; + + return res; + } + + @Args({2, 0}) + static int m17(int[] array, int i, boolean ignore) { + int res = 0; + + res += array[i]; + res += array[i-2]; + res += array[i-2]; + res += array[i+2]; + res += array[i+2]; + res += array[i-1]; + res += array[i-1]; + + return res; + } + + static public void main(String[] args) { + if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) { + throw new AssertionError("Background compilation enabled"); + } + new TestRangeCheckSmearing().doTests(); + } + boolean success = true; + boolean exception = false; + final int[] array = new int[10]; + final HashMap tests = new HashMap<>(); + { + final Class TEST_PARAM_TYPES[] = { int[].class, int.class, boolean.class }; + for (Method m : this.getClass().getDeclaredMethods()) { + if (m.getName().matches("m[0-9]+")) { + assert(Modifier.isStatic(m.getModifiers())) : m; + assert(m.getReturnType() == int.class) : m; + assert(Arrays.equals(m.getParameterTypes(), TEST_PARAM_TYPES)) : m; + tests.put(m.getName(), m); + } + } + } + + void invokeTest(Method m, int[] array, int index, boolean z) { + try { + m.invoke(null, array, index, z); + } catch (ReflectiveOperationException roe) { + Throwable ex = roe.getCause(); + if (ex instanceof ArrayIndexOutOfBoundsException) + throw (ArrayIndexOutOfBoundsException) ex; + throw new AssertionError(roe); + } + } + + void doTest(String name) { + Method m = tests.get(name); + tests.remove(name); + int[] args = m.getAnnotation(Args.class).value(); + int index0 = args[0], index1; + boolean exceptionRequired = true; + if (args.length == 2) { + index1 = args[1]; + } else { + // no negative test for this one + assert(args.length == 1); + assert(name.equals("m13")); + exceptionRequired = false; + index1 = index0; + } + // Get the method compiled. 
+ if (!WHITE_BOX.isMethodCompiled(m)) { + // If not, try to compile it with C2 + if(!WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION)) { + // C2 compiler not available, try to compile with C1 + WHITE_BOX.enqueueMethodForCompilation(m, CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE); + } + } + if (!WHITE_BOX.isMethodCompiled(m)) { + throw new RuntimeException(m + " not compiled"); + } + + // valid access + invokeTest(m, array, index0, true); + + if (!WHITE_BOX.isMethodCompiled(m)) { + throw new RuntimeException(m + " deoptimized on valid array access"); + } + + exception = false; + boolean test_success = true; + try { + invokeTest(m, array, index1, false); + } catch(ArrayIndexOutOfBoundsException aioob) { + exception = true; + System.out.println("ArrayIndexOutOfBoundsException thrown in "+name); + } + if (!exception) { + System.out.println("ArrayIndexOutOfBoundsException was not thrown in "+name); + } + + if (Platform.isServer()) { + if (exceptionRequired == WHITE_BOX.isMethodCompiled(m)) { + System.out.println((exceptionRequired?"Didn't deoptimized":"deoptimized") + " in "+name); + test_success = false; + } + } + + if (exception != exceptionRequired) { + System.out.println((exceptionRequired?"exception required but not thrown":"not exception required but thrown") + " in "+name); + test_success = false; + } + + if (!test_success) { + success = false; + System.out.println("TEST FAILED: "+name); + } + + } + void doTests() { + doTest("m1"); + doTest("m2"); + doTest("m3"); + doTest("m4"); + doTest("m5"); + doTest("m6"); + doTest("m7"); + doTest("m8"); + doTest("m9"); + doTest("m10"); + doTest("m11"); + doTest("m12"); + doTest("m13"); + doTest("m14"); + doTest("m15"); + doTest("m16"); + doTest("m17"); + if (!success) { + throw new RuntimeException("Some tests failed"); + } + assert(tests.isEmpty()) : tests; + } +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/compiler/rangechecks/TestRangeCheckSmearingLoopOpts.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/rangechecks/TestRangeCheckSmearingLoopOpts.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8048170 + * @summary Following range check smearing, range check cannot be replaced by dominating identical test. 
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestRangeCheckSmearingLoopOpts + * + */ +public class TestRangeCheckSmearingLoopOpts { + + static int dummy; + + static int m1(int[] array, int i) { + for (;;) { + for (;;) { + if (array[i] < 0) { // range check (i+0) dominates equivalent check below + break; + } + i++; + } + + // A control flow that stops IfNode::up_one_dom() + if ((i % 2)== 0) { + if ((array[i] % 2) == 0) { + dummy = i; + } + } + + // IfNode::Ideal will rewrite some range checks if Compile::allow_range_check_smearing + if (array[i-1] == 9) { // range check (i-1) unchanged + int res = array[i-3]; // range check (i-3) unchanged + res += array[i]; // range check (i+0) unchanged + res += array[i-2]; // removed redundant range check + // the previous access might be hoisted by + // PhaseIdealLoop::split_if_with_blocks_post because + // it appears to have the same guard, but it also + // depends on the previous guards + return res; + } + i++; + } + } + + static public void main(String[] args) { + int[] array = { 0, 1, 2, -3, 4, 5, -2, 7, 8, 9, -1 }; + for (int i = 0; i < 20000; i++) { + m1(array, 0); + } + array[0] = -1; + try { + m1(array, 0); + } catch(ArrayIndexOutOfBoundsException aioobe) {} + } +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/compiler/uncommontrap/TestDeoptOOM.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/compiler/uncommontrap/TestDeoptOOM.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,426 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 6898462 + * @summary failed reallocations of scalar replaced objects during deoptimization causes crash + * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=exclude,TestDeoptOOM::main -XX:CompileCommand=exclude,TestDeoptOOM::m9_1 -Xmx128M TestDeoptOOM + * + */ + +public class TestDeoptOOM { + + long f1; + long f2; + long f3; + long f4; + long f5; + + static class LinkedList { + LinkedList l; + long[] array; + LinkedList(LinkedList l, int size) { + array = new long[size]; + this.l = l; + } + } + + static LinkedList ll; + + static void consume_all_memory() { + int size = 128 * 1024 * 1024; + while(size > 0) { + try { + while(true) { + ll = new LinkedList(ll, size); + } + } catch(OutOfMemoryError oom) { + } + size = size / 2; + } + } + + static void free_memory() { + ll = null; + } + + static TestDeoptOOM m1(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m1"); + } + return null; + } + + static TestDeoptOOM m2_1(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m2_1"); + } + return null; + } + + static TestDeoptOOM m2(boolean deopt) { + try { + return m2_1(deopt); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m2"); + } + return null; + } + + static TestDeoptOOM m3_3(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3_3"); + } + return null; + } + + static boolean m3_2(boolean deopt) { + try { + return m3_3(deopt) != null; + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3_2"); + } + return false; + } + + static TestDeoptOOM m3_1(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (m3_2(deopt)) { + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3_1"); + } + return null; + } + + static TestDeoptOOM m3(boolean deopt) { + try { + return m3_1(deopt); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m3"); + } + return null; + } + + static TestDeoptOOM m4(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + tdoom.f1 = 1l; + tdoom.f2 = 2l; + tdoom.f3 = 3l; + return tdoom; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m4"); + } + return null; + } + + static TestDeoptOOM m5(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + synchronized(tdoom) { + if (deopt) { + return tdoom; + } + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m5"); + } + return null; + } + + synchronized TestDeoptOOM m6_1(boolean deopt) { + if (deopt) { + return this; + } + return null; + } + + static TestDeoptOOM m6(boolean deopt) { + try { + TestDeoptOOM tdoom = new TestDeoptOOM(); + return tdoom.m6_1(deopt); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m6"); + } + return null; + } + + static TestDeoptOOM m7_1(boolean deopt, Object lock) { + try { + synchronized(lock) { + TestDeoptOOM tdoom = new TestDeoptOOM(); + if (deopt) { + return tdoom; + } + } + } catch(OutOfMemoryError oom) { + free_memory(); + 
System.out.println("OOM caught in m7_1"); + } + return null; + } + + static TestDeoptOOM m7(boolean deopt, Object lock) { + try { + return m7_1(deopt, lock); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m7"); + } + return null; + } + + static class A { + long f1; + long f2; + long f3; + long f4; + long f5; + } + + static class B { + long f1; + long f2; + long f3; + long f4; + long f5; + + A a; + } + + static B m8(boolean deopt) { + try { + A a = new A(); + B b = new B(); + b.a = a; + if (deopt) { + return b; + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m8"); + } + return null; + } + + static void m9_1(int i) { + if (i > 90000) { + consume_all_memory(); + } + } + + static TestDeoptOOM m9() { + try { + for (int i = 0; i < 100000; i++) { + TestDeoptOOM tdoom = new TestDeoptOOM(); + m9_1(i); + if (i > 90000) { + return tdoom; + } + } + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in m1"); + } + return null; + } + + public static void main(String[] args) { + for (int i = 0; i < 20000; i++) { + m1(false); + } + + consume_all_memory(); + + try { + m1(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main " + oom.getMessage()); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m2(false); + } + + consume_all_memory(); + + try { + m2(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m3(false); + } + + consume_all_memory(); + + try { + m3(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m4(false); + } + + consume_all_memory(); + + try { + m4(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m5(false); + } + + consume_all_memory(); + + try { + m5(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + for (int i = 0; i < 20000; i++) { + m6(false); + } + + consume_all_memory(); + + try { + m6(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + final Object lock = new Object(); + + for (int i = 0; i < 20000; i++) { + m7(false, lock); + } + + consume_all_memory(); + + try { + m7(true, lock); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + Thread thread = new Thread() { + public void run() { + System.out.println("Acquiring lock"); + synchronized(lock) { + System.out.println("Lock acquired"); + } + System.out.println("Lock released"); + } + }; + thread.start(); + try { + thread.join(); + } catch(InterruptedException ie) { + } + + for (int i = 0; i < 20000; i++) { + m8(false); + } + + consume_all_memory(); + + try { + m8(true); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + + try { + m9(); + } catch(OutOfMemoryError oom) { + free_memory(); + System.out.println("OOM caught in main"); + } + + free_memory(); + } +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/compiler/uncommontrap/TraceDeoptimizationNoRealloc.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ 
b/hotspot/test/compiler/uncommontrap/TraceDeoptimizationNoRealloc.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8067144 + * @summary -XX:+TraceDeoptimization tries to print realloc'ed objects even when there are none + * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:+IgnoreUnrecognizedVMOptions -XX:+TraceDeoptimization TraceDeoptimizationNoRealloc + * + */ + +public class TraceDeoptimizationNoRealloc { + + static void m(boolean some_condition) { + if (some_condition) { + return; + } + } + + + static public void main(String[] args) { + for (int i = 0; i < 20000; i++) { + m(false); + } + m(true); + } +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/gc/TestCardTablePageCommits.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/TestCardTablePageCommits.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,49 @@ +/* +* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. 
+*/ + +import com.oracle.java.testlibrary.JDKToolFinder; +import com.oracle.java.testlibrary.OutputAnalyzer; +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.Platform; + +/* + * @test TestCardTablePageCommits + * @key gc + * @bug 8059066 + * @summary Tests that the card table does not commit the same page twice + * @library /testlibrary + * @run driver TestCardTablePageCommits + */ +public class TestCardTablePageCommits { + public static void main(String args[]) throws Exception { + // The test is run with a small heap to make sure all pages in the card + // table gets committed. Need 8 MB heap to trigger the bug on SPARC + // because of 8kB pages, assume 4 KB pages for all other CPUs. + String Xmx = Platform.isSparc() ? "-Xmx8m" : "-Xmx4m"; + + String[] opts = {Xmx, "-XX:NativeMemoryTracking=detail", "-XX:+UseParallelGC", "-version"}; + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(opts); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + } +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java --- a/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/test/runtime/SharedArchiveFile/LimitSharedSizes.java Thu Dec 11 23:06:14 2014 -0800 @@ -30,40 +30,96 @@ import com.oracle.java.testlibrary.*; public class LimitSharedSizes { + static enum Region { + RO, RW, MD, MC + } + private static class SharedSizeTestData { public String optionName; public String optionValue; public String expectedErrorMsg; - public SharedSizeTestData(String name, String value, String msg) { - optionName = name; + public SharedSizeTestData(Region region, String value, String msg) { + optionName = getName(region); optionValue = value; expectedErrorMsg = msg; } + + public SharedSizeTestData(Region region, String msg) { + optionName = getName(region); + optionValue = getValue(region); + expectedErrorMsg = msg; + } + + private String getName(Region region) { + String name; + switch (region) { + case RO: + name = "-XX:SharedReadOnlySize"; + break; + case RW: + name = "-XX:SharedReadWriteSize"; + break; + case MD: + name = "-XX:SharedMiscDataSize"; + break; + case MC: + name = "-XX:SharedMiscCodeSize"; + break; + default: + name = "Unknown"; + break; + } + return name; + } + + private String getValue(Region region) { + String value; + switch (region) { + case RO: + value = Platform.is64bit() ? "9M" : "8M"; + break; + case RW: + value = Platform.is64bit() ? "12M" : "7M"; + break; + case MD: + value = Platform.is64bit() ? "4M" : "2M"; + break; + case MC: + value = "120k"; + break; + default: + value = "0M"; + break; + } + return value; + } } private static final SharedSizeTestData[] testTable = { - // values in this part of the test table should cause failure - // (shared space sizes are deliberately too small) - new SharedSizeTestData("-XX:SharedReadOnlySize", "4M", "read only"), - new SharedSizeTestData("-XX:SharedReadWriteSize","4M", "read write"), - - // Known issue, JDK-8038422 (assert() on Windows) - // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"), - - // Too small of a misc code size should not cause a vm crash. - // It should result in the following error message: + // Too small of a region size should not cause a vm crash. + // It should result in an error message like the following: // The shared miscellaneous code space is not large enough // to preload requested classes. 
Use -XX:SharedMiscCodeSize= // to increase the initial size of shared miscellaneous code space. - new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"), + new SharedSizeTestData(Region.RO, "4M", "read only"), + new SharedSizeTestData(Region.RW, "4M", "read write"), + new SharedSizeTestData(Region.MD, "50k", "miscellaneous data"), + new SharedSizeTestData(Region.MC, "20k", "miscellaneous code"), // these values are larger than default ones, but should // be acceptable and not cause failure - new SharedSizeTestData("-XX:SharedReadOnlySize", "20M", null), - new SharedSizeTestData("-XX:SharedReadWriteSize", "20M", null), - new SharedSizeTestData("-XX:SharedMiscDataSize", "20M", null), - new SharedSizeTestData("-XX:SharedMiscCodeSize", "20M", null) + new SharedSizeTestData(Region.RO, "20M", null), + new SharedSizeTestData(Region.RW, "20M", null), + new SharedSizeTestData(Region.MD, "20M", null), + new SharedSizeTestData(Region.MC, "20M", null), + + // test with sizes which just meet the minimum required sizes + // the following tests also attempt to use the shared archive + new SharedSizeTestData(Region.RO, "UseArchive"), + new SharedSizeTestData(Region.RW, "UseArchive"), + new SharedSizeTestData(Region.MD, "UseArchive"), + new SharedSizeTestData(Region.MC, "UseArchive") }; public static void main(String[] args) throws Exception { @@ -82,10 +138,39 @@ OutputAnalyzer output = new OutputAnalyzer(pb.start()); if (td.expectedErrorMsg != null) { - output.shouldContain("The shared " + td.expectedErrorMsg - + " space is not large enough"); + if (!td.expectedErrorMsg.equals("UseArchive")) { + output.shouldContain("The shared " + td.expectedErrorMsg + + " space is not large enough"); + + output.shouldHaveExitValue(2); + } else { + output.shouldNotContain("space is not large enough"); + output.shouldHaveExitValue(0); + + // try to use the archive + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-XX:+PrintSharedArchiveAndExit", + "-version"); - output.shouldHaveExitValue(2); + try { + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is valid"); + } catch (RuntimeException e) { + // if sharing failed due to ASLR or similar reasons, + // check whether sharing was attempted at all (UseSharedSpaces) + if ((output.getOutput().contains("Unable to use shared archive") || + output.getOutput().contains("Unable to map ReadOnly shared space at required address.") || + output.getOutput().contains("Unable to map ReadWrite shared space at required address.") || + output.getOutput().contains("Unable to reserve shared space at required address")) && + output.getExitValue() == 1) { + System.out.println("Unable to use shared archive: test not executed; assumed passed"); + return; + } + } + output.shouldHaveExitValue(0); + } } else { output.shouldNotContain("space is not large enough"); output.shouldHaveExitValue(0); diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/SharedArchiveFile/PrintSharedArchiveAndExit.java Thu Dec 11 23:06:14 2014 -0800 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8066670 + * @summary Testing -XX:+PrintSharedArchiveAndExit option + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; + +public class PrintSharedArchiveAndExit { + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + try { + output.shouldContain("Loading classes to share"); + output.shouldHaveExitValue(0); + + // (1) With a valid archive + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is valid"); + output.shouldNotContain("java version"); // Should not print JVM version + output.shouldHaveExitValue(0); // Should report success in error code. + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is valid"); + output.shouldNotContain("Usage:"); // Should not print JVM help message + output.shouldHaveExitValue(0); // Should report success in error code. + + // (2) With an invalid archive (boot class path has been prepended) + pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/p:foo.jar", + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit", "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is invalid"); + output.shouldNotContain("java version"); // Should not print JVM version + output.shouldHaveExitValue(1); // Should report failure in error code. + + pb = ProcessTools.createJavaProcessBuilder( + "-Xbootclasspath/p:foo.jar", + "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", + "-XX:+PrintSharedArchiveAndExit"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("archive is invalid"); + output.shouldNotContain("Usage:"); // Should not print JVM help message + output.shouldHaveExitValue(1); // Should report failure in error code. 
+ } catch (RuntimeException e) { + e.printStackTrace(); + output.shouldContain("Unable to use shared archive"); + output.shouldHaveExitValue(1); + } + } +} diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/serviceability/sa/jmap-hashcode/Test8028623.java --- a/hotspot/test/serviceability/sa/jmap-hashcode/Test8028623.java Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/test/serviceability/sa/jmap-hashcode/Test8028623.java Thu Dec 11 23:06:14 2014 -0800 @@ -41,12 +41,12 @@ public class Test8028623 { - public static int à = 1; + public static int \u00CB = 1; public static String dumpFile = "heap.out"; public static void main (String[] args) { - System.out.println(Ã); + System.out.println(\u00CB); try { if (!Platform.shouldSAAttach()) { diff -r 6494b13f88a8 -r 62648789b8ba hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java --- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Wed Jul 05 20:11:08 2017 +0200 +++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Dec 11 23:06:14 2014 -0800 @@ -153,6 +153,14 @@ public native int getMethodEntryBci(Executable method); public native Object[] getNMethod(Executable method, boolean isOsr); public native long allocateCodeBlob(int size, int type); + public long allocateCodeBlob(long size, int type) { + int intSize = (int) size; + if ((long) intSize != size || size < 0) { + throw new IllegalArgumentException( + "size argument has illegal value " + size); + } + return allocateCodeBlob( intSize, type); + } public native void freeCodeBlob(long addr); public void forceNMethodSweep() { try {