# HG changeset patch
# User trims
# Date 1287619645 25200
# Node ID 8581cacbdee73d71b728dc15fa5f6ce0bb33d61c
# Parent  9cb24917216bc68997154f6e9566c3de62acb2f4
# Parent  1068100bfbff168adba34c902ae101fbfb00cc34
Merge

diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -664,7 +664,7 @@
   // Use temps to avoid kills
   LIR_Opr t1 = FrameMap::G1_opr;
   LIR_Opr t2 = FrameMap::G3_opr;
-  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
+  LIR_Opr addr = new_pointer_register();
 
   // get address of field
   obj.load_item();
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/sparc/vm/globals_sparc.hpp
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -62,3 +62,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
--- a/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -499,7 +499,7 @@
   Register new_val_reg = new_val()->as_register();
   __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
   __ jcc(Assembler::equal, _continuation);
-  ce->store_parameter(addr()->as_register(), 0);
+  ce->store_parameter(addr()->as_pointer_register(), 0);
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
   __ jmp(_continuation);
 }
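The compiled-code hunks above (and the matching x86 LIRGenerator hunk below) all make the same point: the address handed to the CAS and to the G1 post-barrier stub is an interior pointer (object base plus field offset), not an object reference, so it belongs in a raw pointer register rather than a T_OBJECT-typed one, which the GC would presumably treat as a relocatable (and, with compressed oops, encodable) heap reference. A minimal standalone C++ sketch of the distinction; Object and field_address are hypothetical names, not HotSpot API:

    #include <cstddef>

    struct Object { long fields[8]; };   // stand-in for a heap object

    // The CAS target is obj + offset: a raw interior pointer into the
    // object, never an object reference in its own right.
    void* field_address(Object* obj, std::ptrdiff_t offset_in_bytes) {
      return reinterpret_cast<char*>(obj) + offset_in_bytes;
    }
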
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -765,7 +765,7 @@
     ShouldNotReachHere();
   }
 
-  LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
+  LIR_Opr addr = new_pointer_register();
   LIR_Address* a;
   if(offset.result()->is_constant()) {
     a = new LIR_Address(obj.result(),
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/x86/vm/globals_x86.hpp
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -63,3 +63,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/x86/vm/methodHandles_x86.cpp
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/cpu/zero/vm/globals_zero.hpp
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -45,3 +45,5 @@
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
+
+define_pd_global(bool, UseMembar,            false);
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/os/linux/vm/attachListener_linux.cpp
--- a/hotspot/src/os/linux/vm/attachListener_linux.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/os/linux/vm/attachListener_linux.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -176,10 +176,10 @@
 
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
-  if (n <= (int)UNIX_PATH_MAX) {
+  if (n < (int)UNIX_PATH_MAX) {
     n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
   }
-  if (n > (int)UNIX_PATH_MAX) {
+  if (n >= (int)UNIX_PATH_MAX) {
     return -1;
   }
 
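The attachListener_linux.cpp hunk above tightens an off-by-one: snprintf returns the length the formatted string would have had, so a return value equal to the buffer size already means the output was truncated. A standalone sketch of the corrected check; make_socket_path and the literal path are illustrative, not the HotSpot code:

    #include <cstdio>

    // Returns 0 on success, -1 if formatting failed or the path was truncated.
    int make_socket_path(char* buf, std::size_t len, int pid) {
      int n = std::snprintf(buf, len, "/tmp/.java_pid%d", pid);
      if (n < 0 || (std::size_t)n >= len) {  // '>=', not '>': n == len is truncated
        return -1;
      }
      return 0;
    }
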
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/os/linux/vm/os_linux.cpp
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -827,8 +827,10 @@
 
   switch (thr_type) {
   case os::java_thread:
-    // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
-    if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
+    // Java threads use ThreadStackSize which default value can be
+    // changed with the flag -Xss
+    assert (JavaThread::stack_size_at_create() > 0, "this should be set");
+    stack_size = JavaThread::stack_size_at_create();
     break;
   case os::compiler_thread:
     if (CompilerThreadStackSize > 0) {
@@ -3922,12 +3924,21 @@
   Linux::signal_sets_init();
   Linux::install_signal_handlers();
 
+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
+
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-      threadStackSizeInBytes < Linux::min_stack_allowed) {
+      threadStackSizeInBytes < os::Linux::min_stack_allowed) {
         tty->print_cr("\nThe stack size specified is too small, "
                       "Specify at least %dk",
-                      Linux::min_stack_allowed / K);
+                      os::Linux::min_stack_allowed/ K);
         return JNI_ERR;
   }
 
@@ -4839,7 +4850,7 @@
 
   // Next, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
     return;
   }
   if (time > 0) {
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/os/solaris/vm/os_solaris.cpp
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4878,18 +4878,17 @@
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
   // size.  Add a page for compiler2 recursion in main thread.
-  // Add in BytesPerWord times page size to account for VM stack during
+  // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
-  guarantee((Solaris::min_stack_allowed >=
-    (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
-     COMPILER2_PRESENT(+1)) * page_size),
-    "need to increase Solaris::min_stack_allowed on this platform");
+  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
-    threadStackSizeInBytes < Solaris::min_stack_allowed) {
+    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
     tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
-                  Solaris::min_stack_allowed/K);
+                  os::Solaris::min_stack_allowed/K);
     return JNI_ERR;
   }
 
@@ -5837,7 +5836,7 @@
 
   // First, demultiplex/decode time arguments
   timespec absTime;
-  if (time < 0) { // don't wait at all
+  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
     return;
   }
   if (time > 0) {
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/os/windows/vm/os_windows.cpp
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -3311,7 +3311,6 @@
   }
 }
 
-
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
   // Allocate a single page and mark it as readable for safepoint polling
@@ -3390,6 +3389,21 @@
     actual_reserve_size = default_reserve_size;
   }
 
+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  size_t min_stack_allowed =
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
+  if (actual_reserve_size < min_stack_allowed) {
+    tty->print_cr("\nThe stack size specified is too small, "
+                  "Specify at least %dk",
+                  min_stack_allowed / K);
+    return JNI_ERR;
+  }
+
   JavaThread::set_stack_size_at_create(stack_commit_size);
 
   // Calculate theoretical max. size of Threads to guard gainst artifical
@@ -3992,7 +4006,7 @@
   if (time < 0) { // don't wait
     return;
   }
-  else if (time == 0) {
+  else if (time == 0 && !isAbsolute) {
     time = INFINITE;
   }
   else if (isAbsolute) {
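The three os_*.cpp hunks above replace a build-time guarantee with a computed floor on thread stack size, applied identically on Linux, Solaris and Windows. A standalone sketch of the arithmetic, with the flag values passed in as plain parameters (the real code reads VM flags and uses the COMPILER2_PRESENT macro):

    #include <algorithm>
    #include <cstddef>

    // Yellow/red/shadow guard pages, plus 2*BytesPerWord pages of slack for
    // class initialization (32- vs 64-bit), plus one page for C2 recursion.
    std::size_t min_stack_allowed(std::size_t current_min, std::size_t page_size,
                                  std::size_t yellow, std::size_t red,
                                  std::size_t shadow, std::size_t bytes_per_word,
                                  bool compiler2) {
      std::size_t pages = yellow + red + shadow + 2 * bytes_per_word
                        + (compiler2 ? 1 : 0);
      return std::max(current_min, pages * page_size);
    }

A stack request below this floor now fails startup cleanly with JNI_ERR instead of tripping over StackOverflowError initialization later.
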
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -1350,7 +1350,6 @@
     addr = ptr;
   }
   assert(addr->is_register(), "must be a register at this point");
-  assert(addr->type() == T_OBJECT, "addr should point to an object");
 
   LIR_Opr xor_res = new_pointer_register();
   LIR_Opr xor_shift_res = new_pointer_register();
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -791,7 +791,7 @@
   int _worker_i;
 public:
   RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
-    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
+    _cl(g1->g1_rem_set(), worker_i),
     _worker_i(worker_i),
     _g1h(g1)
   { }
@@ -890,7 +890,7 @@
   abandon_cur_alloc_region();
   abandon_gc_alloc_regions();
   assert(_cur_alloc_region == NULL, "Invariant.");
-  g1_rem_set()->as_HRInto_G1RemSet()->cleanupHRRS();
+  g1_rem_set()->cleanupHRRS();
   tear_down_region_lists();
   set_used_regions_to_need_zero_fill();
 
@@ -1506,15 +1506,11 @@
   }
 
   // Also create a G1 rem set.
-  if (G1UseHRIntoRS) {
-    if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-      _g1_rem_set = new HRInto_G1RemSet(this, (CardTableModRefBS*)mr_bs());
-    } else {
-      vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-      return JNI_ENOMEM;
-    }
+  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
+    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
   } else {
-    _g1_rem_set = new StupidG1RemSet(this);
+    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
+    return JNI_ENOMEM;
   }
 
   // Carve out the G1 part of the heap.
@@ -2706,8 +2702,7 @@
 }
 
 size_t G1CollectedHeap::cards_scanned() {
-  HRInto_G1RemSet* g1_rset = (HRInto_G1RemSet*) g1_rem_set();
-  return g1_rset->cardsScanned();
+  return g1_rem_set()->cardsScanned();
 }
 
 void
@@ -3850,6 +3845,54 @@
             undo_waste() * HeapWordSize / K);
 }
 
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (refs()->pop_overflow(ref)) {
+      deal_with_reference(ref);
+    }
+    while (refs()->pop_local(ref)) {
+      deal_with_reference(ref);
+    }
+  } while (!refs()->is_empty());
+}
+
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
   _par_scan_state(par_scan_state) { }
@@ -4052,38 +4095,39 @@
     : _g1h(g1h), _par_scan_state(par_scan_state),
       _queues(queues), _terminator(terminator) {}
 
-  void do_void() {
-    G1ParScanThreadState* pss = par_scan_state();
-    while (true) {
+  void do_void();
+
+private:
+  inline bool offer_termination();
+};
+
+bool G1ParEvacuateFollowersClosure::offer_termination() {
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->start_term_time();
+  const bool res = terminator()->offer_termination();
+  pss->end_term_time();
+  return res;
+}
+
+void G1ParEvacuateFollowersClosure::do_void() {
+  StarTask stolen_task;
+  G1ParScanThreadState* const pss = par_scan_state();
+  pss->trim_queue();
+
+  do {
+    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
+      assert(pss->verify_task(stolen_task), "sanity");
+      if (stolen_task.is_narrow()) {
+        pss->push_on_queue((narrowOop*) stolen_task);
+      } else {
+        pss->push_on_queue((oop*) stolen_task);
+      }
       pss->trim_queue();
-
-      StarTask stolen_task;
-      if (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-        // slightly paranoid tests; I'm trying to catch potential
-        // problems before we go into push_on_queue to know where the
-        // problem is coming from
-        assert((oop*)stolen_task != NULL, "Error");
-        if (stolen_task.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*) stolen_task;
-          assert(has_partial_array_mask(p) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "Error");
-          pss->push_on_queue(p);
-        } else {
-          oop* p = (oop*) stolen_task;
-          assert(has_partial_array_mask(p) || _g1h->is_in_g1_reserved(*p), "Error");
-          pss->push_on_queue(p);
-        }
-        continue;
-      }
-      pss->start_term_time();
-      if (terminator()->offer_termination()) break;
-      pss->end_term_time();
     }
-    pss->end_term_time();
-    pss->retire_alloc_buffers();
-  }
-};
+  } while (!offer_termination());
+
+  pss->retire_alloc_buffers();
+}
 
 class G1ParTask : public AbstractGangTask {
 protected:
@@ -4182,8 +4226,7 @@
       pss.print_termination_stats(i);
     }
 
-    assert(pss.refs_to_scan() == 0, "Task queue should be empty");
-    assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
+    assert(pss.refs()->is_empty(), "should be empty");
 
     double end_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
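The new trim_queue() and G1ParEvacuateFollowersClosure::do_void() above keep the drain discipline that the rationale comment removed from the header diff below spells out: the private overflow stack is emptied before the shared, stealable task queue, and termination is only offered once local work is exhausted and stealing fails. A standalone sketch with simplified containers (the real code uses a lock-free task queue plus an overflow stack, not std types):

    #include <deque>
    #include <vector>

    struct Task { int payload; };

    struct WorkerQueue {
      std::vector<Task> overflow;  // private: other workers cannot steal here
      std::deque<Task>  local;     // shared: other workers steal from this

      // deal_with() may push further tasks, hence the outer loop.
      template <typename Fn> void trim(Fn deal_with) {
        do {
          while (!overflow.empty()) {      // drain "invisible" work first,
            Task t = overflow.back();      // leaving visible work stealable
            overflow.pop_back();
            deal_with(t);
          }
          while (!local.empty()) {
            Task t = local.front();
            local.pop_front();
            deal_with(t);
          }
        } while (!overflow.empty() || !local.empty());
      }
    };
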
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -1651,49 +1651,17 @@
   size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
   size_t undo_waste() const         { return _undo_waste; }
 
+#ifdef ASSERT
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
   template <class T> void push_on_queue(T* ref) {
-    assert(ref != NULL, "invariant");
-    assert(has_partial_array_mask(ref) ||
-           _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
-#ifdef ASSERT
-    if (has_partial_array_mask(ref)) {
-      oop p = clear_partial_array_mask(ref);
-      // Verify that we point into the CS
-      assert(_g1h->obj_in_cs(p), "Should be in CS");
-    }
-#endif
+    assert(verify_ref(ref), "sanity");
     refs()->push(ref);
   }
 
-  void pop_from_queue(StarTask& ref) {
-    if (refs()->pop_local(ref)) {
-      assert((oop*)ref != NULL, "pop_local() returned true");
-      assert(UseCompressedOops || !ref.is_narrow(), "Error");
-      assert(has_partial_array_mask((oop*)ref) ||
-             _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
-                                                     : oopDesc::load_decode_heap_oop((oop*)ref)),
-             "invariant");
-    } else {
-      StarTask null_task;
-      ref = null_task;
-    }
-  }
-
-  void pop_from_overflow_queue(StarTask& ref) {
-    StarTask new_ref;
-    refs()->pop_overflow(new_ref);
-    assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
-    assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
-    assert(has_partial_array_mask((oop*)new_ref) ||
-           _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
-                                                       : oopDesc::load_decode_heap_oop((oop*)new_ref)),
-           "invariant");
-    ref = new_ref;
-  }
-
-  int refs_to_scan()            { return (int)refs()->size(); }
-  int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
-
   template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
     if (G1DeferredRSUpdate) {
       deferred_rs_update(from, p, tid);
@@ -1818,59 +1786,15 @@
     }
   }
 
-public:
-  void trim_queue() {
-    // I've replicated the loop twice, first to drain the overflow
-    // queue, second to drain the task queue. This is better than
-    // having a single loop, which checks both conditions and, inside
-    // it, either pops the overflow queue or the task queue, as each
-    // loop is tighter. Also, the decision to drain the overflow queue
-    // first is not arbitrary, as the overflow queue is not visible
-    // to the other workers, whereas the task queue is. So, we want to
-    // drain the "invisible" entries first, while allowing the other
-    // workers to potentially steal the "visible" entries.
-
-    while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      while (overflowed_refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_overflow_queue(ref_to_scan);
-        // We shouldn't have pushed it on the queue if it was not
-        // pointing into the CSet.
-        assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
-        if (ref_to_scan.is_narrow()) {
-          assert(UseCompressedOops, "Error");
-          narrowOop* p = (narrowOop*)ref_to_scan;
-          assert(!has_partial_array_mask(p) &&
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        } else {
-          oop* p = (oop*)ref_to_scan;
-          assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
-                 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-          deal_with_reference(p);
-        }
-      }
-
-      while (refs_to_scan() > 0) {
-        StarTask ref_to_scan;
-        assert((oop*)ref_to_scan == NULL, "Constructed above");
-        pop_from_queue(ref_to_scan);
-        if ((oop*)ref_to_scan != NULL) {
-          if (ref_to_scan.is_narrow()) {
-            assert(UseCompressedOops, "Error");
-            narrowOop* p = (narrowOop*)ref_to_scan;
-            assert(!has_partial_array_mask(p) &&
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          } else {
-            oop* p = (oop*)ref_to_scan;
-            assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
-                   _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
-            deal_with_reference(p);
-          }
-        }
-      }
+  void deal_with_reference(StarTask ref) {
+    assert(verify_task(ref), "sanity");
+    if (ref.is_narrow()) {
+      deal_with_reference((narrowOop*)ref);
+    } else {
+      deal_with_reference((oop*)ref);
     }
   }
+
+public:
+  void trim_queue();
 };
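The verify_task()/deal_with_reference(StarTask) pair in the header diff just above dispatches once on whether a queue entry holds a narrow or a wide oop location. A standalone sketch of the tagged-pointer idea (simplified; HotSpot's StarTask packs the discriminant into a low pointer bit, but these names and the exact one-bit scheme are illustrative):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t narrow_ref;   // stand-in for narrowOop
    typedef void*    wide_ref;     // stand-in for oop

    class TaggedRef {
      uintptr_t _holder;           // pointer with a 1-bit "narrow" tag
    public:
      explicit TaggedRef(wide_ref* p)
        : _holder(reinterpret_cast<uintptr_t>(p)) {}
      explicit TaggedRef(narrow_ref* p)
        : _holder(reinterpret_cast<uintptr_t>(p) | 1u) {}
      bool is_narrow() const { return (_holder & 1u) != 0; }
      narrow_ref* as_narrow() const {
        assert(is_narrow());
        return reinterpret_cast<narrow_ref*>(_holder & ~uintptr_t(1));
      }
      wide_ref* as_wide() const {
        assert(!is_narrow());
        return reinterpret_cast<wide_ref*>(_holder);
      }
    };
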
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -25,8 +25,6 @@
 class HeapRegion;
 class G1CollectedHeap;
 class G1RemSet;
-class HRInto_G1RemSet;
-class G1RemSet;
 class ConcurrentMark;
 class DirtyCardToOopClosure;
 class CMBitMap;
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -97,13 +97,6 @@
   }
 };
 
-void
-StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
-                                            int worker_i) {
-  IntoCSRegionClosure rc(_g1, oc);
-  _g1->heap_region_iterate(&rc);
-}
-
 class VerifyRSCleanCardOopClosure: public OopClosure {
   G1CollectedHeap* _g1;
 public:
@@ -119,8 +112,9 @@
   }
 };
 
-HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
-  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
+G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
+  : _g1(g1), _conc_refine_cards(0),
+    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
     _traversal_in_progress(false),
     _cset_rs_update_cl(NULL),
@@ -134,7 +128,7 @@
   }
 }
 
-HRInto_G1RemSet::~HRInto_G1RemSet() {
+G1RemSet::~G1RemSet() {
   delete _seq_task;
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");
@@ -277,7 +271,7 @@
 //          p threads
 // Then thread t will start at region t * floor (n/p)
 
-HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
+HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   HeapRegion* result = _g1p->collection_set();
   if (ParallelGCThreads > 0) {
     size_t cs_size = _g1p->collection_set_size();
@@ -290,7 +284,7 @@
   return result;
 }
 
-void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
+void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = calculateStartRegion(worker_i);
 
@@ -340,7 +334,7 @@
   }
 };
 
-void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@@ -439,12 +433,11 @@
   }
 };
 
-void HRInto_G1RemSet::cleanupHRRS() {
+void G1RemSet::cleanupHRRS() {
   HeapRegionRemSet::cleanup();
 }
 
-void
-HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                              int worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -508,8 +501,7 @@
   _cset_rs_update_cl[worker_i] = NULL;
 }
 
-void HRInto_G1RemSet::
-prepare_for_oops_into_collection_set_do() {
+void G1RemSet::prepare_for_oops_into_collection_set_do() {
 #if G1_REM_SET_LOGGING
   PrintRSClosure cl;
   _g1->collection_set_iterate(&cl);
@@ -581,7 +573,7 @@
   //     RSet updating,
   //   * the post-write barrier shouldn't be logging updates to young
   //     regions (but there is a situation where this can happen - see
-  //     the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
+  //     the comment in G1RemSet::concurrentRefineOneCard below -
   //     that should not be applicable here), and
   //   * during actual RSet updating, the filtering of cards in young
   //     regions in HeapRegion::oops_on_card_seq_iterate_careful is
@@ -601,7 +593,7 @@
   }
 };
 
-void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
+void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   guarantee( _cards_scanned != NULL, "invariant" );
   _total_cards_scanned = 0;
   for (uint i = 0; i < n_workers(); ++i)
@@ -692,12 +684,12 @@
   }
 };
 
-void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
+void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_iterate(&scrub_cl);
 }
 
-void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
+void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                                 int worker_num, int claim_val) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
@@ -741,7 +733,7 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                                    bool check_for_refs_into_cset) {
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);
@@ -820,7 +812,7 @@
   return trigger_cl.value();
 }
 
-bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
+bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                               bool check_for_refs_into_cset) {
   // If the card is no longer dirty, nothing to do.
   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
@@ -995,7 +987,7 @@
   }
 };
 
-void HRInto_G1RemSet::print_summary_info() {
+void G1RemSet::print_summary_info() {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
 
 #if CARD_REPEAT_HISTO
@@ -1029,30 +1021,26 @@
   g1->concurrent_g1_refine()->threads_do(&p);
   gclog_or_tty->print_cr("");
 
-  if (G1UseHRIntoRS) {
-    HRRSStatsIter blk;
-    g1->heap_region_iterate(&blk);
-    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
-                           "  Max = " SIZE_FORMAT "K.",
-                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
-    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
-                           " free_lists = " SIZE_FORMAT "K.",
-                           HeapRegionRemSet::static_mem_size()/K,
-                           HeapRegionRemSet::fl_mem_size()/K);
-    gclog_or_tty->print_cr("    %d occupied cards represented.",
-                           blk.occupied());
-    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
-                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
-                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
-                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
-                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
-    gclog_or_tty->print_cr("    Did %d coarsenings.",
-                           HeapRegionRemSet::n_coarsenings());
-
-  }
+  HRRSStatsIter blk;
+  g1->heap_region_iterate(&blk);
+  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
+                         "  Max = " SIZE_FORMAT "K.",
+                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
+  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
+                         " free_lists = " SIZE_FORMAT "K.",
+                         HeapRegionRemSet::static_mem_size()/K,
+                         HeapRegionRemSet::fl_mem_size()/K);
+  gclog_or_tty->print_cr("    %d occupied cards represented.",
+                         blk.occupied());
+  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
+                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
+                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
+                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
+                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
+  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
 }
 
-void HRInto_G1RemSet::prepare_for_verify() {
+void G1RemSet::prepare_for_verify() {
   if (G1HRRSFlushLogBuffersOnVerify &&
       (VerifyBeforeGC || VerifyAfterGC)
       &&  !_g1->full_collection()) {
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -27,107 +27,18 @@
 
 class G1CollectedHeap;
 class CardTableModRefBarrierSet;
-class HRInto_G1RemSet;
 class ConcurrentG1Refine;
 
+// A G1RemSet in which each heap region has a rem set that records the
+// external heap references into it.  Uses a mod ref bs to track updates,
+// so that they can be used to update the individual region remsets.
+
 class G1RemSet: public CHeapObj {
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
   size_t n_workers();
 
-public:
-  G1RemSet(G1CollectedHeap* g1) :
-    _g1(g1), _conc_refine_cards(0)
-  {}
-
-  // Invoke "blk->do_oop" on all pointers into the CS in object in regions
-  // outside the CS (having invoked "blk->set_region" to set the "from"
-  // region correctly beforehand.) The "worker_i" param is for the
-  // parallel case where the number of the worker thread calling this
-  // function can be helpful in partitioning the work to be done. It
-  // should be the same as the "i" passed to the calling thread's
-  // work(i) function. In the sequential case this param will be ingored.
-  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                           int worker_i) = 0;
-
-  // Prepare for and cleanup after an oops_into_collection_set_do
-  // call.  Must call each of these once before and after (in sequential
-  // code) any threads call oops into collection set do.  (This offers an
-  // opportunity to sequential setup and teardown of structures needed by a
-  // parallel iteration over the CS's RS.)
-  virtual void prepare_for_oops_into_collection_set_do() = 0;
-  virtual void cleanup_after_oops_into_collection_set_do() = 0;
-
-  // If "this" is of the given subtype, return "this", else "NULL".
-  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
-
-  // Record, if necessary, the fact that *p (where "p" is in region "from",
-  // and is, a fortiori, required to be non-NULL) has changed to its new value.
-  virtual void write_ref(HeapRegion* from, oop* p) = 0;
-  virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
-  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
-  virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
-
-  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
-  // or card, respectively, such that a region or card with a corresponding
-  // 0 bit contains no part of any live object.  Eliminates any remembered
-  // set entries that correspond to dead heap ranges.
-  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
-  // Like the above, but assumes is called in parallel: "worker_num" is the
-  // parallel thread id of the current thread, and "claim_val" is the
-  // value that should be used to claim heap regions.
-  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                         int worker_num, int claim_val) = 0;
-
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
-  // With some implementations of this routine, when check_for_refs_into_cset
-  // is true, a true result may be returned if the given card contains oops
-  // that have references into the current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset) {
-    return false;
-  }
-
-  // Print any relevant summary info.
-  virtual void print_summary_info() {}
-
-  // Prepare remebered set for verification.
-  virtual void prepare_for_verify() {};
-};
-
-
-// The simplest possible G1RemSet: iterates over all objects in non-CS
-// regions, searching for pointers into the CS.
-class StupidG1RemSet: public G1RemSet {
-public:
-  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
-
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
-
-  void prepare_for_oops_into_collection_set_do() {}
-  void cleanup_after_oops_into_collection_set_do() {}
-
-  // Nothing is necessary in the version below.
-  void write_ref(HeapRegion* from, oop* p) {}
-  void write_ref(HeapRegion* from, narrowOop* p) {}
-  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
-  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
-
-  void scrub(BitMap* region_bm, BitMap* card_bm) {}
-  void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 int worker_num, int claim_val) {}
-
-};
-
-// A G1RemSet in which each heap region has a rem set that records the
-// external heap references into it.  Uses a mod ref bs to track updates,
-// so that they can be used to update the individual region remsets.
-
-class HRInto_G1RemSet: public G1RemSet {
 protected:
   enum SomePrivateConstants {
     UpdateRStoMergeSync  = 0,
@@ -175,28 +86,32 @@
   // scanned.
   void cleanupHRRS();
 
-  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
-  ~HRInto_G1RemSet();
+  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
+  ~G1RemSet();
 
+  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
+  // outside the CS (having invoked "blk->set_region" to set the "from"
+  // region correctly beforehand.) The "worker_i" param is for the
+  // parallel case where the number of the worker thread calling this
+  // function can be helpful in partitioning the work to be done. It
+  // should be the same as the "i" passed to the calling thread's
+  // work(i) function. In the sequential case this param will be ingored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    int worker_i);
 
+  // Prepare for and cleanup after an oops_into_collection_set_do
+  // call.  Must call each of these once before and after (in sequential
+  // code) any threads call oops_into_collection_set_do.  (This offers an
+  // opportunity to sequential setup and teardown of structures needed by a
+  // parallel iteration over the CS's RS.)
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
+
   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
-  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
-  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
-    if (UseCompressedOops) {
-      scanNewRefsRS_work<narrowOop>(oc, worker_i);
-    } else {
-      scanNewRefsRS_work<oop>(oc, worker_i);
-    }
-  }
   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+
   HeapRegion* calculateStartRegion(int i);
 
-  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
-
   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }
@@ -219,17 +134,31 @@
 
   bool self_forwarded(oop obj);
 
+  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
+  // or card, respectively, such that a region or card with a corresponding
+  // 0 bit contains no part of any live object.  Eliminates any remembered
+  // set entries that correspond to dead heap ranges.
   void scrub(BitMap* region_bm, BitMap* card_bm);
+
+  // Like the above, but assumes is called in parallel: "worker_num" is the
+  // parallel thread id of the current thread, and "claim_val" is the
+  // value that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  int worker_num, int claim_val);
 
-  // If check_for_refs_into_cset is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
+  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
+  // join and leave around parts that must be atomic wrt GC.  (NULL means
+  // being done at a safepoint.)
+  // If check_for_refs_into_cset is true, a true result is returned
+  // if the given card contains oops that have references into the
+  // current collection set.
   virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                        bool check_for_refs_into_cset);
 
+  // Print any relevant summary info.
   virtual void print_summary_info();
+
+  // Prepare remembered set for verification.
   virtual void prepare_for_verify();
 };
 
@@ -250,13 +179,13 @@
 
 class UpdateRSOopClosure: public OopClosure {
   HeapRegion* _from;
-  HRInto_G1RemSet* _rs;
+  G1RemSet* _rs;
   int _worker_i;
 
   template <class T> void do_oop_work(T* p);
 
 public:
-  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i) {
     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
   }
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -30,16 +30,18 @@
   }
 }
 
-template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
+template <class T>
+inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
   par_write_ref_nv(from, p, 0);
 }
 
-inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
+inline bool G1RemSet::self_forwarded(oop obj) {
   bool result =  (obj->is_forwarded() && (obj->forwardee()== obj));
   return result;
 }
 
-template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+template <class T>
+inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
 #ifdef ASSERT
   // can't do because of races
@@ -77,7 +79,7 @@
       // Deferred updates to the CSet are either discarded (in the normal case),
       // or processed (if an evacuation failure occurs) at the end
       // of the collection.
-      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
     } else {
 #if G1_REM_SET_LOGGING
       gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
@@ -91,12 +93,14 @@
   }
 }
 
-template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSOopClosure::do_oop_work(T* p) {
   assert(_from != NULL, "from region must be non-NULL");
   _rs->par_write_ref(_from, p, _worker_i);
 }
 
-template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
+template <class T>
+inline void UpdateRSetImmediate::do_oop_work(T* p) {
   assert(_from->is_in_reserved(p), "paranoia");
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -40,9 +40,6 @@
   develop(intx, G1PolicyVerbose, 0,                                         \
           "The verbosity level on G1 policy decisions")                     \
                                                                             \
-  develop(bool, G1UseHRIntoRS, true,                                        \
-          "Determines whether the 'advanced' HR Into rem set is used.")     \
-                                                                            \
   develop(intx, G1MarkingVerboseLevel, 0,                                   \
           "Level (0-4) of verboseness of the marking code")                 \
                                                                             \
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/gc_implementation/includeDB_gc_g1
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Oct 20 17:07:25 2010 -0700
@@ -310,10 +310,16 @@
 
 heapRegionSeq.inline.hpp                heapRegionSeq.hpp
 
+instanceKlass.cpp                       g1RemSet.inline.hpp
+
+instanceRefKlass.cpp                    g1RemSet.inline.hpp
+
 klass.hpp                               g1OopClosures.hpp
 
 memoryService.cpp                       g1MemoryPool.hpp
 
+objArrayKlass.cpp                       g1RemSet.inline.hpp
+
 ptrQueue.cpp                            allocation.hpp
 ptrQueue.cpp                            allocation.inline.hpp
 ptrQueue.cpp                            mutex.hpp
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/includeDB_core
--- a/hotspot/src/share/vm/includeDB_core	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/includeDB_core	Wed Oct 20 17:07:25 2010 -0700
@@ -3231,6 +3231,7 @@
 orderAccess.hpp                         os.hpp
 
 orderAccess_<os_arch>.inline.hpp        orderAccess.hpp
+orderAccess_<os_arch>.inline.hpp        vm_version_<arch>.hpp
 
 os.cpp                                  allocation.inline.hpp
 os.cpp                                  arguments.hpp
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/runtime/arguments.cpp
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -119,11 +119,8 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.version", "1.0", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name",
                                                            "Java Virtual Machine Specification",  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
-        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.version", VM_Version::vm_release(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
-  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
 
   // following are JVMTI agent writeable properties.
@@ -151,6 +148,14 @@
   os::init_system_properties_values();
 }
 
+
+  // Update/Initialize System properties after JDK version number is known
+void Arguments::init_version_specific_system_properties() {
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.vendor",
+        JDK_Version::is_gte_jdk17x_version() ? "Oracle Corporation" : "Sun Microsystems Inc.", false));
+  PropertyList_add(&_system_properties, new SystemProperty("java.vm.vendor", VM_Version::vm_vendor(), false));
+}
+
 /**
  * Provide a slightly more user-friendly way of eliminating -XX flags.
  * When a flag is eliminated, it can be added to this list in order to
@@ -1676,7 +1681,8 @@
   bool status = true;
   status = status && verify_min_value(StackYellowPages, 1, "StackYellowPages");
   status = status && verify_min_value(StackRedPages, 1, "StackRedPages");
-  status = status && verify_min_value(StackShadowPages, 1, "StackShadowPages");
+  // greater stack shadow pages can't generate instruction to bang stack
+  status = status && verify_interval(StackShadowPages, 1, 50, "StackShadowPages");
   return status;
 }
 
@@ -2973,6 +2979,13 @@
   UseCompressedOops = false;
 #endif
 
+#if defined(_LP64)
+  if ((DumpSharedSpaces || RequireSharedSpaces) && UseCompressedOops) {
+    // Disable compressed oops with shared spaces
+    UseCompressedOops = false;
+  }
+#endif
+
   // Set object alignment values.
   set_object_alignment();
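The arguments.cpp split above defers the two vendor properties because their values depend on the JDK version, which is only established after JDK_Version_init() runs during VM startup (the thread.cpp hunk later in this patch adds the matching call site). A standalone sketch of the two-phase setup; std::map stands in for the VM's property list:

    #include <map>
    #include <string>

    typedef std::map<std::string, std::string> Properties;

    void init_system_properties(Properties& p) {
      // Early: values known before the JDK version is established.
      p["java.vm.specification.name"] = "Java Virtual Machine Specification";
    }

    void init_version_specific_system_properties(Properties& p, bool gte_jdk17) {
      // Late: only valid after JDK_Version_init() has run.
      p["java.vm.specification.vendor"] =
          gte_jdk17 ? "Oracle Corporation" : "Sun Microsystems Inc.";
      p["java.vm.vendor"] = "...";  // VM_Version::vm_vendor() in the real code
    }
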
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/runtime/arguments.hpp
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -484,6 +484,9 @@
   // System properties
   static void init_system_properties();
 
+  // Update/Initialize System properties after JDK version number is known
+  static void init_version_specific_system_properties();
+
   // Property List manipulation
   static void PropertyList_add(SystemProperty** plist, SystemProperty *element);
   static void PropertyList_add(SystemProperty** plist, const char* k, char* v);
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -327,10 +327,10 @@
   /* UseMembar is theoretically a temp flag used for memory barrier         \
    * removal testing.  It was supposed to be removed before FCS but has     \
    * been re-added (see 6401008) */                                         \
-  product(bool, UseMembar, false,                                           \
+  product_pd(bool, UseMembar,                                               \
           "(Unstable) Issues membars on thread state transitions")          \
                                                                             \
-  /* Temporary: See 6948537 */ \
+  /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
                                                                             \
@@ -822,6 +822,9 @@
   develop(bool, PrintJVMWarnings, false,                                    \
           "Prints warnings for unimplemented JVM functions")                \
                                                                             \
+  product(bool, PrintWarnings, true,                                        \
+          "Prints JVM warnings to output stream")                           \
+                                                                            \
   notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
           "Prints warnings for stalled SpinLocks")                          \
                                                                             \
@@ -3542,7 +3545,7 @@
   product(uintx, SharedDummyBlockSize, 512*M,                               \
           "Size of dummy block used to shift heap addresses (in bytes)")    \
                                                                             \
-  product(uintx, SharedReadWriteSize,  12*M,                                \
+  product(uintx, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(13*M),       \
           "Size of read-write space in permanent generation (in bytes)")    \
                                                                             \
   product(uintx, SharedReadOnlySize,  10*M,                                 \
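The UseMembar change above converts a shared product flag into a platform-defined one: product_pd declares the flag without a default, and each CPU port supplies one via define_pd_global, as the globals_sparc.hpp, globals_x86.hpp and globals_zero.hpp hunks earlier in this patch do. A rough sketch of the pairing (simplified; the real HotSpot flag macros generate considerably more than this):

    // shared code declares the flag, default comes from the platform:
    //   product_pd(bool, UseMembar, "(Unstable) Issues membars on ...")
    // platform code supplies the default:
    //   define_pd_global(bool, UseMembar, false);

    #define define_pd_global_sketch(type, name, value) \
      static const type pd_##name = value;

    define_pd_global_sketch(bool, UseMembar, false)  // per-platform default
    bool UseMembar = pd_UseMembar;                   // runtime flag, -XX settable
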
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/runtime/sharedRuntime.cpp
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -302,6 +302,9 @@
   return (f <= (double)0.0) ? (double)0.0 - f : f;
 }
 
+#endif
+
+#if defined(__SOFTFP__) || defined(PPC)
 double SharedRuntime::dsqrt(double f) {
   return sqrt(f);
 }
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/runtime/sharedRuntime.hpp
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Wed Oct 20 17:07:25 2010 -0700
@@ -116,6 +116,9 @@
 
 #if defined(__SOFTFP__) || defined(E500V2)
   static double dabs(double f);
+#endif
+
+#if defined(__SOFTFP__) || defined(PPC)
   static double dsqrt(double f);
 #endif
 
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/runtime/thread.cpp
--- a/hotspot/src/share/vm/runtime/thread.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -2921,6 +2921,9 @@
   // So that JDK version can be used as a discrimintor when parsing arguments
   JDK_Version_init();
 
+  // Update/Initialize System properties after JDK version number is known
+  Arguments::init_version_specific_system_properties();
+
   // Parse arguments
   jint parse_result = Arguments::parse(args);
   if (parse_result != JNI_OK) return parse_result;
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/utilities/debug.cpp
--- a/hotspot/src/share/vm/utilities/debug.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/utilities/debug.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -51,14 +51,16 @@
 
 void warning(const char* format, ...) {
-  // In case error happens before init or during shutdown
-  if (tty == NULL) ostream_init();
+  if (PrintWarnings) {
+    // In case error happens before init or during shutdown
+    if (tty == NULL) ostream_init();
 
-  tty->print("%s warning: ", VM_Version::vm_name());
-  va_list ap;
-  va_start(ap, format);
-  tty->vprint_cr(format, ap);
-  va_end(ap);
+    tty->print("%s warning: ", VM_Version::vm_name());
+    va_list ap;
+    va_start(ap, format);
+    tty->vprint_cr(format, ap);
+    va_end(ap);
+  }
   if (BreakAtWarning) BREAKPOINT;
 }
 
diff -r 9cb24917216b -r 8581cacbdee7 hotspot/src/share/vm/utilities/exceptions.cpp
--- a/hotspot/src/share/vm/utilities/exceptions.cpp	Wed Jul 05 17:24:57 2017 +0200
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp	Wed Oct 20 17:07:25 2010 -0700
@@ -61,6 +61,18 @@
     ShouldNotReachHere();
   }
 
+#ifdef ASSERT
+  // Check for trying to throw stack overflow before initialization is complete
+  // to prevent infinite recursion trying to initialize stack overflow without
+  // adequate stack space.
+  // This can happen with stress testing a large value of StackShadowPages
+  if (h_exception()->klass() == SystemDictionary::StackOverflowError_klass()) {
+    instanceKlass* ik = instanceKlass::cast(h_exception->klass());
+    assert(ik->is_initialized(),
+           "need to increase min_stack_allowed calculation");
+  }
+#endif // ASSERT
+
   if (thread->is_VM_thread()
       || thread->is_Compiler_thread() ) {
     // We do not care what kind of exception we get for the vm-thread or a thread which
@@ -91,7 +103,6 @@
     thread->set_pending_exception(Universe::vm_exception(), file, line);
     return true;
   }
-
   return false;
 }
 
@@ -193,6 +204,7 @@
     klassOop k = SystemDictionary::StackOverflowError_klass();
     oop e = instanceKlass::cast(k)->allocate_instance(CHECK);
     exception = Handle(THREAD, e);  // fill_in_stack trace does gc
+    assert(instanceKlass::cast(k)->is_initialized(), "need to increase min_stack_allowed calculation");
    if (StackTraceInThrowable) {
      java_lang_Throwable::fill_in_stack_trace(exception);
    }
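With the PrintWarnings product flag introduced above (default true), the warning() output guarded in debug.cpp can presumably be silenced in the usual boolean-flag way:

    java -XX:-PrintWarnings ...

BreakAtWarning still triggers regardless, since the breakpoint check sits outside the new guard.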