# HG changeset patch
# User ctornqvi
# Date 1423872228 0
# Node ID fbd83ffd242b3426421b96aa449bc5c6b8c47e9b
# Parent  4d325459c4059ed08f8bda2fc6492443a91bf56e
# Parent  63ffd2db736d113f771912265f3c835b538ec29e
Merge

diff -r 4d325459c405 -r fbd83ffd242b hotspot/agent/src/os/linux/ps_proc.c
--- a/hotspot/agent/src/os/linux/ps_proc.c Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/agent/src/os/linux/ps_proc.c Sat Feb 14 00:03:48 2015 +0000
@@ -27,9 +27,11 @@
 #include <string.h>
 #include <signal.h>
 #include <errno.h>
+#include <elf.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/ptrace.h>
+#include <sys/uio.h>
 #include "libproc_impl.h"
 
 #if defined(x86_64) && !defined(amd64)
@@ -138,6 +140,15 @@
     return false;
   }
   return true;
+#elif defined(PTRACE_GETREGSET)
+  struct iovec iov;
+  iov.iov_base = user;
+  iov.iov_len = sizeof(*user);
+  if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, (void*) &iov) < 0) {
+    print_debug("ptrace(PTRACE_GETREGSET, ...) failed for lwp %d\n", pid);
+    return false;
+  }
+  return true;
 #else
   print_debug("ptrace(PTRACE_GETREGS, ...) not supported\n");
   return false;

diff -r 4d325459c405 -r fbd83ffd242b hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Sat Feb 14 00:03:48 2015 +0000
@@ -423,12 +423,22 @@
     protected void writeMethods() throws IOException {
         MethodArray methods = klass.getMethods();
-        final int len = methods.length();
+        ArrayList<Method> valid_methods = new ArrayList<Method>();
+        for (int i = 0; i < methods.length(); i++) {
+            Method m = methods.at(i);
+            long accessFlags = m.getAccessFlags();
+            // overpass method
+            if (accessFlags == (JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE)) {
+                continue;
+            }
+            valid_methods.add(m);
+        }
+        final int len = valid_methods.size();
         // write number of methods
         dos.writeShort((short) len);
         if (DEBUG) debugMessage("number of methods = " + len);
         for (int m = 0; m < len; m++) {
-            writeMethod(methods.at(m));
+            writeMethod(valid_methods.get(m));
         }
     }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -630,7 +630,12 @@
 inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
   relocate(a.rspec(offset));
-  ldf(w, a.base(), a.disp() + offset, d);
+  if (a.has_index()) {
+    assert(offset == 0, "");
+    ldf(w, a.base(), a.index(), d);
+  } else {
+    ldf(w, a.base(), a.disp() + offset, d);
+  }
 }
 
 // returns if membar generates anything, obviously this code should mirror
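
Aside, not part of the changeset: the ps_proc.c hunk above switches the fallback register fetch to PTRACE_GETREGSET, which selects the register set by an ELF note type and passes the buffer through a struct iovec; this is the only variant available on newer architectures such as aarch64 that never had PTRACE_GETREGS. A minimal standalone sketch of the same call pattern (function and variable names here are illustrative, not from the patch):

    #include <elf.h>          // NT_PRSTATUS
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>      // struct iovec
    #include <sys/user.h>     // struct user_regs_struct

    // Fetch the general-purpose registers of a stopped thread.
    static bool get_lwp_regs(pid_t pid, struct user_regs_struct* user) {
      struct iovec iov;
      iov.iov_base = user;           // kernel writes the registers here
      iov.iov_len  = sizeof(*user);  // kernel may shrink this to the actual size
      return ptrace(PTRACE_GETREGSET, pid, (void*) NT_PRSTATUS, &iov) >= 0;
    }
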
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/ci/ciInstanceKlass.cpp
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -501,32 +501,31 @@
   return fields;
 }
 
-void ciInstanceKlass::compute_injected_fields_helper() {
+bool ciInstanceKlass::compute_injected_fields_helper() {
   ASSERT_IN_VM;
   InstanceKlass* k = get_instanceKlass();
 
   for (InternalFieldStream fs(k); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static())  continue;
-    _has_injected_fields++;
-    break;
+    return true;
   }
+  return false;
 }
 
-bool ciInstanceKlass::compute_injected_fields() {
-  assert(_has_injected_fields == -1, "shouldn't be initialized yet");
+void ciInstanceKlass::compute_injected_fields() {
   assert(is_loaded(), "must be loaded");
 
+  int has_injected_fields = 0;
   if (super() != NULL && super()->has_injected_fields()) {
-    _has_injected_fields = 1;
-    return true;
+    has_injected_fields = 1;
+  } else {
+    GUARDED_VM_ENTRY({
+        has_injected_fields = compute_injected_fields_helper() ? 1 : 0;
+      });
   }
-
-  _has_injected_fields = 0;
-  GUARDED_VM_ENTRY({
-    compute_injected_fields_helper();
-  });
-
-  return _has_injected_fields > 0 ? true : false;
+  // may be concurrently initialized for shared ciInstanceKlass objects
+  assert(_has_injected_fields == -1 || _has_injected_fields == has_injected_fields, "broken concurrent initialization");
+  _has_injected_fields = has_injected_fields;
 }
 
 // ------------------------------------------------------------------

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/ci/ciInstanceKlass.hpp
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -72,8 +72,8 @@
   //    Itsef: more than one implementors.
   ciInstanceKlass*       _implementor;
 
-  bool compute_injected_fields();
-  void compute_injected_fields_helper();
+  void compute_injected_fields();
+  bool compute_injected_fields_helper();
 
 protected:
   ciInstanceKlass(KlassHandle h_k);
@@ -193,7 +193,7 @@
 
   bool has_injected_fields() {
     if (_has_injected_fields == -1) {
-      return compute_injected_fields();
+      compute_injected_fields();
     }
     return _has_injected_fields > 0 ? true : false;
   }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/ci/ciMethod.cpp
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -70,7 +70,8 @@
 // Loaded method.
 ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
   ciMetadata(h_m()),
-  _holder(holder)
+  _holder(holder),
+  _has_injected_profile(false)
 {
   assert(h_m() != NULL, "no null method");
 
@@ -168,7 +169,8 @@
   _liveness(          NULL),
   _can_be_statically_bound(false),
   _method_blocks( NULL),
-  _method_data(       NULL)
+  _method_data(       NULL),
+  _has_injected_profile( false)
 #if defined(COMPILER2) || defined(SHARK)
   ,
   _flow(              NULL),

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/ci/ciMethod.hpp
--- a/hotspot/src/share/vm/ci/ciMethod.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -79,6 +79,7 @@
   bool _is_c1_compilable;
   bool _is_c2_compilable;
   bool _can_be_statically_bound;
+  bool _has_injected_profile;
 
   // Lazy fields, filled in on demand
   address              _code;
@@ -286,6 +287,9 @@
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
 
+  bool has_injected_profile() const { return _has_injected_profile; }
+  void set_injected_profile(bool x) { _has_injected_profile = x; }
+
   // Stack walking support
   bool is_ignored_by_security_stack_walk() const;
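
Aside, not part of the changeset: the ciInstanceKlass rework above replaces a racy read-modify-write (_has_injected_fields++) with a compute-then-publish scheme: every compiler thread derives the same value independently, so a duplicated store is harmless, and the new assert checks that racing initializers agree. A reduced sketch of the idiom (all names hypothetical):

    #include <cassert>

    // -1 = not yet initialized; 0/1 = cached boolean. Shared by compiler
    // threads without locking: publishing the same value twice is benign.
    class LazyFlag {
      int _value;
      int compute() const { return 1; }   // deterministic, side-effect free
     public:
      LazyFlag() : _value(-1) {}
      bool get() {
        if (_value == -1) {
          int v = compute();              // every racing thread computes the same v
          assert(_value == -1 || _value == v, "broken concurrent initialization");
          _value = v;                     // idempotent publish
        }
        return _value > 0;
      }
    };
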
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/classfile/vmSymbols.hpp
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -243,7 +243,6 @@
   template(returnType_name,            "returnType")                        \
   template(signature_name,             "signature")                         \
   template(slot_name,                  "slot")                              \
-  template(selectAlternative_name,     "selectAlternative")                 \
                                                                             \
   /* Support for annotations (JDK 1.5 and above) */                         \
                                                                             \
@@ -295,8 +294,7 @@
   template(setTarget_signature,        "(Ljava/lang/invoke/MethodHandle;)V") \
   NOT_LP64( do_alias(intptr_signature, int_signature)  )                    \
   LP64_ONLY( do_alias(intptr_signature, long_signature) )                   \
-  template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
-                                                                            \
+                                                                            \
   /* common method and field names */                                       \
   template(object_initializer_name,    "<init>")                            \
   template(class_initializer_name,     "<clinit>")                          \
@@ -868,6 +866,12 @@
   do_name(     fullFence_name,         "fullFence")                         \
   do_alias(    fullFence_signature,    void_method_signature)               \
                                                                             \
+  /* Custom branch frequencies profiling support for JSR292 */              \
+  do_class(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \
+  do_intrinsic(_profileBoolean, java_lang_invoke_MethodHandleImpl, profileBoolean_name, profileBoolean_signature, F_S) \
+  do_name(     profileBoolean_name,    "profileBoolean")                    \
+  do_signature(profileBoolean_signature, "(Z[I)Z")                          \
+                                                                            \
   /* unsafe memory references (there are a lot of them...) */               \
   do_signature(getObject_signature,    "(Ljava/lang/Object;J)Ljava/lang/Object;") \
   do_signature(putObject_signature,    "(Ljava/lang/Object;JLjava/lang/Object;)V") \

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/code/nmethod.cpp
--- a/hotspot/src/share/vm/code/nmethod.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/code/nmethod.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -2184,17 +2184,6 @@
 #endif // !SHARK
 }
 
-
-oop nmethod::embeddedOop_at(u_char* p) {
-  RelocIterator iter(this, p, p + 1);
-  while (iter.next())
-    if (iter.type() == relocInfo::oop_type) {
-      return iter.oop_reloc()->oop_value();
-    }
-  return NULL;
-}
-
-
 inline bool includes(void* p, void* from, void* to) {
   return from <= p && p < to;
 }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/code/nmethod.hpp
--- a/hotspot/src/share/vm/code/nmethod.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/code/nmethod.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -702,11 +702,6 @@
   int  compile_id() const { return _compile_id; }
   const char* compile_kind() const;
 
-  // For debugging
-  // CompiledIC*    IC_at(char* p) const;
-  // PrimitiveIC*   primitiveIC_at(char* p) const;
-  oop embeddedOop_at(address p);
-
   // tells if any of this method's dependencies have been invalidated
   // (this is expensive!)
   static void check_all_dependencies(DepChange& changes);

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/compiler/disassembler.cpp
--- a/hotspot/src/share/vm/compiler/disassembler.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -345,21 +345,6 @@
       if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr);
       return;
     }
-
-    oop obj;
-    if (_nm != NULL
-        && (obj = _nm->embeddedOop_at(cur_insn())) != NULL
-        && (address) obj == adr
-        && Universe::heap()->is_in(obj)
-        && Universe::heap()->is_in(obj->klass())) {
-      julong c = st->count();
-      obj->print_value_on(st);
-      if (st->count() == c) {
-        // No output.  (Can happen in product builds.)
-        st->print("(a %s)", obj->klass()->external_name());
-      }
-      return;
-    }
   }
 
   // Fall through to a simple (hexadecimal) numeral.
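
Aside, not part of the changeset: the new _profileBoolean intrinsic above is keyed by the descriptor "(Z[I)Z", i.e. it takes the boolean being profiled plus an int[] of counters and returns the boolean unchanged. Its bytecode fallback presumably just bumps the matching counter; a hypothetical C++ rendering of that contract (the [0]=false/[1]=true layout is what inline_profileBoolean() reads back later in this patch):

    // counts[0] accumulates 'false' occurrences, counts[1] 'true' occurrences.
    static bool profile_boolean(bool result, int counts[2]) {
      counts[result ? 1 : 0]++;
      return result;   // value passes through unchanged
    }
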
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -308,7 +308,7 @@
 
   inline ParScanThreadState& thread_state(int i);
 
-  void trace_promotion_failed(YoungGCTracer& gc_tracer);
+  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
   void reset(int active_workers, bool promotion_failed);
   void flush();
 
@@ -357,10 +357,10 @@
   return ((ParScanThreadState*)_data)[i];
 }
 
-void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
+void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
   for (int i = 0; i < length(); ++i) {
     if (thread_state(i).promotion_failed()) {
-      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
+      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
       thread_state(i).promotion_failed_info().reset();
     }
   }
 }
@@ -883,7 +883,7 @@
 
 // A Generation that does parallel young-gen collection.
 
-void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
+void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -899,10 +899,10 @@
   _next_gen->promotion_failure_occurred();
 
   // Trace promotion failure in the parallel GC threads
-  thread_state_set.trace_promotion_failed(gc_tracer);
+  thread_state_set.trace_promotion_failed(gc_tracer());
   // Single threaded code may have reported promotion failure to the global state
   if (_promotion_failed_info.has_failed()) {
-    gc_tracer.report_promotion_failed(_promotion_failed_info);
+    _gc_tracer.report_promotion_failed(_promotion_failed_info);
   }
   // Reset the PromotionFailureALot counters.
   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
@@ -941,9 +941,8 @@
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 
-  ParNewTracer gc_tracer;
-  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
-  gch->trace_heap_before_gc(&gc_tracer);
+  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+  gch->trace_heap_before_gc(gc_tracer());
 
   init_assuming_no_promotion_failure();
 
@@ -952,7 +951,7 @@
     size_policy->minor_collection_begin();
   }
 
-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
@@ -994,7 +993,7 @@
 
   // Trace and reset failed promotion info.
   if (promotion_failed()) {
-    thread_state_set.trace_promotion_failed(gc_tracer);
+    thread_state_set.trace_promotion_failed(gc_tracer());
   }
 
   // Process (weak) reference objects found during scavenge.
@@ -1015,16 +1014,16 @@
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
-                                              _gc_timer, gc_tracer.gc_id());
+                                              _gc_timer, _gc_tracer.gc_id());
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
-                                              _gc_timer, gc_tracer.gc_id());
+                                              _gc_timer, _gc_tracer.gc_id());
   }
-  gc_tracer.report_gc_reference_stats(stats);
+  _gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -1049,7 +1048,7 @@
 
     adjust_desired_tenuring_threshold();
   } else {
-    handle_promotion_failed(gch, thread_state_set, gc_tracer);
+    handle_promotion_failed(gch, thread_state_set);
   }
   // set new iteration safe limit for the survivor spaces
   from()->set_concurrent_iteration_safe_limit(from()->top());
@@ -1088,12 +1087,12 @@
   }
   rp->verify_no_references_recorded();
 
-  gch->trace_heap_after_gc(&gc_tracer);
-  gc_tracer.report_tenuring_threshold(tenuring_threshold());
+  gch->trace_heap_after_gc(gc_tracer());
+  _gc_tracer.report_tenuring_threshold(tenuring_threshold());
 
   _gc_timer->register_gc_end();
 
-  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
+  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
 
 static int sum;
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -333,6 +333,9 @@
   // references to live referent.
   DefNewGeneration::IsAliveClosure _is_alive_closure;
 
+  // GC tracer that should be used during collection.
+  ParNewTracer _gc_tracer;
+
   static oop real_forwardee_slow(oop obj);
   static void waste_some_time();
 
@@ -340,7 +343,7 @@
   // word being overwritten with a self-forwarding-pointer.
   void preserve_mark_if_necessary(oop obj, markOop m);
 
-  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
+  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
 
  protected:
 
@@ -411,6 +414,10 @@
     return _plab_stats.desired_plab_sz();
   }
 
+  const ParNewTracer* gc_tracer() const {
+    return &_gc_tracer;
+  }
+
   static oop real_forwardee(oop obj);
 
   DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
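
Aside, not part of the changeset: the two parNew hunks above replace a per-collection stack tracer with a _gc_tracer member that escapes only as const ParNewTracer*, which is why report_promotion_failed() and the trace_heap() family grow const qualifiers in the hunks that follow. The shape of the pattern (hypothetical minimal classes):

    class TracerLike {
     public:
      // const: reporting must not mutate tracer state through this interface
      void report_promotion_failed() const {}
    };

    class GenerationLike {
      TracerLike _gc_tracer;   // owned by the generation, reused every GC
     public:
      const TracerLike* gc_tracer() const { return &_gc_tracer; }
      void collect() {
        gc_tracer()->report_promotion_failed();  // only const members callable
      }
    };
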
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -663,7 +663,7 @@
   }
 }
 
-void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const PSHeapSummary& heap_summary = create_ps_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -64,7 +64,7 @@
   // The task manager
   static GCTaskManager* _gc_task_manager;
 
-  void trace_heap(GCWhen::Type when, GCTracer* tracer);
+  void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
  protected:
   static inline size_t total_invocations();

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -162,7 +162,7 @@
   _tenuring_threshold = UNSET_TENURING_THRESHOLD;
 }
 
-void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) {
+void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
   assert_set_gc_id();
 
   send_promotion_failed_event(pf_info);

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -153,7 +153,7 @@
   virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);
 
  public:
-  void report_promotion_failed(const PromotionFailedInfo& pf_info);
+  void report_promotion_failed(const PromotionFailedInfo& pf_info) const;
   void report_tenuring_threshold(const uint tenuring_threshold);
 
   /*
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_interface/collectedHeap.cpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -132,7 +132,7 @@
   assert_locked_or_safepoint(CodeCache_lock);
 }
 
-void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const GCHeapSummary& heap_summary = create_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);
 
@@ -140,11 +140,11 @@
   gc_tracer->report_metaspace_summary(when, metaspace_summary);
 }
 
-void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
+void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
   trace_heap(GCWhen::BeforeGC, gc_tracer);
 }
 
-void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
+void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
   trace_heap(GCWhen::AfterGC, gc_tracer);
 }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_interface/collectedHeap.hpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -175,7 +175,7 @@
   // Fill with a single object (either an int array or a java.lang.Object).
   static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 
-  virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
+  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
@@ -606,8 +606,8 @@
   virtual void register_nmethod(nmethod* nm);
   virtual void unregister_nmethod(nmethod* nm);
 
-  void trace_heap_before_gc(GCTracer* gc_tracer);
-  void trace_heap_after_gc(GCTracer* gc_tracer);
+  void trace_heap_before_gc(const GCTracer* gc_tracer);
+  void trace_heap_after_gc(const GCTracer* gc_tracer);
 
   // Heap verification
   virtual void verify(bool silent, VerifyOption option) = 0;

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_interface/gcCause.cpp
--- a/hotspot/src/share/vm/gc_interface/gcCause.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_interface/gcCause.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -103,9 +103,6 @@
     case _last_ditch_collection:
       return "Last ditch collection";
 
-    case _dcmd_gc_run:
-      return "Diagnostic Command";
-
     case _last_gc_cause:
       return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/gc_interface/gcCause.hpp
--- a/hotspot/src/share/vm/gc_interface/gcCause.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/gc_interface/gcCause.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -74,9 +74,6 @@
     _g1_humongous_allocation,
 
     _last_ditch_collection,
-
-    _dcmd_gc_run,
-
     _last_gc_cause
   };
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/memory/genCollectedHeap.cpp
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,12 +108,11 @@
 
   // Allocate space for the heap.
   char* heap_address;
-  size_t total_reserved = 0;
   ReservedSpace heap_rs;
 
   size_t heap_alignment = collector_policy()->heap_alignment();
 
-  heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
+  heap_address = allocate(heap_alignment, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
@@ -149,7 +148,6 @@
 
 
 char* GenCollectedHeap::allocate(size_t alignment,
-                                 size_t* _total_reserved,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                               "the maximum representable size";
@@ -171,8 +169,6 @@
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT, total_reserved, alignment));
 
-  *_total_reserved = total_reserved;
-
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/memory/genCollectedHeap.hpp
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,9 @@
 
   // Returns JNI_OK on success
   virtual jint initialize();
-  char* allocate(size_t alignment, size_t* _total_reserved, ReservedSpace* heap_rs);
+
+  // Reserve aligned space for the heap as needed by the contained generations.
+  char* allocate(size_t alignment, ReservedSpace* heap_rs);
 
   // Does operations required after initialization has been done.
   void post_initialize();
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/memory/tenuredGeneration.cpp
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
-#include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/cardGeneration.inline.hpp"

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/callnode.cpp
--- a/hotspot/src/share/vm/opto/callnode.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/callnode.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -1982,6 +1982,7 @@
 
 Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
 
+  if (remove_dead_region(phase, can_reshape))  return this;
 
   if (StressArrayCopyMacroNode && !can_reshape) return NULL;

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/classes.hpp
--- a/hotspot/src/share/vm/opto/classes.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/classes.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -200,6 +200,7 @@
 macro(Opaque1)
 macro(Opaque2)
 macro(Opaque3)
+macro(ProfileBoolean)
 macro(OrI)
 macro(OrL)
 macro(OverflowAddI)

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/compile.cpp
--- a/hotspot/src/share/vm/opto/compile.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/compile.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -3105,6 +3105,7 @@
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
+    assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
    break;
   }
 
@@ -3321,6 +3322,9 @@
 bool Compile::too_many_traps(ciMethod* method,
                              int bci,
                              Deoptimization::DeoptReason reason) {
+  if (method->has_injected_profile()) {
+    return false;
+  }
   ciMethodData* md = method->method_data();
   if (md->is_empty()) {
     // Assume the trap has not occurred, or that it occurred only
@@ -3370,6 +3374,9 @@
 bool Compile::too_many_recompiles(ciMethod* method,
                                   int bci,
                                   Deoptimization::DeoptReason reason) {
+  if (method->has_injected_profile()) {
+    return false;
+  }
   ciMethodData* md = method->method_data();
   if (md->is_empty()) {
     // Assume the trap has not occurred, or that it occurred only

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/escape.cpp
--- a/hotspot/src/share/vm/opto/escape.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/escape.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -206,6 +206,11 @@
     _verify = false;
   }
 #endif
+  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
+  // processing, calls to CI to resolve symbols (types, fields, methods)
+  // referenced in bytecode. During symbol resolution VM may throw
+  // an exception which CI cleans and converts to compilation failure.
+  if (C->failing())  return false;
 
   // 2. Finish Graph construction by propagating references to all
   //    java objects through graph.
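
Aside, not part of the changeset: the escape.cpp hunk is a bail-out guard. BCEscapeAnalyzer can trigger CI symbol resolution, and CI converts a pending VM exception into a recorded compilation failure instead of propagating it, so the analysis must poll that state before building on possibly half-constructed data. The general idiom (hypothetical types):

    struct CompilationLike {
      bool _failing = false;
      bool failing() const { return _failing; }
      void record_failure(const char* /*reason*/) { _failing = true; }
    };

    bool build_connection_graph(CompilationLike* C) {
      // ... phase 1 may resolve symbols and record a failure instead of throwing ...
      if (C->failing())  return false;   // bail out before touching phase-2 state
      // ... phase 2 runs only on a healthy compilation ...
      return true;
    }
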
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/graphKit.cpp
--- a/hotspot/src/share/vm/opto/graphKit.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -1986,6 +1986,11 @@
       Deoptimization::trap_request_index(trap_request) < 0 &&
       too_many_recompiles(reason)) {
     // This BCI is causing too many recompilations.
+    if (C->log() != NULL) {
+      C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
+                     Deoptimization::trap_reason_name(reason),
+                     Deoptimization::trap_action_name(action));
+    }
     action = Deoptimization::Action_none;
     trap_request = Deoptimization::make_trap_request(reason, action);
   } else {
@@ -2760,7 +2765,7 @@
   Deoptimization::DeoptReason reason =
     Deoptimization::reason_class_check(spec_klass != NULL);
 
   // Make sure we haven't already deoptimized from this tactic.
-  if (too_many_traps(reason))
+  if (too_many_traps(reason) || too_many_recompiles(reason))
     return NULL;
 
   // (No, this isn't a call, but it's enough like a virtual call
@@ -2782,8 +2787,7 @@
                                             &exact_obj);
   { PreserveJVMState pjvms(this);
     set_control(slow_ctl);
-    uncommon_trap(reason,
-                  Deoptimization::Action_maybe_recompile);
+    uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
   }
   if (safe_for_replace) {
     replace_in_map(not_null_obj, exact_obj);
@@ -2812,8 +2816,12 @@
   if (type != NULL) {
     Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
     Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
-    if (!too_many_traps(null_reason) &&
-        !too_many_traps(class_reason)) {
+    ciMethod* trap_method = (sfpt == NULL) ? method() : sfpt->jvms()->method();
+    int trap_bci = (sfpt == NULL) ? bci() : sfpt->jvms()->bci();
+
+    if (!too_many_traps(null_reason) && !too_many_recompiles(null_reason) &&
+        !C->too_many_traps(trap_method, trap_bci, class_reason) &&
+        !C->too_many_recompiles(trap_method, trap_bci, class_reason)) {
       Node* not_null_obj = NULL;
       // not_null is true if we know the object is not null and
       // there's no need for a null check
@@ -2833,19 +2841,18 @@
         GraphKit kit(sfpt->jvms());
         PreserveJVMState pjvms(&kit);
         kit.set_control(slow_ctl);
-        kit.uncommon_trap(class_reason,
-                          Deoptimization::Action_maybe_recompile);
+        kit.uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
       } else {
         PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(class_reason,
-                      Deoptimization::Action_maybe_recompile);
+        uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
       }
       replace_in_map(not_null_obj, exact_obj);
       obj = exact_obj;
     }
   } else {
-    if (!too_many_traps(Deoptimization::Reason_null_assert)) {
+    if (!too_many_traps(Deoptimization::Reason_null_assert) &&
+        !too_many_recompiles(Deoptimization::Reason_null_assert)) {
       Node* exact_obj = null_assert(obj);
       replace_in_map(obj, exact_obj);
       obj = exact_obj;
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/graphKit.hpp
--- a/hotspot/src/share/vm/opto/graphKit.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/graphKit.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -714,6 +714,15 @@
                   klass, reason_string, must_throw, keep_exact_action);
   }
 
+  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
+  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
+                           Deoptimization::DeoptAction action,
+                           ciKlass* klass = NULL, const char* reason_string = NULL,
+                           bool must_throw = false) {
+    uncommon_trap(Deoptimization::make_trap_request(reason, action),
+                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
+  }
+
   // SP when bytecode needs to be reexecuted.
   virtual int reexecute_sp() { return sp(); }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/ifnode.cpp
--- a/hotspot/src/share/vm/opto/ifnode.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/ifnode.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -145,10 +145,18 @@
     Node* v = u->fast_out(k); // User of the phi
     // CNC - Allow only really simple patterns.
     // In particular I disallow AddP of the Phi, a fairly common pattern
-    if( v == cmp ) continue;  // The compare is OK
-    if( (v->is_ConstraintCast()) &&
-        v->in(0)->in(0) == iff )
-      continue;                 // CastPP/II of the IfNode is OK
+    if (v == cmp) continue;  // The compare is OK
+    if (v->is_ConstraintCast()) {
+      // If the cast is derived from data flow edges, it may not have a control edge.
+      // If so, it should be safe to split. But follow-up code can not deal with
+      // this (l. 359). So skip.
+      if (v->in(0) == NULL) {
+        return NULL;
+      }
+      if (v->in(0)->in(0) == iff) {
+        continue; // CastPP/II of the IfNode is OK
+      }
+    }
     // Disabled following code because I cannot tell if exactly one
     // path dominates without a real dominator check. CNC 9/9/1999
     //uint vop = v->Opcode();
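
Aside, not part of the changeset: the ifnode.cpp fix is a two-step dereference guard. A ConstraintCast created purely from data-flow edges has no control input, so v->in(0) can be NULL and the old v->in(0)->in(0) == iff would crash. A reduced sketch of the check (hypothetical node type):

    struct NodeLike {
      NodeLike* _in[2] = { nullptr, nullptr };
      NodeLike* in(unsigned i) const { return _in[i]; }
      bool is_ConstraintCast() const { return true; }  // stand-in predicate
    };

    // True only when the cast has a control edge AND that edge points at iff.
    static bool cast_controlled_by(const NodeLike* v, const NodeLike* iff) {
      if (!v->is_ConstraintCast())  return false;
      if (v->in(0) == nullptr)      return false;  // data-flow-only cast
      return v->in(0)->in(0) == iff;
    }
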
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/library_call.cpp
--- a/hotspot/src/share/vm/opto/library_call.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/library_call.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -41,6 +41,7 @@
 #include "opto/movenode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/narrowptrnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
@@ -286,6 +287,8 @@
   bool inline_updateBytesCRC32();
   bool inline_updateByteBufferCRC32();
   bool inline_multiplyToLen();
+
+  bool inline_profileBoolean();
 };

@@ -894,6 +897,9 @@
   case vmIntrinsics::_updateByteBufferCRC32:
     return inline_updateByteBufferCRC32();
 
+  case vmIntrinsics::_profileBoolean:
+    return inline_profileBoolean();
+
   default:
     // If you get here, it may be that someone has added a new intrinsic
     // to the list in vmSymbols.hpp without implementing it here.
@@ -4661,6 +4667,8 @@
   // tightly_coupled_allocation()
   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);
 
+  ciMethod* trap_method = method();
+  int trap_bci = bci();
   SafePointNode* sfpt = NULL;
   if (alloc != NULL) {
     // The JVM state for uncommon traps between the allocation and
@@ -4685,6 +4693,9 @@
     sfpt->set_i_o(map()->i_o());
     sfpt->set_memory(map()->memory());
+
+    trap_method = jvms->method();
+    trap_bci = jvms->bci();
   }
 
   bool validated = false;
@@ -4789,7 +4800,7 @@
     }
   }
 
-  if (!too_many_traps(Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
+  if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
     // validate arguments: enables transformation of the ArrayCopyNode
     validated = true;
@@ -5794,3 +5805,47 @@
 
   return instof_false;  // even if it is NULL
 }
+
+bool LibraryCallKit::inline_profileBoolean() {
+  Node* counts = argument(1);
+  const TypeAryPtr* ary = NULL;
+  ciArray* aobj = NULL;
+  if (counts->is_Con()
+      && (ary = counts->bottom_type()->isa_aryptr()) != NULL
+      && (aobj = ary->const_oop()->as_array()) != NULL
+      && (aobj->length() == 2)) {
+    // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
+    jint false_cnt = aobj->element_value(0).as_int();
+    jint  true_cnt = aobj->element_value(1).as_int();
+
+    method()->set_injected_profile(true);
+
+    if (C->log() != NULL) {
+      C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
+                     false_cnt, true_cnt);
+    }
+
+    if (false_cnt + true_cnt == 0) {
+      // According to profile, never executed.
+      uncommon_trap_exact(Deoptimization::Reason_intrinsic,
+                          Deoptimization::Action_reinterpret);
+      return true;
+    }
+    // Stop profiling.
+    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
+    // By replacing method's body with profile data (represented as ProfileBooleanNode
+    // on IR level) we effectively disable profiling.
+    // It enables full speed execution once optimized code is generated.
+    Node* profile = _gvn.transform(new ProfileBooleanNode(argument(0), false_cnt, true_cnt));
+    C->record_for_igvn(profile);
+    set_result(profile);
+    return true;
+  } else {
+    // Continue profiling.
+    // Profile data isn't available at the moment. So, execute method's bytecode version.
+    // Usually, when GWT LambdaForms are profiled it means that a stand-alone nmethod
+    // is compiled and counters aren't available since corresponding MethodHandle
+    // isn't a compile-time constant.
+    return false;
+  }
+}

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/opaquenode.cpp
--- a/hotspot/src/share/vm/opto/opaquenode.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/opaquenode.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -60,4 +60,27 @@
   return (&n == this);          // Always fail except on self
 }
 
+//=============================================================================
+uint ProfileBooleanNode::hash() const { return NO_HASH; }
+uint ProfileBooleanNode::cmp( const Node &n ) const {
+  return (&n == this);
+}
+
+Node *ProfileBooleanNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (can_reshape && _delay_removal) {
+    _delay_removal = false;
+    return this;
+  } else {
+    return NULL;
+  }
+}
+
+Node *ProfileBooleanNode::Identity( PhaseTransform *phase ) {
+  if (_delay_removal) {
+    return this;
+  } else {
+    assert(_consumed, "profile should be consumed before elimination");
+    return in(1);
+  }
+}
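
Aside, not part of the changeset: ProfileBooleanNode deliberately survives exactly one extra IGVN round. The first Ideal() call clears _delay_removal and returns the node itself ("changed, revisit me"); on the next visit Identity() hands back the input, erasing the node. A reduced sketch of that two-phase lifetime (hypothetical stand-in class, no GVN machinery):

    struct DelayedNode {
      DelayedNode* _input;
      bool _delay_removal = true;
      explicit DelayedNode(DelayedNode* in) : _input(in) {}

      // IGVN calls Ideal first; returning 'this' marks the node as changed.
      DelayedNode* Ideal() {
        if (_delay_removal) { _delay_removal = false; return this; }
        return nullptr;                  // nothing more to transform
      }
      // Identity is queried next; returning the input removes the node.
      DelayedNode* Identity() { return _delay_removal ? this : _input; }
    };
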
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/opaquenode.hpp
--- a/hotspot/src/share/vm/opto/opaquenode.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/opaquenode.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -87,5 +87,31 @@
   bool rtm_opt() const { return (_opt == RTM_OPT); }
 };
 
+//------------------------------ProfileBooleanNode-------------------------------
+// A node represents value profile for a boolean during parsing.
+// Once parsing is over, the node goes away (during IGVN).
+// It is used to override branch frequencies from MDO (see has_injected_profile in parse2.cpp).
+class ProfileBooleanNode : public Node {
+  uint _false_cnt;
+  uint _true_cnt;
+  bool _consumed;
+  bool _delay_removal;
+  virtual uint hash() const ;           // { return NO_HASH; }
+  virtual uint cmp( const Node &n ) const;
+ public:
+  ProfileBooleanNode(Node *n, uint false_cnt, uint true_cnt) : Node(0, n),
+          _false_cnt(false_cnt), _true_cnt(true_cnt), _delay_removal(true), _consumed(false) {}
+
+  uint false_count() const { return _false_cnt; }
+  uint  true_count() const { return  _true_cnt; }
+
+  void consume() { _consumed = true;  }
+
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node *Identity(PhaseTransform *phase);
+  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
+};
+
 #endif // SHARE_VM_OPTO_OPAQUENODE_HPP

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/parse.hpp
--- a/hotspot/src/share/vm/opto/parse.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/parse.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -555,8 +555,8 @@
   void    do_jsr();
   void    do_ret();
 
-  float   dynamic_branch_prediction(float &cnt);
-  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
+  float   dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
+  float   branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
   bool    seems_never_taken(float prob) const;
   bool    path_is_suitable_for_uncommon_trap(float prob) const;
   bool    seems_stable_comparison() const;
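
Aside, not part of the changeset: has_injected_profile() in parse2.cpp (reached through the parse.hpp signature change above) maps the stored false/true counts onto taken/not_taken according to the branch test: 'b == 0' is taken exactly when the boolean was false, and 'b != 0' inverts that. A standalone rendering of the mapping, assuming only eq/ne are injected as the real code enforces:

    enum class BoolMask { eq, ne };

    // Produce {taken, not_taken} for a branch comparing the profiled
    // boolean against zero with the given test.
    static void map_counts(BoolMask btest, int false_cnt, int true_cnt,
                           int& taken, int& not_taken) {
      taken     = (btest == BoolMask::eq) ? false_cnt : true_cnt;
      not_taken = (btest == BoolMask::eq) ? true_cnt  : false_cnt;
    }
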
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/opto/parse2.cpp
--- a/hotspot/src/share/vm/opto/parse2.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/opto/parse2.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -37,6 +37,7 @@
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
 #include "runtime/deoptimization.hpp"
@@ -763,35 +764,64 @@
   merge_common(target, pnum);
 }
 
+static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
+  if (btest != BoolTest::eq && btest != BoolTest::ne) {
+    // Only ::eq and ::ne are supported for profile injection.
+    return false;
+  }
+  if (test->is_Cmp() &&
+      test->in(1)->Opcode() == Op_ProfileBoolean) {
+    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
+    int false_cnt = profile->false_count();
+    int  true_cnt = profile->true_count();
+
+    // Counts matching depends on the actual test operation (::eq or ::ne).
+    // No need to scale the counts because profile injection was designed
+    // to feed exact counts into VM.
+    taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
+    not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;
+
+    profile->consume();
+    return true;
+  }
+  return false;
+}
+
 //--------------------------dynamic_branch_prediction--------------------------
 // Try to gather dynamic branch prediction behavior.  Return a probability
 // of the branch being taken and set the "cnt" field.  Returns a -1.0
 // if we need to use static prediction for some reason.
-float Parse::dynamic_branch_prediction(float &cnt) {
+float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
   ResourceMark rm;
 
   cnt  = COUNT_UNKNOWN;
 
-  // Use MethodData information if it is available
-  // FIXME: free the ProfileData structure
-  ciMethodData* methodData = method()->method_data();
-  if (!methodData->is_mature())  return PROB_UNKNOWN;
-  ciProfileData* data = methodData->bci_to_data(bci());
-  if (!data->is_JumpData())  return PROB_UNKNOWN;
+  int     taken = 0;
+  int not_taken = 0;
+
+  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
 
-  // get taken and not taken values
-  int     taken = data->as_JumpData()->taken();
-  int not_taken = 0;
-  if (data->is_BranchData()) {
-    not_taken = data->as_BranchData()->not_taken();
+  if (use_mdo) {
+    // Use MethodData information if it is available
+    // FIXME: free the ProfileData structure
+    ciMethodData* methodData = method()->method_data();
+    if (!methodData->is_mature())  return PROB_UNKNOWN;
+    ciProfileData* data = methodData->bci_to_data(bci());
+    if (!data->is_JumpData())  return PROB_UNKNOWN;
+
+    // get taken and not taken values
+    taken = data->as_JumpData()->taken();
+    not_taken = 0;
+    if (data->is_BranchData()) {
+      not_taken = data->as_BranchData()->not_taken();
+    }
+
+    // scale the counts to be commensurate with invocation counts:
+    taken = method()->scale_count(taken);
+    not_taken = method()->scale_count(not_taken);
   }
 
-  // scale the counts to be commensurate with invocation counts:
-  taken = method()->scale_count(taken);
-  not_taken = method()->scale_count(not_taken);
-
   // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
-  // We also check that individual counters are positive first, overwise the sum can become positive.
+  // We also check that individual counters are positive first, otherwise the sum can become positive.
   if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
     if (C->log() != NULL) {
       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
@@ -841,8 +871,9 @@
 //-----------------------------branch_prediction-------------------------------
 float Parse::branch_prediction(float& cnt,
                                BoolTest::mask btest,
-                               int target_bci) {
-  float prob = dynamic_branch_prediction(cnt);
+                               int target_bci,
+                               Node* test) {
+  float prob = dynamic_branch_prediction(cnt, btest, test);
   // If prob is unknown, switch to static prediction
   if (prob != PROB_UNKNOWN)  return prob;
 
@@ -932,7 +963,7 @@
   Block* next_block   = successor_for_bci(iter().next_bci());
 
   float cnt;
-  float prob = branch_prediction(cnt, btest, target_bci);
+  float prob = branch_prediction(cnt, btest, target_bci, c);
   if (prob == PROB_UNKNOWN) {
     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
 #ifndef PRODUCT
@@ -1013,7 +1044,7 @@
   Block* next_block   = successor_for_bci(iter().next_bci());
 
   float cnt;
-  float prob = branch_prediction(cnt, btest, target_bci);
+  float prob = branch_prediction(cnt, btest, target_bci, c);
   float untaken_prob = 1.0 - prob;
 
   if (prob == PROB_UNKNOWN) {
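
Aside, not part of the changeset: whichever source the counts come from, the remainder of dynamic_branch_prediction() (unchanged, so not in the hunk) turns them into a probability. Roughly, with the sanity bound the hunk does show and clamp values that are illustrative only:

    const float PROB_UNKNOWN = -1.0f;

    static float counts_to_probability(int taken, int not_taken) {
      // Give up when counters are negative (overflowed) or the sum is tiny.
      if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
        return PROB_UNKNOWN;
      }
      float prob = (float) taken / (float) (taken + not_taken);
      // Keep the result away from 0.0/1.0 so no branch looks impossible.
      if (prob < 0.01f) prob = 0.01f;    // illustrative clamp
      if (prob > 0.99f) prob = 0.99f;    // illustrative clamp
      return prob;
    }
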
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/runtime/arguments.cpp
--- a/hotspot/src/share/vm/runtime/arguments.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -1114,34 +1114,39 @@
 }
 #endif
 
+// Returns threshold scaled with the value of scale.
+// If scale < 0.0, threshold is returned without scaling.
 intx Arguments::scaled_compile_threshold(intx threshold, double scale) {
-  if (scale == 1.0 || scale <= 0.0) {
+  if (scale == 1.0 || scale < 0.0) {
     return threshold;
   } else {
     return (intx)(threshold * scale);
   }
 }
 
-// Returns freq_log scaled with CompileThresholdScaling
+// Returns freq_log scaled with the value of scale.
+// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
+// If scale < 0.0, freq_log is returned without scaling.
 intx Arguments::scaled_freq_log(intx freq_log, double scale) {
-  // Check if scaling is necessary or negative value was specified.
+  // Check if scaling is necessary or if negative value was specified.
   if (scale == 1.0 || scale < 0.0) {
     return freq_log;
  }
-
-  // Check value to avoid calculating log2 of 0.
-  if (scale == 0.0) {
-    return freq_log;
+  // Check values to avoid calculating log2 of 0.
+  if (scale == 0.0 || freq_log == 0) {
+    return 0;
   }
-
-  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
   // Determine the maximum notification frequency value currently supported.
   // The largest mask value that the interpreter/C1 can handle is
   // of length InvocationCounter::number_of_count_bits. Mask values are always
   // one bit shorter then the value of the notification frequency. Set
   // max_freq_bits accordingly.
   intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
-  if (scaled_freq > nth_bit(max_freq_bits)) {
+  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
+  if (scaled_freq == 0) {
+    // Return 0 right away to avoid calculating log2 of 0.
+    return 0;
+  } else if (scaled_freq > nth_bit(max_freq_bits)) {
     return max_freq_bits;
   } else {
     return log2_intptr(scaled_freq);
@@ -1192,8 +1197,9 @@
     vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
   }
 
-  // Scale tiered compilation thresholds
-  if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
+  // Scale tiered compilation thresholds.
+  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
+  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
     FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
     FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
 
@@ -3912,7 +3918,8 @@
                   "Incompatible compilation policy selected", NULL);
     }
     // Scale CompileThreshold
-    if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
+    // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
+    if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
       FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
     }
   }
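
Aside, not part of the changeset: the scaled_freq_log() rework is easiest to see numerically. A notification frequency is 2^freq_log events; scaling multiplies that frequency and the result is converted back to a log2, clamped to the counter width. A worked sketch (the 29-bit counter width is assumed here purely for illustration):

    #include <cstdint>

    static int log2_i(int64_t x) {       // floor(log2(x)), x > 0
      int n = -1;
      while (x) { x >>= 1; n++; }
      return n;
    }

    int64_t scaled_freq_log(int64_t freq_log, double scale) {
      const int64_t max_freq_bits = 29 + 1;  // number_of_count_bits + 1 (assumed)
      if (scale == 1.0 || scale < 0.0)   return freq_log;  // no scaling requested
      if (scale == 0.0 || freq_log == 0) return 0;         // avoid log2(0)
      int64_t scaled = (int64_t)(((int64_t)1 << freq_log) * scale);
      if (scaled == 0)                            return 0;
      if (scaled > ((int64_t)1 << max_freq_bits)) return max_freq_bits;
      return log2_i(scaled);
    }
    // e.g. freq_log = 10 (every 1024 events), scale = 0.5 -> log2(512) = 9
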
diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/src/share/vm/runtime/globals.hpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/runtime/globals.hpp Sat Feb 14 00:03:48 2015 +0000
@@ -1505,7 +1505,7 @@
                                                                             \
   product(bool, ExplicitGCInvokesConcurrent, false,                         \
           "A System.gc() request invokes a concurrent collection; "        \
-          "(effective only when UseConcMarkSweepGC)")                      \
+          "(effective only when using concurrent collectors)")             \
                                                                             \
   product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false,        \
           "A System.gc() request invokes a concurrent collection and "     \
@@ -3535,7 +3535,7 @@
           "(both with and without tiered compilation): "                   \
           "values greater than 1.0 delay counter overflow, "               \
           "values between 0 and 1.0 rush counter overflow, "               \
-          "value of 1.0 leave compilation thresholds unchanged "           \
+          "value of 1.0 leaves compilation thresholds unchanged "          \
          "value of 0.0 is equivalent to -Xint. "                           \
          ""                                                                \
          "Flag can be set as per-method option. "                          \

diff -r 4d325459c405 -r fbd83ffd242b hotspot/src/share/vm/services/diagnosticCommand.cpp
--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp Sat Feb 14 00:03:48 2015 +0000
@@ -268,7 +268,7 @@
 
 void SystemGCDCmd::execute(DCmdSource source, TRAPS) {
   if (!DisableExplicitGC) {
-    Universe::heap()->collect(GCCause::_dcmd_gc_run);
+    Universe::heap()->collect(GCCause::_java_lang_system_gc);
   } else {
     output()->print_cr("Explicit GC is disabled, no GC has been performed.");
   }

diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/TEST.groups
--- a/hotspot/test/TEST.groups Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/test/TEST.groups Sat Feb 14 00:03:48 2015 +0000
@@ -235,7 +235,8 @@
   gc/metaspace/G1AddMetaspaceDependency.java \
   gc/metaspace/TestMetaspacePerfCounters.java \
   gc/startup_warnings/TestG1.java \
-  gc/whitebox/TestConcMarkCycleWB.java
+  gc/whitebox/TestConcMarkCycleWB.java \
+  gc/arguments/TestG1ConcRefinementThreads.java
 
 # All tests that explicitly set the serial GC
 #

diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java
--- a/hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/test/compiler/arguments/CheckCompileThresholdScaling.java Sat Feb 14 00:03:48 2015 +0000
@@ -26,7 +26,7 @@
 /*
  * @test CheckCompileThresholdScaling
  * @bug 8059604
- * @summary "Add CompileThresholdScalingPercentage flag to control when methods are first compiled (with +/-TieredCompilation)"
+ * @summary "Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)"
 * @library /testlibrary
 * @run main CheckCompileThresholdScaling
 */

diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/arraycopy/TestArrayCopyNoInit.java
--- a/hotspot/test/compiler/arraycopy/TestArrayCopyNoInit.java Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/test/compiler/arraycopy/TestArrayCopyNoInit.java Sat Feb 14 00:03:48 2015 +0000
@@ -29,8 +29,6 @@
  *
 */
 
-import java.lang.invoke.*;
-
 public class TestArrayCopyNoInit {
 
     static int[] m1(int[] src) {
@@ -134,7 +132,7 @@
         return dest;
     }
 
-    static public void main(String[] args) throws Throwable {
+    static public void main(String[] args) {
         boolean success = true;
         int[] src = new int[10];
         TestArrayCopyNoInit[] src2 = new TestArrayCopyNoInit[10];
diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/arraycopy/TestArrayCopyNoInitDeopt.java
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/arraycopy/TestArrayCopyNoInitDeopt.java Sat Feb 14 00:03:48 2015 +0000
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8072016
+ * @summary Infinite deoptimization/recompilation cycles in case of arraycopy with tightly coupled allocation
+ * @library /testlibrary /../../test/lib /compiler/whitebox
+ * @build TestArrayCopyNoInitDeopt
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
+ * @run main/othervm -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020
+ *                   TestArrayCopyNoInitDeopt
+ *
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Platform;
+import java.lang.reflect.*;
+
+public class TestArrayCopyNoInitDeopt {
+
+    public static int[] m1(Object src) {
+        if (src == null) return null;
+        int[] dest = new int[10];
+        try {
+            System.arraycopy(src, 0, dest, 0, 10);
+        } catch (ArrayStoreException npe) {
+        }
+        return dest;
+    }
+
+    static Object m2_src(Object src) {
+        return src;
+    }
+
+    public static int[] m2(Object src) {
+        if (src == null) return null;
+        src = m2_src(src);
+        int[] dest = new int[10];
+        try {
+            System.arraycopy(src, 0, dest, 0, 10);
+        } catch (ArrayStoreException npe) {
+        }
+        return dest;
+    }
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    static boolean deoptimize(Method method, Object src_obj) throws Exception {
+        for (int i = 0; i < 10; i++) {
+            method.invoke(null, src_obj);
+            if (!WHITE_BOX.isMethodCompiled(method)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    static public void main(String[] args) throws Exception {
+        if (Platform.isServer()) {
+            int[] src = new int[10];
+            Object src_obj = new Object();
+            Method method_m1 = TestArrayCopyNoInitDeopt.class.getMethod("m1", Object.class);
+            Method method_m2 = TestArrayCopyNoInitDeopt.class.getMethod("m2", Object.class);
+
+            // Warm up
+            for (int i = 0; i < 20000; i++) {
+                m1(src);
+            }
+
+            // And make sure m1 is compiled by C2
+            WHITE_BOX.enqueueMethodForCompilation(method_m1, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m1)) {
+                throw new RuntimeException("m1 not compiled");
+            }
+
+            // should deoptimize for type check
+            if (!deoptimize(method_m1, src_obj)) {
+                throw new RuntimeException("m1 not deoptimized");
+            }
+
+            WHITE_BOX.enqueueMethodForCompilation(method_m1, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m1)) {
+                throw new RuntimeException("m1 not recompiled");
+            }
+
+            if (deoptimize(method_m1, src_obj)) {
+                throw new RuntimeException("m1 deoptimized again");
+            }
+
+            // Same test as above but with speculative types
+
+            // Warm up & make sure we collect type profiling
+            for (int i = 0; i < 20000; i++) {
+                m2(src);
+            }
+
+            // And make sure m2 is compiled by C2
+            WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m2)) {
+                throw new RuntimeException("m2 not compiled");
+            }
+
+            // should deoptimize for speculative type check
+            if (!deoptimize(method_m2, src_obj)) {
+                throw new RuntimeException("m2 not deoptimized");
+            }
+
+            WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m2)) {
+                throw new RuntimeException("m2 not recompiled");
+            }
+
+            // should deoptimize for actual type check
+            if (!deoptimize(method_m2, src_obj)) {
+                throw new RuntimeException("m2 not deoptimized");
+            }
+
+            WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m2)) {
+                throw new RuntimeException("m2 not recompiled");
+            }
+
+            if (deoptimize(method_m2, src_obj)) {
+                throw new RuntimeException("m2 deoptimized again");
+            }
+        }
+    }
+}
compiled"); + } + + // should deoptimize for type check + if (!deoptimize(method_m1, src_obj)) { + throw new RuntimeException("m1 not deoptimized"); + } + + WHITE_BOX.enqueueMethodForCompilation(method_m1, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION); + + if (!WHITE_BOX.isMethodCompiled(method_m1)) { + throw new RuntimeException("m1 not recompiled"); + } + + if (deoptimize(method_m1, src_obj)) { + throw new RuntimeException("m1 deoptimized again"); + } + + // Same test as above but with speculative types + + // Warm up & make sure we collect type profiling + for (int i = 0; i < 20000; i++) { + m2(src); + } + + // And make sure m2 is compiled by C2 + WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION); + + if (!WHITE_BOX.isMethodCompiled(method_m2)) { + throw new RuntimeException("m2 not compiled"); + } + + // should deoptimize for speculative type check + if (!deoptimize(method_m2, src_obj)) { + throw new RuntimeException("m2 not deoptimized"); + } + + WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION); + + if (!WHITE_BOX.isMethodCompiled(method_m2)) { + throw new RuntimeException("m2 not recompiled"); + } + + // should deoptimize for actual type check + if (!deoptimize(method_m2, src_obj)) { + throw new RuntimeException("m2 not deoptimized"); + } + + WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION); + + if (!WHITE_BOX.isMethodCompiled(method_m2)) { + throw new RuntimeException("m2 not recompiled"); + } + + if (deoptimize(method_m2, src_obj)) { + throw new RuntimeException("m2 deoptimized again"); + } + } + } +} diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/c2/6857159/Test6857159.java --- a/hotspot/test/compiler/c2/6857159/Test6857159.java Fri Feb 13 20:33:25 2015 +0000 +++ b/hotspot/test/compiler/c2/6857159/Test6857159.java Sat Feb 14 00:03:48 2015 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/codecache/jmx/InitialAndMaxUsageTest.java
--- a/hotspot/test/compiler/codecache/jmx/InitialAndMaxUsageTest.java Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/test/compiler/codecache/jmx/InitialAndMaxUsageTest.java Sat Feb 14 00:03:48 2015 +0000
@@ -36,7 +36,7 @@
 * @run main/othervm -Xbootclasspath/a:. -XX:-UseCodeCacheFlushing
 *     -XX:-MethodFlushing -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
 *     -XX:+SegmentedCodeCache -XX:CompileCommand=compileonly,null::*
- *     InitialAndMaxUsageTest
+ *     -XX:-UseLargePages InitialAndMaxUsageTest
 * @summary testing of initial and max usage
 */
public class InitialAndMaxUsageTest {

diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/codecache/stress/OverloadCompileQueueTest.java
--- a/hotspot/test/compiler/codecache/stress/OverloadCompileQueueTest.java Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/test/compiler/codecache/stress/OverloadCompileQueueTest.java Sat Feb 14 00:03:48 2015 +0000
@@ -30,6 +30,7 @@
 /*
 * @test OverloadCompileQueueTest
 * @library /testlibrary /../../test/lib
+ * @ignore 8071905
 * @build OverloadCompileQueueTest
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 *                              sun.hotspot.WhiteBox$WhiteBoxPermission

diff -r 4d325459c405 -r fbd83ffd242b hotspot/test/compiler/whitebox/DeoptimizeFramesTest.java
--- a/hotspot/test/compiler/whitebox/DeoptimizeFramesTest.java Fri Feb 13 20:33:25 2015 +0000
+++ b/hotspot/test/compiler/whitebox/DeoptimizeFramesTest.java Sat Feb 14 00:03:48 2015 +0000
@@ -31,11 +31,11 @@
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xmixed
 *                   -XX:CompileCommand=compileonly,DeoptimizeFramesTest$TestCaseImpl::method
- *                   -XX:-DeoptimizeRandom DeoptimizeFramesTest true
+ *                   -XX:-DeoptimizeRandom -XX:-DeoptimizeALot DeoptimizeFramesTest true
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xmixed
 *                   -XX:CompileCommand=compileonly,DeoptimizeFramesTest$TestCaseImpl::method
- *                   -XX:-DeoptimizeRandom DeoptimizeFramesTest false
+ *                   -XX:-DeoptimizeRandom -XX:-DeoptimizeALot DeoptimizeFramesTest false
 * @summary testing of WB::deoptimizeFrames()
 */
import java.lang.reflect.Executable;