# HG changeset patch
# User jrose
# Date 1274344462 25200
# Node ID 261ecc5bb65eb095cc02872c738fdb1b72eefe57
# Parent  8c0269fb855b7c88b0822c6c8f4651e3e71d637b
# Parent  535ef83faf5dd4b6bc72b04d9a8a8b5dd2f0d430
Merge

diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/os/linux/vm/os_linux.cpp
--- a/hotspot/src/os/linux/vm/os_linux.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Thu May 20 01:34:22 2010 -0700
@@ -2788,7 +2788,7 @@
   }
 
   // attach to the region
-  addr = (char*)shmat(shmid, NULL, 0);
+  addr = (char*)shmat(shmid, req_addr, 0);
   int err = errno;
 
   // Remove shmid. If shmat() is successful, the actual shared memory segment
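Context for the one-line os_linux.cpp fix above: shmat()'s second argument is the requested attach address. The old code passed NULL, which lets the kernel pick an address and silently ignores the spot the caller had reserved for the mapping; passing req_addr through honors it. A minimal standalone sketch of the call (not HotSpot code; names and error handling are illustrative):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main() {
  int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
  if (shmid == -1) { perror("shmget"); return 1; }

  void* req_addr = NULL;  // NULL = kernel chooses; a non-NULL, SHMLBA-aligned
                          // address asks for that specific spot, which is what
                          // the fixed code now does when the caller reserved one
  void* addr = shmat(shmid, req_addr, 0);
  shmctl(shmid, IPC_RMID, NULL);  // mark for removal; freed on last detach
  if (addr == (void*)-1) { perror("shmat"); return 1; }

  printf("attached at %p\n", addr);
  shmdt(addr);
  return 0;
}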
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/adlc/formssel.cpp
--- a/hotspot/src/share/vm/adlc/formssel.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp Thu May 20 01:34:22 2010 -0700
@@ -735,7 +735,7 @@
 
 // This instruction captures the machine-independent bottom_type
 // Expected use is for pointer vs oop determination for LoadP
-bool InstructForm::captures_bottom_type() const {
+bool InstructForm::captures_bottom_type(FormDict &globals) const {
   if( _matrule && _matrule->_rChild &&
        (!strcmp(_matrule->_rChild->_opType,"CastPP")  ||  // new result type
         !strcmp(_matrule->_rChild->_opType,"CastX2P") ||  // new result type
@@ -748,6 +748,8 @@
   else if ( is_ideal_load() == Form::idealP )                return true;
   else if ( is_ideal_store() != Form::none  )                return true;
 
+  if (needs_base_oop_edge(globals)) return true;
+
   return false;
 }
 
@@ -1061,7 +1063,7 @@
 
 
 // Base class for this instruction, MachNode except for calls
-const char *InstructForm::mach_base_class() const {
+const char *InstructForm::mach_base_class(FormDict &globals) const {
   if( is_ideal_call() == Form::JAVA_STATIC ) {
     return "MachCallStaticJavaNode";
   }
@@ -1092,7 +1094,7 @@
   else if (is_ideal_nop()) {
     return "MachNopNode";
   }
-  else if (captures_bottom_type()) {
+  else if (captures_bottom_type(globals)) {
     return "MachTypeNode";
   } else {
     return "MachNode";
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/adlc/formssel.hpp
--- a/hotspot/src/share/vm/adlc/formssel.hpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.hpp Thu May 20 01:34:22 2010 -0700
@@ -188,7 +188,7 @@
 
   // This instruction captures the machine-independent bottom_type
   // Expected use is for pointer vs oop determination for LoadP
-  virtual bool        captures_bottom_type() const;
+  virtual bool        captures_bottom_type(FormDict& globals) const;
 
   virtual const char *cost();      // Access ins_cost attribute
   virtual uint        num_opnds(); // Count of num_opnds for MachNode class
@@ -229,7 +229,7 @@
   const char *reduce_left(FormDict &globals)  const;
 
   // Base class for this instruction, MachNode except for calls
-  virtual const char *mach_base_class() const;
+  virtual const char *mach_base_class(FormDict &globals) const;
 
   // Check if this instruction can cisc-spill to 'alternate'
   bool                cisc_spills_to(ArchDesc &AD, InstructForm *alternate);
@@ -252,7 +252,7 @@
   bool                has_short_branch_form() { return _short_branch_form != NULL; }
   // Output short branch prototypes and method bodies
   void                declare_short_branch_methods(FILE *fp_cpp);
-  bool                define_short_branch_methods(FILE *fp_cpp);
+  bool                define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);
 
   uint                alignment() { return _alignment; }
   void                set_alignment(uint val) { _alignment = val; }
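These signature changes thread the global name dictionary through ADLC's queries so captures_bottom_type() can consult needs_base_oop_edge(globals): an instruction form matching ideal AddP now answers true, and mach_base_class() therefore selects MachTypeNode instead of MachNode for it. The payoff shows up in output_h.cpp/output_c.cpp below, where the generated node stores its bottom type once rather than recomputing it through the AddPNode::mach_bottom_type() hack this changeset deletes from addnode.cpp. A rough, hypothetical sketch of the emitted class before and after (the node name and bodies are illustrative, not actual ADLC output):

// Before: base class MachNode, bottom type recomputed on every query through
// the special AddP hook (removed by this changeset).
class addP_reg_regNode_before : public MachNode {
  virtual const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); }
};

// After: captures_bottom_type(globals) is true for AddP forms, so the node
// derives from MachTypeNode and _bottom_type is assigned once at creation,
// e.g. node->_bottom_type = _leaf->bottom_type(); in the generated code.
class addP_reg_regNode_after : public MachTypeNode {
  // no bottom_type() override needed; MachTypeNode returns the stored _bottom_type
};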
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/adlc/output_c.cpp
--- a/hotspot/src/share/vm/adlc/output_c.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/adlc/output_c.cpp Thu May 20 01:34:22 2010 -0700
@@ -1382,7 +1382,7 @@
               inst_num, unmatched_edge);
     }
     // If new instruction captures bottom type
-    if( root_form->captures_bottom_type() ) {
+    if( root_form->captures_bottom_type(globals) ) {
       // Get bottom type from instruction whose result we are replacing
       fprintf(fp, "        root->_bottom_type = inst%d->bottom_type();\n", inst_num);
     }
@@ -2963,7 +2963,7 @@
       used |= instr->define_cisc_version(*this, fp);
 
       // Output code to convert to the short branch version, if applicable
-      used |= instr->define_short_branch_methods(fp);
+      used |= instr->define_short_branch_methods(*this, fp);
     }
 
     // Construct the method called by cisc_version() to copy inputs and operands.
@@ -3708,7 +3708,7 @@
   }
 
   // Fill in the bottom_type where requested
-  if ( inst->captures_bottom_type() ) {
+  if ( inst->captures_bottom_type(_globalNames) ) {
     fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
   }
   if( inst->is_ideal_if() ) {
@@ -3762,7 +3762,7 @@
   // Create the MachNode object
   fprintf(fp_cpp, "  %sNode *node = new (C) %sNode();\n", name, name);
 
   // Fill in the bottom_type where requested
-  if ( this->captures_bottom_type() ) {
+  if ( this->captures_bottom_type(AD.globalNames()) ) {
     fprintf(fp_cpp, "  node->_bottom_type = bottom_type();\n");
   }
@@ -3798,7 +3798,7 @@
 
 //---------------------------define_short_branch_methods-----------------------
 // Build definitions for short branch methods
-bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
+bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
   if (has_short_branch_form()) {
     InstructForm *short_branch = short_branch_form();
     const char   *name         = short_branch->_ident;
@@ -3813,7 +3813,7 @@
       fprintf(fp_cpp, "  node->_fcnt = _fcnt;\n");
     }
 
     // Fill in the bottom_type where requested
-    if ( this->captures_bottom_type() ) {
+    if ( this->captures_bottom_type(AD.globalNames()) ) {
      fprintf(fp_cpp, "  node->_bottom_type = bottom_type();\n");
     }
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/adlc/output_h.cpp
--- a/hotspot/src/share/vm/adlc/output_h.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/adlc/output_h.cpp Thu May 20 01:34:22 2010 -0700
@@ -1493,7 +1493,7 @@
     // Build class definition for this instruction
     fprintf(fp,"\n");
     fprintf(fp,"class %sNode : public %s { \n",
-            instr->_ident, instr->mach_base_class() );
+            instr->_ident, instr->mach_base_class(_globalNames) );
     fprintf(fp,"private:\n");
     fprintf(fp,"  MachOper *_opnd_array[%d];\n", instr->num_opnds() );
     if ( instr->is_ideal_jump() ) {
@@ -1566,7 +1566,7 @@
     // Use MachNode::ideal_Opcode() for nodes based on MachNode class
     // if the ideal_Opcode == Op_Node.
     if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp,"  virtual int  ideal_Opcode() const { return Op_%s; }\n",
               instr->ideal_Opcode(_globalNames) );
     }
@@ -1631,7 +1631,7 @@
     // Use MachNode::oper_input_base() for nodes based on MachNode class
     // if the base == 1.
     if ( instr->oper_input_base(_globalNames) != 1 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp,"  virtual uint  oper_input_base() const { return %d; }\n",
               instr->oper_input_base(_globalNames));
     }
@@ -1906,11 +1906,6 @@
       fprintf(fp,"  const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
               offset, offset+1, offset+1);
     }
-    else if( instr->needs_base_oop_edge(_globalNames) ) {
-      // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
-      // legal base-pointer input.  Otherwise it is NOT an oop.
-      fprintf(fp,"  const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
-    }
     else if (instr->is_tls_instruction()) {
       // Special hack for tlsLoadP
       fprintf(fp,"  const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/c1/c1_GraphBuilder.cpp
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu May 20 01:34:22 2010 -0700
@@ -2978,7 +2978,11 @@
 
 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
-  if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
+  if (callee->is_synchronized()) {
+    // We don't currently support any synchronized intrinsics
+    return false;
+  }
+
   // callee seems like a good candidate
   // determine id
   bool preserves_state = false;
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/code/codeCache.cpp
--- a/hotspot/src/share/vm/code/codeCache.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/code/codeCache.cpp Thu May 20 01:34:22 2010 -0700
@@ -124,6 +124,23 @@
   return (nmethod*)cb;
 }
 
+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
 
 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not
@@ -414,7 +431,7 @@
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
+        saved->print_on(tty, " ### nmethod is reconnected\n");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
@@ -432,7 +449,8 @@
 }
 
 void CodeCache::remove_saved_code(nmethod* nm) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  // For conc swpr this will be called with CodeCache_lock taken by caller
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
@@ -463,7 +481,7 @@
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/code/codeCache.hpp
--- a/hotspot/src/share/vm/code/codeCache.hpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/code/codeCache.hpp Thu May 20 01:34:22 2010 -0700
@@ -102,6 +102,8 @@
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob *cb);
   static nmethod* alive_nmethod(CodeBlob *cb);
+  static nmethod* first_nmethod();
+  static nmethod* next_nmethod (CodeBlob* cb);
   static int nof_blobs() { return _number_of_blobs; }
 
   // GC support
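The two helpers above exist so the reworked sweeper (see sweeper.cpp later in this changeset) can drop the CodeCache_lock between entries: the iteration cursor only ever rests on nmethods, never on other code blob kinds, which may be freed by other threads while the lock is released. A standalone analog of the filtering cursor (a sketch with made-up types, not HotSpot code):

#include <cstddef>

struct Blob { Blob* next; bool is_nmethod; };

// Skip ahead so the cursor never rests on a non-nmethod blob.
Blob* first_nmethod(Blob* head) {
  while (head != nullptr && !head->is_nmethod) head = head->next;
  return head;
}

Blob* next_nmethod(Blob* b) {
  if (b == nullptr) return nullptr;
  b = b->next;
  while (b != nullptr && !b->is_nmethod) b = b->next;
  return b;
}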
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/code/nmethod.cpp
--- a/hotspot/src/share/vm/code/nmethod.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp Thu May 20 01:34:22 2010 -0700
@@ -1014,9 +1014,7 @@
 
 
 void nmethod::cleanup_inline_caches() {
-  assert(SafepointSynchronize::is_at_safepoint() &&
-        !CompiledIC_lock->is_locked() &&
-        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
@@ -1071,7 +1069,6 @@
 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
@@ -1127,7 +1124,7 @@
     _method = NULL;            // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
-  check_safepoint();
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (is_in_use()) {
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this
@@ -1220,17 +1217,6 @@
     assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
   }
 
-  // When the nmethod becomes zombie it is no longer alive so the
-  // dependencies must be flushed.  nmethods in the not_entrant
-  // state will be flushed later when the transition to zombie
-  // happens or they get unloaded.
-  if (state == zombie) {
-    assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-    flush_dependencies(NULL);
-  } else {
-    assert(state == not_entrant, "other cases may need to be handled differently");
-  }
-
   was_alive = is_in_use(); // Read state under lock
 
   // Change state
@@ -1241,6 +1227,17 @@
   }
   // leave critical region under Patching_lock
 
+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {
@@ -1310,21 +1307,13 @@
   return true;
 }
 
-
-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
 
   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);
 
   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
@@ -1373,7 +1362,7 @@
 // notifies instanceKlasses that are reachable
 
 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
   "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {
@@ -2266,7 +2255,6 @@
   tty->print(" for method " INTPTR_FORMAT , (address)method());
   tty->print(" { ");
   if (version())        tty->print("v%d ", version());
-  if (level())          tty->print("l%d ", level());
   if (is_in_use())      tty->print("in_use ");
   if (is_not_entrant()) tty->print("not_entrant ");
   if (is_zombie())      tty->print("zombie ");
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/code/nmethod.hpp
--- a/hotspot/src/share/vm/code/nmethod.hpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp Thu May 20 01:34:22 2010 -0700
@@ -82,7 +82,6 @@
 struct nmFlags {
   friend class VMStructs;
   unsigned int version:8;                 // version number (0 = first version)
-  unsigned int level:4;                   // optimization level
   unsigned int age:4;                     // age (in # of sweep steps)
 
   unsigned int state:2;                   // {alive, zombie, unloaded)
@@ -410,14 +409,13 @@
   void  flush_dependencies(BoolObjectClosure* is_alive);
   bool  has_flushed_dependencies()       { return flags.hasFlushedDependencies; }
   void  set_has_flushed_dependencies()   {
-    check_safepoint();
     assert(!has_flushed_dependencies(), "should only happen once");
     flags.hasFlushedDependencies = 1;
   }
 
   bool  is_marked_for_reclamation() const { return flags.markedForReclamation; }
-  void  mark_for_reclamation()            { check_safepoint(); flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()          { check_safepoint(); flags.markedForReclamation = 0; }
+  void  mark_for_reclamation()            { flags.markedForReclamation = 1; }
+  void  unmark_for_reclamation()          { flags.markedForReclamation = 0; }
 
   bool  has_unsafe_access() const         { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)     { flags.has_unsafe_access = z; }
@@ -428,9 +426,6 @@
   bool  is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
   void  set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
 
-  int   level() const                     { return flags.level; }
-  void  set_level(int newLevel)           { check_safepoint(); flags.level = newLevel; }
-
   int   comp_level() const                { return _comp_level; }
 
   int   version() const                   { return flags.version; }
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/compiler/compileBroker.cpp
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Thu May 20 01:34:22 2010 -0700
@@ -461,12 +461,25 @@
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());
 
   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }
   CompileTask* task = _first;
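The reworked CompileQueue::get() turns idle compiler threads into sweepers: during emergency flushing it waits with a timeout (NmethodSweepCheckInterval seconds) instead of blocking indefinitely, and on each timeout it releases the queue lock before calling NMethodSweeper::possibly_sweep(). A standalone analog of that wait-with-timeout-then-work pattern (a sketch; names are hypothetical, not HotSpot code):

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <queue>

std::mutex q_lock;
std::condition_variable q_cv;
std::queue<int> tasks;
bool emergency = true;  // stands in for "code cache needs flushing / compilation stopped"

void possibly_sweep() { /* stand-in for NMethodSweeper::possibly_sweep() */ }

int get_task() {
  std::unique_lock<std::mutex> l(q_lock);
  while (tasks.empty()) {
    if (emergency) {
      // Wake every few seconds (NmethodSweepCheckInterval in the patch) and,
      // on timeout, sweep with the queue lock released, as MutexUnlocker does.
      if (q_cv.wait_for(l, std::chrono::seconds(5)) == std::cv_status::timeout) {
        l.unlock();
        possibly_sweep();
        l.lock();
      }
    } else {
      q_cv.wait(l);  // normal operation: no timed wakeups
    }
  }
  int t = tasks.front();
  tasks.pop();
  return t;
}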
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/opto/addnode.cpp
--- a/hotspot/src/share/vm/opto/addnode.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/opto/addnode.cpp Thu May 20 01:34:22 2010 -0700
@@ -714,71 +714,6 @@
   return idx > Base;
 }
 
-//---------------------------mach_bottom_type----------------------------------
-// Utility function for use by ADLC.  Implements bottom_type for matched AddP.
-const Type *AddPNode::mach_bottom_type( const MachNode* n) {
-  Node* base = n->in(Base);
-  const Type *t = base->bottom_type();
-  if ( t == Type::TOP ) {
-    // an untyped pointer
-    return TypeRawPtr::BOTTOM;
-  }
-  const TypePtr* tp = t->isa_oopptr();
-  if ( tp == NULL )  return t;
-  if ( tp->_offset == TypePtr::OffsetBot )  return tp;
-
-  // We must carefully add up the various offsets...
-  intptr_t offset = 0;
-  const TypePtr* tptr = NULL;
-
-  uint numopnds = n->num_opnds();
-  uint index = n->oper_input_base();
-  for ( uint i = 1; i < numopnds; i++ ) {
-    MachOper *opnd = n->_opnds[i];
-    // Check for any interesting operand info.
-    // In particular, check for both memory and non-memory operands.
-    // %%%%% Clean this up: use xadd_offset
-    intptr_t con = opnd->constant();
-    if ( con == TypePtr::OffsetBot )  goto bottom_out;
-    offset += con;
-    con = opnd->constant_disp();
-    if ( con == TypePtr::OffsetBot )  goto bottom_out;
-    offset += con;
-    if( opnd->scale() != 0 )  goto bottom_out;
-
-    // Check each operand input edge.  Find the 1 allowed pointer
-    // edge.  Other edges must be index edges; track exact constant
-    // inputs and otherwise assume the worst.
-    for ( uint j = opnd->num_edges(); j > 0; j-- ) {
-      Node* edge = n->in(index++);
-      const Type* et = edge->bottom_type();
-      const TypeX* eti = et->isa_intptr_t();
-      if ( eti == NULL ) {
-        // there must be one pointer among the operands
-        guarantee(tptr == NULL, "must be only one pointer operand");
-        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
-          // 32-bits narrow oop can be the base of address expressions
-          tptr = et->make_ptr()->isa_oopptr();
-        } else {
-          // only regular oops are expected here
-          tptr = et->isa_oopptr();
-        }
-        guarantee(tptr != NULL, "non-int operand must be pointer");
-        if (tptr->higher_equal(tp->add_offset(tptr->offset())))
-          tp = tptr; // Set more precise type for bailout
-        continue;
-      }
-      if ( eti->_hi != eti->_lo )  goto bottom_out;
-      offset += eti->_lo;
-    }
-  }
-  guarantee(tptr != NULL, "must be exactly one pointer operand");
-  return tptr->add_offset(offset);
-
- bottom_out:
-  return tp->add_offset(TypePtr::OffsetBot);
-}
-
 //=============================================================================
 //------------------------------Identity---------------------------------------
 Node *OrINode::Identity( PhaseTransform *phase ) {
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/opto/addnode.hpp
--- a/hotspot/src/share/vm/opto/addnode.hpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/opto/addnode.hpp Thu May 20 01:34:22 2010 -0700
@@ -151,7 +151,6 @@
 
   // Do not match base-ptr edge
   virtual uint match_edge(uint idx) const;
-  static const Type *mach_bottom_type(const MachNode* n);  // used by ad_<arch>.hpp
 };
 
 //------------------------------OrINode----------------------------------------
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/opto/cfgnode.cpp
--- a/hotspot/src/share/vm/opto/cfgnode.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp Thu May 20 01:34:22 2010 -0700
@@ -1654,6 +1654,64 @@
     if (opt != NULL)  return opt;
   }
 
+  if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
+    // Try to undo Phi of AddP:
+    // (Phi (AddP base base y) (AddP base2 base2 y))
+    // becomes:
+    // newbase := (Phi base base2)
+    // (AddP newbase newbase y)
+    //
+    // This occurs as a result of unsuccessful split_thru_phi and
+    // interferes with taking advantage of addressing modes. See the
+    // clone_shift_expressions code in matcher.cpp
+    Node* addp = in(1);
+    const Type* type = addp->in(AddPNode::Base)->bottom_type();
+    Node* y = addp->in(AddPNode::Offset);
+    if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
+      // make sure that all the inputs are similar to the first one,
+      // i.e. AddP with base == address and same offset as first AddP
+      bool doit = true;
+      for (uint i = 2; i < req(); i++) {
+        if (in(i) == NULL ||
+            in(i)->Opcode() != Op_AddP ||
+            in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
+            in(i)->in(AddPNode::Offset) != y) {
+          doit = false;
+          break;
+        }
+        // Accumulate type for resulting Phi
+        type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+      }
+      Node* base = NULL;
+      if (doit) {
+        // Check for neighboring AddP nodes in a tree.
+        // If they have a base, use that it.
+        for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
+          Node* u = this->fast_out(k);
+          if (u->is_AddP()) {
+            Node* base2 = u->in(AddPNode::Base);
+            if (base2 != NULL && !base2->is_top()) {
+              if (base == NULL)
+                base = base2;
+              else if (base != base2)
+                { doit = false; break; }
+            }
+          }
+        }
+      }
+      if (doit) {
+        if (base == NULL) {
+          base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
+          for (uint i = 1; i < req(); i++) {
+            base->init_req(i, in(i)->in(AddPNode::Base));
+          }
+          phase->is_IterGVN()->register_new_node_with_optimizer(base);
+        }
+        return new (phase->C, 4) AddPNode(base, base, y);
+      }
+    }
+  }
+
   // Split phis through memory merges, so that the memory merges will go away.
   // Piggy-back this transformation on the search for a unique input....
   // It will be as if the merged memory is the unique value of the phi.
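The IR shape targeted here is spelled out in the transform's own comment; in scalar terms the reshape is the reassociation below, which lets a single addressing mode absorb the common offset instead of materializing one address per Phi input. An illustrative compilable analog (not compiler code):

// Before the reshape, each predecessor computes its own base+off address and
// the Phi merges the finished pointers; after it, only the bases are merged.
//   before: p = cond ? (base1 + off) : (base2 + off);
//   after:  b = cond ? base1 : base2;  p = b + off;
char* pick(bool cond, char* base1, char* base2, long off) {
  char* b = cond ? base1 : base2;  // the new Phi of bases
  return b + off;                  // a single AddP, friendly to addressing modes
}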
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/opto/escape.cpp
--- a/hotspot/src/share/vm/opto/escape.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/opto/escape.cpp Thu May 20 01:34:22 2010 -0700
@@ -1989,20 +1989,15 @@
     case Op_Allocate:
     {
       Node *k = call->in(AllocateNode::KlassNode);
-      const TypeKlassPtr *kt;
-      if (k->Opcode() == Op_LoadKlass) {
-        kt = k->as_Load()->type()->isa_klassptr();
-      } else {
-        // Also works for DecodeN(LoadNKlass).
-        kt = k->as_Type()->type()->isa_klassptr();
-      }
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
       assert(kt != NULL, "TypeKlassPtr required.");
       ciKlass* cik = kt->klass();
-      ciInstanceKlass* ciik = cik->as_instance_klass();
 
       PointsToNode::EscapeState es;
       uint edge_to;
-      if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
+      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+          !cik->is_instance_klass() || // StressReflectiveCode
+          cik->as_instance_klass()->has_finalizer()) {
         es = PointsToNode::GlobalEscape;
         edge_to = _phantom_object; // Could not be worse
       } else {
@@ -2017,13 +2012,28 @@
 
     case Op_AllocateArray:
     {
-      int length = call->in(AllocateNode::ALength)->find_int_con(-1);
-      if (length < 0 || length > EliminateAllocationArraySizeLimit) {
-        // Not scalar replaceable if the length is not constant or too big.
-        ptnode_adr(call_idx)->_scalar_replaceable = false;
+
+      Node *k = call->in(AllocateNode::KlassNode);
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
+      assert(kt != NULL, "TypeKlassPtr required.");
+      ciKlass* cik = kt->klass();
+
+      PointsToNode::EscapeState es;
+      uint edge_to;
+      if (!cik->is_array_klass()) { // StressReflectiveCode
+        es = PointsToNode::GlobalEscape;
+        edge_to = _phantom_object;
+      } else {
+        es = PointsToNode::NoEscape;
+        edge_to = call_idx;
+        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
+        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
+          // Not scalar replaceable if the length is not constant or too big.
+          ptnode_adr(call_idx)->_scalar_replaceable = false;
+        }
       }
-      set_escape_state(call_idx, PointsToNode::NoEscape);
-      add_pointsto_edge(resproj_idx, call_idx);
+      set_escape_state(call_idx, es);
+      add_pointsto_edge(resproj_idx, edge_to);
       _processed.set(resproj_idx);
       break;
     }
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/runtime/globals.hpp
--- a/hotspot/src/share/vm/runtime/globals.hpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Thu May 20 01:34:22 2010 -0700
@@ -2764,6 +2764,9 @@
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")        \
                                                                             \
+  product(intx, NmethodSweepCheckInterval, 5,                               \
+          "Compilers wake up every n seconds to possibly sweep nmethods")   \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")               \
                                                                             \
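The new flag cooperates with UseCodeCacheFlushing, which the CompileQueue::get() change above consults: as a hypothetical example, starting the VM with -XX:+UseCodeCacheFlushing -XX:NmethodSweepCheckInterval=10 would have idle compiler threads check for sweep work every 10 seconds instead of the default 5.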
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/runtime/safepoint.cpp
--- a/hotspot/src/share/vm/runtime/safepoint.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp Thu May 20 01:34:22 2010 -0700
@@ -472,7 +472,7 @@
   }
 
   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/runtime/sweeper.cpp
--- a/hotspot/src/share/vm/runtime/sweeper.cpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp Thu May 20 01:34:22 2010 -0700
@@ -33,6 +33,8 @@
 jint      NMethodSweeper::_locked_seen = 0;
 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
 bool      NMethodSweeper::_rescan = false;
+bool      NMethodSweeper::_do_sweep = false;
+jint      NMethodSweeper::_sweep_started = 0;
 bool      NMethodSweeper::_was_full = false;
 jint      NMethodSweeper::_advise_to_sweep = 0;
 jlong     NMethodSweeper::_last_was_full = 0;
@@ -50,14 +52,20 @@
 };
 static MarkActivationClosure mark_activation_closure;
 
-void NMethodSweeper::sweep() {
+void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
+  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
   // safepoint.  If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return.
-  if (_current == NULL && !_rescan) return;
+  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
+  // code cache flushing is in progress, don't skip sweeping to help make progress
+  // clearing space in the code cache.
+  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
+    _do_sweep = false;
+    return;
+  }
 
   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
@@ -68,7 +76,7 @@
   if (_current == NULL) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
-    _current     = CodeCache::first();
+    _current     = CodeCache::first_nmethod();
     _traversals += 1;
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
@@ -81,48 +89,9 @@
     _not_entrant_seen_on_stack = 0;
   }
 
-  if (PrintMethodFlushing && Verbose) {
-    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
-  }
-
-  // We want to visit all nmethods after NmethodSweepFraction invocations.
-  // If invocation is 1 we do the rest
-  int todo = CodeCache::nof_blobs();
-  if (_invocations != 1) {
-    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
-    _invocations--;
-  }
-
-  for(int i = 0; i < todo && _current != NULL; i++) {
-    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
-    if (_current->is_nmethod()) {
-      process_nmethod((nmethod *)_current);
-    }
-    _seen++;
-    _current = next;
-  }
-  // Because we could stop on a codeBlob other than an nmethod we skip forward
-  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
-  // async to us and make _current invalid while we sleep.
-  while (_current != NULL && !_current->is_nmethod()) {
-    _current = CodeCache::next(_current);
-  }
-
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
-    // we've completed a scan without making progress but there were
-    // nmethods we were unable to process either because they were
-    // locked or were still on stack. We don't have to aggresively
-    // clean them up so just stop scanning.  We could scan once more
-    // but that complicates the control logic and it's unlikely to
-    // matter much.
-    if (PrintMethodFlushing) {
-      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
-    }
-  }
-
   if (UseCodeCacheFlushing) {
     if (!CodeCache::needs_flushing()) {
-      // In a safepoint, no race with setters
+      // scan_stacks() runs during a safepoint, no race with setters
      _advise_to_sweep = 0;
     }
@@ -155,13 +124,99 @@
   }
 }
 
+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if(PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If invocation is 1 we do the rest
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compilers may check to sweep more often than stack scans happen,
+  // don't keep trying once it is all scanned
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }
+
+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack. We don't have to aggresively
+    // clean them up so just stop scanning.  We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if(PrintMethodFlushing) {
+    jlong sweep_end = os::javaTimeMillis();
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}
+
 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
       // Clean-up all inline caches that points to zombie/non-reentrant methods
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;
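The guard at the top of possibly_sweep() above is a compare-and-swap latch: Atomic::cmpxchg(1, &_sweep_started, 0) returns the previous value, so only the compiler thread that flips 0 to 1 enters sweep_code_cache(); every other thread returns without blocking. A standalone analog in portable C++ (a sketch with hypothetical names, not HotSpot code):

#include <atomic>

std::atomic<int> sweep_started{0};

void sweep_code_cache_stub() { /* the partial sweep would run here */ }

void possibly_sweep_analog() {
  // Only the thread that flips 0 -> 1 wins and sweeps; the compare-exchange
  // fails for everyone else, who return immediately instead of waiting.
  int expected = 0;
  if (!sweep_started.compare_exchange_strong(expected, 1)) return;
  sweep_code_cache_stub();
  sweep_started.store(0);  // reopen the latch for the next sweep period
}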
@@ -178,6 +233,7 @@
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       if (PrintMethodFlushing && Verbose) {
@@ -197,10 +253,11 @@
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       // we coudn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {
@@ -209,6 +266,7 @@
     if (PrintMethodFlushing && Verbose)
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
      // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();
@@ -227,6 +285,7 @@
     }
 
     // Clean-up all inline caches that points to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }
@@ -235,8 +294,8 @@
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.
@@ -364,8 +423,8 @@
       xtty->end_elem();
     }
 
-    // Shut off compiler. Sweeper will run exiting from this safepoint
-    // and turn it back on if it clears enough space
+    // Shut off compiler. Sweeper will start over with a new stack scan and
+    // traversal cycle and turn it back on if it clears enough space.
     if (was_full()) {
       _last_was_full = os::javaTimeMillis();
       CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
diff -r 8c0269fb855b -r 261ecc5bb65e hotspot/src/share/vm/runtime/sweeper.hpp
--- a/hotspot/src/share/vm/runtime/sweeper.hpp Mon May 17 07:11:27 2010 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp Thu May 20 01:34:22 2010 -0700
@@ -35,6 +35,8 @@
   static bool      _rescan;          // Indicates that we should do a full rescan of the
                                      // of the code cache looking for work to do.
+  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
+  static jint      _sweep_started;   // Flag to control conc sweeper
   static int       _locked_seen;     // Number of locked nmethods encountered during the scan
   static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
@@ -48,7 +50,9 @@
 
  public:
   static long traversal_count() { return _traversals; }
 
-  static void sweep();  // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep
 
   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No