--- a/hotspot/src/share/vm/opto/matcher.cpp Thu Jul 03 11:01:32 2008 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp Fri Jul 11 01:14:44 2008 -0700
@@ -51,6 +51,7 @@
PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
_old2new_map(C->comp_arena()),
+ _new2old_map(C->comp_arena()),
#endif
_shared_nodes(C->comp_arena()),
_reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
@@ -82,6 +83,7 @@
idealreg2debugmask[Op_RegF] = NULL;
idealreg2debugmask[Op_RegD] = NULL;
idealreg2debugmask[Op_RegP] = NULL;
+ debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node
}
//------------------------------warp_incoming_stk_arg------------------------
@@ -834,10 +836,16 @@
if( n->is_Proj() && n->in(0)->is_Multi()) { // Projections?
// Convert to machine-dependent projection
m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
+#ifdef ASSERT
+ _new2old_map.map(m->_idx, n);
+#endif
if (m->in(0) != NULL) // m might be top
collect_null_checks(m);
} else { // Else just a regular 'ol guy
m = n->clone(); // So just clone into new-space
+#ifdef ASSERT
+ _new2old_map.map(m->_idx, n);
+#endif
// Def-Use edges will be added incrementally as Uses
// of this node are matched.
assert(m->outcnt() == 0, "no Uses of this clone yet");
@@ -886,6 +894,9 @@
// || op == Op_BoxLock // %%%% enable this and remove (+++) in chaitin.cpp
) {
m = m->clone();
+#ifdef ASSERT
+ _new2old_map.map(m->_idx, n);
+#endif
mstack.push(m, Post_Visit, n, i); // Don't need to visit
mstack.push(m->in(0), Visit, m, 0);
} else {
@@ -1153,7 +1164,10 @@
// StoreNodes require their Memory input to match any LoadNodes
Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
-
+#ifdef ASSERT
+ Node* save_mem_node = _mem_node;
+ _mem_node = n->is_Store() ? (Node*)n : NULL;
+#endif
// State object for root node of match tree
// Allocate it on _states_arena - stack allocation can cause stack overflow.
State *s = new (&_states_arena) State;
@@ -1186,6 +1200,7 @@
MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
#ifdef ASSERT
_old2new_map.map(n->_idx, m);
+ _new2old_map.map(m->_idx, (Node*)n);
#endif
// Add any Matcher-ignored edges
@@ -1205,6 +1220,7 @@
}
}
+ debug_only( _mem_node = save_mem_node; )
return m;
}
@@ -1445,8 +1461,30 @@
}
// If a Memory was used, insert a Memory edge
- if( mem != (Node*)1 )
+ if( mem != (Node*)1 ) {
mach->ins_req(MemNode::Memory,mem);
+#ifdef ASSERT
+ // Verify adr type after matching memory operation
+ const MachOper* oper = mach->memory_operand();
+ if (oper != NULL && oper != (MachOper*)-1 &&
+ mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode
+ // It has a unique memory operand. Find corresponding ideal mem node.
+ Node* m = NULL;
+ if (leaf->is_Mem()) {
+ m = leaf;
+ } else {
+ m = _mem_node;
+ assert(m != NULL && m->is_Mem(), "expecting memory node");
+ }
+ if (m->adr_type() != mach->adr_type()) {
+ m->dump();
+ tty->print_cr("mach:");
+ mach->dump(1);
+ }
+ assert(m->adr_type() == mach->adr_type(), "matcher should not change adr type");
+ }
+#endif
+ }
// If the _leaf is an AddP, insert the base edge
if( leaf->is_AddP() )
@@ -1464,6 +1502,9 @@
for( uint i=0; i<mach->req(); i++ ) {
mach->set_req(i,NULL);
}
+#ifdef ASSERT
+ _new2old_map.map(ex->_idx, s->_leaf);
+#endif
}
// PhaseChaitin::fixup_spills will sometimes generate spill code
@@ -1510,7 +1551,9 @@
assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
Node *mem1 = (Node*)1;
+ debug_only(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst(s, newrule, mem1) );
+ debug_only(_mem_node = save_mem_node;)
}
return;
}
@@ -1520,6 +1563,7 @@
if( s->_leaf->is_Load() ) {
Node *mem2 = s->_leaf->in(MemNode::Memory);
assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
+ debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
mem = mem2;
}
if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
@@ -1563,7 +1607,9 @@
// --> ReduceInst( newrule )
mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
Node *mem1 = (Node*)1;
+ debug_only(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
+ debug_only(_mem_node = save_mem_node;)
}
}
assert( mach->_opnds[num_opnds-1], "" );
@@ -1594,6 +1640,7 @@
if( s->_leaf->is_Load() ) {
assert( mem == (Node*)1, "multiple Memories being matched at once?" );
mem = s->_leaf->in(MemNode::Memory);
+ debug_only(_mem_node = s->_leaf;)
}
if( s->_leaf->in(0) && s->_leaf->req() > 1) {
if( !mach->in(0) )
@@ -1618,7 +1665,9 @@
// Reduce the instruction, and add a direct pointer from this
// machine instruction to the newly reduced one.
Node *mem1 = (Node*)1;
+ debug_only(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst( kid, newrule, mem1 ) );
+ debug_only(_mem_node = save_mem_node;)
}
}
}
@@ -1731,8 +1780,8 @@
}
case Op_ConN: { // Convert narrow pointers above the centerline to NUL
TypeNode *tn = n->as_Type(); // Constants derive from type nodes
- const TypePtr* tp = tn->type()->is_narrowoop()->make_oopptr();
- if (tp->_ptr == TypePtr::AnyNull) {
+ const TypePtr* tp = tn->type()->make_ptr();
+ if (tp && tp->_ptr == TypePtr::AnyNull) {
tn->set_type(TypeNarrowOop::NULL_PTR);
}
break;