//

// factory methods in "int adr_idx"
1458 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, |
1458 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, |
1459 int adr_idx, |
1459 int adr_idx, |
1460 MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) { |
1460 MemNode::MemOrd mo, |
|
1461 LoadNode::ControlDependency control_dependency, |
|
1462 bool require_atomic_access, |
|
1463 bool unaligned, |
|
1464 bool mismatched) { |
1461 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); |
1465 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); |
1462 const TypePtr* adr_type = NULL; // debug-mode-only argument |
1466 const TypePtr* adr_type = NULL; // debug-mode-only argument |
1463 debug_only(adr_type = C->get_adr_type(adr_idx)); |
1467 debug_only(adr_type = C->get_adr_type(adr_idx)); |
1464 Node* mem = memory(adr_idx); |
1468 Node* mem = memory(adr_idx); |
1465 Node* ld; |
1469 Node* ld; |
1468 } else if (require_atomic_access && bt == T_DOUBLE) { |
1472 } else if (require_atomic_access && bt == T_DOUBLE) { |
1469 ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency); |
1473 ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency); |
1470 } else { |
1474 } else { |
1471 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency); |
1475 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency); |
1472 } |
1476 } |
|
1477 if (unaligned) { |
|
1478 ld->as_Load()->set_unaligned_access(); |
|
1479 } |
|
1480 if (mismatched) { |
|
1481 ld->as_Load()->set_mismatched_access(); |
|
1482 } |
1473 ld = _gvn.transform(ld); |
1483 ld = _gvn.transform(ld); |
1474 if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) { |
1484 if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) { |
1475 // Improve graph before escape analysis and boxing elimination. |
1485 // Improve graph before escape analysis and boxing elimination. |
1476 record_for_igvn(ld); |
1486 record_for_igvn(ld); |
1477 } |
1487 } |
1479 } |
1489 } |
1480 |
1490 |
1481 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, |
1491 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, |
1482 int adr_idx, |
1492 int adr_idx, |
1483 MemNode::MemOrd mo, |
1493 MemNode::MemOrd mo, |
1484 bool require_atomic_access) { |
1494 bool require_atomic_access, |
|
1495 bool unaligned, |
|
1496 bool mismatched) { |
1485 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); |
1497 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); |
1486 const TypePtr* adr_type = NULL; |
1498 const TypePtr* adr_type = NULL; |
1487 debug_only(adr_type = C->get_adr_type(adr_idx)); |
1499 debug_only(adr_type = C->get_adr_type(adr_idx)); |
1488 Node *mem = memory(adr_idx); |
1500 Node *mem = memory(adr_idx); |
1489 Node* st; |
1501 Node* st; |
1491 st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo); |
1503 st = StoreLNode::make_atomic(ctl, mem, adr, adr_type, val, mo); |
1492 } else if (require_atomic_access && bt == T_DOUBLE) { |
1504 } else if (require_atomic_access && bt == T_DOUBLE) { |
1493 st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo); |
1505 st = StoreDNode::make_atomic(ctl, mem, adr, adr_type, val, mo); |
1494 } else { |
1506 } else { |
1495 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); |
1507 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); |
|
1508 } |
|
1509 if (unaligned) { |
|
1510 st->as_Store()->set_unaligned_access(); |
|
1511 } |
|
1512 if (mismatched) { |
|
1513 st->as_Store()->set_mismatched_access(); |
1496 } |
1514 } |
1497 st = _gvn.transform(st); |
1515 st = _gvn.transform(st); |
1498 set_memory(st, adr_idx); |
1516 set_memory(st, adr_idx); |
1499 // Back-to-back stores can only remove intermediate store with DU info |
1517 // Back-to-back stores can only remove intermediate store with DU info |
1500 // so push on worklist for optimizer. |
1518 // so push on worklist for optimizer. |
// NOTE(review): the span below is recovered text of a side-by-side diff
// (each row duplicated with old/new line-number columns and a trailing '|').
// It is the tail of GraphKit::store_oop; the new revision adds a
// 'bool mismatched' parameter and threads it into store_to_memory.  The
// function's opening rows (name / ctl / obj parameters, old 1583-1584) and
// rows old 1596-1604 / new 1615-1623 are elided from this rendering, so the
// text is kept byte-identical rather than reconstructed.
1585 const TypePtr* adr_type, |
1603 const TypePtr* adr_type, |
1586 Node* val, |
1604 Node* val, |
1587 const TypeOopPtr* val_type, |
1605 const TypeOopPtr* val_type, |
1588 BasicType bt, |
1606 BasicType bt, |
1589 bool use_precise, |
1607 bool use_precise, |
1590 MemNode::MemOrd mo) { |
1608 MemNode::MemOrd mo, |
// New-revision row (no old-side partner): the added 'mismatched' parameter.
1609 bool mismatched) { |
1591 // Transformation of a value which could be NULL pointer (CastPP #NULL) |
1610 // Transformation of a value which could be NULL pointer (CastPP #NULL) |
1592 // could be delayed during Parse (for example, in adjust_map_after_if()). |
1611 // could be delayed during Parse (for example, in adjust_map_after_if()). |
1593 // Execute transformation here to avoid barrier generation in such case. |
1612 // Execute transformation here to avoid barrier generation in such case. |
1594 if (_gvn.type(val) == TypePtr::NULL_PTR) |
1613 if (_gvn.type(val) == TypePtr::NULL_PTR) |
1595 val = _gvn.makecon(TypePtr::NULL_PTR); |
1614 val = _gvn.makecon(TypePtr::NULL_PTR); |
// GC pre-barrier issued before the oop store; do_load=true makes the
// barrier load the old value itself (pre_val passed as NULL).
1605 pre_barrier(true /* do_load */, |
1624 pre_barrier(true /* do_load */, |
1606 control(), obj, adr, adr_idx, val, val_type, |
1625 control(), obj, adr, adr_idx, val, val_type, |
1607 NULL /* pre_val */, |
1626 NULL /* pre_val */, |
1608 bt); |
1627 bt); |
1609 |
1628 |
// The actual change of this revision: 'mismatched' is forwarded to the
// store_to_memory factory (old row 1610 vs new row 1629).
1610 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo); |
1629 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched); |
// GC post-barrier issued after the store, unchanged between revisions.
1611 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise); |
1630 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise); |
1612 return store; |
1631 return store; |
1613 } |
1632 } |
1614 |
1633 |
// NOTE(review): recovered side-by-side diff rows (each duplicated with
// old/new line numbers).  This is GraphKit's store-oop-to-unknown-address
// helper: it derives a static value type from the alias type when possible,
// defaults to TypeInstPtr::BOTTOM, and delegates to store_oop with
// use_precise=true.  The function-name row (old 1616 / new 1635) and the
// field-type computation rows (old 1628-1635 / new 1648-1655) are elided
// from this rendering, so the text is kept byte-identical.
1615 // Could be an array or object we don't know at compile time (unsafe ref.) |
1634 // Could be an array or object we don't know at compile time (unsafe ref.) |
1617 Node* obj, // containing obj |
1636 Node* obj, // containing obj |
1618 Node* adr, // actual adress to store val at |
1637 Node* adr, // actual adress to store val at |
1619 const TypePtr* adr_type, |
1638 const TypePtr* adr_type, |
1620 Node* val, |
1639 Node* val, |
1621 BasicType bt, |
1640 BasicType bt, |
1622 MemNode::MemOrd mo) { |
1641 MemNode::MemOrd mo, |
// New-revision row (no old-side partner): the added 'mismatched' parameter.
1642 bool mismatched) { |
1623 Compile::AliasType* at = C->alias_type(adr_type); |
1643 Compile::AliasType* at = C->alias_type(adr_type); |
1624 const TypeOopPtr* val_type = NULL; |
1644 const TypeOopPtr* val_type = NULL; |
1625 if (adr_type->isa_instptr()) { |
1645 if (adr_type->isa_instptr()) { |
1626 if (at->field() != NULL) { |
1646 if (at->field() != NULL) { |
1627 // known field. This code is a copy of the do_put_xxx logic. |
1647 // known field. This code is a copy of the do_put_xxx logic. |
// (Elided rows here handled the known-field and array-element cases; the
// visible row below takes the element type of an array pointer.)
1636 val_type = adr_type->is_aryptr()->elem()->make_oopptr(); |
1656 val_type = adr_type->is_aryptr()->elem()->make_oopptr(); |
1637 } |
1657 } |
// Fallback when no more precise oop type could be derived.
1638 if (val_type == NULL) { |
1658 if (val_type == NULL) { |
1639 val_type = TypeInstPtr::BOTTOM; |
1659 val_type = TypeInstPtr::BOTTOM; |
1640 } |
1660 } |
// Delegates to store_oop; the new revision forwards 'mismatched'
// (old row 1641 vs new row 1661).
1641 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo); |
1661 return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched); |
1642 } |
1662 } |
1643 |
1663 |
1644 |
1664 |
1645 //-------------------------array_element_address------------------------- |
1665 //-------------------------array_element_address------------------------- |
1646 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, |
1666 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt, |