  }
}

bool LoadBarrierNode::has_true_uses() const {
  Node* out_res = proj_out_or_null(Oop);
  if (out_res == NULL) {
    return false;
  }

  for (DUIterator_Fast imax, i = out_res->fast_outs(imax); i < imax; i++) {
    Node* u = out_res->fast_out(i);
    if (!u->is_LoadBarrier() || u->in(Similar) != out_res) {
      return true;
    }
  }

  return false;
}

// == Accesses ==

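// The two helpers below wrap oop CAS / CMPX accesses with a retry path.
// Rough summary of the graph built here: if the first attempt fails, the
// field is reloaded and healed through a LoadBarrierNode, the healed value is
// compared against the expected value, and only if they still match is the
// operation retried on the inner path; otherwise the failure is reported.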
498 Node* ZBarrierSetC2::make_cas_loadbarrier(C2AtomicParseAccess& access) const { |
|
499 assert(!UseCompressedOops, "Not allowed"); |
|
500 CompareAndSwapNode* cas = (CompareAndSwapNode*)access.raw_access(); |
|
501 PhaseGVN& gvn = access.gvn(); |
|
502 Compile* C = Compile::current(); |
|
503 GraphKit* kit = access.kit(); |
|
504 |
|
505 Node* in_ctrl = cas->in(MemNode::Control); |
|
506 Node* in_mem = cas->in(MemNode::Memory); |
|
507 Node* in_adr = cas->in(MemNode::Address); |
|
508 Node* in_val = cas->in(MemNode::ValueIn); |
|
509 Node* in_expected = cas->in(LoadStoreConditionalNode::ExpectedIn); |
|
510 |
|
511 float likely = PROB_LIKELY(0.999); |
|
512 |
|
513 const TypePtr *adr_type = gvn.type(in_adr)->isa_ptr(); |
|
514 Compile::AliasType* alias_type = C->alias_type(adr_type); |
|
515 int alias_idx = C->get_alias_index(adr_type); |
|
516 |
|
517 // Outer check - true: continue, false: load and check |
|
518 Node* region = new RegionNode(3); |
|
519 Node* phi = new PhiNode(region, TypeInt::BOOL); |
|
520 Node* phi_mem = new PhiNode(region, Type::MEMORY, adr_type); |
|
521 |
|
522 // Inner check - is the healed ref equal to the expected |
|
523 Node* region2 = new RegionNode(3); |
|
524 Node* phi2 = new PhiNode(region2, TypeInt::BOOL); |
|
525 Node* phi_mem2 = new PhiNode(region2, Type::MEMORY, adr_type); |
|
526 |
|
527 // CAS node returns 0 or 1 |
|
528 Node* cmp = gvn.transform(new CmpINode(cas, kit->intcon(0))); |
|
529 Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool(); |
|
530 IfNode* iff = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If(); |
|
531 Node* then = gvn.transform(new IfTrueNode(iff)); |
|
532 Node* elsen = gvn.transform(new IfFalseNode(iff)); |
|
533 |
|
534 Node* scmemproj1 = gvn.transform(new SCMemProjNode(cas)); |
|
535 |
|
536 kit->set_memory(scmemproj1, alias_idx); |
|
537 phi_mem->init_req(1, scmemproj1); |
|
538 phi_mem2->init_req(2, scmemproj1); |
|
539 |
|
540 // CAS fail - reload and heal oop |
|
541 Node* reload = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered); |
|
542 Node* barrier = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false)); |
|
543 Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)); |
|
544 Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop)); |
|
545 |
|
546 // Check load |
|
547 Node* tmpX = gvn.transform(new CastP2XNode(NULL, barrierdata)); |
|
548 Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected)); |
|
549 Node* cmp2 = gvn.transform(new CmpXNode(tmpX, in_expX)); |
|
550 Node *bol2 = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool(); |
|
551 IfNode* iff2 = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If(); |
|
552 Node* then2 = gvn.transform(new IfTrueNode(iff2)); |
|
553 Node* elsen2 = gvn.transform(new IfFalseNode(iff2)); |
|
554 |
|
555 // redo CAS |
|
556 Node* cas2 = gvn.transform(new CompareAndSwapPNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, cas->order())); |
|
557 Node* scmemproj2 = gvn.transform(new SCMemProjNode(cas2)); |
|
558 kit->set_control(elsen2); |
|
559 kit->set_memory(scmemproj2, alias_idx); |
|
560 |
|
561 // Merge inner flow - check if the healed oop was equal to the expected value. |
|
562 region2->set_req(1, kit->control()); |
|
563 region2->set_req(2, then2); |
|
564 phi2->set_req(1, cas2); |
|
565 phi2->set_req(2, kit->intcon(0)); |
|
566 phi_mem2->init_req(1, scmemproj2); |
|
567 kit->set_memory(phi_mem2, alias_idx); |
|
568 |
|
569 // Merge outer flow - then check if first CAS succeeded |
|
570 region->set_req(1, then); |
|
571 region->set_req(2, region2); |
|
572 phi->set_req(1, kit->intcon(1)); |
|
573 phi->set_req(2, phi2); |
|
574 phi_mem->init_req(2, phi_mem2); |
|
575 kit->set_memory(phi_mem, alias_idx); |
|
576 |
|
577 gvn.transform(region2); |
|
578 gvn.transform(phi2); |
|
579 gvn.transform(phi_mem2); |
|
580 gvn.transform(region); |
|
581 gvn.transform(phi); |
|
582 gvn.transform(phi_mem); |
|
583 |
|
584 kit->set_control(region); |
|
585 kit->insert_mem_bar(Op_MemBarCPUOrder); |
|
586 |
|
587 return phi; |
|
588 } |
|
589 |
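// make_cmpx_loadbarrier below follows the same two-level region/phi structure
// as make_cas_loadbarrier above, but merges oop values rather than a boolean:
// the resulting phi carries either the original CompareAndExchangeP result or
// the healed value loaded on the failure path (descriptive summary only).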
|
590 Node* ZBarrierSetC2::make_cmpx_loadbarrier(C2AtomicParseAccess& access) const { |
|
591 CompareAndExchangePNode* cmpx = (CompareAndExchangePNode*)access.raw_access(); |
|
592 GraphKit* kit = access.kit(); |
|
593 PhaseGVN& gvn = kit->gvn(); |
|
594 Compile* C = Compile::current(); |
|
595 |
|
596 Node* in_ctrl = cmpx->in(MemNode::Control); |
|
597 Node* in_mem = cmpx->in(MemNode::Memory); |
|
598 Node* in_adr = cmpx->in(MemNode::Address); |
|
599 Node* in_val = cmpx->in(MemNode::ValueIn); |
|
600 Node* in_expected = cmpx->in(LoadStoreConditionalNode::ExpectedIn); |
|
601 |
|
602 float likely = PROB_LIKELY(0.999); |
|
603 |
|
604 const TypePtr *adr_type = cmpx->get_ptr_type(); |
|
605 Compile::AliasType* alias_type = C->alias_type(adr_type); |
|
606 int alias_idx = C->get_alias_index(adr_type); |
|
607 |
|
608 // Outer check - true: continue, false: load and check |
|
609 Node* region = new RegionNode(3); |
|
610 Node* phi = new PhiNode(region, adr_type); |
|
611 |
|
612 // Inner check - is the healed ref equal to the expected |
|
613 Node* region2 = new RegionNode(3); |
|
614 Node* phi2 = new PhiNode(region2, adr_type); |
|
615 |
|
616 // Check if cmpx succeeded |
|
617 Node* cmp = gvn.transform(new CmpPNode(cmpx, in_expected)); |
|
618 Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool(); |
|
619 IfNode* iff = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If(); |
|
620 Node* then = gvn.transform(new IfTrueNode(iff)); |
|
621 Node* elsen = gvn.transform(new IfFalseNode(iff)); |
|
622 |
|
623 Node* scmemproj1 = gvn.transform(new SCMemProjNode(cmpx)); |
|
624 kit->set_memory(scmemproj1, alias_idx); |
|
625 |
|
626 // CAS fail - reload and heal oop |
|
627 Node* reload = kit->make_load(elsen, in_adr, TypeOopPtr::BOTTOM, T_OBJECT, MemNode::unordered); |
|
628 Node* barrier = gvn.transform(new LoadBarrierNode(C, elsen, scmemproj1, reload, in_adr, false, true, false)); |
|
629 Node* barrierctrl = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control)); |
|
630 Node* barrierdata = gvn.transform(new ProjNode(barrier, LoadBarrierNode::Oop)); |
|
631 |
|
632 // Check load |
|
633 Node* tmpX = gvn.transform(new CastP2XNode(NULL, barrierdata)); |
|
634 Node* in_expX = gvn.transform(new CastP2XNode(NULL, in_expected)); |
|
635 Node* cmp2 = gvn.transform(new CmpXNode(tmpX, in_expX)); |
|
636 Node *bol2 = gvn.transform(new BoolNode(cmp2, BoolTest::ne))->as_Bool(); |
|
637 IfNode* iff2 = gvn.transform(new IfNode(barrierctrl, bol2, likely, COUNT_UNKNOWN))->as_If(); |
|
638 Node* then2 = gvn.transform(new IfTrueNode(iff2)); |
|
639 Node* elsen2 = gvn.transform(new IfFalseNode(iff2)); |
|
640 |
|
641 // Redo CAS |
|
642 Node* cmpx2 = gvn.transform(new CompareAndExchangePNode(elsen2, kit->memory(alias_idx), in_adr, in_val, in_expected, adr_type, cmpx->get_ptr_type(), cmpx->order())); |
|
643 Node* scmemproj2 = gvn.transform(new SCMemProjNode(cmpx2)); |
|
644 kit->set_control(elsen2); |
|
645 kit->set_memory(scmemproj2, alias_idx); |
|
646 |
|
647 // Merge inner flow - check if the healed oop was equal to the expected value. |
|
648 region2->set_req(1, kit->control()); |
|
649 region2->set_req(2, then2); |
|
650 phi2->set_req(1, cmpx2); |
|
651 phi2->set_req(2, barrierdata); |
|
652 |
|
653 // Merge outer flow - then check if first cas succeeded |
|
654 region->set_req(1, then); |
|
655 region->set_req(2, region2); |
|
656 phi->set_req(1, cmpx); |
|
657 phi->set_req(2, phi2); |
|
658 |
|
659 gvn.transform(region2); |
|
660 gvn.transform(phi2); |
|
661 gvn.transform(region); |
|
662 gvn.transform(phi); |
|
663 |
|
664 kit->set_control(region); |
|
665 kit->set_memory(in_mem, alias_idx); |
|
666 kit->insert_mem_bar(Op_MemBarCPUOrder); |
|
667 |
|
668 return phi; |
|
669 } |
|
670 |
|
671 Node* ZBarrierSetC2::load_barrier(GraphKit* kit, Node* val, Node* adr, bool weak, bool writeback, bool oop_reload_allowed) const { |
|
672 PhaseGVN& gvn = kit->gvn(); |
|
673 Node* barrier = new LoadBarrierNode(Compile::current(), kit->control(), kit->memory(TypeRawPtr::BOTTOM), val, adr, weak, writeback, oop_reload_allowed); |
|
674 Node* transformed_barrier = gvn.transform(barrier); |
|
675 |
|
676 if (transformed_barrier->is_LoadBarrier()) { |
|
677 if (barrier == transformed_barrier) { |
|
678 kit->set_control(gvn.transform(new ProjNode(barrier, LoadBarrierNode::Control))); |
|
679 } |
|
680 Node* result = gvn.transform(new ProjNode(transformed_barrier, LoadBarrierNode::Oop)); |
|
681 return result; |
|
682 } else { |
|
683 return val; |
|
684 } |
|
685 } |
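// Note on the wrapper above: GVN may fold the newly created LoadBarrierNode
// away (for instance when it is recognized as redundant); in that case the
// transformed node is no longer a LoadBarrier and the original oop value is
// returned unchanged (descriptive summary of the code above).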
static bool barrier_needed(C2Access& access) {
  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}

  if (!barrier_needed(access)) {
    return p;
  }

  bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();
  PhaseGVN& gvn = kit->gvn();
  Node* adr = access.addr().node();
  Node* heap_base_oop = access.base();
  bool unsafe = (access.decorators() & C2_UNSAFE_ACCESS) != 0;
  if (unsafe) {
    if (!ZVerifyLoadBarriers) {
      p = load_barrier(kit, p, adr);
    } else {
      if (!TypePtr::NULL_PTR->higher_equal(gvn.type(heap_base_oop))) {
        p = load_barrier(kit, p, adr);
      } else {
        IdealKit ideal(kit);
        IdealVariable res(ideal);
#define __ ideal.
        __ declarations_done();
        __ set(res, p);
        __ if_then(heap_base_oop, BoolTest::ne, kit->null(), PROB_UNLIKELY(0.999)); {
          kit->sync_kit(ideal);
          p = load_barrier(kit, p, adr);
          __ set(res, p);
          __ sync_kit(kit);
        } __ end_if();
        kit->final_sync(ideal);
        p = __ value(res);
#undef __
      }
    }
    return p;
  } else {
    return load_barrier(parse_access.kit(), p, access.addr().node(), weak, true, true);
  }
}

  if (p->isa_Load()) {
    load_set_barrier(p->as_Load(), weak);
  }
  return p;
}

Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
  if (!barrier_needed(access)) {
    return result;
  }

  access.set_needs_pinning(false);
  return make_cmpx_loadbarrier(access);
}

Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                    Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  if (!barrier_needed(access)) {
    return result;
  }

  Node* load_store = access.raw_access();
  bool weak_cas = (access.decorators() & C2_WEAK_CMPXCHG) != 0;
  bool expected_is_null = (expected_val->get_ptr_type() == TypePtr::NULL_PTR);

  if (!expected_is_null) {
    if (weak_cas) {
      access.set_needs_pinning(false);
      load_store = make_cas_loadbarrier(access);
    } else {
      access.set_needs_pinning(false);
      load_store = make_cas_loadbarrier(access);
    }
  }

  return load_store;
}

Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* value_type) const {
  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
  if (!barrier_needed(access)) {
    return result;
  }

  Node* load_store = access.raw_access();
  Node* adr = access.addr().node();

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  return load_barrier(parse_access.kit(), load_store, adr, false, false, false);
}

Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
  LoadStoreNode* lsn = result->as_LoadStore();
  if (barrier_needed(access)) {
    lsn->set_has_barrier();
  }
  return lsn;
}

// == Macro Expansion ==

515 // Optimized, low spill, loadbarrier variant using stub specialized on register used |
786 void ZBarrierSetC2::expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const { |
787 Node* in_ctrl = barrier->in(LoadBarrierNode::Control); |
|
788 Node* in_mem = barrier->in(LoadBarrierNode::Memory); |
|
789 Node* in_val = barrier->in(LoadBarrierNode::Oop); |
|
790 Node* in_adr = barrier->in(LoadBarrierNode::Address); |
|
791 |
|
792 Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control); |
|
793 Node* out_res = barrier->proj_out(LoadBarrierNode::Oop); |
|
794 |
|
795 PhaseIterGVN &igvn = phase->igvn(); |
796 |
|
797 if (ZVerifyLoadBarriers) { |
|
798 igvn.replace_node(out_res, in_val); |
|
799 igvn.replace_node(out_ctrl, in_ctrl); |
|
800 return; |
|
801 } |
|
802 |
|
803 if (barrier->can_be_eliminated()) { |
|
804 // Clone and pin the load for this barrier below the dominating |
|
805 // barrier: the load cannot be allowed to float above the |
|
806 // dominating barrier |
|
807 Node* load = in_val; |
|
808 |
|
809 if (load->is_Load()) { |
|
810 Node* new_load = load->clone(); |
|
811 Node* addp = new_load->in(MemNode::Address); |
|
812 assert(addp->is_AddP() || addp->is_Phi() || addp->is_Load(), "bad address"); |
|
813 Node* cast = new CastPPNode(addp, igvn.type(addp), true); |
|
814 Node* ctrl = NULL; |
|
815 Node* similar = barrier->in(LoadBarrierNode::Similar); |
|
816 if (similar->is_Phi()) { |
|
817 // already expanded |
|
818 ctrl = similar->in(0); |
|
819 } else { |
|
820 assert(similar->is_Proj() && similar->in(0)->is_LoadBarrier(), "unexpected graph shape"); |
|
821 ctrl = similar->in(0)->as_LoadBarrier()->proj_out(LoadBarrierNode::Control); |
|
822 } |
|
823 assert(ctrl != NULL, "bad control"); |
|
824 cast->set_req(0, ctrl); |
|
825 igvn.transform(cast); |
|
826 new_load->set_req(MemNode::Address, cast); |
|
827 igvn.transform(new_load); |
|
828 |
|
829 igvn.replace_node(out_res, new_load); |
|
830 igvn.replace_node(out_ctrl, in_ctrl); |
|
831 return; |
|
832 } |
|
833 // cannot eliminate |
|
834 } |
|
835 |
|
836 // There are two cases that require the basic loadbarrier |
|
837 // 1) When the writeback of a healed oop must be avoided (swap) |
|
838 // 2) When we must guarantee that no reload of the oop is done (swap, cas, cmpx) |
|
839 if (!barrier->is_writeback()) { |
|
840 assert(!barrier->oop_reload_allowed(), "writeback barriers should be marked as requires oop"); |
|
841 } |
|
842 |
|
843 if (!barrier->oop_reload_allowed()) { |
|
844 expand_loadbarrier_basic(phase, barrier); |
|
845 } else { |
|
846 expand_loadbarrier_optimized(phase, barrier); |
|
847 } |
|
848 } |
|
849 |
|
850 // Basic loadbarrier using conventional argument passing |
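// The expansion below corresponds roughly to the following pseudo code
// (sketch only - the real IR is built node by node, and the weak variant
// calls load_barrier_on_weak_oop_field_preloaded instead):
//
//   if ((cast_p2x(oop) & thread->address_bad_mask) != 0) {
//     oop = ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded(oop, writeback ? adr : NULL);
//   }
//   result = oop;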
|
851 void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const { |
|
852 PhaseIterGVN &igvn = phase->igvn(); |
|
853 |
|
854 Node* in_ctrl = barrier->in(LoadBarrierNode::Control); |
|
855 Node* in_mem = barrier->in(LoadBarrierNode::Memory); |
|
856 Node* in_val = barrier->in(LoadBarrierNode::Oop); |
|
857 Node* in_adr = barrier->in(LoadBarrierNode::Address); |
|
858 |
|
859 Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control); |
|
860 Node* out_res = barrier->proj_out(LoadBarrierNode::Oop); |
|
861 |
|
862 float unlikely = PROB_UNLIKELY(0.999); |
863 const Type* in_val_maybe_null_t = igvn.type(in_val); |
|
864 |
|
865 Node* jthread = igvn.transform(new ThreadLocalNode()); |
|
866 Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset())); |
|
867 Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered)); |
|
868 Node* cast = igvn.transform(new CastP2XNode(in_ctrl, in_val)); |
|
869 Node* obj_masked = igvn.transform(new AndXNode(cast, bad_mask)); |
|
870 Node* cmp = igvn.transform(new CmpXNode(obj_masked, igvn.zerocon(TypeX_X->basic_type()))); |
|
871 Node *bol = igvn.transform(new BoolNode(cmp, BoolTest::ne))->as_Bool(); |
|
872 IfNode* iff = igvn.transform(new IfNode(in_ctrl, bol, unlikely, COUNT_UNKNOWN))->as_If(); |
|
873 Node* then = igvn.transform(new IfTrueNode(iff)); |
|
874 Node* elsen = igvn.transform(new IfFalseNode(iff)); |
|
875 |
|
876 Node* result_region; |
|
877 Node* result_val; |
|
878 |
|
879 result_region = new RegionNode(3); |
|
880 result_val = new PhiNode(result_region, TypeInstPtr::BOTTOM); |
|
881 |
|
882 result_region->set_req(1, elsen); |
|
883 Node* res = igvn.transform(new CastPPNode(in_val, in_val_maybe_null_t)); |
|
884 res->init_req(0, elsen); |
|
885 result_val->set_req(1, res); |
|
886 |
|
887 const TypeFunc *tf = load_barrier_Type(); |
|
888 Node* call; |
|
889 if (barrier->is_weak()) { |
|
890 call = new CallLeafNode(tf, |
|
891 ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded_addr(), |
|
892 "ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded", |
|
893 TypeRawPtr::BOTTOM); |
|
894 } else { |
|
895 call = new CallLeafNode(tf, |
|
896 ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(), |
|
897 "ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded", |
|
898 TypeRawPtr::BOTTOM); |
|
899 } |
|
900 |
|
901 call->init_req(TypeFunc::Control, then); |
|
902 call->init_req(TypeFunc::I_O , phase->top()); |
|
903 call->init_req(TypeFunc::Memory , in_mem); |
|
904 call->init_req(TypeFunc::FramePtr, phase->top()); |
|
905 call->init_req(TypeFunc::ReturnAdr, phase->top()); |
|
906 call->init_req(TypeFunc::Parms+0, in_val); |
|
907 if (barrier->is_writeback()) { |
|
908 call->init_req(TypeFunc::Parms+1, in_adr); |
|
909 } else { |
|
910 // When slow path is called with a null address, the healed oop will not be written back |
|
911 call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT)); |
|
912 } |
|
913 call = igvn.transform(call); |
|
914 |
|
915 Node* ctrl = igvn.transform(new ProjNode(call, TypeFunc::Control)); |
|
916 res = igvn.transform(new ProjNode(call, TypeFunc::Parms)); |
|
917 res = igvn.transform(new CheckCastPPNode(ctrl, res, in_val_maybe_null_t)); |
|
918 |
|
919 result_region->set_req(2, ctrl); |
|
920 result_val->set_req(2, res); |
|
921 |
|
922 result_region = igvn.transform(result_region); |
|
923 result_val = igvn.transform(result_val); |
|
924 |
|
925 if (out_ctrl != NULL) { // Added if cond |
|
926 igvn.replace_node(out_ctrl, result_region); |
|
927 } |
|
928 igvn.replace_node(out_res, result_val); |
|
929 } |
|
930 |
|
931 // Optimized, low spill, loadbarrier variant using stub specialized on register used |
|
932 void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const { |
|
933 PhaseIterGVN &igvn = phase->igvn(); |
|
934 #ifdef PRINT_NODE_TRAVERSALS |
|
935 Node* preceding_barrier_node = barrier->in(LoadBarrierNode::Oop); |
|
936 #endif |
|
937  |
938 Node* in_ctrl = barrier->in(LoadBarrierNode::Control); |
939 Node* in_mem = barrier->in(LoadBarrierNode::Memory); |
940 Node* in_val = barrier->in(LoadBarrierNode::Oop); |
941 Node* in_adr = barrier->in(LoadBarrierNode::Address); |
942  |
943 Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control); |
525 Node* out_ctrl = barrier->proj_out_or_null(LoadBarrierNode::Control); |
944 Node* out_res = barrier->proj_out(LoadBarrierNode::Oop); |
945  |
946 assert(barrier->in(LoadBarrierNode::Oop) != NULL, "oop to loadbarrier node cannot be null"); |
947  |
948 #ifdef PRINT_NODE_TRAVERSALS |
949 tty->print("\n\n\nBefore barrier optimization:\n"); |
950 traverse(barrier, out_ctrl, out_res, -1); |
951  |
952 tty->print("\nBefore barrier optimization: preceding_barrier_node\n"); |
953 traverse(preceding_barrier_node, out_ctrl, out_res, -1); |
954 #endif |
955  |
956 float unlikely = PROB_UNLIKELY(0.999); |
957  |
958 Node* jthread = igvn.transform(new ThreadLocalNode()); |
959 Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset())); |
960 Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr, |
961 TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), |
  }

  return false;
}

// == Loop optimization ==

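// The transformations below are tried in the order listed in
// loop_optimize_gc_barrier further down: replace a barrier with an identical
// dominating one, split a barrier through an oop phi, hoist a loop-invariant
// barrier out of its loop, and common two barriers on the same oop whose
// controls meet at a shared test (descriptive summary of the functions below).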
1064 static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) { |
|
1065 PhaseIterGVN &igvn = phase->igvn(); |
|
1066 Compile* C = Compile::current(); |
|
1067 |
|
1068 LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round); |
|
1069 if (lb2 == NULL) { |
|
1070 return false; |
|
1071 } |
|
1072 |
|
1073 if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) { |
|
1074 assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "Invalid address"); |
|
1075 igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop)); |
|
1076 C->set_major_progress(); |
|
1077 return false; |
|
1078 } |
|
1079 |
|
1080 // That transformation may cause the Similar edge on dominated load barriers to be invalid |
|
1081 lb->fix_similar_in_uses(&igvn); |
|
1082 |
|
1083 Node* val = lb->proj_out(LoadBarrierNode::Oop); |
|
1084 assert(lb2->has_true_uses(), "Invalid uses"); |
|
1085 assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "Invalid oop"); |
|
1086 phase->lazy_update(lb, lb->in(LoadBarrierNode::Control)); |
|
1087 phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control)); |
|
1088 igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop)); |
|
1089 |
|
1090 return true; |
|
1091 } |
|
1092 |
|
1093 static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) { |
|
1094 assert(dom->is_Region() || i == -1, ""); |
|
1095 |
|
1096 Node* m = mem; |
|
1097 while(phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) { |
|
1098 if (m->is_Mem()) { |
|
1099 assert(m->as_Mem()->adr_type() == TypeRawPtr::BOTTOM, ""); |
|
1100 m = m->in(MemNode::Memory); |
|
1101 } else if (m->is_MergeMem()) { |
|
1102 m = m->as_MergeMem()->memory_at(Compile::AliasIdxRaw); |
|
1103 } else if (m->is_Phi()) { |
|
1104 if (m->in(0) == dom && i != -1) { |
|
1105 m = m->in(i); |
|
1106 break; |
|
1107 } else { |
|
1108 m = m->in(LoopNode::EntryControl); |
|
1109 } |
|
1110 } else if (m->is_Proj()) { |
|
1111 m = m->in(0); |
|
1112 } else if (m->is_SafePoint() || m->is_MemBar()) { |
|
1113 m = m->in(TypeFunc::Memory); |
|
1114 } else { |
|
1115 #ifdef ASSERT |
|
1116 m->dump(); |
|
1117 #endif |
|
1118 ShouldNotReachHere(); |
|
1119 } |
|
1120 } |
|
1121 |
|
1122 return m; |
|
1123 } |
|
1124 |
|
1125 static LoadBarrierNode* clone_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* ctl, Node* mem, Node* oop_in) { |
|
1126 PhaseIterGVN &igvn = phase->igvn(); |
|
1127 Compile* C = Compile::current(); |
|
1128 Node* the_clone = lb->clone(); |
|
1129 the_clone->set_req(LoadBarrierNode::Control, ctl); |
|
1130 the_clone->set_req(LoadBarrierNode::Memory, mem); |
|
1131 if (oop_in != NULL) { |
|
1132 the_clone->set_req(LoadBarrierNode::Oop, oop_in); |
|
1133 } |
|
1134 |
|
1135 LoadBarrierNode* new_lb = the_clone->as_LoadBarrier(); |
|
1136 igvn.register_new_node_with_optimizer(new_lb); |
|
1137 IdealLoopTree *loop = phase->get_loop(new_lb->in(0)); |
|
1138 phase->set_ctrl(new_lb, new_lb->in(0)); |
|
1139 phase->set_loop(new_lb, loop); |
|
1140 phase->set_idom(new_lb, new_lb->in(0), phase->dom_depth(new_lb->in(0))+1); |
|
1141 if (!loop->_child) { |
|
1142 loop->_body.push(new_lb); |
|
1143 } |
|
1144 |
|
1145 Node* proj_ctl = new ProjNode(new_lb, LoadBarrierNode::Control); |
|
1146 igvn.register_new_node_with_optimizer(proj_ctl); |
|
1147 phase->set_ctrl(proj_ctl, proj_ctl->in(0)); |
|
1148 phase->set_loop(proj_ctl, loop); |
|
1149 phase->set_idom(proj_ctl, new_lb, phase->dom_depth(new_lb)+1); |
|
1150 if (!loop->_child) { |
|
1151 loop->_body.push(proj_ctl); |
|
1152 } |
|
1153 |
|
1154 Node* proj_oop = new ProjNode(new_lb, LoadBarrierNode::Oop); |
|
1155 phase->register_new_node(proj_oop, new_lb); |
|
1156 |
|
1157 if (!new_lb->in(LoadBarrierNode::Similar)->is_top()) { |
|
1158 LoadBarrierNode* similar = new_lb->in(LoadBarrierNode::Similar)->in(0)->as_LoadBarrier(); |
|
1159 if (!phase->is_dominator(similar, ctl)) { |
|
1160 igvn.replace_input_of(new_lb, LoadBarrierNode::Similar, C->top()); |
|
1161 } |
|
1162 } |
|
1163 |
|
1164 return new_lb; |
|
1165 } |
|
1166 |
|
1167 static void replace_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, Node* new_val) { |
|
1168 PhaseIterGVN &igvn = phase->igvn(); |
|
1169 Node* val = lb->proj_out(LoadBarrierNode::Oop); |
|
1170 igvn.replace_node(val, new_val); |
|
1171 phase->lazy_update(lb, lb->in(LoadBarrierNode::Control)); |
|
1172 phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control)); |
|
1173 } |
|
1174 |
|
1175 static bool split_barrier_thru_phi(PhaseIdealLoop* phase, LoadBarrierNode* lb) { |
|
1176 PhaseIterGVN &igvn = phase->igvn(); |
|
1177 Compile* C = Compile::current(); |
|
1178 |
|
1179 if (lb->in(LoadBarrierNode::Oop)->is_Phi()) { |
|
1180 Node* oop_phi = lb->in(LoadBarrierNode::Oop); |
|
1181 |
|
1182 if ((oop_phi->req() != 3) || (oop_phi->in(2) == oop_phi)) { |
|
1183 // Ignore phis with only one input |
|
1184 return false; |
|
1185 } |
|
1186 |
|
1187 if (phase->is_dominator(phase->get_ctrl(lb->in(LoadBarrierNode::Address)), |
|
1188 oop_phi->in(0)) && phase->get_ctrl(lb->in(LoadBarrierNode::Address)) != oop_phi->in(0)) { |
|
1189 // That transformation may cause the Similar edge on dominated load barriers to be invalid |
|
1190 lb->fix_similar_in_uses(&igvn); |
|
1191 |
|
1192 RegionNode* region = oop_phi->in(0)->as_Region(); |
|
1193 |
|
1194 int backedge = LoopNode::LoopBackControl; |
|
1195 if (region->is_Loop() && region->in(backedge)->is_Proj() && region->in(backedge)->in(0)->is_If()) { |
|
1196 Node* c = region->in(backedge)->in(0)->in(0); |
|
1197 assert(c->unique_ctrl_out() == region->in(backedge)->in(0), ""); |
|
1198 Node* oop = lb->in(LoadBarrierNode::Oop)->in(backedge); |
|
1199 Node* oop_c = phase->has_ctrl(oop) ? phase->get_ctrl(oop) : oop; |
|
1200 if (!phase->is_dominator(oop_c, c)) { |
|
1201 return false; |
|
1202 } |
|
1203 } |
|
1204 |
|
1205 // If the node on the backedge above the phi is the node itself - we have a self loop. |
|
1206 // Don't clone - this will be folded later. |
|
1207 if (oop_phi->in(LoopNode::LoopBackControl) == lb->proj_out(LoadBarrierNode::Oop)) { |
|
1208 return false; |
|
1209 } |
|
1210 |
|
1211 bool is_strip_mined = region->is_CountedLoop() && region->as_CountedLoop()->is_strip_mined(); |
|
1212 Node *phi = oop_phi->clone(); |
|
1213 |
|
1214 for (uint i = 1; i < region->req(); i++) { |
|
1215 Node* ctrl = region->in(i); |
|
1216 if (ctrl != C->top()) { |
|
1217 assert(!phase->is_dominator(ctrl, region) || region->is_Loop(), ""); |
|
1218 |
|
1219 Node* mem = lb->in(LoadBarrierNode::Memory); |
|
1220 Node* m = find_dominating_memory(phase, mem, region, i); |
|
1221 |
|
1222 if (region->is_Loop() && i == LoopNode::LoopBackControl && ctrl->is_Proj() && ctrl->in(0)->is_If()) { |
|
1223 ctrl = ctrl->in(0)->in(0); |
|
1224 } else if (region->is_Loop() && is_strip_mined) { |
|
1225 // If this is a strip mined loop, control must move above OuterStripMinedLoop |
|
1226 assert(i == LoopNode::EntryControl, "check"); |
|
1227 assert(ctrl->is_OuterStripMinedLoop(), "sanity"); |
|
1228 ctrl = ctrl->as_OuterStripMinedLoop()->in(LoopNode::EntryControl); |
|
1229 } |
|
1230 |
|
1231 LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, ctrl, m, lb->in(LoadBarrierNode::Oop)->in(i)); |
|
1232 Node* out_ctrl = new_lb->proj_out(LoadBarrierNode::Control); |
|
1233 |
|
1234 if (is_strip_mined && (i == LoopNode::EntryControl)) { |
|
1235 assert(region->in(i)->is_OuterStripMinedLoop(), ""); |
|
1236 igvn.replace_input_of(region->in(i), i, out_ctrl); |
|
1237 phase->set_idom(region->in(i), out_ctrl, phase->dom_depth(out_ctrl)); |
|
1238 } else if (ctrl == region->in(i)) { |
|
1239 igvn.replace_input_of(region, i, out_ctrl); |
|
1240 // Only update the idom if it is the loop entry we are updating |
|
1241 // - A loop backedge doesn't change the idom |
|
1242 if (region->is_Loop() && i == LoopNode::EntryControl) { |
|
1243 phase->set_idom(region, out_ctrl, phase->dom_depth(out_ctrl)); |
|
1244 } |
|
1245 } else { |
|
1246 Node* iff = region->in(i)->in(0); |
|
1247 igvn.replace_input_of(iff, 0, out_ctrl); |
|
1248 phase->set_idom(iff, out_ctrl, phase->dom_depth(out_ctrl)+1); |
|
1249 } |
|
1250 phi->set_req(i, new_lb->proj_out(LoadBarrierNode::Oop)); |
|
1251 } |
|
1252 } |
|
1253 phase->register_new_node(phi, region); |
|
1254 replace_barrier(phase, lb, phi); |
|
1255 |
|
1256 if (region->is_Loop()) { |
|
1257 // Load barrier moved to the back edge of the Loop may now |
|
1258 // have a safepoint on the path to the barrier on the Similar |
|
1259 // edge |
|
1260 igvn.replace_input_of(phi->in(LoopNode::LoopBackControl)->in(0), LoadBarrierNode::Similar, C->top()); |
|
1261 Node* head = region->in(LoopNode::EntryControl); |
|
1262 phase->set_idom(region, head, phase->dom_depth(head)+1); |
|
1263 phase->recompute_dom_depth(); |
|
1264 if (head->is_CountedLoop() && head->as_CountedLoop()->is_main_loop()) { |
|
1265 head->as_CountedLoop()->set_normal_loop(); |
|
1266 } |
|
1267 } |
|
1268 |
|
1269 return true; |
|
1270 } |
|
1271 } |
|
1272 |
|
1273 return false; |
|
1274 } |
|
1275 |
|
1276 static bool move_out_of_loop(PhaseIdealLoop* phase, LoadBarrierNode* lb) { |
|
1277 PhaseIterGVN &igvn = phase->igvn(); |
|
1278 IdealLoopTree *lb_loop = phase->get_loop(lb->in(0)); |
|
1279 if (lb_loop != phase->ltree_root() && !lb_loop->_irreducible) { |
|
1280 Node* oop_ctrl = phase->get_ctrl(lb->in(LoadBarrierNode::Oop)); |
|
1281 IdealLoopTree *oop_loop = phase->get_loop(oop_ctrl); |
|
1282 IdealLoopTree* adr_loop = phase->get_loop(phase->get_ctrl(lb->in(LoadBarrierNode::Address))); |
|
1283 if (!lb_loop->is_member(oop_loop) && !lb_loop->is_member(adr_loop)) { |
|
1284 // That transformation may cause the Similar edge on dominated load barriers to be invalid |
|
1285 lb->fix_similar_in_uses(&igvn); |
|
1286 |
|
1287 Node* head = lb_loop->_head; |
|
1288 assert(head->is_Loop(), ""); |
|
1289 |
|
1290 if (phase->is_dominator(head, oop_ctrl)) { |
|
1291 assert(oop_ctrl->Opcode() == Op_CProj && oop_ctrl->in(0)->Opcode() == Op_NeverBranch, ""); |
|
1292 assert(lb_loop->is_member(phase->get_loop(oop_ctrl->in(0)->in(0))), ""); |
|
1293 return false; |
|
1294 } |
|
1295 |
|
1296 if (head->is_CountedLoop()) { |
|
1297 CountedLoopNode* cloop = head->as_CountedLoop(); |
|
1298 if (cloop->is_main_loop()) { |
|
1299 cloop->set_normal_loop(); |
|
1300 } |
|
1301 // When we are moving barrier out of a counted loop, |
|
1302 // make sure we move it all the way out of the strip mined outer loop. |
|
1303 if (cloop->is_strip_mined()) { |
|
1304 head = cloop->outer_loop(); |
|
1305 } |
|
1306 } |
|
1307 |
|
1308 Node* mem = lb->in(LoadBarrierNode::Memory); |
|
1309 Node* m = find_dominating_memory(phase, mem, head, -1); |
|
1310 |
|
1311 LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, head->in(LoopNode::EntryControl), m, NULL); |
|
1312 |
|
1313 assert(phase->idom(head) == head->in(LoopNode::EntryControl), ""); |
|
1314 Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control); |
|
1315 igvn.replace_input_of(head, LoopNode::EntryControl, proj_ctl); |
|
1316 phase->set_idom(head, proj_ctl, phase->dom_depth(proj_ctl) + 1); |
|
1317 |
|
1318 replace_barrier(phase, lb, new_lb->proj_out(LoadBarrierNode::Oop)); |
|
1319 |
|
1320 phase->recompute_dom_depth(); |
|
1321 |
|
1322 return true; |
|
1323 } |
|
1324 } |
|
1325 |
|
1326 return false; |
|
1327 } |
|
1328 |
|
1329 static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) { |
|
1330 PhaseIterGVN &igvn = phase->igvn(); |
|
1331 Node* in_val = lb->in(LoadBarrierNode::Oop); |
|
1332 for (DUIterator_Fast imax, i = in_val->fast_outs(imax); i < imax; i++) { |
|
1333 Node* u = in_val->fast_out(i); |
|
1334 if (u != lb && u->is_LoadBarrier() && u->as_LoadBarrier()->has_true_uses()) { |
|
1335 Node* this_ctrl = lb->in(LoadBarrierNode::Control); |
|
1336 Node* other_ctrl = u->in(LoadBarrierNode::Control); |
|
1337 |
|
1338 Node* lca = phase->dom_lca(this_ctrl, other_ctrl); |
|
1339 Node* proj1 = NULL; |
|
1340 Node* proj2 = NULL; |
|
1341 bool ok = (lb->in(LoadBarrierNode::Address) == u->in(LoadBarrierNode::Address)); |
|
1342 |
|
1343 while (this_ctrl != lca && ok) { |
|
1344 if (this_ctrl->in(0) != NULL && |
|
1345 this_ctrl->in(0)->is_MultiBranch()) { |
|
1346 if (this_ctrl->in(0)->in(0) == lca) { |
|
1347 assert(proj1 == NULL, ""); |
|
1348 assert(this_ctrl->is_Proj(), ""); |
|
1349 proj1 = this_ctrl; |
|
1350 } else if (!(this_ctrl->in(0)->is_If() && this_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) { |
|
1351 ok = false; |
|
1352 } |
|
1353 } |
|
1354 this_ctrl = phase->idom(this_ctrl); |
|
1355 } |
|
1356 while (other_ctrl != lca && ok) { |
|
1357 if (other_ctrl->in(0) != NULL && |
|
1358 other_ctrl->in(0)->is_MultiBranch()) { |
|
1359 if (other_ctrl->in(0)->in(0) == lca) { |
|
1360 assert(other_ctrl->is_Proj(), ""); |
|
1361 assert(proj2 == NULL, ""); |
|
1362 proj2 = other_ctrl; |
|
1363 } else if (!(other_ctrl->in(0)->is_If() && other_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none))) { |
|
1364 ok = false; |
|
1365 } |
|
1366 } |
|
1367 other_ctrl = phase->idom(other_ctrl); |
|
1368 } |
|
1369 assert(proj1 == NULL || proj2 == NULL || proj1->in(0) == proj2->in(0), ""); |
|
1370 if (ok && proj1 && proj2 && proj1 != proj2 && proj1->in(0)->is_If()) { |
|
1371 // That transformation may cause the Similar edge on dominated load barriers to be invalid |
|
1372 lb->fix_similar_in_uses(&igvn); |
|
1373 u->as_LoadBarrier()->fix_similar_in_uses(&igvn); |
|
1374 |
|
1375 Node* split = lca->unique_ctrl_out(); |
|
1376 assert(split->in(0) == lca, ""); |
|
1377 |
|
1378 Node* mem = lb->in(LoadBarrierNode::Memory); |
|
1379 Node* m = find_dominating_memory(phase, mem, split, -1); |
|
1380 LoadBarrierNode* new_lb = clone_load_barrier(phase, lb, lca, m, NULL); |
|
1381 |
|
1382 Node* proj_ctl = new_lb->proj_out(LoadBarrierNode::Control); |
|
1383 igvn.replace_input_of(split, 0, new_lb->proj_out(LoadBarrierNode::Control)); |
|
1384 phase->set_idom(split, proj_ctl, phase->dom_depth(proj_ctl)+1); |
|
1385 |
|
1386 Node* proj_oop = new_lb->proj_out(LoadBarrierNode::Oop); |
|
1387 replace_barrier(phase, lb, proj_oop); |
|
1388 replace_barrier(phase, u->as_LoadBarrier(), proj_oop); |
|
1389 |
|
1390 phase->recompute_dom_depth(); |
|
1391 |
|
1392 return true; |
|
1393 } |
|
1394 } |
|
1395 } |
|
1396 |
|
1397 return false; |
|
1398 } |
|
1399 |
|
1400 void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) { |
|
1401 if (!Compile::current()->directive()->ZOptimizeLoadBarriersOption) { |
|
1402 return; |
|
1403 } |
|
1404 |
|
1405 if (!node->is_LoadBarrier()) { |
|
1406 return; |
|
1407 } |
|
1408 |
|
1409 if (!node->as_LoadBarrier()->has_true_uses()) { |
|
1410 return; |
|
1411 } |
|
1412 |
|
1413 if (replace_with_dominating_barrier(phase, node->as_LoadBarrier(), last_round)) { |
|
1414 return; |
|
1415 } |
|
1416 |
|
1417 if (split_barrier_thru_phi(phase, node->as_LoadBarrier())) { |
|
1418 return; |
|
1419 } |
|
1420 |
|
1421 if (move_out_of_loop(phase, node->as_LoadBarrier())) { |
|
1422 return; |
|
1423 } |
|
1424 |
|
1425 if (common_barriers(phase, node->as_LoadBarrier())) { |
|
1426 return; |
|
1427 } |
|
1428 } |
|
1429 |
|
1430 Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const { |
1431 Node* node = c; |
1432  |
1433 // 1. This step follows potential oop projections of a load barrier before expansion |
1434 if (node->is_Proj()) { |
    if (!n->in(LoadBarrierNode::Similar)->is_top()) {
      ResourceMark rm;
      Unique_Node_List wq;
      Node* other = n->in(LoadBarrierNode::Similar)->in(0);
      wq.push(n);
      bool ok = true;
      bool dom_found = false;
      for (uint next = 0; next < wq.size(); ++next) {
        Node *nn = wq.at(next);
        assert(nn->is_CFG(), "");
        assert(!nn->is_SafePoint(), "");

        if (nn == other) {
          continue;
        }

        if (nn->is_Region()) {
          for (uint i = 1; i < nn->req(); i++) {
            Node* m = nn->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          Node* m = nn->in(0);
          if (m != NULL) {
            wq.push(m);
          }
        }
      }
    }

    if (ZVerifyLoadBarriers) {
      if ((n->is_Load() || n->is_LoadStore()) && n->bottom_type()->make_oopptr() != NULL) {
        visited.Clear();
        bool found = look_for_barrier(n, post_parse, visited);
        if (!found) {
          n->dump(1);
          n->dump(-3);
          stringStream ss;
          C->method()->print_short_name(&ss);
          tty->print_cr("-%s-", ss.as_string());
          assert(found, "");
        }
      }
    }
  }
}

#endif // end verification code

bool ZBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
      conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
      return true;

    case Op_Proj:
      if (n->as_Proj()->_con != LoadBarrierNode::Oop || !n->in(0)->is_LoadBarrier()) {
        return false;
      }
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
      return true;
  }

  return false;
}

bool ZBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
  switch (opcode) {
    case Op_LoadBarrierSlowReg:
    case Op_LoadBarrierWeakSlowReg:
      if (gvn->type(n)->make_ptr() == NULL) {
        return false;
      }
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
      return true;

    case Op_Proj:
      if (n->as_Proj()->_con != LoadBarrierNode::Oop || !n->in(0)->is_LoadBarrier()) {
        return false;
      }
      conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
      return true;
  }

  return false;
}

static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl);

// This code is cloning all uses of a load that is between a call and the catch blocks,
// to each use.

static bool fixup_uses_in_catch(PhaseIdealLoop *phase, Node *start_ctrl, Node *node) {
|
840 if (!phase->has_ctrl(node)) { |
|
841 // This node is floating - doesn't need to be cloned. |
|
842 assert(node != start_ctrl, "check"); |
|
843 return false; |
|
844 } |
|
845 |
|
846 Node* ctrl = phase->get_ctrl(node); |
|
847 if (ctrl != start_ctrl) { |
|
848 // We are in a successor block - the node is ok. |
|
849 return false; // Unwind |
|
850 } |
|
851 |
|
852 // Process successor nodes |
|
853 int outcnt = node->outcnt(); |
|
854 for (int i = 0; i < outcnt; i++) { |
|
855 Node* n = node->raw_out(0); |
|
856 assert(!n->is_LoadBarrier(), "Sanity"); |
|
857 // Calling recursively, visiting leafs first |
|
858 fixup_uses_in_catch(phase, start_ctrl, n); |
|
859 } |
|
860 |
|
861 // Now all successors are outside |
|
862 // - Clone this node to both successors |
|
863 int no_succs = node->outcnt(); |
|
864 assert(!node->is_Store(), "Stores not expected here"); |
|
865 |
|
866 // In some very rare cases a load that doesn't need a barrier will end up here |
|
867 // Treat it as a LoadP and the insertion of phis will be done correctly. |
|
868 if (node->is_Load()) { |
|
869 assert(node->as_Load()->barrier_data() == 0, "Sanity"); |
|
870 call_catch_cleanup_one(phase, node->as_Load(), phase->get_ctrl(node)); |
|
871 } else { |
|
872 for (DUIterator_Fast jmax, i = node->fast_outs(jmax); i < jmax; i++) { |
|
873 Node* use = node->fast_out(i); |
|
874 Node* clone = node->clone(); |
|
875 assert(clone->outcnt() == 0, ""); |
|
876 |
|
877 assert(use->find_edge(node) != -1, "check"); |
|
878 phase->igvn().rehash_node_delayed(use); |
|
879 use->replace_edge(node, clone); |
|
880 |
|
881 Node* new_ctrl; |
|
882 if (use->is_block_start()) { |
|
883 new_ctrl = use; |
|
884 } else if (use->is_CFG()) { |
|
885 new_ctrl = use->in(0); |
|
886 assert (new_ctrl != NULL, ""); |
|
887 } else { |
|
888 new_ctrl = phase->get_ctrl(use); |
|
889 } |
|
890 |
|
891 phase->set_ctrl(clone, new_ctrl); |
|
892 |
|
893 if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr(" Clone op %i as %i to control %i", node->_idx, clone->_idx, new_ctrl->_idx); |
|
894 phase->igvn().register_new_node_with_optimizer(clone); |
|
895 --i, --jmax; |
|
896 } |
|
897 assert(node->outcnt() == 0, "must be empty now"); |
|
898 |
|
899 // Node node is dead. |
|
900 phase->igvn().remove_dead_node(node); |
|
901 } |
|
902 return true; // unwind - return if a use was processed |
|
903 } |
|
904 |
|
905 // Clone a load to a specific catch_proj |
|
906 static Node* clone_load_to_catchproj(PhaseIdealLoop* phase, Node* load, Node* catch_proj) { |
|
907 Node* cloned_load = load->clone(); |
|
908 cloned_load->set_req(0, catch_proj); // set explicit control |
|
909 phase->set_ctrl(cloned_load, catch_proj); // update |
|
910 if (phase->C->directive()->ZTraceLoadBarriersOption) tty->print_cr(" Clone LOAD %i as %i to control %i", load->_idx, cloned_load->_idx, catch_proj->_idx); |
|
911 phase->igvn().register_new_node_with_optimizer(cloned_load); |
|
912 return cloned_load; |
|
913 } |
|
914 |
|
915 static Node* get_dominating_region(PhaseIdealLoop* phase, Node* node, Node* stop) { |
|
916 Node* region = node; |
|
917 while (!region->isa_Region()) { |
|
918 Node *up = phase->idom(region); |
|
919 assert(up != region, "Must not loop"); |
|
920 assert(up != stop, "Must not find original control"); |
|
921 region = up; |
|
922 } |
|
923 return region; |
|
924 } |
|
925 |
|
926 // Clone this load to each catch block |
|
927 static void call_catch_cleanup_one(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) { |
|
928 bool trace = phase->C->directive()->ZTraceLoadBarriersOption; |
|
929 phase->igvn().set_delay_transform(true); |
|
930 |
|
931 // Verify pre conditions |
|
932 assert(ctrl->isa_Proj() && ctrl->in(0)->isa_Call(), "Must be a call proj"); |
|
933 assert(ctrl->raw_out(0)->isa_Catch(), "Must be a catch"); |
|
934 |
|
935 if (ctrl->raw_out(0)->isa_Catch()->outcnt() == 1) { |
|
936 if (trace) tty->print_cr("Cleaning up catch: Skipping load %i, call with single catch", load->_idx); |
|
937 return; |
|
938 } |
|
939 |
|
940 // Process the load's successor nodes - if any is between |
|
941 // the call and the catch blocks, they need to be cloned too. |
|
942 // This is done recursively |
|
943 int outcnt = load->outcnt(); |
|
944 uint index = 0; |
|
945 for (int i = 0; i < outcnt; i++) { |
|
946 if (index < load->outcnt()) { |
|
947 Node *n = load->raw_out(index); |
|
948 assert(!n->is_LoadBarrier(), "Sanity"); |
|
949 if (!fixup_uses_in_catch(phase, ctrl, n)) { |
|
950 // if no successor was cloned, progress to next out. |
|
951 index++; |
|
952 } |
|
953 } |
|
954 } |
|
955 |
|
956 // Now all the load's uses have been cloned down |
|
957 // Only thing left is to clone the loads, but they must end up |
|
958 // first in the catch blocks. |
|
959 |
|
960 // We clone the loads to the catch blocks only when needed. |
|
961 // An array is used to map the catch blocks to each lazily cloned load. |
|
962 // In that way no extra unnecessary loads are cloned. |
|
963 |
|
964 // Any use dominated by the original block must have a phi and a region added |
|
965 |
|
966 Node* catch_node = ctrl->raw_out(0); |
|
967 int number_of_catch_projs = catch_node->outcnt(); |
|
968 Node** proj_to_load_mapping = NEW_RESOURCE_ARRAY(Node*, number_of_catch_projs); |
|
969 Copy::zero_to_bytes(proj_to_load_mapping, sizeof(Node*) * number_of_catch_projs); |
|
970 |
|
971 // The phi_map is used to keep track of where phis have already been inserted |
|
972 int phi_map_len = phase->C->unique(); |
|
973 Node** phi_map = NEW_RESOURCE_ARRAY(Node*, phi_map_len); |
|
974 Copy::zero_to_bytes(phi_map, sizeof(Node*) * phi_map_len); |
|
975 |
|
976 for (unsigned int i = 0; i < load->outcnt(); i++) { |
|
977 Node* load_use_control = NULL; |
|
978 Node* load_use = load->raw_out(i); |
|
979 |
|
980 if (phase->has_ctrl(load_use)) { |
|
981 load_use_control = phase->get_ctrl(load_use); |
|
982 } else { |
|
983 load_use_control = load_use->in(0); |
|
984 } |
|
985 assert(load_use_control != NULL, "sanity"); |
|
986 if (trace) tty->print_cr(" Handling use: %i, with control: %i", load_use->_idx, load_use_control->_idx); |
|
987 |
|
988 // Sometimes the load's use is a phi. For those we need to determine from which catch block |
|
989 // the use is defined. |
|
990 bool load_use_is_phi = false; |
|
991 unsigned int load_use_phi_index = 0; |
|
992 Node* phi_ctrl = NULL; |
|
993 if (load_use->is_Phi()) { |
|
994 // Find phi input that matches load |
|
995 for (unsigned int u = 1; u < load_use->req(); u++) { |
|
996 if (load_use->in(u) == load) { |
|
997 load_use_is_phi = true; |
|
998 load_use_phi_index = u; |
|
999 assert(load_use->in(0)->is_Region(), "Region or broken"); |
|
1000 phi_ctrl = load_use->in(0)->in(u); |
|
1001 assert(phi_ctrl->is_CFG(), "check"); |
|
1002 assert(phi_ctrl != load, "check"); |
|
1003 break; |
        }
      }
      assert(load_use_is_phi, "must find");
      assert(load_use_phi_index > 0, "sanity");
    }

    // For each load use, see which catch proj dominates it, create a load clone lazily and reconnect
    bool found_dominating_catchproj = false;
    for (int c = 0; c < number_of_catch_projs; c++) {
      Node* catchproj = catch_node->raw_out(c);
      assert(catchproj != NULL && catchproj->isa_CatchProj(), "Sanity");

      if (!phase->is_dominator(catchproj, load_use_control)) {
        if (load_use_is_phi && phase->is_dominator(catchproj, phi_ctrl)) {
          // The load's use is local to the catchproj.
          // fall out and replace load with catch-local load clone.
        } else {
          continue;
        }
      }
      assert(!found_dominating_catchproj, "Max one should match");

      // Clone loads to catch projs
      Node* load_clone = proj_to_load_mapping[c];
      if (load_clone == NULL) {
        load_clone = clone_load_to_catchproj(phase, load, catchproj);
        proj_to_load_mapping[c] = load_clone;
      }
      phase->igvn().rehash_node_delayed(load_use);

      if (load_use_is_phi) {
        // phis are special - the load is defined from a specific control flow
        load_use->set_req(load_use_phi_index, load_clone);
      } else {
        // Multiple edges can be replaced at once - on calls for example
        load_use->replace_edge(load, load_clone);
      }
      --i; // more than one edge can have been removed, but the next is in later iterations

      // We could break the for-loop after finding a dominating match.
      // But keep iterating to catch any bad idom early.
      found_dominating_catchproj = true;
    }

    // We found no single catchproj that dominated the use - the use is at a point after
    // where control flow from multiple catch projs has merged. We will have to create
    // phi nodes before the use and tie the output from the cloned loads together. It
    // can be a single phi or a number of chained phis, depending on control flow
|
1053 |
|
1054 // Use phi-control if use is a phi |
|
1055 if (load_use_is_phi) { |
|
1056 load_use_control = phi_ctrl; |
|
1057 } |
|
1058 assert(phase->is_dominator(ctrl, load_use_control), "Common use but no dominator"); |
|
1059 |
|
1060 // Clone a load on all paths |
|
1061 for (int c = 0; c < number_of_catch_projs; c++) { |
|
1062 Node* catchproj = catch_node->raw_out(c); |
|
1063 Node* load_clone = proj_to_load_mapping[c]; |
|
1064 if (load_clone == NULL) { |
|
1065 load_clone = clone_load_to_catchproj(phase, load, catchproj); |
|
1066 proj_to_load_mapping[c] = load_clone; |
|
1067 } |
|
1068 } |
|
1069 |
|
1070 // Move up dominator tree from use until dom front is reached |
|
1071 Node* next_region = get_dominating_region(phase, load_use_control, ctrl); |
|
1072 while (phase->idom(next_region) != catch_node) { |
|
1073 next_region = phase->idom(next_region); |
|
1074 if (trace) tty->print_cr("Moving up idom to region ctrl %i", next_region->_idx); |
|
1075 } |
|
1076 assert(phase->is_dominator(catch_node, next_region), "Sanity"); |
|
1077 |
|
1078 // Create or reuse phi node that collect all cloned loads and feed it to the use. |
|
1079 Node* test_phi = phi_map[next_region->_idx]; |
|
1080 if ((test_phi != NULL) && test_phi->is_Phi()) { |
|
1081 // Reuse an already created phi |
|
1082 if (trace) tty->print_cr(" Using cached Phi %i on load_use %i", test_phi->_idx, load_use->_idx); |
|
1083 phase->igvn().rehash_node_delayed(load_use); |
|
1084 load_use->replace_edge(load, test_phi); |
|
1085 // Now this use is done |
|
1086 } else { |
|
1087 // Otherwise we need to create one or more phis |
|
1088 PhiNode* next_phi = new PhiNode(next_region, load->type()); |
|
1089 phi_map[next_region->_idx] = next_phi; // cache new phi |
|
1090 phase->igvn().rehash_node_delayed(load_use); |
|
1091 load_use->replace_edge(load, next_phi); |
|
1092 |
|
1093 int dominators_of_region = 0; |
|
1094 do { |
|
1095 // New phi, connect to region and add all loads as in. |
|
1096 Node* region = next_region; |
|
1097 assert(region->isa_Region() && region->req() > 2, "Catch dead region nodes"); |
|
1098 PhiNode* new_phi = next_phi; |
|
1099 |
|
1100 if (trace) tty->print_cr("Created Phi %i on load %i with control %i", new_phi->_idx, load->_idx, region->_idx); |
|
1101 |
|
1102 // Need to add all cloned loads to the phi, taking care that the right path is matched |
|
1103 dominators_of_region = 0; // reset for new region |
|
1104 for (unsigned int reg_i = 1; reg_i < region->req(); reg_i++) { |
|
1105 Node* region_pred = region->in(reg_i); |
|
1106 assert(region_pred->is_CFG(), "check"); |
|
1107 bool pred_has_dominator = false; |
|
1108 for (int c = 0; c < number_of_catch_projs; c++) { |
|
1109 Node* catchproj = catch_node->raw_out(c); |
|
1110 if (phase->is_dominator(catchproj, region_pred)) { |
|
1111 new_phi->set_req(reg_i, proj_to_load_mapping[c]); |
|
1112 if (trace) tty->print_cr(" - Phi in(%i) set to load %i", reg_i, proj_to_load_mapping[c]->_idx); |
|
1113 pred_has_dominator = true; |
|
1114 dominators_of_region++; |
|
1115 break; |
|
1116 } |
|
1117 } |
|
1118 |
|
1119 // Sometimes we need to chain several phis. |
|
1120 if (!pred_has_dominator) { |
|
1121 assert(dominators_of_region <= 1, "More than one region can't require extra phi"); |
|
1122 if (trace) tty->print_cr(" - Region %i pred %i not dominated by catch proj", region->_idx, region_pred->_idx); |
|
1123 // Continue search on this region_pred |
|
1124 // - walk up to next region |
|
1125 // - create a new phi and connect to first new_phi |
|
1126 next_region = get_dominating_region(phase, region_pred, ctrl); |
|
1127 |
|
1128 // Lookup if there already is a phi, create a new otherwise |
|
1129 Node* test_phi = phi_map[next_region->_idx]; |
|
1130 if ((test_phi != NULL) && test_phi->is_Phi()) { |
|
1131 next_phi = test_phi->isa_Phi(); |
|
1132 dominators_of_region++; // record that a match was found and that we are done |
|
1133 if (trace) tty->print_cr(" Using cached phi Phi %i on control %i", next_phi->_idx, next_region->_idx); |
|
1134 } else { |
|
1135 next_phi = new PhiNode(next_region, load->type()); |
|
1136 phi_map[next_region->_idx] = next_phi; |
|
1137 } |
|
1138 new_phi->set_req(reg_i, next_phi); |
|
1139 } |
|
1140 } |
|
1141 |
|
1142 new_phi->set_req(0, region); |
|
1143 phase->igvn().register_new_node_with_optimizer(new_phi); |
|
1144 phase->set_ctrl(new_phi, region); |
|
1145 |
|
1146 assert(dominators_of_region != 0, "Must have found one this iteration"); |
|
1147 } while (dominators_of_region == 1); |
|
1148 } |
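     // The use no longer refers to the load after replace_edge above, so back up the iteration over the load's uses. |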
|
1149 --i; |
|
1150 } |
|
1151 } // end of loop over uses |
|
1152 |
|
1153 assert(load->outcnt() == 0, "All uses should be handled"); |
|
1154 phase->igvn().remove_dead_node(load); |
|
1155 phase->C->print_method(PHASE_CALL_CATCH_CLEANUP, 4, load->_idx); |
|
1156 |
|
1157 // Now we should be home |
|
1158 phase->igvn().set_delay_transform(false); |
|
1159 } |
|
1160 |
|
1161 // Sort out the loads that are between a call and its catch blocks |
|
1162 static void process_catch_cleanup_candidate(PhaseIdealLoop* phase, LoadNode* load) { |
|
1163 bool trace = phase->C->directive()->ZTraceLoadBarriersOption; |
|
1164 |
|
1165 Node* ctrl = phase->get_ctrl(load); |
|
1166 if (!ctrl->is_Proj() || (ctrl->in(0) == NULL) || !ctrl->in(0)->isa_Call()) { |
|
1167 return; |
|
1168 } |
|
1169 |
|
1170 Node* catch_node = ctrl->isa_Proj()->raw_out(0); |
|
1171 if (catch_node->is_Catch()) { |
|
1172 if (catch_node->outcnt() > 1) { |
|
1173 call_catch_cleanup_one(phase, load, ctrl); |
|
1174 } else { |
|
1175 if (trace) tty->print_cr("Call catch cleanup with only one catch: load %i ", load->_idx); |
|
1176 } |
|
1177 } |
|
1178 } |
|
1179 |
|
1180 void ZBarrierSetC2::barrier_insertion_phase(Compile* C, PhaseIterGVN& igvn) const { |
|
1181 PhaseIdealLoop::optimize(igvn, LoopOptsZBarrierInsertion); |
|
1182 if (C->failing()) return; |
|
1183 } |
|
1184 |
|
1185 bool ZBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { |
|
1186 |
|
1187 if (mode == LoopOptsZBarrierInsertion) { |
|
1188   // First make sure all loads between a call and its catch blocks are cloned into the catch blocks |
|
1189 clean_catch_blocks(phase); |
|
1190 |
|
1191 // Then expand barriers on all loads |
|
1192 insert_load_barriers(phase); |
|
1193 |
|
1194   // Handle all Unsafe accesses that need barriers. |
|
1195 insert_barriers_on_unsafe(phase); |
|
1196 |
|
1197 phase->C->clear_major_progress(); |
|
1198 return true; |
|
1199 } else { |
|
1200 return false; |
|
1201 } |
|
1202 } |
|
1203 |
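     // CAS nodes whose expected value is null can skip the load barrier: null in memory compares equal as-is, |
     // and any non-null oop (healed or not) fails the compare either way. |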
|
1204 static bool can_simplify_cas(LoadStoreNode* node) { |
|
1205 if (node->isa_LoadStoreConditional()) { |
|
1206 Node *expected_in = node->as_LoadStoreConditional()->in(LoadStoreConditionalNode::ExpectedIn); |
|
1207 return (expected_in->get_ptr_type() == TypePtr::NULL_PTR); |
|
1208 } else { |
|
1209 return false; |
|
1210 } |
|
1211 } |
|
1212 |
|
1213 static void insert_barrier_before_unsafe(PhaseIdealLoop* phase, LoadStoreNode* old_node) { |
|
1214 |
|
1215 Compile *C = phase->C; |
|
1216 PhaseIterGVN &igvn = phase->igvn(); |
|
1217 LoadStoreNode* zclone = NULL; |
|
1218 bool is_weak = false; |
|
1219 |
|
1220 Node *in_ctrl = old_node->in(MemNode::Control); |
|
1221 Node *in_mem = old_node->in(MemNode::Memory); |
|
1222 Node *in_adr = old_node->in(MemNode::Address); |
|
1223 Node *in_val = old_node->in(MemNode::ValueIn); |
|
1224 const TypePtr *adr_type = old_node->adr_type(); |
|
1225 const TypePtr* load_type = TypeOopPtr::BOTTOM; // The type for the load we are adding |
|
1226 |
|
1227 switch (old_node->Opcode()) { |
|
1228 case Op_CompareAndExchangeP: { |
|
1229 zclone = new ZCompareAndExchangePNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn), |
|
1230 adr_type, old_node->get_ptr_type(), ((CompareAndExchangeNode*)old_node)->order()); |
|
1231 load_type = old_node->bottom_type()->is_ptr(); |
|
1232 break; |
|
1233 } |
|
1234 case Op_WeakCompareAndSwapP: { |
|
1235 if (can_simplify_cas(old_node)) { |
|
1236 break; |
|
1237 } |
|
1238 is_weak = true; |
|
1239 zclone = new ZWeakCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn), |
|
1240 ((CompareAndSwapNode*)old_node)->order()); |
|
1241 adr_type = TypePtr::BOTTOM; |
|
1242 break; |
|
1243 } |
|
1244 case Op_CompareAndSwapP: { |
|
1245 if (can_simplify_cas(old_node)) { |
|
1246 break; |
|
1247 } |
|
1248 zclone = new ZCompareAndSwapPNode(in_ctrl, in_mem, in_adr, in_val, old_node->in(LoadStoreConditionalNode::ExpectedIn), |
|
1249 ((CompareAndSwapNode*)old_node)->order()); |
|
1250 adr_type = TypePtr::BOTTOM; |
|
1251 break; |
|
1252 } |
|
1253 case Op_GetAndSetP: { |
|
1254 zclone = new ZGetAndSetPNode(in_ctrl, in_mem, in_adr, in_val, old_node->adr_type(), old_node->get_ptr_type()); |
|
1255 load_type = old_node->bottom_type()->is_ptr(); |
|
1256 break; |
|
1257 } |
|
1258 } |
|
1259 if (zclone != NULL) { |
|
1260 igvn.register_new_node_with_optimizer(zclone, old_node); |
|
1261 |
|
1262 // Make load |
|
1263 LoadPNode *load = new LoadPNode(NULL, in_mem, in_adr, adr_type, load_type, MemNode::unordered, |
|
1264 LoadNode::DependsOnlyOnTest); |
|
1265 load_set_expanded_barrier(load); |
|
1266 igvn.register_new_node_with_optimizer(load); |
|
1267 igvn.replace_node(old_node, zclone); |
|
1268 |
|
1269 Node *barrier = new LoadBarrierNode(C, NULL, in_mem, load, in_adr, is_weak); |
|
1270 Node *barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop); |
|
1271 Node *barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control); |
|
1272 |
|
1273 igvn.register_new_node_with_optimizer(barrier); |
|
1274 igvn.register_new_node_with_optimizer(barrier_val); |
|
1275 igvn.register_new_node_with_optimizer(barrier_ctrl); |
|
1276 |
|
1277   // Loop over all uses of in_ctrl and move them to barrier_ctrl |
|
1278 for (DUIterator_Last imin, i = in_ctrl->last_outs(imin); i >= imin; --i) { |
|
1279 Node *use = in_ctrl->last_out(i); |
|
1280 uint l; |
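     // Find which input of this use refers to in_ctrl, then redirect that edge to barrier_ctrl. |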
|
1281 for (l = 0; use->in(l) != in_ctrl; l++) {} |
|
1282 igvn.replace_input_of(use, l, barrier_ctrl); |
|
1283 } |
|
1284 |
|
1285 load->set_req(MemNode::Control, in_ctrl); |
|
1286 barrier->set_req(LoadBarrierNode::Control, in_ctrl); |
|
1287 zclone->add_req(barrier_val); // add req as keep alive. |
|
1288 |
|
1289 C->print_method(PHASE_ADD_UNSAFE_BARRIER, 4, zclone->_idx); |
|
1290 } |
|
1291 } |
|
1292 |
|
1293 void ZBarrierSetC2::insert_barriers_on_unsafe(PhaseIdealLoop* phase) const { |
|
1294 Compile *C = phase->C; |
|
1295 PhaseIterGVN &igvn = phase->igvn(); |
|
1296 uint new_ids = C->unique(); |
|
1297 VectorSet visited(Thread::current()->resource_area()); |
|
1298 GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL); |
|
1299 nodeStack.push(C->root()); |
|
1300 visited.test_set(C->root()->_idx); |
|
1301 |
|
1302 // Traverse all nodes, visit all unsafe ops that require a barrier |
|
1303 while (nodeStack.length() > 0) { |
|
1304 Node *n = nodeStack.pop(); |
|
1305 |
|
1306 bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup |
|
1307 if (is_old_node) { |
|
1308 if (n->is_LoadStore()) { |
|
1309 LoadStoreNode* lsn = n->as_LoadStore(); |
|
1310 if (lsn->has_barrier()) { |
|
1311 BasicType bt = lsn->in(MemNode::Address)->bottom_type()->basic_type(); |
|
1312 assert ((bt == T_OBJECT || bt == T_ARRAY), "Sanity test"); |
|
1313 insert_barrier_before_unsafe(phase, lsn); |
|
1314 } |
|
1315 } |
|
1316 } |
|
1317 for (uint i = 0; i < n->len(); i++) { |
|
1318 if (n->in(i)) { |
|
1319 if (!visited.test_set(n->in(i)->_idx)) { |
|
1320 nodeStack.push(n->in(i)); |
|
1321 } |
|
1322 } |
|
1323 } |
|
1324 } |
|
1325 |
|
1326 igvn.optimize(); |
|
1327 C->print_method(PHASE_ADD_UNSAFE_BARRIER, 2); |
|
1328 } |
|
1329 |
|
1330 // The purpose of ZBarrierSetC2::clean_catch_blocks is to prepare the IR for |
|
1331 // splicing in load barrier nodes. |
|
1332 // |
|
1333 // The problem is that we might have instructions between a call and its catch nodes. |
|
1334 // (This is usually handled in PhaseCFG::call_catch_cleanup, which clones mach nodes in |
|
1335 // already scheduled blocks.) We can't have loads that require barriers there, |
|
1336 // because we need to splice in new control flow, and that would violate the IR. |
|
1337 // |
|
1338 // clean_catch_blocks finds all Loads that require a barrier and clones them and any |

1339 // dependent instructions to each use. The loads must be at the beginning of the catch block |
|
1340 // before any store. |
|
1341 // |
|
1342 // Sometimes the load's use will be at a place dominated by all catch blocks; then we need |
|
1343 // a load in each catch block, and a Phi at the dominated use. |
|
1344 |
|
1345 void ZBarrierSetC2::clean_catch_blocks(PhaseIdealLoop* phase) const { |
|
1346 |
|
1347 Compile *C = phase->C; |
|
1348 uint new_ids = C->unique(); |
|
1349 PhaseIterGVN &igvn = phase->igvn(); |
|
1350 VectorSet visited(Thread::current()->resource_area()); |
|
1351 GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL); |
|
1352 nodeStack.push(C->root()); |
|
1353 visited.test_set(C->root()->_idx); |
|
1354 |
|
1355 // Traverse all nodes, visit all loads that require a barrier |
|
1356 while(nodeStack.length() > 0) { |
|
1357 Node *n = nodeStack.pop(); |
|
1358 |
|
1359 bool is_old_node = (n->_idx < new_ids); // don't process nodes that were created during cleanup |
|
1360 if (n->is_Load() && is_old_node) { |
|
1361 LoadNode* load = n->isa_Load(); |
|
1362 // only care about loads that will have a barrier |
|
1363 if (load_require_barrier(load)) { |
|
1364 process_catch_cleanup_candidate(phase, load); |
|
1365 } |
|
1366 } |
|
1367 |
|
1368 for (uint i = 0; i < n->len(); i++) { |
|
1369 if (n->in(i)) { |
|
1370 if (!visited.test_set(n->in(i)->_idx)) { |
|
1371 nodeStack.push(n->in(i)); |
|
1372 } |
|
1373 } |
|
1374 } |
|
1375 } |
|
1376 |
|
1377 C->print_method(PHASE_CALL_CATCH_CLEANUP, 2); |
|
1378 } |
|
1379 |
|
1380 class DomDepthCompareClosure : public CompareClosure<LoadNode*> { |
|
1381 PhaseIdealLoop* _phase; |
|
1382 |
|
1383 public: |
|
1384 DomDepthCompareClosure(PhaseIdealLoop* phase) : _phase(phase) { } |
|
1385 |
|
1386 int do_compare(LoadNode* const &n1, LoadNode* const &n2) { |
|
1387 int d1 = _phase->dom_depth(_phase->get_ctrl(n1)); |
|
1388 int d2 = _phase->dom_depth(_phase->get_ctrl(n2)); |
|
1389 if (d1 == d2) { |
|
1390   // Compare indices if the depths are the same; this ensures all entries are unique. |
|
1391 return n1->_idx - n2->_idx; |
|
1392 } else { |
|
1393 return d2 - d1; |
|
1394 } |
|
1395 } |
|
1396 }; |
|
1397 |
|
1398 // Traverse graph and add all loadPs to list, sorted by dom depth |
|
1399 void gather_loadnodes_sorted(PhaseIdealLoop* phase, GrowableArray<LoadNode*>* loadList) { |
|
1400 |
|
1401 VectorSet visited(Thread::current()->resource_area()); |
|
1402 GrowableArray<Node *> nodeStack(Thread::current()->resource_area(), 0, 0, NULL); |
|
1403 DomDepthCompareClosure ddcc(phase); |
|
1404 |
|
1405 nodeStack.push(phase->C->root()); |
|
1406 while(nodeStack.length() > 0) { |
|
1407 Node *n = nodeStack.pop(); |
|
1408 if (visited.test(n->_idx)) { |
|
1409 continue; |
|
1410 } |
|
1411 |
|
1412 if (n->isa_Load()) { |
|
1413 LoadNode *load = n->as_Load(); |
|
1414 if (load_require_barrier(load)) { |
|
1415 assert(phase->get_ctrl(load) != NULL, "sanity"); |
|
1416 assert(phase->dom_depth(phase->get_ctrl(load)) != 0, "sanity"); |
|
1417 loadList->insert_sorted(&ddcc, load); |
|
1418 } |
|
1419 } |
|
1420 |
|
1421 visited.set(n->_idx); |
|
1422 for (uint i = 0; i < n->req(); i++) { |
|
1423 if (n->in(i)) { |
|
1424 if (!visited.test(n->in(i)->_idx)) { |
|
1425 nodeStack.push(n->in(i)); |
|
1426 } |
|
1427 } |
|
1428 } |
|
1429 } |
|
1430 } |
|
1431 |
|
1432 // Add LoadBarriers to all LoadPs |
|
1433 void ZBarrierSetC2::insert_load_barriers(PhaseIdealLoop* phase) const { |
|
1434 |
|
1435 bool trace = phase->C->directive()->ZTraceLoadBarriersOption; |
|
1436 GrowableArray<LoadNode *> loadList(Thread::current()->resource_area(), 0, 0, NULL); |
|
1437 gather_loadnodes_sorted(phase, &loadList); |
|
1438 |
|
1439 PhaseIterGVN &igvn = phase->igvn(); |
|
1440 int count = 0; |
|
1441 |
|
1442 for (GrowableArrayIterator<LoadNode *> loadIter = loadList.begin(); loadIter != loadList.end(); ++loadIter) { |
|
1443 LoadNode *load = *loadIter; |
|
1444 |
|
1445 if (load_has_expanded_barrier(load)) { |
|
1446 continue; |
|
1447 } |
|
1448 |
|
1449 do { |
|
1450 // Insert a barrier on a loadP |
|
1451 // if another load is found that needs to be expanded first, retry on that one |
|
1452 LoadNode* result = insert_one_loadbarrier(phase, load, phase->get_ctrl(load)); |
|
1453 while (result != NULL) { |
|
1454 result = insert_one_loadbarrier(phase, result, phase->get_ctrl(result)); |
|
1455 } |
|
1456 } while (!load_has_expanded_barrier(load)); |
|
1457 } |
|
1458 |
|
1459 phase->C->print_method(PHASE_INSERT_BARRIER, 2); |
|
1460 } |
|
1461 |
|
1462 void push_antidependent_stores(PhaseIdealLoop* phase, Node_Stack& nodestack, LoadNode* start_load) { |
|
1463   // Push all stores on the same memory input that can_alias the load |
|
1464 // Any load found must be handled first |
|
1465 PhaseIterGVN &igvn = phase->igvn(); |
|
1466 int load_alias_idx = igvn.C->get_alias_index(start_load->adr_type()); |
|
1467 |
|
1468 Node *mem = start_load->in(1); |
|
1469 for (DUIterator_Fast imax, u = mem->fast_outs(imax); u < imax; u++) { |
|
1470 Node *mem_use = mem->fast_out(u); |
|
1471 |
|
1472 if (mem_use == start_load) continue; |
|
1473 if (!mem_use->is_Store()) continue; |
|
1474 if (!phase->has_ctrl(mem_use)) continue; |
|
1475 if (phase->get_ctrl(mem_use) != phase->get_ctrl(start_load)) continue; |
|
1476 |
|
1477 // add any aliasing store in this block |
|
1478 StoreNode *store = mem_use->isa_Store(); |
|
1479 const TypePtr *adr_type = store->adr_type(); |
|
1480 if (igvn.C->can_alias(adr_type, load_alias_idx)) { |
|
1481 nodestack.push(store, 0); |
|
1482 } |
|
1483 } |
|
1484 } |
|
1485 |
|
1486 LoadNode* ZBarrierSetC2::insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* start_load, Node* ctrl) const { |
|
1487 bool trace = phase->C->directive()->ZTraceLoadBarriersOption; |
|
1488 PhaseIterGVN &igvn = phase->igvn(); |
|
1489 |
|
1490   // Check for other loadPs at the same loop depth that are reachable by a DFS |
|
1491 // - if found - return it. It needs to be inserted first |
|
1492 // - otherwise proceed and insert barrier |
|
1493 |
|
1494 VectorSet visited(Thread::current()->resource_area()); |
|
1495 Node_Stack nodestack(100); |
|
1496 |
|
1497 nodestack.push(start_load, 0); |
|
1498 push_antidependent_stores(phase, nodestack, start_load); |
|
1499 |
|
1500 while(!nodestack.is_empty()) { |
|
1501 Node* n = nodestack.node(); // peek |
|
1502 nodestack.pop(); |
|
1503 if (visited.test(n->_idx)) { |
|
1504 continue; |
|
1505 } |
|
1506 |
|
1507 if (n->is_Load() && n != start_load && load_require_barrier(n->as_Load()) && !load_has_expanded_barrier(n->as_Load())) { |
|
1508 // Found another load that needs a barrier in the same block. Must expand later loads first. |
|
1509 if (trace) tty->print_cr(" * Found LoadP %i on DFS", n->_idx); |
|
1510 return n->as_Load(); // return node that should be expanded first |
|
1511 } |
|
1512 |
|
1513 if (!phase->has_ctrl(n)) continue; |
|
1514 if (phase->get_ctrl(n) != phase->get_ctrl(start_load)) continue; |
|
1515 if (n->is_Phi()) continue; |
|
1516 |
|
1517 visited.set(n->_idx); |
|
1518 // push all children |
|
1519 for (DUIterator_Fast imax, ii = n->fast_outs(imax); ii < imax; ii++) { |
|
1520 Node* c = n->fast_out(ii); |
|
1521 if (c != NULL) { |
|
1522 nodestack.push(c, 0); |
|
1523 } |
|
1524 } |
|
1525 } |
|
1526 |
|
1527 insert_one_loadbarrier_inner(phase, start_load, ctrl, visited); |
|
1528 return NULL; |
|
1529 } |
|
1530 |
|
1531 void ZBarrierSetC2::insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited2) const { |
|
1532 PhaseIterGVN &igvn = phase->igvn(); |
|
1533 Compile* C = igvn.C; |
|
1534 bool trace = C->directive()->ZTraceLoadBarriersOption; |
|
1535 |
|
1536 // create barrier |
|
1537 Node* barrier = new LoadBarrierNode(C, NULL, load->in(LoadNode::Memory), NULL, load->in(LoadNode::Address), load_has_weak_barrier(load)); |
|
1538 Node* barrier_val = new ProjNode(barrier, LoadBarrierNode::Oop); |
|
1539 Node* barrier_ctrl = new ProjNode(barrier, LoadBarrierNode::Control); |
|
1540 |
|
1541 if (trace) tty->print_cr("Insert load %i with barrier: %i and ctrl : %i", load->_idx, barrier->_idx, ctrl->_idx); |
|
1542 |
|
1543 // Splice control |
|
1544   // - insert a barrier control diamond between the load's ctrl and the ctrl successor on the path to the block end. |
|
1545 // - If control successor is a catch, step over to next. |
|
1546 Node* ctrl_succ = NULL; |
|
1547 for (DUIterator_Fast imax, j = ctrl->fast_outs(imax); j < imax; j++) { |
|
1548 Node* tmp = ctrl->fast_out(j); |
|
1549 |
|
1550   // - CFG nodes are the ones we are going to splice (1 only!) |
|
1551 // - Phi nodes will continue to hang from the region node! |
|
1552 // - self loops should be skipped |
|
1553 if (tmp->is_Phi() || tmp == ctrl) { |
|
1554 continue; |
|
1555 } |
|
1556 |
|
1557 if (tmp->is_CFG()) { |
|
1558 assert(ctrl_succ == NULL, "There can be only one"); |
|
1559 ctrl_succ = tmp; |
|
1560 continue; |
|
1561 } |
|
1562 } |
|
1563 |
|
1564 // Now splice control |
|
1565 assert(ctrl_succ != load, "sanity"); |
|
1566 assert(ctrl_succ != NULL, "Broken IR"); |
|
1567 bool found = false; |
|
1568 for(uint k = 0; k < ctrl_succ->req(); k++) { |
|
1569 if (ctrl_succ->in(k) == ctrl) { |
|
1570 assert(!found, "sanity"); |
|
1571 if (trace) tty->print_cr(" Move CFG ctrl_succ %i to barrier_ctrl", ctrl_succ->_idx); |
|
1572 igvn.replace_input_of(ctrl_succ, k, barrier_ctrl); |
|
1573 found = true; |
|
1574 k--; |
|
1575 } |
|
1576 } |
|
1577 |
|
1578   // For all successors of ctrl: move all visited nodes to become successors of barrier_ctrl instead |
|
1579 for (DUIterator_Fast imax, r = ctrl->fast_outs(imax); r < imax; r++) { |
|
1580 Node* tmp = ctrl->fast_out(r); |
|
1581 if (visited2.test(tmp->_idx) && (tmp != load)) { |
|
1582 if (trace) tty->print_cr(" Move ctrl_succ %i to barrier_ctrl", tmp->_idx); |
|
1583 igvn.replace_input_of(tmp, 0, barrier_ctrl); |
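     // replace_input_of removed tmp from ctrl's out list, so adjust the DU iterator. |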
|
1584 --r; --imax; |
|
1585 } |
|
1586 } |
|
1587 |
|
1588   // Move the load's users to the barrier |
|
1589 for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) { |
|
1590 Node* u = load->fast_out(i); |
|
1591 if (u->isa_LoadBarrier()) { |
|
1592 continue; |
|
1593 } |
|
1594 |
|
1595 // find correct input - replace with iterator? |
|
1596 for(uint j = 0; j < u->req(); j++) { |
|
1597 if (u->in(j) == load) { |
|
1598 igvn.replace_input_of(u, j, barrier_val); |
|
1599 --i; --imax; // Adjust the iterator of the *outer* loop |
|
1600 break; // some nodes (calls) might have several uses from the same node |
|
1601 } |
|
1602 } |
|
1603 } |
|
1604 |
|
1605 // Connect barrier to load and control |
|
1606 barrier->set_req(LoadBarrierNode::Oop, load); |
|
1607 barrier->set_req(LoadBarrierNode::Control, ctrl); |
|
1608 |
|
1609 igvn.rehash_node_delayed(load); |
|
1610 igvn.register_new_node_with_optimizer(barrier); |
|
1611 igvn.register_new_node_with_optimizer(barrier_val); |
|
1612 igvn.register_new_node_with_optimizer(barrier_ctrl); |
|
1613 load_set_expanded_barrier(load); |
|
1614 |
|
1615 C->print_method(PHASE_INSERT_BARRIER, 3, load->_idx); |
|
1616 } |
|
1617 |
|
1618 // The bad_mask in the ThreadLocalData shouldn't have an anti-dep-check. |
|
1619 // The bad_mask address is of type TypeRawPtr, but that will alias |
|
1620 // InitializeNodes until the type system is expanded. |
|
1621 bool ZBarrierSetC2::needs_anti_dependence_check(const Node* node) const { |
|
1622 MachNode* mnode = node->as_Mach(); |
|
1623 if (mnode != NULL) { |
|
1624 intptr_t offset = 0; |
|
1625 const TypePtr *adr_type2 = NULL; |
|
1626 const Node* base = mnode->get_base_and_disp(offset, adr_type2); |
|
1627 if ((base != NULL) && |
|
1628 (base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_ThreadLocal) && |
|
1629 (offset == in_bytes(ZThreadLocalData::address_bad_mask_offset()))) { |
|
1630 return false; |
|
1631 } |
|
1632 } |
|
1633 return true; |
|
1634 } |