        __ ret();
        __ delayed()->restore();
      }
      break;

#ifndef SERIALGC
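    // G1 SATB pre-barrier slow path: enqueue the previous value of the
    // stored-to field (passed in G4) on the current thread's SATB mark
    // queue.  The queue index is a byte offset counting down toward zero;
    // a zero index means the buffer is full and must be handed to the
    // runtime (the refill path below) before the enqueue can be retried.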
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp = G1_scratch;
        Register tmp2 = G3_scratch;

        Label refill, restart;
        bool with_frame = false; // I don't know if we can do with-frame.
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
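        // Fast path, roughly (illustrative C-style sketch only, not the
        // actual runtime code; the field names are schematic):
        //   if (thread->satb_queue.index == 0) goto refill;
        //   thread->satb_queue.index -= oopSize;
        //   *(thread->satb_queue.buf + thread->satb_queue.index) = pre_val;
        //   return;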
        __ bind(restart);
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false,
                          Assembler::pn, tmp, refill);

        // If the branch is taken, no harm in executing this in the delay slot.
        __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

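        // Refill path: the queue index is zero, i.e. the buffer is full.
        // Set up a frame, preserve the live values in locals (which survive
        // the call), hand the buffer to the runtime via
        // SATBMarkQueueSet::handle_zero_index_for_thread, then restore and
        // retry the enqueue from 'restart'.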
        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp, L1);
        __ mov(tmp2, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

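    // G1 post-barrier slow path: G4 holds the address that was stored to.
    // Compute its card; if the card is not already dirty, dirty it and
    // enqueue the card address on the thread's dirty card queue.  The
    // refill protocol mirrors the SATB queue above.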
    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr = G4;
        Register cardtable = G5;
        Register tmp = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill;

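        // Card index = addr >> card_shift; the card byte itself lives at
        // byte_map_base + card_index and is loaded and tested below.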
#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

        Address rs(cardtable, (address)byte_map_base);
        __ load_address(rs);            // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp);  // tmp := [addr + cardtable]

        __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt,
                          tmp, not_already_dirty);
        // Get the card address (addr + cardtable) into a reg by itself -- useful
        // in the take-the-branch case, harmless if not.
        __ delayed()->add(addr, cardtable, tmp2);

        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);
        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0 (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());
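        // Enqueue protocol, roughly the same as for the SATB queue above
        // (schematic sketch only):
        //   if (index == 0) goto refill;
        //   index -= oopSize;  buf[index] = card_address;  return;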
        __ bind(restart);
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn,
                          tmp3, refill);
        // If the branch is taken, no harm in executing this in the delay slot.
        __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

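        // Refill path: preserve the live values across the call, hand the
        // full buffer to the runtime via
        // DirtyCardQueueSet::handle_zero_index_for_thread, then retry.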
        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                        G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // !SERIALGC

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);