--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Thu Apr 26 12:48:35 2018 -0700
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Thu Apr 26 20:42:43 2018 +0200
@@ -26,12 +26,17 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
#define __ masm->
@@ -339,4 +344,209 @@
__ bind(done);
}
+#ifdef COMPILER1
+
#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+
+ __ bind(*stub->entry());
+
+ assert(stub->pre_val()->is_register(), "Precondition.");
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+
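+ // A null previous value needs no SATB record; exit through the
+ // continuation.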
+ __ cmpdi(CCR0, pre_val_reg, 0);
+ __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
+
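+ // Slow path: call the pre-barrier C1 runtime blob to enqueue pre_val.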
+ address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
+ // Materialize the blob address relative to the global TOC; shorter
+ // than a full load_const_optimized sequence.
+ __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
+ __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
+ __ mtctr(R0);
+ __ bctrl();
+ __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register addr_reg = stub->addr()->as_pointer_register();
+ Register new_val_reg = stub->new_val()->as_register();
+
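+ // Storing null never creates a cross-region reference; skip the card
+ // mark and exit through the continuation.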
+ __ cmpdi(CCR0, new_val_reg, 0);
+ __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
+
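+ // Slow path: call the post-barrier C1 runtime blob with the store
+ // address passed in R0.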
+ address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
+ // Materialize the blob address relative to the global TOC; shorter
+ // than a full load_const_optimized sequence.
+ __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
+ __ mtctr(R0);
+ __ mr(R0, addr_reg); // Pass addr in R0.
+ __ bctrl();
+ __ b(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ BarrierSet* bs = BarrierSet::barrier_set();
+
+ __ set_info("g1_pre_barrier_slow_id", false);
+
+ // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
+ const int stack_slots = 3;
+ Register pre_val = R0; // previous value of memory
+ Register tmp = R14;
+ Register tmp2 = R15;
+
+ Label refill, restart, marking_not_active;
+ int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+ int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
+ int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+
+ // Spill
+ __ std(tmp, -16, R1_SP);
+ __ std(tmp2, -24, R1_SP);
+
+ // Is marking still active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
+ }
+ __ cmpdi(CCR0, tmp, 0);
+ __ beq(CCR0, marking_not_active);
+
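+ // Enqueue loop: try to claim a slot in the thread-local SATB buffer.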
+ __ bind(restart);
+ // Load the index into the SATB buffer. SATBMarkQueue::_index is a
+ // size_t, so a 64-bit load (ld) is appropriate.
+ __ ld(tmp, satb_q_index_byte_offset, R16_thread);
+
+ // index == 0?
+ __ cmpdi(CCR0, tmp, 0);
+ __ beq(CCR0, refill);
+
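+ // Claim the slot: decrement the index by one entry and store pre_val
+ // at _buf + index.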
+ __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
+ __ ld(pre_val, -8, R1_SP); // Load pre_val, passed on the stack by the barrier stub.
+ __ addi(tmp, tmp, -oopSize);
+
+ __ std(tmp, satb_q_index_byte_offset, R16_thread);
+ __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val
+
+ __ bind(marking_not_active);
+ // Restore temp registers and return-from-leaf.
+ __ ld(tmp2, -24, R1_SP);
+ __ ld(tmp, -16, R1_SP);
+ __ blr();
+
+ __ bind(refill);
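+ // The buffer is full. Save the volatile registers (nbytes_save also
+ // skips our spill slots below SP so they are preserved), call out to
+ // install a fresh buffer, and retry.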
+ const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
+ __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ mflr(R0);
+ __ std(R0, _abi(lr), R1_SP);
+ __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
+ __ pop_frame();
+ __ ld(R0, _abi(lr), R1_SP);
+ __ mtlr(R0);
+ __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ b(restart);
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+
+ __ set_info("g1_post_barrier_slow_id", false);
+
+ // Using stack slots: spill addr, spill tmp2
+ const int stack_slots = 2;
+ Register tmp = R0;
+ Register addr = R14;
+ Register tmp2 = R15;
+ jbyte* byte_map_base = bs->card_table()->byte_map_base();
+
+ Label restart, refill, ret;
+
+ // Spill
+ __ std(addr, -8, R1_SP);
+ __ std(tmp2, -16, R1_SP);
+
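+ // Compute the card table entry: byte_map_base + (addr >> card_shift).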
+ __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
+ __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
+ __ add(addr, tmp2, addr);
+ __ lbz(tmp, 0, addr); // tmp := card value at byte_map_base + card index
+
+ // Return if the card is young: stores into young regions need no
+ // remembered set update.
+ __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
+ __ beq(CCR0, ret);
+
+ // Re-read the card after a StoreLoad barrier and return if another
+ // thread has already dirtied it.
+ __ membar(Assembler::StoreLoad);
+ __ lbz(tmp, 0, addr); // tmp := card value at byte_map_base + card index
+
+ __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
+ __ beq(CCR0, ret);
+
+ // The card is clean: dirty it first.
+ __ li(tmp, G1CardTable::dirty_card_val());
+ __ stb(tmp, 0, addr);
+
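+ // Then enqueue the card address so that concurrent refinement will
+ // reprocess it.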
+ int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
+ int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
+
+ __ bind(restart);
+
+ // Get the index into the update buffer. DirtyCardQueue::_index is
+ // a size_t, so a 64-bit load (ld) is appropriate here.
+ __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
+
+ // index == 0?
+ __ cmpdi(CCR0, tmp2, 0);
+ __ beq(CCR0, refill);
+
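+ // Claim the slot: decrement the index by one entry and store the card
+ // address at _buf + index.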
+ __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
+ __ addi(tmp2, tmp2, -oopSize);
+
+ __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
+ __ add(tmp2, tmp, tmp2);
+ __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
+
+ // Restore temp registers and return-from-leaf.
+ __ bind(ret);
+ __ ld(tmp2, -16, R1_SP);
+ __ ld(addr, -8, R1_SP);
+ __ blr();
+
+ __ bind(refill);
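+ // The buffer is full. As in the pre-barrier stub, save the volatile
+ // registers without clobbering the spill slots, call out to install a
+ // fresh buffer, and retry.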
+ const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
+ __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ mflr(R0);
+ __ std(R0, _abi(lr), R1_SP);
+ __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
+ __ pop_frame();
+ __ ld(R0, _abi(lr), R1_SP);
+ __ mtlr(R0);
+ __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ b(restart);
+}
+
+#undef __
+
+#endif // COMPILER1