404 __ bind(Lnot_weak); |
409 __ bind(Lnot_weak); |
405 __ verify_oop(value); |
410 __ verify_oop(value); |
406 __ bind(Ldone); |
411 __ bind(Ldone); |
407 } |
412 } |
408 |
413 |
|
414 #ifdef COMPILER1 |
|
415 |
409 #undef __ |
416 #undef __ |
|
417 #define __ ce->masm()-> |
|
418 |
|
// Emit the out-of-line (slow-path) code for a C1 G1 pre-barrier stub:
// pass the previous field value to the pre-barrier runtime stub so it can
// be recorded in the SATB queue.
void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  // z_ltgr also sets the condition code: a zero (null) previous value
  // needs no SATB record, so branch straight to the continuation.
  __ z_ltgr(Z_R1_scratch, pre_val_reg);
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}
|
440 |
|
// Emit the out-of-line (slow-path) code for a C1 G1 post-barrier stub:
// pass the updated slot address to the post-barrier runtime stub, which
// dirties and enqueues the corresponding card if necessary.
void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  // Load-and-test the new value: a null store needs no card marking,
  // so skip the runtime call entirely.
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  // Pass the address of the updated slot in Z_R1_scratch to the runtime stub.
  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}
|
454 |
|
455 #undef __ |
|
456 |
|
457 #define __ sasm-> |
|
458 |
|
459 static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) { |
|
460 __ block_comment("save_volatile_registers"); |
|
461 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers; |
|
462 int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size; |
|
463 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); |
|
464 return RegisterSaver::save_live_registers(sasm, reg_set, return_pc); |
|
465 } |
|
466 |
|
467 static void restore_volatile_registers(StubAssembler* sasm) { |
|
468 __ block_comment("restore_volatile_registers"); |
|
469 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers; |
|
470 RegisterSaver::restore_live_registers(sasm, reg_set); |
|
471 } |
|
472 |
|
// Runtime stub behind gen_pre_barrier_stub(): enqueue the previous value
// (passed in Z_R1_scratch) into the thread-local SATB mark queue. If the
// queue buffer has no room left, calls into the VM to handle the full
// buffer and retries.
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_pre_barrier_slow_id", false);

  Register pre_val = Z_R1_scratch;
  Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
  Register tmp2 = Z_R7;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  // Is marking still active? The flag width is implementation-dependent,
  // so test either 4 bytes or 1 byte accordingly.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
  }
  __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t so ld_ptr is appropriate.
  __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

  // index == 0? Then the buffer is full and must be handed to the VM.
  __ z_brz(refill);

  __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
  __ add2reg(tmp, -oopSize); // The buffer is filled from high to low addresses.

  __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <previous value>
  __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

  __ bind(marking_not_active);
  // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  // Slow path: the VM call clobbers volatile registers, so spill them and
  // keep pre_val alive in the non-volatile tmp across the call.
  save_volatile_registers(sasm);
  __ z_lgr(tmp, pre_val); // save pre_val
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(pre_val, tmp); // restore pre_val
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}
|
530 |
|
// Runtime stub behind gen_post_barrier_stub(): compute the card table entry
// for the updated oop slot (address passed in Z_R1_scratch), dirty the card
// if it is neither young nor already dirty, and enqueue the card address in
// the thread-local dirty card queue. Calls into the VM to handle a full
// queue buffer and retries.
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: oop address, address of updated memory slot

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_post_barrier_slow_id", false);

  Register addr_oop = Z_R1_scratch;
  Register addr_card = Z_R1_scratch; // Aliases addr_oop: converted in place below.
  Register r1 = Z_R6; // Must be saved/restored.
  Register r2 = Z_R7; // Must be saved/restored.
  Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  jbyte* byte_map_base = ct->byte_map_base();

  // Save registers used below (see assertion in G1PostBarrierStub::emit_code()).
  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  Label not_already_dirty, restart, refill, young_card;

  // Calculate address of card corresponding to the updated oop slot.
  AddressLiteral rs(byte_map_base);
  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
  addr_oop = noreg; // dead now
  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
  __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

  // A young card needs no dirtying or enqueueing.
  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
  __ z_bre(young_card);

  __ z_sync(); // Required to support concurrent cleaning.

  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
  __ z_brne(not_already_dirty);

  __ bind(young_card);
  // Reached via the young-card branch above, or by falling through when
  // the card is already dirty: nothing to do, restore used registers
  // and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  // Not dirty.
  __ bind(not_already_dirty);

  // First, dirty it: [addr_card] := 0
  __ z_mvi(0, addr_card, CardTable::dirty_card_val());

  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
  Register buf = r2;
  cardtable = noreg; // now dead

  // Save registers used below (see assertion in G1PostBarrierStub::emit_code()).
  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
  ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();

  __ bind(restart);

  // Get the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so z_ltg is appropriate here.
  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

  // index == 0? Then the buffer is full and must be handed to the VM.
  __ z_brz(refill);

  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
  __ add2reg(idx, -oopSize); // The buffer is filled from high to low addresses.

  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
  // Restore killed registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  // Slow path: the VM call clobbers volatile registers, so spill them and
  // keep addr_card alive in the non-volatile idx register across the call.
  save_volatile_registers(sasm);
  __ z_lgr(idx, addr_card); // Save addr_card; idx is non-volatile.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(addr_card, idx); // Restore addr_card.
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}
|
616 |
|
617 #undef __ |
|
618 |
|
619 #endif // COMPILER1 |