--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/s390/vm/interp_masm_s390.cpp Thu Oct 13 14:49:34 2016 +0200
@@ -0,0 +1,2127 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Major contributions by AHa, AS, JL, ML.
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interp_masm_s390.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/markOop.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/basicLock.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.inline.hpp"
+
+// Implementation of InterpreterMacroAssembler.
+// This file specializes the assembler with interpreter-specific macros.
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str)
+#define BIND(label) bind(label);
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+#endif
+
+void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
+ assert(entry != NULL, "Entry must have been generated by now");
+ assert(Rscratch != Z_R0, "Can't use R0 for addressing");
+ branch_optimized(Assembler::bcondAlways, entry);
+}
+
+void InterpreterMacroAssembler::empty_expression_stack(void) {
+ get_monitors(Z_R1_scratch);
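+  // Z_esp = monitors - 1 slot: the emptied expression stack begins directly below
+  // the monitor area, with Z_esp pointing at the first free slot.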
+ add2reg(Z_esp, -Interpreter::stackElementSize, Z_R1_scratch);
+}
+
+// Dispatch code executed in the prolog of a bytecode which does not do its
+// own dispatch.
+void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
+ // On z/Architecture we are short on registers, therefore we do not preload the
+ // dispatch address of the next bytecode.
+}
+
+// Dispatch code executed in the epilog of a bytecode which does not do its
+// own dispatch.
+void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
+ dispatch_next(state, step);
+}
+
+void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
+ z_llgc(Z_bytecode, bcp_incr, Z_R0, Z_bcp); // Load next bytecode.
+ add2reg(Z_bcp, bcp_incr); // Advance bcp. Add2reg produces optimal code.
+ dispatch_base(state, Interpreter::dispatch_table(state));
+}
+
+// Common code to dispatch and dispatch_only.
+// Dispatch value in Lbyte_code and increment Lbcp.
+
+void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
+ verify_FPU(1, state);
+
+#ifdef ASSERT
+ address reentry = NULL;
+ { Label OK;
+ // Check if the frame pointer in Z_fp is correct.
+ z_cg(Z_fp, 0, Z_SP);
+ z_bre(OK);
+ reentry = stop_chain_static(reentry, "invalid frame pointer Z_fp: " FILE_AND_LINE);
+ bind(OK);
+ }
+ { Label OK;
+ // check if the locals pointer in Z_locals is correct
+ z_cg(Z_locals, _z_ijava_state_neg(locals), Z_fp);
+ z_bre(OK);
+ reentry = stop_chain_static(reentry, "invalid locals pointer Z_locals: " FILE_AND_LINE);
+ bind(OK);
+ }
+#endif
+
+ // TODO: Maybe implement +VerifyActivationFrameSize here.
+ // verify_thread(); // Too slow. We will just verify on method entry & exit.
+ verify_oop(Z_tos, state);
+#ifdef FAST_DISPATCH
+ if (table == Interpreter::dispatch_table(state)) {
+ // Use IdispatchTables.
+ add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
+ // Add offset to correct dispatch table.
+ sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // Multiply by wordSize.
+ ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // Get entry addr.
+ } else
+#endif
+ {
+ // Dispatch table to use.
+ load_absolute_address(Z_tmp_1, (address) table); // Z_tmp_1 = table;
+
+ // 0 <= Z_bytecode < 256 => Use a 32 bit shift, because it is shorter than sllg.
+ // Z_bytecode must have been loaded zero-extended for this approach to be correct.
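+    // Example: Z_bytecode == 0xb6 (invokevirtual) selects the entry at table + 0xb6 * wordSize.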
+ z_sll(Z_bytecode, LogBytesPerWord, Z_R0); // Multiply by wordSize.
+ z_lg(Z_tmp_1, 0, Z_bytecode, Z_tmp_1); // Get entry addr.
+ }
+ z_br(Z_tmp_1);
+}
+
+void InterpreterMacroAssembler::dispatch_only(TosState state) {
+ dispatch_base(state, Interpreter::dispatch_table(state));
+}
+
+void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
+ dispatch_base(state, Interpreter::normal_table(state));
+}
+
+void InterpreterMacroAssembler::dispatch_via(TosState state, address *table) {
+ // Load current bytecode.
+ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t)0));
+ dispatch_base(state, table);
+}
+
+// The following call_VM*_base() methods overload and mask the respective
+// declarations/definitions in class MacroAssembler. They are meant as a "detour"
+// to perform additional, template interpreter specific tasks before actually
+// calling their MacroAssembler counterparts.
+
+void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point) {
+  bool allow_relocation = true; // Generally valid variant. Assume code is relocated.
+ // interpreter specific
+  // Note: No need to save/restore the bcp pointer (Z_R13) since it is a callee-saved
+  // register and no blocking / GC can happen in leaf calls.
+
+ // super call
+ MacroAssembler::call_VM_leaf_base(entry_point, allow_relocation);
+}
+
+void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
+ // interpreter specific
+  // Note: No need to save/restore the bcp pointer (Z_R13) since it is a callee-saved
+  // register and no blocking / GC can happen in leaf calls.
+
+ // super call
+ MacroAssembler::call_VM_leaf_base(entry_point, allow_relocation);
+}
+
+void InterpreterMacroAssembler::call_VM_base(Register oop_result, Register last_java_sp,
+ address entry_point, bool check_exceptions) {
+  bool allow_relocation = true; // Generally valid variant. Assume code is relocated.
+ // interpreter specific
+
+ save_bcp();
+ save_esp();
+ // super call
+ MacroAssembler::call_VM_base(oop_result, last_java_sp,
+ entry_point, allow_relocation, check_exceptions);
+ restore_bcp();
+}
+
+void InterpreterMacroAssembler::call_VM_base(Register oop_result, Register last_java_sp,
+ address entry_point, bool allow_relocation,
+ bool check_exceptions) {
+ // interpreter specific
+
+ save_bcp();
+ save_esp();
+ // super call
+ MacroAssembler::call_VM_base(oop_result, last_java_sp,
+ entry_point, allow_relocation, check_exceptions);
+ restore_bcp();
+}
+
+void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
+ if (JvmtiExport::can_pop_frame()) {
+ BLOCK_COMMENT("check_and_handle_popframe {");
+ Label L;
+ // Initiate popframe handling only if it is not already being
+ // processed. If the flag has the popframe_processing bit set, it
+ // means that this code is called *during* popframe handling - we
+ // don't want to reenter.
+ // TODO: Check if all four state combinations could be visible.
+ // If (processing and !pending) is an invisible/impossible state,
+ // there is optimization potential by testing both bits at once.
+    // Then, All_Zeroes and All_Ones mean skip, Mixed means do it.
+ testbit(Address(Z_thread, JavaThread::popframe_condition_offset()),
+ exact_log2(JavaThread::popframe_pending_bit));
+ z_bfalse(L);
+ testbit(Address(Z_thread, JavaThread::popframe_condition_offset()),
+ exact_log2(JavaThread::popframe_processing_bit));
+ z_btrue(L);
+
+ // Call Interpreter::remove_activation_preserving_args_entry() to get the
+ // address of the same-named entrypoint in the generated interpreter code.
+ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
+ // The above call should (as its only effect) return the contents of the field
+ // _remove_activation_preserving_args_entry in Z_RET.
+ // We just jump there to have the work done.
+ z_br(Z_RET);
+ // There is no way for control to fall thru here.
+
+ bind(L);
+ BLOCK_COMMENT("} check_and_handle_popframe");
+ }
+}
+
+
+void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
+ Register RjvmtiState = Z_R1_scratch;
+ int tos_off = in_bytes(JvmtiThreadState::earlyret_tos_offset());
+ int oop_off = in_bytes(JvmtiThreadState::earlyret_oop_offset());
+ int val_off = in_bytes(JvmtiThreadState::earlyret_value_offset());
+ int state_off = in_bytes(JavaThread::jvmti_thread_state_offset());
+
+ z_lg(RjvmtiState, state_off, Z_thread);
+
+ switch (state) {
+ case atos: z_lg(Z_tos, oop_off, RjvmtiState);
+ store_const(Address(RjvmtiState, oop_off), 0L, 8, 8, Z_R0_scratch);
+ break;
+ case ltos: z_lg(Z_tos, val_off, RjvmtiState); break;
+ case btos: // fall through
+ case ztos: // fall through
+ case ctos: // fall through
+ case stos: // fall through
+ case itos: z_llgf(Z_tos, val_off, RjvmtiState); break;
+ case ftos: z_le(Z_ftos, val_off, RjvmtiState); break;
+ case dtos: z_ld(Z_ftos, val_off, RjvmtiState); break;
+ case vtos: /* nothing to do */ break;
+ default : ShouldNotReachHere();
+ }
+
+ // Clean up tos value in the jvmti thread state.
+ store_const(Address(RjvmtiState, val_off), 0L, 8, 8, Z_R0_scratch);
+ // Set tos state field to illegal value.
+ store_const(Address(RjvmtiState, tos_off), ilgl, 4, 1, Z_R0_scratch);
+}
+
+void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
+ if (JvmtiExport::can_force_early_return()) {
+ BLOCK_COMMENT("check_and_handle_earlyret {");
+ Label L;
+    // The arg regs are safe to use, because we are just behind the call in call_VM_base.
+ Register jvmti_thread_state = Z_ARG2;
+ Register tmp = Z_ARG3;
+ load_and_test_long(jvmti_thread_state, Address(Z_thread, JavaThread::jvmti_thread_state_offset()));
+ z_bre(L); // if (thread->jvmti_thread_state() == NULL) exit;
+
+ // Initiate earlyret handling only if it is not already being processed.
+ // If the flag has the earlyret_processing bit set, it means that this code
+ // is called *during* earlyret handling - we don't want to reenter.
+
+ assert((JvmtiThreadState::earlyret_pending != 0) && (JvmtiThreadState::earlyret_inactive == 0),
+ "must fix this check, when changing the values of the earlyret enum");
+ assert(JvmtiThreadState::earlyret_pending == 1, "must fix this check, when changing the values of the earlyret enum");
+
+ load_and_test_int(tmp, Address(jvmti_thread_state, JvmtiThreadState::earlyret_state_offset()));
+ z_brz(L); // if (thread->jvmti_thread_state()->_earlyret_state != JvmtiThreadState::earlyret_pending) exit;
+
+ // Call Interpreter::remove_activation_early_entry() to get the address of the
+ // same-named entrypoint in the generated interpreter code.
+ assert(sizeof(TosState) == 4, "unexpected size");
+ z_l(Z_ARG1, Address(jvmti_thread_state, JvmtiThreadState::earlyret_tos_offset()));
+ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Z_ARG1);
+    // The above call should (as its only effect) return the address of the
+    // remove_activation_early entry point (for the given TosState) in Z_RET.
+ // We just jump there to have the work done.
+ z_br(Z_RET);
+ // There is no way for control to fall thru here.
+
+ bind(L);
+ BLOCK_COMMENT("} check_and_handle_earlyret");
+ }
+}
+
+void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
+ lgr_if_needed(Z_ARG1, arg_1);
+ assert(arg_2 != Z_ARG1, "smashed argument");
+ lgr_if_needed(Z_ARG2, arg_2);
+ MacroAssembler::call_VM_leaf_base(entry_point, true);
+}
+
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size) {
+ Address param(Z_bcp, bcp_offset);
+
+ BLOCK_COMMENT("get_cache_index_at_bcp {");
+ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+ if (index_size == sizeof(u2)) {
+ load_sized_value(index, param, 2, false /*signed*/);
+ } else if (index_size == sizeof(u4)) {
+
+ load_sized_value(index, param, 4, false);
+
+ // Check if the secondary index definition is still ~x, otherwise
+ // we have to change the following assembler code to calculate the
+ // plain index.
+ assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
+ not_(index); // Convert to plain index.
+ } else if (index_size == sizeof(u1)) {
+ z_llgc(index, param);
+ } else {
+ ShouldNotReachHere();
+ }
+ BLOCK_COMMENT("}");
+}
+
+
+void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register cpe_offset,
+ int bcp_offset, size_t index_size) {
+ BLOCK_COMMENT("get_cache_and_index_at_bcp {");
+ assert_different_registers(cache, cpe_offset);
+ get_cache_index_at_bcp(cpe_offset, bcp_offset, index_size);
+ z_lg(cache, Address(Z_fp, _z_ijava_state_neg(cpoolCache)));
+ // Convert from field index to ConstantPoolCache offset in bytes.
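+  // (A ConstantPoolCacheEntry is a power-of-2 number of words - typically 4 words, i.e. 32 bytes - so a shift suffices.)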
+ z_sllg(cpe_offset, cpe_offset, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
+ BLOCK_COMMENT("}");
+}
+
+// Kills Z_R0_scratch.
+void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
+ Register cpe_offset,
+ Register bytecode,
+ int byte_no,
+ int bcp_offset,
+ size_t index_size) {
+ BLOCK_COMMENT("get_cache_and_index_and_bytecode_at_bcp {");
+ get_cache_and_index_at_bcp(cache, cpe_offset, bcp_offset, index_size);
+
+ // We want to load (from CP cache) the bytecode that corresponds to the passed-in byte_no.
+  // It is located at (cache + cpe_offset + base_offset + indices_offset + (8-1) (last byte in DW) - (byte_no+1)).
+ // Instead of loading, shifting and masking a DW, we just load that one byte of interest with z_llgc (unsigned).
+ const int base_ix_off = in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset());
+ const int off_in_DW = (8-1) - (1+byte_no);
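+  // Example: byte_no == 1 yields off_in_DW == 5, so the byte of interest is loaded
+  // from (cache + cpe_offset + base_ix_off + 5).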
+ assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
+ assert(ConstantPoolCacheEntry::bytecode_1_mask == 0xff, "");
+ load_sized_value(bytecode, Address(cache, cpe_offset, base_ix_off+off_in_DW), 1, false /*signed*/);
+
+ BLOCK_COMMENT("}");
+}
+
+// Load object from cpool->resolved_references(index).
+void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
+ assert_different_registers(result, index);
+ get_constant_pool(result);
+
+ // Convert
+ // - from field index to resolved_references() index and
+ // - from word index to byte offset.
+ // Since this is a java object, it is potentially compressed.
+ Register tmp = index; // reuse
+ z_sllg(index, index, LogBytesPerHeapOop); // Offset into resolved references array.
+ // Load pointer for resolved_references[] objArray.
+ z_lg(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+ // JNIHandles::resolve(result)
+ z_lg(result, 0, result); // Load resolved references array itself.
+#ifdef ASSERT
+ NearLabel index_ok;
+ z_lgf(Z_R0, Address(result, arrayOopDesc::length_offset_in_bytes()));
+ z_sllg(Z_R0, Z_R0, LogBytesPerHeapOop);
+ compare64_and_branch(tmp, Z_R0, Assembler::bcondLow, index_ok);
+ stop("resolved reference index out of bounds", 0x09256);
+ bind(index_ok);
+#endif
+ z_agr(result, index); // Address of indexed array element.
+ load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
+}
+
+void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
+ Register tmp,
+ int bcp_offset,
+ size_t index_size) {
+ BLOCK_COMMENT("get_cache_entry_pointer_at_bcp {");
+ get_cache_and_index_at_bcp(cache, tmp, bcp_offset, index_size);
+ add2reg_with_index(cache, in_bytes(ConstantPoolCache::base_offset()), tmp, cache);
+ BLOCK_COMMENT("}");
+}
+
+// Generate a subtype check: branch to ok_is_subtype if sub_klass is
+// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
+void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
+ Register Rsuper_klass,
+ Register Rtmp1,
+ Register Rtmp2,
+ Label &ok_is_subtype) {
+ // Profile the not-null value's klass.
+ profile_typecheck(Rtmp1, Rsub_klass, Rtmp2);
+
+ // Do the check.
+ check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);
+
+ // Profile the failure of the check.
+ profile_typecheck_failed(Rtmp1, Rtmp2);
+}
+
+// Pop topmost element from stack. It just disappears.
+// Useful if consumed previously by access via stackTop().
+void InterpreterMacroAssembler::popx(int len) {
+ add2reg(Z_esp, len*Interpreter::stackElementSize);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+}
+
+// Get Address object of stack top. No checks. No pop.
+// Purpose: - Provide address of stack operand to exploit reg-mem operations.
+// - Avoid RISC-like mem2reg - reg-reg-op sequence.
+Address InterpreterMacroAssembler::stackTop() {
+ return Address(Z_esp, Interpreter::expr_offset_in_bytes(0));
+}
+
+void InterpreterMacroAssembler::pop_i(Register r) {
+ z_l(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
+ add2reg(Z_esp, Interpreter::stackElementSize);
+ assert_different_registers(r, Z_R1_scratch);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+}
+
+void InterpreterMacroAssembler::pop_ptr(Register r) {
+ z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
+ add2reg(Z_esp, Interpreter::stackElementSize);
+ assert_different_registers(r, Z_R1_scratch);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+}
+
+void InterpreterMacroAssembler::pop_l(Register r) {
+ z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
+ add2reg(Z_esp, 2*Interpreter::stackElementSize);
+ assert_different_registers(r, Z_R1_scratch);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+}
+
+void InterpreterMacroAssembler::pop_f(FloatRegister f) {
+ mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), false);
+ add2reg(Z_esp, Interpreter::stackElementSize);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+}
+
+void InterpreterMacroAssembler::pop_d(FloatRegister f) {
+ mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), true);
+ add2reg(Z_esp, 2*Interpreter::stackElementSize);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+}
+
+void InterpreterMacroAssembler::push_i(Register r) {
+ assert_different_registers(r, Z_R1_scratch);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+ z_st(r, Address(Z_esp));
+ add2reg(Z_esp, -Interpreter::stackElementSize);
+}
+
+void InterpreterMacroAssembler::push_ptr(Register r) {
+ z_stg(r, Address(Z_esp));
+ add2reg(Z_esp, -Interpreter::stackElementSize);
+}
+
+void InterpreterMacroAssembler::push_l(Register r) {
+ assert_different_registers(r, Z_R1_scratch);
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
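+  // A long occupies two expression stack slots: the value is stored in the slot at
+  // (Z_esp - 8), the slot at Z_esp is cleared, then Z_esp is moved down by two slots.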
+ int offset = -Interpreter::stackElementSize;
+ z_stg(r, Address(Z_esp, offset));
+ clear_mem(Address(Z_esp), Interpreter::stackElementSize);
+ add2reg(Z_esp, 2 * offset);
+}
+
+void InterpreterMacroAssembler::push_f(FloatRegister f) {
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+ freg2mem_opt(f, Address(Z_esp), false);
+ add2reg(Z_esp, -Interpreter::stackElementSize);
+}
+
+void InterpreterMacroAssembler::push_d(FloatRegister d) {
+ debug_only(verify_esp(Z_esp, Z_R1_scratch));
+ int offset = -Interpreter::stackElementSize;
+ freg2mem_opt(d, Address(Z_esp, offset));
+ add2reg(Z_esp, 2 * offset);
+}
+
+void InterpreterMacroAssembler::push(TosState state) {
+ verify_oop(Z_tos, state);
+ switch (state) {
+ case atos: push_ptr(); break;
+ case btos: push_i(); break;
+ case ztos:
+ case ctos:
+ case stos: push_i(); break;
+ case itos: push_i(); break;
+ case ltos: push_l(); break;
+ case ftos: push_f(); break;
+ case dtos: push_d(); break;
+ case vtos: /* nothing to do */ break;
+ default : ShouldNotReachHere();
+ }
+}
+
+void InterpreterMacroAssembler::pop(TosState state) {
+ switch (state) {
+ case atos: pop_ptr(Z_tos); break;
+ case btos: pop_i(Z_tos); break;
+ case ztos:
+ case ctos:
+ case stos: pop_i(Z_tos); break;
+ case itos: pop_i(Z_tos); break;
+ case ltos: pop_l(Z_tos); break;
+ case ftos: pop_f(Z_ftos); break;
+ case dtos: pop_d(Z_ftos); break;
+ case vtos: /* nothing to do */ break;
+ default : ShouldNotReachHere();
+ }
+ verify_oop(Z_tos, state);
+}
+
+// Helpers for swap and dup.
+void InterpreterMacroAssembler::load_ptr(int n, Register val) {
+ z_lg(val, Address(Z_esp, Interpreter::expr_offset_in_bytes(n)));
+}
+
+void InterpreterMacroAssembler::store_ptr(int n, Register val) {
+ z_stg(val, Address(Z_esp, Interpreter::expr_offset_in_bytes(n)));
+}
+
+void InterpreterMacroAssembler::prepare_to_jump_from_interpreted(Register method) {
+ // Satisfy interpreter calling convention (see generate_normal_entry()).
+ z_lgr(Z_R10, Z_SP); // Set sender sp (aka initial caller sp, aka unextended sp).
+ // Record top_frame_sp, because the callee might modify it, if it's compiled.
+ z_stg(Z_SP, _z_ijava_state_neg(top_frame_sp), Z_fp);
+ save_bcp();
+ save_esp();
+ z_lgr(Z_method, method); // Set Z_method (kills Z_fp!).
+}
+
+// Jump to from_interpreted entry of a call unless single stepping is possible
+// in this thread in which case we must call the i2i entry.
+void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
+ assert_different_registers(method, Z_R10 /*used for initial_caller_sp*/, temp);
+ prepare_to_jump_from_interpreted(method);
+
+ if (JvmtiExport::can_post_interpreter_events()) {
+ // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+ // compiled code in threads for which the event is enabled. Check here for
+ // interp_only_mode if these events CAN be enabled.
+ z_lg(Z_R1_scratch, Address(method, Method::from_interpreted_offset()));
+ MacroAssembler::load_and_test_int(Z_R0_scratch, Address(Z_thread, JavaThread::interp_only_mode_offset()));
+ z_bcr(bcondEqual, Z_R1_scratch); // Run compiled code if zero.
+ // Run interpreted.
+ z_lg(Z_R1_scratch, Address(method, Method::interpreter_entry_offset()));
+ z_br(Z_R1_scratch);
+ } else {
+ // Run compiled code.
+ z_lg(Z_R1_scratch, Address(method, Method::from_interpreted_offset()));
+ z_br(Z_R1_scratch);
+ }
+}
+
+#ifdef ASSERT
+void InterpreterMacroAssembler::verify_esp(Register Resp, Register Rtemp) {
+ // About to read or write Resp[0].
+ // Make sure it is not in the monitors or the TOP_IJAVA_FRAME_ABI.
+ address reentry = NULL;
+
+ {
+ // Check if the frame pointer in Z_fp is correct.
+ NearLabel OK;
+ z_cg(Z_fp, 0, Z_SP);
+ z_bre(OK);
+ reentry = stop_chain_static(reentry, "invalid frame pointer Z_fp");
+ bind(OK);
+ }
+ {
+ // Resp must not point into or below the operand stack,
+ // i.e. IJAVA_STATE.monitors > Resp.
+ NearLabel OK;
+ Register Rmonitors = Rtemp;
+ z_lg(Rmonitors, _z_ijava_state_neg(monitors), Z_fp);
+ compareU64_and_branch(Rmonitors, Resp, bcondHigh, OK);
+ reentry = stop_chain_static(reentry, "too many pops: Z_esp points into monitor area");
+ bind(OK);
+ }
+ {
+ // Resp may point to the last word of TOP_IJAVA_FRAME_ABI, but not below
+ // i.e. !(Z_SP + frame::z_top_ijava_frame_abi_size - Interpreter::stackElementSize > Resp).
+ NearLabel OK;
+ Register Rabi_bottom = Rtemp;
+ add2reg(Rabi_bottom, frame::z_top_ijava_frame_abi_size - Interpreter::stackElementSize, Z_SP);
+ compareU64_and_branch(Rabi_bottom, Resp, bcondNotHigh, OK);
+ reentry = stop_chain_static(reentry, "too many pushes: Z_esp points into TOP_IJAVA_FRAME_ABI");
+ bind(OK);
+ }
+}
+
+void InterpreterMacroAssembler::asm_assert_ijava_state_magic(Register tmp) {
+ Label magic_ok;
+ load_const_optimized(tmp, frame::z_istate_magic_number);
+ z_cg(tmp, Address(Z_fp, _z_ijava_state_neg(magic)));
+ z_bre(magic_ok);
+ stop_static("error: wrong magic number in ijava_state access");
+ bind(magic_ok);
+}
+#endif // ASSERT
+
+void InterpreterMacroAssembler::save_bcp() {
+ z_stg(Z_bcp, Address(Z_fp, _z_ijava_state_neg(bcp)));
+ asm_assert_ijava_state_magic(Z_bcp);
+ NOT_PRODUCT(z_lg(Z_bcp, Address(Z_fp, _z_ijava_state_neg(bcp))));
+}
+
+void InterpreterMacroAssembler::restore_bcp() {
+ asm_assert_ijava_state_magic(Z_bcp);
+ z_lg(Z_bcp, Address(Z_fp, _z_ijava_state_neg(bcp)));
+}
+
+void InterpreterMacroAssembler::save_esp() {
+ z_stg(Z_esp, Address(Z_fp, _z_ijava_state_neg(esp)));
+}
+
+void InterpreterMacroAssembler::restore_esp() {
+ asm_assert_ijava_state_magic(Z_esp);
+ z_lg(Z_esp, Address(Z_fp, _z_ijava_state_neg(esp)));
+}
+
+void InterpreterMacroAssembler::get_monitors(Register reg) {
+ asm_assert_ijava_state_magic(reg);
+ mem2reg_opt(reg, Address(Z_fp, _z_ijava_state_neg(monitors)));
+}
+
+void InterpreterMacroAssembler::save_monitors(Register reg) {
+ reg2mem_opt(reg, Address(Z_fp, _z_ijava_state_neg(monitors)));
+}
+
+void InterpreterMacroAssembler::get_mdp(Register mdp) {
+ z_lg(mdp, _z_ijava_state_neg(mdx), Z_fp);
+}
+
+void InterpreterMacroAssembler::save_mdp(Register mdp) {
+ z_stg(mdp, _z_ijava_state_neg(mdx), Z_fp);
+}
+
+// Values that are only read (besides initialization).
+void InterpreterMacroAssembler::restore_locals() {
+ asm_assert_ijava_state_magic(Z_locals);
+ z_lg(Z_locals, Address(Z_fp, _z_ijava_state_neg(locals)));
+}
+
+void InterpreterMacroAssembler::get_method(Register reg) {
+ asm_assert_ijava_state_magic(reg);
+ z_lg(reg, Address(Z_fp, _z_ijava_state_neg(method)));
+}
+
+void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(Register Rdst, int bcp_offset,
+ signedOrNot is_signed) {
+ // Rdst is an 8-byte return value!!!
+
+ // Unaligned loads incur only a small penalty on z/Architecture. The penalty
+ // is a few (2..3) ticks, even when the load crosses a cache line
+ // boundary. In case of a cache miss, the stall could, of course, be
+ // much longer.
+
+ switch (is_signed) {
+ case Signed:
+ z_lgh(Rdst, bcp_offset, Z_R0, Z_bcp);
+ break;
+ case Unsigned:
+ z_llgh(Rdst, bcp_offset, Z_R0, Z_bcp);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+
+void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(Register Rdst, int bcp_offset,
+ setCCOrNot set_cc) {
+ // Rdst is an 8-byte return value!!!
+
+ // Unaligned loads incur only a small penalty on z/Architecture. The penalty
+ // is a few (2..3) ticks, even when the load crosses a cache line
+ // boundary. In case of a cache miss, the stall could, of course, be
+ // much longer.
+
+ // Both variants implement a sign-extending int2long load.
+ if (set_cc == set_CC) {
+ load_and_test_int2long(Rdst, Address(Z_bcp, (intptr_t)bcp_offset));
+ } else {
+ mem2reg_signed_opt( Rdst, Address(Z_bcp, (intptr_t)bcp_offset));
+ }
+}
+
+void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
+ get_method(Rdst);
+ mem2reg_opt(Rdst, Address(Rdst, Method::const_offset()));
+ mem2reg_opt(Rdst, Address(Rdst, ConstMethod::constants_offset()));
+}
+
+void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
+ get_constant_pool(Rcpool);
+ mem2reg_opt(Rtags, Address(Rcpool, ConstantPool::tags_offset_in_bytes()));
+}
+
+// Unlock if synchronized method.
+//
+// Unlock the receiver if this is a synchronized method.
+// Unlock any Java monitors from synchronized blocks.
+//
+// If there are locked Java monitors
+// If throw_monitor_exception
+// throws IllegalMonitorStateException
+// Else if install_monitor_exception
+// installs IllegalMonitorStateException
+// Else
+// no error processing
+void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
+ bool throw_monitor_exception,
+ bool install_monitor_exception) {
+ NearLabel unlocked, unlock, no_unlock;
+
+ {
+ Register R_method = Z_ARG2;
+ Register R_do_not_unlock_if_synchronized = Z_ARG3;
+
+    // Get the value of _do_not_unlock_if_synchronized into R_do_not_unlock_if_synchronized.
+ const Address do_not_unlock_if_synchronized(Z_thread,
+ JavaThread::do_not_unlock_if_synchronized_offset());
+ load_sized_value(R_do_not_unlock_if_synchronized, do_not_unlock_if_synchronized, 1, false /*unsigned*/);
+ z_mvi(do_not_unlock_if_synchronized, false); // Reset the flag.
+
+ // Check if synchronized method.
+ get_method(R_method);
+ verify_oop(Z_tos, state);
+ push(state); // Save tos/result.
+ testbit(method2_(R_method, access_flags), JVM_ACC_SYNCHRONIZED_BIT);
+ z_bfalse(unlocked);
+
+ // Don't unlock anything if the _do_not_unlock_if_synchronized flag
+ // is set.
+ compareU64_and_branch(R_do_not_unlock_if_synchronized, (intptr_t)0L, bcondNotEqual, no_unlock);
+ }
+
+ // unlock monitor
+
+ // BasicObjectLock will be first in list, since this is a
+ // synchronized method. However, need to check that the object has
+ // not been unlocked by an explicit monitorexit bytecode.
+ const Address monitor(Z_fp, -(frame::z_ijava_state_size + (int) sizeof(BasicObjectLock)));
+ // We use Z_ARG2 so that if we go slow path it will be the correct
+ // register for unlock_object to pass to VM directly.
+ load_address(Z_ARG2, monitor); // Address of first monitor.
+ z_lg(Z_ARG3, Address(Z_ARG2, BasicObjectLock::obj_offset_in_bytes()));
+ compareU64_and_branch(Z_ARG3, (intptr_t)0L, bcondNotEqual, unlock);
+
+ if (throw_monitor_exception) {
+ // Entry already unlocked need to throw an exception.
+ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
+ should_not_reach_here();
+ } else {
+ // Monitor already unlocked during a stack unroll.
+ // If requested, install an illegal_monitor_state_exception.
+ // Continue with stack unrolling.
+ if (install_monitor_exception) {
+ MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
+ }
+ z_bru(unlocked);
+ }
+
+ bind(unlock);
+
+ unlock_object(Z_ARG2);
+
+ bind(unlocked);
+
+  // The result (if any) was saved on the expression stack by push(state) above.
+
+ // Check that all monitors are unlocked.
+ {
+ NearLabel loop, exception, entry, restart;
+ const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
+ // We use Z_ARG2 so that if we go slow path it will be the correct
+ // register for unlock_object to pass to VM directly.
+ Register R_current_monitor = Z_ARG2;
+ Register R_monitor_block_bot = Z_ARG1;
+ const Address monitor_block_top(Z_fp, _z_ijava_state_neg(monitors));
+ const Address monitor_block_bot(Z_fp, -frame::z_ijava_state_size);
+
+ bind(restart);
+ // Starting with top-most entry.
+ z_lg(R_current_monitor, monitor_block_top);
+ // Points to word before bottom of monitor block.
+ load_address(R_monitor_block_bot, monitor_block_bot);
+ z_bru(entry);
+
+ // Entry already locked, need to throw exception.
+ bind(exception);
+
+ if (throw_monitor_exception) {
+ // Throw exception.
+ MacroAssembler::call_VM(noreg,
+ CAST_FROM_FN_PTR(address, InterpreterRuntime::
+ throw_illegal_monitor_state_exception));
+ should_not_reach_here();
+ } else {
+ // Stack unrolling. Unlock object and install illegal_monitor_exception.
+ // Unlock does not block, so don't have to worry about the frame.
+      // We don't have to preserve Z_ARG2 since we are going to throw an exception.
+ unlock_object(R_current_monitor);
+ if (install_monitor_exception) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::
+ new_illegal_monitor_state_exception));
+ }
+ z_bru(restart);
+ }
+
+ bind(loop);
+ // Check if current entry is used.
+ load_and_test_long(Z_R0_scratch, Address(R_current_monitor, BasicObjectLock::obj_offset_in_bytes()));
+ z_brne(exception);
+
+ add2reg(R_current_monitor, entry_size); // Otherwise advance to next entry.
+ bind(entry);
+ compareU64_and_branch(R_current_monitor, R_monitor_block_bot, bcondNotEqual, loop);
+ }
+
+ bind(no_unlock);
+ pop(state);
+ verify_oop(Z_tos, state);
+}
+
+// remove activation
+//
+// Unlock the receiver if this is a synchronized method.
+// Unlock any Java monitors from synchronized blocks.
+// Remove the activation from the stack.
+//
+// If there are locked Java monitors
+// If throw_monitor_exception
+// throws IllegalMonitorStateException
+// Else if install_monitor_exception
+// installs IllegalMonitorStateException
+// Else
+// no error processing
+void InterpreterMacroAssembler::remove_activation(TosState state,
+ Register return_pc,
+ bool throw_monitor_exception,
+ bool install_monitor_exception,
+ bool notify_jvmti) {
+
+ unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
+
+ // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
+ notify_method_exit(false, state, notify_jvmti ? NotifyJVMTI : SkipNotifyJVMTI);
+
+ verify_oop(Z_tos, state);
+ verify_thread();
+
+ pop_interpreter_frame(return_pc, Z_ARG2, Z_ARG3);
+}
+
+// lock object
+//
+// Registers alive
+// monitor - Address of the BasicObjectLock to be used for locking,
+// which must be initialized with the object to lock.
+// object - Address of the object to be locked.
+void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
+
+ if (UseHeavyMonitors) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+ monitor, /*check_for_exceptions=*/false);
+ return;
+ }
+
+ // template code:
+ //
+ // markOop displaced_header = obj->mark().set_unlocked();
+ // monitor->lock()->set_displaced_header(displaced_header);
+ // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+ // // We stored the monitor address into the object's mark word.
+ // } else if (THREAD->is_lock_owned((address)displaced_header))
+ // // Simple recursive case.
+ // monitor->lock()->set_displaced_header(NULL);
+ // } else {
+ // // Slow path.
+ // InterpreterRuntime::monitorenter(THREAD, monitor);
+ // }
+
+ const Register displaced_header = Z_ARG5;
+ const Register object_mark_addr = Z_ARG4;
+ const Register current_header = Z_ARG5;
+
+ NearLabel done;
+ NearLabel slow_case;
+
+ // markOop displaced_header = obj->mark().set_unlocked();
+
+ // Load markOop from object into displaced_header.
+ z_lg(displaced_header, oopDesc::mark_offset_in_bytes(), object);
+
+ if (UseBiasedLocking) {
+ biased_locking_enter(object, displaced_header, Z_R1, Z_R0, done, &slow_case);
+ }
+
+ // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
+ z_oill(displaced_header, markOopDesc::unlocked_value);
+
+ // monitor->lock()->set_displaced_header(displaced_header);
+
+ // Initialize the box (Must happen before we update the object mark!).
+ z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
+ BasicLock::displaced_header_offset_in_bytes(), monitor);
+
+ // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+
+ // Store stack address of the BasicObjectLock (this is monitor) into object.
+ add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
+
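+  // z_csg: if the mark word at object_mark_addr still equals displaced_header, the
+  // monitor address is stored there (CC 'equal'); otherwise the current mark word is
+  // loaded into displaced_header (aka current_header).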
+ z_csg(displaced_header, monitor, 0, object_mark_addr);
+  assert(current_header==displaced_header, "must be same register"); // displaced_header and current_header are deliberately the same register.
+
+ z_bre(done);
+
+ // } else if (THREAD->is_lock_owned((address)displaced_header))
+ // // Simple recursive case.
+ // monitor->lock()->set_displaced_header(NULL);
+
+ // We did not see an unlocked object so try the fast recursive case.
+
+ // Check if owner is self by comparing the value in the markOop of object
+ // (current_header) with the stack pointer.
+ z_sgr(current_header, Z_SP);
+
+ assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
+
+ // The prior sequence "LGR, NGR, LTGR" can be done better
+ // (Z_R1 is temp and not used after here).
+ load_const_optimized(Z_R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+ z_ngr(Z_R0, current_header); // AND sets CC (result eq/ne 0)
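+  // Z_R0 is zero iff the mark word, minus Z_SP, fits within one page and has the lock
+  // bits clear, i.e. the object is stack-locked by this very thread.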
+
+  // If the result is zero, this is a recursive lock: store 0 into the displaced
+  // header and we are done. Otherwise take the slow path.
+ z_brne(slow_case);
+  z_release(); // Membar unnecessary on z/Architecture, and the csg above acts as a barrier anyway.
+ z_stg(Z_R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
+ BasicLock::displaced_header_offset_in_bytes(), monitor);
+ z_bru(done);
+
+ // } else {
+ // // Slow path.
+ // InterpreterRuntime::monitorenter(THREAD, monitor);
+
+ // None of the above fast optimizations worked so we have to get into the
+ // slow case of monitor enter.
+ bind(slow_case);
+
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+ monitor, /*check_for_exceptions=*/false);
+
+ // }
+
+ bind(done);
+}
+
+// Unlocks an object. Used in monitorexit bytecode and remove_activation.
+//
+// Registers alive
+// monitor - address of the BasicObjectLock to be used for locking,
+// which must be initialized with the object to lock.
+//
+// Throw IllegalMonitorException if object is not locked by current thread.
+void InterpreterMacroAssembler::unlock_object(Register monitor, Register object) {
+
+ if (UseHeavyMonitors) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
+ monitor, /*check_for_exceptions=*/ true);
+ return;
+ }
+
+// else {
+ // template code:
+ //
+ // if ((displaced_header = monitor->displaced_header()) == NULL) {
+ // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
+ // monitor->set_obj(NULL);
+ // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+ // // We swapped the unlocked mark in displaced_header into the object's mark word.
+ // monitor->set_obj(NULL);
+ // } else {
+ // // Slow path.
+ // InterpreterRuntime::monitorexit(THREAD, monitor);
+ // }
+
+ const Register displaced_header = Z_ARG4;
+ const Register current_header = Z_R1;
+ Address obj_entry(monitor, BasicObjectLock::obj_offset_in_bytes());
+ Label done;
+
+ if (object == noreg) {
+ // In the template interpreter, we must assure that the object
+ // entry in the monitor is cleared on all paths. Thus we move
+ // loading up to here, and clear the entry afterwards.
+ object = Z_ARG3; // Use Z_ARG3 if caller didn't pass object.
+ z_lg(object, obj_entry);
+ }
+
+ assert_different_registers(monitor, object, displaced_header, current_header);
+
+ // if ((displaced_header = monitor->displaced_header()) == NULL) {
+ // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
+ // monitor->set_obj(NULL);
+
+ clear_mem(obj_entry, sizeof(oop));
+
+ if (UseBiasedLocking) {
+ // The object address from the monitor is in object.
+ assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+ biased_locking_exit(object, displaced_header, done);
+ }
+
+ // Test first if we are in the fast recursive case.
+ MacroAssembler::load_and_test_long(displaced_header,
+ Address(monitor, BasicObjectLock::lock_offset_in_bytes() +
+ BasicLock::displaced_header_offset_in_bytes()));
+ z_bre(done); // displaced_header == 0 -> goto done
+
+ // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+ // // We swapped the unlocked mark in displaced_header into the object's mark word.
+ // monitor->set_obj(NULL);
+
+ // If we still have a lightweight lock, unlock the object and be done.
+
+ // The markword is expected to be at offset 0.
+ assert(oopDesc::mark_offset_in_bytes() == 0, "unlock_object: review code below");
+
+ // We have the displaced header in displaced_header. If the lock is still
+ // lightweight, it will contain the monitor address and we'll store the
+ // displaced header back into the object's mark word.
+ z_lgr(current_header, monitor);
+ z_csg(current_header, displaced_header, 0, object);
+ z_bre(done);
+
+ // } else {
+ // // Slow path.
+ // InterpreterRuntime::monitorexit(THREAD, monitor);
+
+ // The lock has been converted into a heavy lock and hence
+ // we need to get into the slow case.
+ z_stg(object, obj_entry); // Restore object entry, has been cleared above.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
+ monitor, /*check_for_exceptions=*/false);
+
+ // }
+
+ bind(done);
+}
+
+void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ load_and_test_long(mdp, Address(Z_fp, _z_ijava_state_neg(mdx)));
+ z_brz(zero_continue);
+}
+
+// Set the method data pointer for the current bcp.
+void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ Label set_mdp;
+ Register mdp = Z_ARG4;
+ Register method = Z_ARG5;
+
+ get_method(method);
+ // Test MDO to avoid the call if it is NULL.
+ load_and_test_long(mdp, method2_(method, method_data));
+ z_brz(set_mdp);
+
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), method, Z_bcp);
+ // Z_RET: mdi
+  // The MDO is guaranteed to be non-zero here; we checked that before the call.
+ assert(method->is_nonvolatile(), "choose nonvolatile reg or reload from frame");
+ z_lg(mdp, method2_(method, method_data)); // Must reload, mdp is volatile reg.
+ add2reg_with_index(mdp, in_bytes(MethodData::data_offset()), Z_RET, mdp);
+
+ bind(set_mdp);
+ save_mdp(mdp);
+}
+
+void InterpreterMacroAssembler::verify_method_data_pointer() {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+#ifdef ASSERT
+ NearLabel verify_continue;
+ Register bcp_expected = Z_ARG3;
+ Register mdp = Z_ARG4;
+ Register method = Z_ARG5;
+
+ test_method_data_pointer(mdp, verify_continue); // If mdp is zero, continue
+ get_method(method);
+
+ // If the mdp is valid, it will point to a DataLayout header which is
+ // consistent with the bcp. The converse is highly probable also.
+ load_sized_value(bcp_expected, Address(mdp, DataLayout::bci_offset()), 2, false /*signed*/);
+ z_ag(bcp_expected, Address(method, Method::const_offset()));
+ load_address(bcp_expected, Address(bcp_expected, ConstMethod::codes_offset()));
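+  // bcp_expected = ConstMethod* + codes_offset + bci, i.e. the bcp corresponding to
+  // the bci recorded in the mdp entry.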
+ compareU64_and_branch(bcp_expected, Z_bcp, bcondEqual, verify_continue);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), method, Z_bcp, mdp);
+ bind(verify_continue);
+#endif // ASSERT
+}
+
+void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ z_stg(value, constant, mdp_in);
+}
+
+void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
+ int constant,
+ Register tmp,
+ bool decrement) {
+ assert_different_registers(mdp_in, tmp);
+ // counter address
+ Address data(mdp_in, constant);
+ const int delta = decrement ? -DataLayout::counter_increment : DataLayout::counter_increment;
+  add2mem_64(data, delta, tmp);
+}
+
+void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
+ int flag_byte_constant) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ // Set the flag.
+ z_oi(Address(mdp_in, DataLayout::flags_offset()), flag_byte_constant);
+}
+
+void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
+ int offset,
+ Register value,
+ Register test_value_out,
+ Label& not_equal_continue) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ if (test_value_out == noreg) {
+ z_cg(value, Address(mdp_in, offset));
+ z_brne(not_equal_continue);
+ } else {
+ // Put the test value into a register, so caller can use it:
+ z_lg(test_value_out, Address(mdp_in, offset));
+ compareU64_and_branch(test_value_out, value, bcondNotEqual, not_equal_continue);
+ }
+}
+
+void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) {
+ update_mdp_by_offset(mdp_in, noreg, offset_of_disp);
+}
+
+void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
+ Register dataidx,
+ int offset_of_disp) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ Address disp_address(mdp_in, dataidx, offset_of_disp);
+ Assembler::z_ag(mdp_in, disp_address);
+ save_mdp(mdp_in);
+}
+
+void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ add2reg(mdp_in, constant);
+ save_mdp(mdp_in);
+}
+
+void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ assert(return_bci->is_nonvolatile(), "choose nonvolatile reg or save/restore");
+ call_VM(noreg,
+ CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
+ return_bci);
+}
+
+void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bumped_count) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ // Otherwise, assign to mdp.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // We are taking a branch. Increment the taken count.
+ // We inline increment_mdp_data_at to return bumped_count in a register
+ //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
+ Address data(mdp, JumpData::taken_offset());
+ z_lg(bumped_count, data);
+ // 64-bit overflow is very unlikely. Saturation to 32-bit values is
+ // performed when reading the counts.
+ add2reg(bumped_count, DataLayout::counter_increment);
+ z_stg(bumped_count, data); // Store back out
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
+ bind(profile_continue);
+ }
+}
+
+// Kills Z_R1_scratch.
+void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // We are taking a branch. Increment the not taken count.
+ increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()), Z_R1_scratch);
+
+ // The method data pointer needs to be updated to correspond to
+ // the next bytecode.
+ update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
+ bind(profile_continue);
+ }
+}
+
+// Kills: Z_R1_scratch.
+void InterpreterMacroAssembler::profile_call(Register mdp) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // We are making a call. Increment the count.
+ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_final_call(Register mdp) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // We are making a call. Increment the count.
+ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
+ Register mdp,
+ Register reg2,
+ bool receiver_can_be_null) {
+ if (ProfileInterpreter) {
+ NearLabel profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ NearLabel skip_receiver_profile;
+ if (receiver_can_be_null) {
+ NearLabel not_null;
+ compareU64_and_branch(receiver, (intptr_t)0L, bcondNotEqual, not_null);
+ // We are making a call. Increment the count for null receiver.
+ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+ z_bru(skip_receiver_profile);
+ bind(not_null);
+ }
+
+ // Record the receiver type.
+ record_klass_in_profile(receiver, mdp, reg2, true);
+ bind(skip_receiver_profile);
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_constant(mdp, in_bytes(VirtualCallData::virtual_call_data_size()));
+ bind(profile_continue);
+ }
+}
+
+// This routine creates a state machine for updating the multi-row
+// type profile at a virtual call site (or other type-sensitive bytecode).
+// The machine visits each row (of receiver/count) until the receiver type
+// is found, or until it runs out of rows. At the same time, it remembers
+// the location of the first empty row. (An empty row records null for its
+// receiver, and can be allocated for a newly-observed receiver type.)
+// Because there are two degrees of freedom in the state, a simple linear
+// search will not work; it must be a decision tree. Hence this helper
+// function is recursive, to generate the required tree structured code.
+// It's the interpreter, so we are trading off code space for speed.
+// See below for example code.
+void InterpreterMacroAssembler::record_klass_in_profile_helper(
+ Register receiver, Register mdp,
+ Register reg2, int start_row,
+ Label& done, bool is_virtual_call) {
+ if (TypeProfileWidth == 0) {
+ if (is_virtual_call) {
+ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+ }
+ return;
+ }
+
+ int last_row = VirtualCallData::row_limit() - 1;
+ assert(start_row <= last_row, "must be work left to do");
+ // Test this row for both the receiver and for null.
+ // Take any of three different outcomes:
+ // 1. found receiver => increment count and goto done
+ // 2. found null => keep looking for case 1, maybe allocate this cell
+ // 3. found something else => keep looking for cases 1 and 2
+ // Case 3 is handled by a recursive call.
+ for (int row = start_row; row <= last_row; row++) {
+ NearLabel next_test;
+ bool test_for_null_also = (row == start_row);
+
+ // See if the receiver is receiver[n].
+ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
+ test_mdp_data_at(mdp, recvr_offset, receiver,
+ (test_for_null_also ? reg2 : noreg),
+ next_test);
+ // (Reg2 now contains the receiver from the CallData.)
+
+ // The receiver is receiver[n]. Increment count[n].
+ int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
+ increment_mdp_data_at(mdp, count_offset);
+ z_bru(done);
+ bind(next_test);
+
+ if (test_for_null_also) {
+ Label found_null;
+ // Failed the equality check on receiver[n]... Test for null.
+ z_ltgr(reg2, reg2);
+ if (start_row == last_row) {
+ // The only thing left to do is handle the null case.
+ if (is_virtual_call) {
+ z_brz(found_null);
+ // Receiver did not match any saved receiver and there is no empty row for it.
+ // Increment total counter to indicate polymorphic case.
+ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+ z_bru(done);
+ bind(found_null);
+ } else {
+ z_brnz(done);
+ }
+ break;
+ }
+ // Since null is rare, make it be the branch-taken case.
+ z_brz(found_null);
+
+ // Put all the "Case 3" tests here.
+ record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);
+
+ // Found a null. Keep searching for a matching receiver,
+ // but remember that this is an empty (unused) slot.
+ bind(found_null);
+ }
+ }
+
+ // In the fall-through case, we found no matching receiver, but we
+  // observed that receiver[start_row] is NULL.
+
+ // Fill in the receiver field and increment the count.
+ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
+ set_mdp_data_at(mdp, recvr_offset, receiver);
+ int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
+ load_const_optimized(reg2, DataLayout::counter_increment);
+ set_mdp_data_at(mdp, count_offset, reg2);
+ if (start_row > 0) {
+ z_bru(done);
+ }
+}
+
+// Example state machine code for three profile rows:
+// // main copy of decision tree, rooted at row[1]
+// if (row[0].rec == rec) { row[0].incr(); goto done; }
+// if (row[0].rec != NULL) {
+// // inner copy of decision tree, rooted at row[1]
+// if (row[1].rec == rec) { row[1].incr(); goto done; }
+// if (row[1].rec != NULL) {
+// // degenerate decision tree, rooted at row[2]
+// if (row[2].rec == rec) { row[2].incr(); goto done; }
+// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
+// row[2].init(rec); goto done;
+// } else {
+// // remember row[1] is empty
+// if (row[2].rec == rec) { row[2].incr(); goto done; }
+// row[1].init(rec); goto done;
+// }
+// } else {
+// // remember row[0] is empty
+// if (row[1].rec == rec) { row[1].incr(); goto done; }
+// if (row[2].rec == rec) { row[2].incr(); goto done; }
+// row[0].init(rec); goto done;
+// }
+// done:
+
+void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
+ Register mdp, Register reg2,
+ bool is_virtual_call) {
+ assert(ProfileInterpreter, "must be profiling");
+ Label done;
+
+ record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);
+
+ bind (done);
+}
+
+void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
+ if (ProfileInterpreter) {
+ NearLabel profile_continue;
+ uint row;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // Update the total ret count.
+ increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
+
+ for (row = 0; row < RetData::row_limit(); row++) {
+ NearLabel next_test;
+
+ // See if return_bci is equal to bci[n]:
+ test_mdp_data_at(mdp,
+ in_bytes(RetData::bci_offset(row)),
+ return_bci, noreg,
+ next_test);
+
+ // Return_bci is equal to bci[n]. Increment the count.
+ increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)));
+ z_bru(profile_continue);
+ bind(next_test);
+ }
+
+ update_mdp_for_ret(return_bci);
+
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
+
+ // The method data pointer needs to be updated.
+ int mdp_delta = in_bytes(BitData::bit_data_size());
+ if (TypeProfileCasts) {
+ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
+ }
+ update_mdp_by_constant(mdp, mdp_delta);
+
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp, Register tmp) {
+ if (ProfileInterpreter && TypeProfileCasts) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ int count_offset = in_bytes(CounterData::count_offset());
+ // Back up the address, since we have already bumped the mdp.
+ count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
+
+ // *Decrement* the counter. We expect to see zero or small negatives.
+ increment_mdp_data_at(mdp, count_offset, tmp, true);
+
+ bind (profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // The method data pointer needs to be updated.
+ int mdp_delta = in_bytes(BitData::bit_data_size());
+ if (TypeProfileCasts) {
+ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
+
+ // Record the object type.
+ record_klass_in_profile(klass, mdp, reg2, false);
+ }
+ update_mdp_by_constant(mdp, mdp_delta);
+
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // Update the default case count.
+ increment_mdp_data_at(mdp, in_bytes(MultiBranchData::default_count_offset()));
+
+ // The method data pointer needs to be updated.
+ update_mdp_by_offset(mdp, in_bytes(MultiBranchData::default_displacement_offset()));
+
+ bind(profile_continue);
+ }
+}
+
+// Kills: index, scratch1, scratch2.
+void InterpreterMacroAssembler::profile_switch_case(Register index,
+ Register mdp,
+ Register scratch1,
+ Register scratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+ assert_different_registers(index, mdp, scratch1, scratch2);
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(mdp, profile_continue);
+
+ // Build the base (index * per_case_size_in_bytes()) +
+ // case_array_offset_in_bytes().
+ z_sllg(index, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
+ add2reg(index, in_bytes(MultiBranchData::case_array_offset()));
+
+    // Add the calculated base to the mdp -> address of the case's data.
+ Address case_data_addr(mdp, index);
+ Register case_data = scratch1;
+ load_address(case_data, case_data_addr);
+
+ // Update the case count.
+ increment_mdp_data_at(case_data,
+ in_bytes(MultiBranchData::relative_count_offset()),
+ scratch2);
+
+ // The method data pointer needs to be updated.
+ update_mdp_by_offset(mdp,
+ index,
+ in_bytes(MultiBranchData::relative_displacement_offset()));
+
+ bind(profile_continue);
+ }
+}
+
+// Kills: Z_R0, Z_R1, flags. Loads klass from obj (if not null).
+void InterpreterMacroAssembler::profile_obj_type(Register obj, Address mdo_addr, Register klass, bool cmp_done) {
+ NearLabel null_seen, init_klass, do_nothing, do_update;
+
+  // klass and obj may be the same register.
+ const Register tmp = Z_R1;
+ assert_different_registers(obj, mdo_addr.base(), tmp, Z_R0);
+ assert_different_registers(klass, mdo_addr.base(), tmp, Z_R0);
+
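+  // The profile cell (loaded into tmp) encodes a klass pointer together with the
+  // TypeEntries::null_seen and TypeEntries::type_unknown flag bits:
+  //  - no klass recorded yet  -> record obj's klass (init_klass),
+  //  - same klass as obj's    -> nothing to do,
+  //  - a different klass      -> set type_unknown,
+  //  - obj == NULL            -> set null_seen.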
+ z_lg(tmp, mdo_addr);
+ if (cmp_done) {
+ z_brz(null_seen);
+ } else {
+ compareU64_and_branch(obj, (intptr_t)0, Assembler::bcondEqual, null_seen);
+ }
+
+ verify_oop(obj);
+ load_klass(klass, obj);
+
+ // Klass seen before, nothing to do (regardless of unknown bit).
+ z_lgr(Z_R0, tmp);
+ assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
+ z_nill(Z_R0, TypeEntries::type_klass_mask & 0xFFFF);
+ compareU64_and_branch(Z_R0, klass, Assembler::bcondEqual, do_nothing);
+
+ // Already unknown. Nothing to do anymore.
+ z_tmll(tmp, TypeEntries::type_unknown);
+ z_brc(Assembler::bcondAllOne, do_nothing);
+
+ z_lgr(Z_R0, tmp);
+ assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
+ z_nill(Z_R0, TypeEntries::type_mask & 0xFFFF);
+ compareU64_and_branch(Z_R0, (intptr_t)0, Assembler::bcondEqual, init_klass);
+
+ // Different than before. Cannot keep accurate profile.
+ z_oill(tmp, TypeEntries::type_unknown);
+ z_bru(do_update);
+
+ bind(init_klass);
+ // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
+ z_ogr(tmp, klass);
+ z_bru(do_update);
+
+ bind(null_seen);
+ // Set null_seen if obj is 0.
+ z_oill(tmp, TypeEntries::null_seen);
+ // fallthru: z_bru(do_update);
+
+ bind(do_update);
+ z_stg(tmp, mdo_addr);
+
+ bind(do_nothing);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
+ if (!ProfileInterpreter) {
+ return;
+ }
+
+ assert_different_registers(mdp, callee, tmp);
+
+ if (MethodData::profile_arguments() || MethodData::profile_return()) {
+ Label profile_continue;
+
+ test_method_data_pointer(mdp, profile_continue);
+
+ int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+ z_cliy(in_bytes(DataLayout::tag_offset()) - off_to_start, mdp,
+ is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+ z_brne(profile_continue);
+
+ if (MethodData::profile_arguments()) {
+ NearLabel done;
+ int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+ add2reg(mdp, off_to_args);
+
+ for (int i = 0; i < TypeProfileArgsLimit; i++) {
+ if (i > 0 || MethodData::profile_return()) {
+ // If return value type is profiled we may have no argument to profile.
+ z_lg(tmp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, mdp);
+ add2reg(tmp, -i*TypeStackSlotEntries::per_arg_count());
+ compare64_and_branch(tmp, TypeStackSlotEntries::per_arg_count(), Assembler::bcondLow, done);
+ }
+ z_lg(tmp, Address(callee, Method::const_offset()));
+ z_lgh(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
+        // Stack offset o (zero based) from the start of the argument
+        // list. For n arguments translates into offset n - o - 1 from
+        // the end of the argument list. But there is an extra slot at
+        // the top of the stack. So the offset is n - o from Z_esp.
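+        // (Illustration: with n == 2 and o == 0, the argument is read from
+        // Z_esp + 2 stack slots.)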
+ z_sg(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
+ z_sllg(tmp, tmp, Interpreter::logStackElementSize);
+ Address stack_slot_addr(tmp, Z_esp);
+ z_ltg(tmp, stack_slot_addr);
+
+ Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+ profile_obj_type(tmp, mdo_arg_addr, tmp, /*ltg did compare to 0*/ true);
+
+ int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+ add2reg(mdp, to_add);
+ off_to_args += to_add;
+ }
+
+ if (MethodData::profile_return()) {
+ z_lg(tmp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, mdp);
+ add2reg(tmp, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+ }
+
+ bind(done);
+
+ if (MethodData::profile_return()) {
+ // We're right after the type profile for the last
+ // argument. Tmp is the number of cells left in the
+      // CallTypeData/VirtualCallTypeData to reach its end. It is non-zero
+      // if there's a return value to profile.
+ assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+ z_sllg(tmp, tmp, exact_log2(DataLayout::cell_size));
+ z_agr(mdp, tmp);
+ }
+ z_stg(mdp, _z_ijava_state_neg(mdx), Z_fp);
+ } else {
+ assert(MethodData::profile_return(), "either profile call args or call ret");
+ update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
+ }
+
+ // Mdp points right after the end of the
+ // CallTypeData/VirtualCallTypeData, right after the cells for the
+ // return value type if there's one.
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
+ assert_different_registers(mdp, ret, tmp);
+ if (ProfileInterpreter && MethodData::profile_return()) {
+ Label profile_continue;
+
+ test_method_data_pointer(mdp, profile_continue);
+
+ if (MethodData::profile_return_jsr292_only()) {
+ // If we don't profile all invoke bytecodes we must make sure
+ // it's a bytecode we indeed profile. We can't go back to the
+ // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+ // length.
+ NearLabel do_profile;
+ Address bc(Z_bcp);
+ z_lb(tmp, bc);
+ compare32_and_branch(tmp, Bytecodes::_invokedynamic, Assembler::bcondEqual, do_profile);
+ compare32_and_branch(tmp, Bytecodes::_invokehandle, Assembler::bcondEqual, do_profile);
+ get_method(tmp);
+ // Supplement to 8139891: _intrinsic_id exceeded 1-byte size limit.
+ if (Method::intrinsic_id_size_in_bytes() == 1) {
+ z_cli(Method::intrinsic_id_offset_in_bytes(), tmp, vmIntrinsics::_compiledLambdaForm);
+ } else {
+ assert(Method::intrinsic_id_size_in_bytes() == 2, "size error: check Method::_intrinsic_id");
+ z_lh(tmp, Method::intrinsic_id_offset_in_bytes(), Z_R0, tmp);
+ z_chi(tmp, vmIntrinsics::_compiledLambdaForm);
+ }
+ z_brne(profile_continue);
+
+ bind(do_profile);
+ }
+
+ Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
+ profile_obj_type(ret, mdo_ret_addr, tmp);
+
+ bind(profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
+ if (ProfileInterpreter && MethodData::profile_parameters()) {
+ Label profile_continue, done;
+
+ test_method_data_pointer(mdp, profile_continue);
+
+ // Load the offset of the area within the MDO used for
+ // parameters. If it's negative we're not profiling any parameters.
+ Address parm_di_addr(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()));
+ load_and_test_int2long(tmp1, parm_di_addr);
+ z_brl(profile_continue);
+
+ // Compute a pointer to the area for parameters from the offset
+ // and move the pointer to the slot for the last
+ // parameters. Collect profiling from last parameter down.
+ // mdo start + parameters offset + array length - 1
+
+ // Pointer to the parameter area in the MDO.
+ z_agr(mdp, tmp1);
+
+ // Offset of the current profile entry to update.
+ const Register entry_offset = tmp1;
+ // entry_offset = array len in number of cells.
+ z_lg(entry_offset, Address(mdp, ArrayData::array_len_offset()));
+ // entry_offset (number of cells) = array len - size of 1 entry
+ add2reg(entry_offset, -TypeStackSlotEntries::per_arg_count());
+ // entry_offset in bytes
+ z_sllg(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
+
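+    // Loop sketch (entry_offset is a byte offset into the parameter rows;
+    // slot() and type() are illustrative accessors, not real names):
+    //   for (entry_offset = (array_len - per_arg_count) * cell_size;
+    //        entry_offset >= 0;
+    //        entry_offset -= per_arg_count * cell_size) {
+    //     obj = locals[-slot(entry_offset)];
+    //     profile_obj_type(obj, type(entry_offset));
+    //   }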
+ Label loop;
+ bind(loop);
+
+ Address arg_off(mdp, entry_offset, ParametersTypeData::stack_slot_offset(0));
+ Address arg_type(mdp, entry_offset, ParametersTypeData::type_offset(0));
+
+ // Load offset on the stack from the slot for this parameter.
+ z_lg(tmp2, arg_off);
+ z_sllg(tmp2, tmp2, Interpreter::logStackElementSize);
+ z_lcgr(tmp2); // Negate.
+
+ // Profile the parameter.
+ z_ltg(tmp2, Address(Z_locals, tmp2));
+ profile_obj_type(tmp2, arg_type, tmp2, /*ltg did compare to 0*/ true);
+
+ // Go to next parameter.
+ z_aghi(entry_offset, -TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size);
+ z_brnl(loop);
+
+ bind(profile_continue);
+ }
+}
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
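+// Semantics, roughly (C-like sketch; with preloaded == true the old counter
+// value is expected in scratch already):
+//   scratch = *counter_addr + increment;
+//   *counter_addr = scratch;
+//   scratch &= *mask;                      // sets the condition code
+//   if (where != NULL && cond holds) goto *where;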
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+ int increment,
+ Address mask,
+ Register scratch,
+ bool preloaded,
+ branch_condition cond,
+ Label *where) {
+ assert_different_registers(counter_addr.base(), scratch);
+ if (preloaded) {
+ add2reg(scratch, increment);
+ reg2mem_opt(scratch, counter_addr, false);
+ } else {
+ if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(increment) && counter_addr.is_RSYform()) {
+ z_alsi(counter_addr.disp20(), counter_addr.base(), increment);
+ mem2reg_signed_opt(scratch, counter_addr);
+ } else {
+ mem2reg_signed_opt(scratch, counter_addr);
+ add2reg(scratch, increment);
+ reg2mem_opt(scratch, counter_addr, false);
+ }
+ }
+ z_n(scratch, mask);
+ if (where) { z_brc(cond, *where); }
+}
+
+// Get MethodCounters object for given method. Lazily allocated if necessary.
+// method - Ptr to Method object.
+// Rcounters - Ptr to MethodCounters object associated with Method object.
+// skip - Exit point if MethodCounters object can't be created (OOM condition).
+void InterpreterMacroAssembler::get_method_counters(Register Rmethod,
+ Register Rcounters,
+ Label& skip) {
+ assert_different_registers(Rmethod, Rcounters);
+
+ BLOCK_COMMENT("get MethodCounters object {");
+
+ Label has_counters;
+ load_and_test_long(Rcounters, Address(Rmethod, Method::method_counters_offset()));
+ z_brnz(has_counters);
+
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), Rmethod, false);
+ z_ltgr(Rcounters, Z_RET); // Runtime call returns MethodCounters object.
+ z_brz(skip); // No MethodCounters, out of memory.
+
+ bind(has_counters);
+
+ BLOCK_COMMENT("} get MethodCounters object");
+}
+
+// Increment invocation counter in MethodCounters object.
+// Return (invocation_counter+backedge_counter) as "result" in RctrSum.
+// Counter values are all unsigned.
+void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register RctrSum) {
+ assert(UseCompiler || LogTouchedMethods, "incrementing must be useful");
+ assert_different_registers(Rcounters, RctrSum);
+
+ int increment = InvocationCounter::count_increment;
+ int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset());
+ int be_counter_offset = in_bytes(MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset());
+
+ BLOCK_COMMENT("Increment invocation counter {");
+
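+  // Both code paths below compute
+  //   RctrSum = backedge_counter + (invocation_counter + increment),
+  // with the InvocationCounter state bits masked off one of the two operands,
+  // and write the incremented invocation counter back to the MethodCounters.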
+ if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(increment)) {
+ // Increment the invocation counter in place,
+ // then add the incremented value to the backedge counter.
+ z_l(RctrSum, be_counter_offset, Rcounters);
+ z_alsi(inv_counter_offset, Rcounters, increment); // Atomic increment @no extra cost!
+ z_nilf(RctrSum, InvocationCounter::count_mask_value); // Mask off state bits.
+ z_al(RctrSum, inv_counter_offset, Z_R0, Rcounters);
+ } else {
+ // This path is optimized for low register consumption
+ // at the cost of somewhat higher operand delays.
+ // It does not need an extra temp register.
+
+ // Update the invocation counter.
+ z_l(RctrSum, inv_counter_offset, Rcounters);
+ if (RctrSum == Z_R0) {
+ z_ahi(RctrSum, increment);
+ } else {
+ add2reg(RctrSum, increment);
+ }
+ z_st(RctrSum, inv_counter_offset, Rcounters);
+
+ // Mask off the state bits.
+ z_nilf(RctrSum, InvocationCounter::count_mask_value);
+
+ // Add the backedge counter to the updated invocation counter to
+ // form the result.
+ z_al(RctrSum, be_counter_offset, Z_R0, Rcounters);
+ }
+
+ BLOCK_COMMENT("} Increment invocation counter");
+
+  // Note that this macro must leave backedge_count + invocation_count in RctrSum!
+}
+
+
+// Increment backedge counter in MethodCounters object.
+// Return (invocation_counter+backedge_counter) as "result" in RctrSum.
+// Counter values are all unsigned.
+void InterpreterMacroAssembler::increment_backedge_counter(Register Rcounters, Register RctrSum) {
+ assert(UseCompiler, "incrementing must be useful");
+ assert_different_registers(Rcounters, RctrSum);
+
+ int increment = InvocationCounter::count_increment;
+ int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset());
+ int be_counter_offset = in_bytes(MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset());
+
+ BLOCK_COMMENT("Increment backedge counter {");
+
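+  // Both code paths below compute
+  //   RctrSum = invocation_counter + (backedge_counter + increment),
+  // with the InvocationCounter state bits masked off one of the two operands,
+  // and write the incremented backedge counter back to the MethodCounters.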
+ if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(increment)) {
+    // Increment the backedge counter in place,
+    // then add the incremented value to the (masked) invocation counter.
+ z_l(RctrSum, inv_counter_offset, Rcounters);
+ z_alsi(be_counter_offset, Rcounters, increment); // Atomic increment @no extra cost!
+ z_nilf(RctrSum, InvocationCounter::count_mask_value); // Mask off state bits.
+ z_al(RctrSum, be_counter_offset, Z_R0, Rcounters);
+ } else {
+ // This path is optimized for low register consumption
+ // at the cost of somewhat higher operand delays.
+ // It does not need an extra temp register.
+
+    // Update the backedge counter.
+ z_l(RctrSum, be_counter_offset, Rcounters);
+ if (RctrSum == Z_R0) {
+ z_ahi(RctrSum, increment);
+ } else {
+ add2reg(RctrSum, increment);
+ }
+ z_st(RctrSum, be_counter_offset, Rcounters);
+
+ // Mask off the state bits.
+ z_nilf(RctrSum, InvocationCounter::count_mask_value);
+
+    // Add the invocation counter to the updated backedge counter to
+    // form the result.
+ z_al(RctrSum, inv_counter_offset, Z_R0, Rcounters);
+ }
+
+ BLOCK_COMMENT("} Increment backedge counter");
+
+  // Note that this macro must leave backedge_count + invocation_count in RctrSum!
+}
+
+// Add an InterpMonitorElem to stack (see frame_s390.hpp).
+void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty,
+ Register Rtemp1,
+ Register Rtemp2,
+ Register Rtemp3) {
+
+ const Register Rcurr_slot = Rtemp1;
+ const Register Rlimit = Rtemp2;
+ const jint delta = -frame::interpreter_frame_monitor_size() * wordSize;
+
+ assert((delta & LongAlignmentMask) == 0,
+ "sizeof BasicObjectLock must be even number of doublewords");
+ assert(2 * wordSize == -delta, "this works only as long as delta == -2*wordSize");
+ assert(Rcurr_slot != Z_R0, "Register must be usable as base register");
+ assert_different_registers(Rlimit, Rcurr_slot, Rtemp3);
+
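+  // The frame grows by one monitor slot. If the expression stack is not empty,
+  // its contents are slid down so the new monitor ends up directly below the
+  // existing monitors (the monitor area grows toward lower addresses).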
+ get_monitors(Rlimit);
+
+ // Adjust stack pointer for additional monitor entry.
+ resize_frame(RegisterOrConstant((intptr_t) delta), Z_fp, false);
+
+ if (!stack_is_empty) {
+ // Must copy stack contents down.
+ NearLabel next, done;
+
+    // Rcurr_slot := addr(TOS); Z_esp points below it.
+ add2reg(Rcurr_slot, wordSize, Z_esp);
+
+    // Nothing to do if we are already at the monitor area.
+ compareU64_and_branch(Rcurr_slot, Rlimit, bcondNotLow, done);
+
+ bind(next);
+
+ // Move one stack slot.
+ mem2reg_opt(Rtemp3, Address(Rcurr_slot));
+ reg2mem_opt(Rtemp3, Address(Rcurr_slot, delta));
+ add2reg(Rcurr_slot, wordSize);
+ compareU64_and_branch(Rcurr_slot, Rlimit, bcondLow, next); // Are we done?
+
+ bind(done);
+ // Done copying stack.
+ }
+
+ // Adjust expression stack and monitor pointers.
+ add2reg(Z_esp, delta);
+ add2reg(Rlimit, delta);
+ save_monitors(Rlimit);
+}
+
+// Note: Index holds the offset in bytes afterwards.
+// You can use this to store a new value (with Z_locals as the base).
+void InterpreterMacroAssembler::access_local_int(Register index, Register dst) {
+ z_sllg(index, index, LogBytesPerWord);
+ mem2reg_opt(dst, Address(Z_locals, index), false);
+}
+
+void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
+ if (state == atos) { MacroAssembler::verify_oop(reg); }
+}
+
+// Inline assembly for:
+//
+// if (thread is in interp_only_mode) {
+// InterpreterRuntime::post_method_entry();
+// }
+
+void InterpreterMacroAssembler::notify_method_entry() {
+
+ // JVMTI
+ // Whenever JVMTI puts a thread in interp_only_mode, method
+ // entry/exit events are sent for that thread to track stack
+ // depth. If it is possible to enter interp_only_mode we add
+ // the code to check if the event should be sent.
+ if (JvmtiExport::can_post_interpreter_events()) {
+ Label jvmti_post_done;
+ MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
+ z_bre(jvmti_post_done);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry), /*check_exceptions=*/false);
+ bind(jvmti_post_done);
+ }
+}
+
+// Inline assembly for:
+//
+// if (thread is in interp_only_mode) {
+// if (!native_method) save result
+// InterpreterRuntime::post_method_exit();
+// if (!native_method) restore result
+// }
+// if (DTraceMethodProbes) {
+// SharedRuntime::dtrace_method_exit(thread, method);
+// }
+//
+// Native methods have their result stored in z_ijava_state.lresult
+// and z_ijava_state.fresult before coming here.
+// Java methods have their result stored on the expression stack.
+//
+// Notice the dependency to frame::interpreter_frame_result().
+void InterpreterMacroAssembler::notify_method_exit(bool native_method,
+ TosState state,
+ NotifyMethodExitMode mode) {
+ // JVMTI
+ // Whenever JVMTI puts a thread in interp_only_mode, method
+ // entry/exit events are sent for that thread to track stack
+ // depth. If it is possible to enter interp_only_mode we add
+ // the code to check if the event should be sent.
+ if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
+ Label jvmti_post_done;
+ MacroAssembler::load_and_test_int(Z_R0, Address(Z_thread, JavaThread::interp_only_mode_offset()));
+ z_bre(jvmti_post_done);
+ if (!native_method) push(state); // see frame::interpreter_frame_result()
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), /*check_exceptions=*/false);
+ if (!native_method) pop(state);
+ bind(jvmti_post_done);
+ }
+
+#if 0
+  // DTrace is currently not supported on z/Architecture.
+ {
+ SkipIfEqual skip(this, &DTraceMethodProbes, false);
+ push(state);
+ get_method(c_rarg1);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
+ r15_thread, c_rarg1);
+ pop(state);
+ }
+#endif
+}
+
+void InterpreterMacroAssembler::skip_if_jvmti_mode(Label &Lskip, Register Rscratch) {
+ if (!JvmtiExport::can_post_interpreter_events()) {
+ return;
+ }
+
+ load_and_test_int(Rscratch, Address(Z_thread, JavaThread::interp_only_mode_offset()));
+ z_brnz(Lskip);
+}
+
+// Pop the topmost TOP_IJAVA_FRAME and set its sender_sp as the new Z_SP.
+// The return pc is loaded into the register return_pc.
+//
+// Registers updated:
+// return_pc - The return pc of the calling frame.
+// tmp1, tmp2 - scratch
+void InterpreterMacroAssembler::pop_interpreter_frame(Register return_pc, Register tmp1, Register tmp2) {
+ // F0 Z_SP -> caller_sp (F1's)
+ // ...
+ // sender_sp (F1's)
+ // ...
+ // F1 Z_fp -> caller_sp (F2's)
+ // return_pc (Continuation after return from F0.)
+ // ...
+ // F2 caller_sp
+
+ // Remove F0's activation. Restoring Z_SP to sender_sp reverts modifications
+ // (a) by a c2i adapter and (b) by generate_fixed_frame().
+ // In case (a) the new top frame F1 is an unextended compiled frame.
+ // In case (b) F1 is converted from PARENT_IJAVA_FRAME to TOP_IJAVA_FRAME.
+
+  // Case (b) seems to be redundant when returning to an interpreted caller,
+  // because then the caller's top_frame_sp is installed as sp (see
+  // TemplateInterpreterGenerator::generate_return_entry_for()). But
+ // pop_interpreter_frame() is also used in exception handling and there the
+ // frame type of the caller is unknown, therefore top_frame_sp cannot be used,
+ // so it is important that sender_sp is the caller's sp as TOP_IJAVA_FRAME.
+
+ Register R_f1_sender_sp = tmp1;
+ Register R_f2_sp = tmp2;
+
+  // First, check for the interpreter frame's magic.
+ asm_assert_ijava_state_magic(R_f2_sp/*tmp*/);
+ z_lg(R_f2_sp, _z_parent_ijava_frame_abi(callers_sp), Z_fp);
+ z_lg(R_f1_sender_sp, _z_ijava_state_neg(sender_sp), Z_fp);
+ if (return_pc->is_valid())
+ z_lg(return_pc, _z_parent_ijava_frame_abi(return_pc), Z_fp);
+ // Pop F0 by resizing to R_f1_sender_sp and using R_f2_sp as fp.
+ resize_frame_absolute(R_f1_sender_sp, R_f2_sp, false/*load fp*/);
+
+#ifdef ASSERT
+ // The return_pc in the new top frame is dead... at least that's my
+ // current understanding; to assert this I overwrite it.
+ load_const_optimized(Z_ARG3, 0xb00b1);
+ z_stg(Z_ARG3, _z_parent_ijava_frame_abi(return_pc), Z_SP);
+#endif
+}
+
+void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
+ if (VerifyFPU) {
+ unimplemented("verfiyFPU");
+ }
+}
+