--- a/hotspot/make/aix/Makefile Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/make/aix/Makefile Fri Mar 14 09:26:27 2014 +0100
@@ -70,6 +70,10 @@
FORCE_TIERED=1
endif
endif
+# C1 is not ported to ppc64(le), so we cannot build a tiered VM:
+ifneq (,$(filter $(ARCH),ppc64 ppc64le))
+ FORCE_TIERED=0
+endif
ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
--- a/hotspot/make/aix/makefiles/adjust-mflags.sh Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/make/aix/makefiles/adjust-mflags.sh Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
#! /bin/sh
#
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
echo "$MFLAGS" \
| sed '
s/^-/ -/
- s/ -\([^ ][^ ]*\)j/ -\1 -j/
+ s/ -\([^ I][^ I]*\)j/ -\1 -j/
s/ -j[0-9][0-9]*/ -j/
s/ -j\([^ ]\)/ -j -\1/
s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
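The `[^ ]` to `[^ I]` change above is subtle: MFLAGS can carry make's -I<dir> include flags, and any 'j' inside such a path made the old pattern split the flag in two. A standalone illustration of the two patterns, with C++ std::regex standing in for sed and a made-up flags string (a sketch, not part of the build):

    #include <iostream>
    #include <regex>
    #include <string>

    int main() {
      // Hypothetical MFLAGS fragment: an include path containing 'j',
      // plus a clustered "-kj4" that really should be split.
      std::string mflags = " -Isome/jdk/dir -kj4";
      std::regex  old_pat(" -([^ ][^ ]*)j");    // old sed pattern
      std::regex  new_pat(" -([^ I][^ I]*)j");  // patched sed pattern
      std::cout << std::regex_replace(mflags, old_pat, " -$1 -j") << "\n";
      // " -Isome/ -jdk/dir -k -j4"  -- the -I flag gets mangled
      std::cout << std::regex_replace(mflags, new_pat, " -$1 -j") << "\n";
      // " -Isome/jdk/dir -k -j4"    -- only the clustered token is split
      return 0;
    }

The later sed lines then normalize whatever -j token survives, e.g. replacing -j<N> with the configured HOTSPOT_BUILD_JOBS value.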
--- a/hotspot/make/linux/Makefile Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/make/linux/Makefile Fri Mar 14 09:26:27 2014 +0100
@@ -66,6 +66,10 @@
FORCE_TIERED=1
endif
endif
+# C1 is not ported to ppc64(le), so we cannot build a tiered VM:
+ifneq (,$(filter $(ARCH),ppc64 ppc64le))
+ FORCE_TIERED=0
+endif
ifdef LP64
ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
--- a/hotspot/make/linux/makefiles/zeroshark.make Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/make/linux/makefiles/zeroshark.make Fri Mar 14 09:26:27 2014 +0100
@@ -25,6 +25,9 @@
# Setup common to Zero (non-Shark) and Shark versions of VM
+# Override this from the main file because some versions of llvm do not like -Wundef.
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wunused-function -Wunused-value
+
# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -124,6 +124,7 @@
}
};
+#if !defined(ABI_ELFv2)
// A ppc64 function descriptor.
struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
private:
@@ -161,6 +162,7 @@
_env = (address) 0xbad;
}
};
+#endif
class Assembler : public AbstractAssembler {
protected:
@@ -1067,6 +1069,7 @@
// Emit an address.
inline address emit_addr(const address addr = NULL);
+#if !defined(ABI_ELFv2)
// Emit a function descriptor with the specified entry point, TOC,
// and ENV. If the entry point is NULL, the descriptor will point
// just past the descriptor.
@@ -1074,6 +1077,7 @@
inline address emit_fd(address entry = NULL,
address toc = (address) FunctionDescriptor::friend_toc,
address env = (address) FunctionDescriptor::friend_env);
+#endif
/////////////////////////////////////////////////////////////////////////////////////
// PPC instructions
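The new ABI_ELFv2 guards exist because the 64-bit ELFv1 ABI makes every C function pointer refer to a three-doubleword descriptor (entry, TOC, environment), exactly the layout of the FunctionDescriptor struct above, while ELFv2 drops descriptors and points at code directly. A minimal standalone sketch of the indirection a caller has to perform (not HotSpot code):

    #include <stdint.h>

    struct elfv1_fd {    // field order mandated by the 64-bit ELFv1 ABI
      void* entry;       // address of the first instruction
      void* toc;         // TOC base (R2) the callee expects
      void* env;         // environment pointer, unused by C
    };

    // What a call through a "function pointer" must resolve to:
    static inline void* resolve_entry(void* fn) {
    #if defined(ABI_ELFv2)
      return fn;                      // ELFv2: the pointer is the entry point
    #else
      return ((elfv1_fd*)fn)->entry;  // ELFv1: load the entry out of the descriptor
    #endif
    }

This is also why cppInterpreter_ppc.cpp below drops the extra ld(signature_handler_fd, 0, signature_handler_fd) under ABI_ELFv2: there is no descriptor left to dereference.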
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.inline.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -55,6 +55,7 @@
return start;
}
+#if !defined(ABI_ELFv2)
// Emit a function descriptor with the specified entry point, TOC, and
// ENV. If the entry point is NULL, the descriptor will point just
// past the descriptor.
@@ -73,6 +74,7 @@
return (address)fd;
}
+#endif
// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
inline void Assembler::illtrap() { Assembler::emit_int32(0); }
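emit_fd() writes such a descriptor straight into the instruction stream; per the comment above, a NULL entry makes the descriptor point just past itself. A standalone sketch of that layout logic (illustrative, not the HotSpot implementation):

    #include <stdint.h>

    struct fd { void* entry; void* toc; void* env; };

    // Reserve three doublewords at 'pc' and fill them in; the address just
    // past the descriptor doubles as the default entry point.
    static uint8_t* emit_fd_sketch(uint8_t* pc, void* entry, void* toc, void* env) {
      fd* d = (fd*)pc;
      pc += sizeof(fd);
      d->entry = (entry != 0) ? entry : (void*)pc;  // NULL => just past descriptor
      d->toc   = toc;
      d->env   = env;
      return pc;
    }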
--- a/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1136,7 +1136,9 @@
// (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to
// F13_ARG13.
__ mr(R3_ARG1, R18_locals);
+#if !defined(ABI_ELFv2)
__ ld(signature_handler_fd, 0, signature_handler_fd);
+#endif
__ call_stub(signature_handler_fd);
// reload method
__ ld(R19_method, state_(_method));
@@ -1295,8 +1297,13 @@
// native result across the call. No oop is present
__ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+ __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+ relocInfo::none);
+#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
relocInfo::none);
+#endif
__ bind(sync_check_done);
//=============================================================================
@@ -1346,9 +1353,9 @@
// notify here, we'll drop it on the floor.
__ notify_method_exit(true/*native method*/,
- ilgl /*illegal state (not used for native methods)*/);
-
-
+ ilgl /*illegal state (not used for native methods)*/,
+ InterpreterMacroAssembler::NotifyJVMTI,
+ false /*check_exceptions*/);
//=============================================================================
// Handle exceptions
@@ -1413,7 +1420,7 @@
// First, pop to caller's frame.
__ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
// Get the address of the exception handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
R16_thread,
@@ -2545,7 +2552,7 @@
__ mr(R4_ARG2, R3_ARG1); // ARG2 := ARG1
// Find the address of the "catch_exception" stub.
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
R16_thread,
R4_ARG2);
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,10 +42,6 @@
#include "runtime/vframeArray.hpp"
#endif
-#ifndef CC_INTERP
-#error "CC_INTERP must be defined on PPC64"
-#endif
-
#ifdef ASSERT
void RegisterMap::check_location_valid() {
}
@@ -89,7 +85,10 @@
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
// Pass the caller's initial_caller_sp as unextended_sp.
- return frame(sender_sp(), sender_pc(), (intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp);
+ return frame(sender_sp(), sender_pc(),
+ CC_INTERP_ONLY((intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp)
+ NOT_CC_INTERP((intptr_t*)get_ijava_state()->sender_sp)
+ );
}
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
@@ -183,6 +182,9 @@
interpreterState istate = get_interpreterState();
address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
+#else
+ address lresult = (address)&(get_ijava_state()->lresult);
+ address fresult = (address)&(get_ijava_state()->fresult);
#endif
switch (method->result_type()) {
@@ -259,7 +261,21 @@
values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
#else
- Unimplemented();
+#define DESCRIBE_ADDRESS(name) \
+ values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);
+
+ DESCRIBE_ADDRESS(method);
+ DESCRIBE_ADDRESS(locals);
+ DESCRIBE_ADDRESS(monitors);
+ DESCRIBE_ADDRESS(cpoolCache);
+ DESCRIBE_ADDRESS(bcp);
+ DESCRIBE_ADDRESS(esp);
+ DESCRIBE_ADDRESS(mdx);
+ DESCRIBE_ADDRESS(top_frame_sp);
+ DESCRIBE_ADDRESS(sender_sp);
+ DESCRIBE_ADDRESS(oop_tmp);
+ DESCRIBE_ADDRESS(lresult);
+ DESCRIBE_ADDRESS(fresult);
#endif
}
}
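The DESCRIBE_ADDRESS macro introduced above leans on the preprocessor's stringizing operator so that each ijava_state field gets labeled with its own name. A self-contained illustration of the same trick:

    #include <cstdio>

    struct state_sketch { long method; long locals; };

    #define DESCRIBE_FIELD(obj, name) \
      std::printf("%-8s @ %p\n", #name, (void*)&(obj).name)

    int main() {
      state_sketch s = {};
      DESCRIBE_FIELD(s, method);  // prints: method   @ 0x...
      DESCRIBE_FIELD(s, locals);  // prints: locals   @ 0x...
      return 0;
    }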
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,6 @@
#include "runtime/synchronizer.hpp"
#include "utilities/top.hpp"
-#ifndef CC_INTERP
-#error "CC_INTERP must be defined on PPC64"
-#endif
-
// C frame layout on PPC-64.
//
// In this figure the stack grows upwards, while memory grows
@@ -50,7 +46,7 @@
// [C_FRAME]
//
// C_FRAME:
- // 0 [ABI_112]
+ // 0 [ABI_REG_ARGS]
// 112 CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_{10})
// ...
// 40+M*8 CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure)
@@ -77,7 +73,7 @@
// 32 reserved
// 40 space for TOC (=R2) register for next call
//
- // ABI_112:
+ // ABI_REG_ARGS:
// 0 [ABI_48]
// 48 CARG_1: spill slot for outgoing arg 1. used by next callee.
// ... ...
@@ -95,23 +91,25 @@
log_2_of_alignment_in_bits = 7
};
- // ABI_48:
- struct abi_48 {
+ // ABI_MINFRAME:
+ struct abi_minframe {
uint64_t callers_sp;
uint64_t cr; //_16
uint64_t lr;
+#if !defined(ABI_ELFv2)
uint64_t reserved1; //_16
uint64_t reserved2;
+#endif
uint64_t toc; //_16
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
};
enum {
- abi_48_size = sizeof(abi_48)
+ abi_minframe_size = sizeof(abi_minframe)
};
- struct abi_112 : abi_48 {
+ struct abi_reg_args : abi_minframe {
uint64_t carg_1;
uint64_t carg_2; //_16
uint64_t carg_3;
@@ -124,13 +122,13 @@
};
enum {
- abi_112_size = sizeof(abi_112)
+ abi_reg_args_size = sizeof(abi_reg_args)
};
#define _abi(_component) \
- (offset_of(frame::abi_112, _component))
+ (offset_of(frame::abi_reg_args, _component))
- struct abi_112_spill : abi_112 {
+ struct abi_reg_args_spill : abi_reg_args {
// additional spill slots
uint64_t spill_ret;
uint64_t spill_fret; //_16
@@ -138,11 +136,11 @@
};
enum {
- abi_112_spill_size = sizeof(abi_112_spill)
+ abi_reg_args_spill_size = sizeof(abi_reg_args_spill)
};
- #define _abi_112_spill(_component) \
- (offset_of(frame::abi_112_spill, _component))
+ #define _abi_reg_args_spill(_component) \
+ (offset_of(frame::abi_reg_args_spill, _component))
// non-volatile GPRs:
@@ -195,7 +193,85 @@
#define _spill_nonvolatiles_neg(_component) \
(int)(-frame::spill_nonvolatiles_size + offset_of(frame::spill_nonvolatiles, _component))
- // Frame layout for the Java interpreter on PPC64.
+
+
+#ifndef CC_INTERP
+ // Frame layout for the Java template interpreter on PPC64.
+ //
+ // Differences from the CC_INTERP layout are marked with 'X'.
+ //
+ // TOP_IJAVA_FRAME:
+ //
+ // 0 [TOP_IJAVA_FRAME_ABI]
+ // alignment (optional)
+ // [operand stack]
+ // [monitors] (optional)
+ // X[IJAVA_STATE]
+ // Note: the method's own locals are located in the caller's frame.
+ //
+ // PARENT_IJAVA_FRAME:
+ //
+ // 0 [PARENT_IJAVA_FRAME_ABI]
+ // alignment (optional)
+ // [callee's Java result]
+ // [callee's locals w/o arguments]
+ // [outgoing arguments]
+ // [used part of operand stack w/o arguments]
+ // [monitors] (optional)
+ // X[IJAVA_STATE]
+ //
+
+ struct parent_ijava_frame_abi : abi_minframe {
+ };
+
+ enum {
+ parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi)
+ };
+
+#define _parent_ijava_frame_abi(_component) \
+ (offset_of(frame::parent_ijava_frame_abi, _component))
+
+ struct top_ijava_frame_abi : abi_reg_args {
+ };
+
+ enum {
+ top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi)
+ };
+
+#define _top_ijava_frame_abi(_component) \
+ (offset_of(frame::top_ijava_frame_abi, _component))
+
+ struct ijava_state {
+#ifdef ASSERT
+ uint64_t ijava_reserved; // Used for assertion.
+ uint64_t ijava_reserved2; // Inserted for alignment.
+#endif
+ uint64_t method;
+ uint64_t locals;
+ uint64_t monitors;
+ uint64_t cpoolCache;
+ uint64_t bcp;
+ uint64_t esp;
+ uint64_t mdx;
+ uint64_t top_frame_sp; // Maybe define parent_frame_abi and move there.
+ uint64_t sender_sp;
+ // Slots only needed for native calls. Maybe better to move elsewhere.
+ uint64_t oop_tmp;
+ uint64_t lresult;
+ uint64_t fresult;
+ // Aligned to frame::alignment_in_bytes (16).
+ };
+
+ enum {
+ ijava_state_size = sizeof(ijava_state)
+ };
+
+#define _ijava_state_neg(_component) \
+ (int) (-frame::ijava_state_size + offset_of(frame::ijava_state, _component))
+
+#else // CC_INTERP:
+
+ // Frame layout for the Java C++ interpreter on PPC64.
//
// This frame layout provides a C-like frame for every Java frame.
//
@@ -242,7 +318,7 @@
// [ENTRY_FRAME_LOCALS]
//
// PARENT_IJAVA_FRAME_ABI:
- // 0 [ABI_48]
+ // 0 [ABI_MINFRAME]
// top_frame_sp
// initial_caller_sp
//
@@ -258,7 +334,7 @@
// PARENT_IJAVA_FRAME_ABI
- struct parent_ijava_frame_abi : abi_48 {
+ struct parent_ijava_frame_abi : abi_minframe {
// SOE registers.
// C2i adapters spill their top-frame stack-pointer here.
uint64_t top_frame_sp; // carg_1
@@ -285,7 +361,7 @@
uint64_t carg_6_unused; //_16 carg_6
uint64_t carg_7_unused; // carg_7
// Use arg8 for storing frame_manager_lr. The size of
- // top_ijava_frame_abi must match abi_112.
+ // top_ijava_frame_abi must match abi_reg_args.
uint64_t frame_manager_lr; //_16 carg_8
// nothing to add here!
// aligned to frame::alignment_in_bytes (16)
@@ -298,6 +374,8 @@
#define _top_ijava_frame_abi(_component) \
(offset_of(frame::top_ijava_frame_abi, _component))
+#endif // CC_INTERP
+
// ENTRY_FRAME
struct entry_frame_locals {
@@ -395,8 +473,8 @@
intptr_t* fp() const { return _fp; }
// Accessors for ABIs
- inline abi_48* own_abi() const { return (abi_48*) _sp; }
- inline abi_48* callers_abi() const { return (abi_48*) _fp; }
+ inline abi_minframe* own_abi() const { return (abi_minframe*) _sp; }
+ inline abi_minframe* callers_abi() const { return (abi_minframe*) _fp; }
private:
@@ -421,6 +499,14 @@
#ifdef CC_INTERP
// Additional interface for interpreter frames:
inline interpreterState get_interpreterState() const;
+#else
+ inline ijava_state* get_ijava_state() const;
+ // Some convenient register frame setters/getters for deoptimization.
+ inline intptr_t* interpreter_frame_esp() const;
+ inline void interpreter_frame_set_cpcache(ConstantPoolCache* cp);
+ inline void interpreter_frame_set_esp(intptr_t* esp);
+ inline void interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp);
+ inline void interpreter_frame_set_sender_sp(intptr_t* sender_sp);
#endif // CC_INTERP
// Size of a monitor in bytes.
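The renames in this file also have an arithmetic rationale: abi_48 was 48 bytes only under ELFv1 (six doublewords), and abi_112 was 112 bytes only because that header plus eight register-argument spill slots add up to 112. Once ABI_ELFv2 drops the two reserved doublewords, size-based names stop being true, hence abi_minframe and abi_reg_args. A sketch of the arithmetic, assuming the struct layouts shown above:

    enum {
      elfv1_minframe_bytes = 6 * 8,  // callers_sp, cr, lr, reserved1/2, toc = 48
      elfv2_minframe_bytes = 4 * 8,  // the two reserved doublewords are gone = 32
      reg_arg_spill_bytes  = 8 * 8,  // carg_1 .. carg_8 spill slots        = 64

      elfv1_reg_args_bytes = elfv1_minframe_bytes + reg_arg_spill_bytes, // 112
      elfv2_reg_args_bytes = elfv2_minframe_bytes + reg_arg_spill_bytes  //  96
    };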
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.inline.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,6 @@
#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
-#ifndef CC_INTERP
-#error "CC_INTERP must be defined on PPC64"
-#endif
-
// Inline functions for ppc64 frames:
// Find codeblob and set deopt_state.
@@ -199,6 +195,75 @@
interpreterState istate = get_interpreterState();
return &istate->_constants;
}
+
+#else // !CC_INTERP
+
+// Template Interpreter frame value accessors.
+
+inline frame::ijava_state* frame::get_ijava_state() const {
+ return (ijava_state*) ((uintptr_t)fp() - ijava_state_size);
+}
+
+inline intptr_t** frame::interpreter_frame_locals_addr() const {
+ return (intptr_t**) &(get_ijava_state()->locals);
+}
+inline intptr_t* frame::interpreter_frame_bcx_addr() const {
+ return (intptr_t*) &(get_ijava_state()->bcp);
+}
+inline intptr_t* frame::interpreter_frame_mdx_addr() const {
+ return (intptr_t*) &(get_ijava_state()->mdx);
+}
+// Pointer beyond the "oldest/deepest" BasicObjectLock on stack.
+inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
+ return (BasicObjectLock *) get_ijava_state()->monitors;
+}
+
+inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
+ return (BasicObjectLock *) get_ijava_state();
+}
+
+// Return the address of the stack slot at which the currently interpreted method is found.
+inline Method** frame::interpreter_frame_method_addr() const {
+ return (Method**) &(get_ijava_state()->method);
+}
+inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
+ return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
+}
+inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
+ return (ConstantPoolCache**) &(get_ijava_state()->cpoolCache);
+}
+
+inline oop* frame::interpreter_frame_temp_oop_addr() const {
+ return (oop *) &(get_ijava_state()->oop_tmp);
+}
+inline intptr_t* frame::interpreter_frame_esp() const {
+ return (intptr_t*) get_ijava_state()->esp;
+}
+
+// Convenient setters
+inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* end) { get_ijava_state()->monitors = (intptr_t) end;}
+inline void frame::interpreter_frame_set_cpcache(ConstantPoolCache* cp) { *frame::interpreter_frame_cpoolcache_addr() = cp; }
+inline void frame::interpreter_frame_set_esp(intptr_t* esp) { get_ijava_state()->esp = (intptr_t) esp; }
+inline void frame::interpreter_frame_set_top_frame_sp(intptr_t* top_frame_sp) { get_ijava_state()->top_frame_sp = (intptr_t) top_frame_sp; }
+inline void frame::interpreter_frame_set_sender_sp(intptr_t* sender_sp) { get_ijava_state()->sender_sp = (intptr_t) sender_sp; }
+
+inline intptr_t* frame::interpreter_frame_expression_stack() const {
+ return (intptr_t*)interpreter_frame_monitor_end() - 1;
+}
+
+inline jint frame::interpreter_frame_expression_stack_direction() {
+ return -1;
+}
+
+// top of expression stack
+inline intptr_t* frame::interpreter_frame_tos_address() const {
+ return ((intptr_t*) get_ijava_state()->esp) + Interpreter::stackElementWords;
+}
+
+inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
+ return &interpreter_frame_tos_address()[offset];
+}
+
#endif // CC_INTERP
inline int frame::interpreter_frame_monitor_size() {
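get_ijava_state() and the _ijava_state_neg macro encode the same invariant: the template interpreter's state block sits immediately below the frame pointer, so every field lives at a small negative offset from fp. A standalone sketch of that addressing, with the field list abbreviated (not HotSpot code):

    #include <stdint.h>
    #include <stddef.h>

    struct ijava_state_sketch {  // abbreviated; the real struct has more fields
      uint64_t method;
      uint64_t locals;
      uint64_t bcp;
    };

    // Mirrors frame::get_ijava_state(): the state block ends where fp begins.
    static inline ijava_state_sketch* get_state(void* fp) {
      return (ijava_state_sketch*)((uintptr_t)fp - sizeof(ijava_state_sketch));
    }

    // Mirrors the _ijava_state_neg macro: a field's offset relative to fp.
    #define IJAVA_STATE_NEG(field) \
      ((int)(-(int)sizeof(ijava_state_sketch) + (int)offsetof(ijava_state_sketch, field)))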
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "interp_masm_ppc_64.hpp"
#include "interpreter/interpreterRuntime.hpp"
+#include "prims/jvmtiThreadState.hpp"
#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
@@ -45,6 +46,691 @@
MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}
+void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
+ assert(entry, "Entry must have been generated by now");
+ if (is_within_range_of_b(entry, pc())) {
+ b(entry);
+ } else {
+ load_const_optimized(Rscratch, entry, R0);
+ mtctr(Rscratch);
+ bctr();
+ }
+}
+
+#ifndef CC_INTERP
+
+void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
+ Register bytecode = R12_scratch2;
+ if (bcp_incr != 0) {
+ lbzu(bytecode, bcp_incr, R14_bcp);
+ } else {
+ lbz(bytecode, 0, R14_bcp);
+ }
+
+ dispatch_Lbyte_code(state, bytecode, Interpreter::dispatch_table(state));
+}
+
+void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
+ // Load current bytecode.
+ Register bytecode = R12_scratch2;
+ lbz(bytecode, 0, R14_bcp);
+ dispatch_Lbyte_code(state, bytecode, table);
+}
+
+// Dispatch code executed in the prolog of a bytecode which does not do its
+// own dispatch. The dispatch address is computed and placed in R24_dispatch_addr.
+void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
+ Register bytecode = R12_scratch2;
+ lbz(bytecode, bcp_incr, R14_bcp);
+
+ load_dispatch_table(R24_dispatch_addr, Interpreter::dispatch_table(state));
+
+ sldi(bytecode, bytecode, LogBytesPerWord);
+ ldx(R24_dispatch_addr, R24_dispatch_addr, bytecode);
+}
+
+// Dispatch code executed in the epilog of a bytecode which does not do its
+// own dispatch. The dispatch address in R24_dispatch_addr is used for the
+// dispatch.
+void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
+ mtctr(R24_dispatch_addr);
+ addi(R14_bcp, R14_bcp, bcp_incr);
+ bctr();
+}
+
+void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
+ assert(scratch_reg != R0, "can't use R0 as scratch_reg here");
+ if (JvmtiExport::can_pop_frame()) {
+ Label L;
+
+ // Check the "pending popframe condition" flag in the current thread.
+ lwz(scratch_reg, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+ // Initiate popframe handling only if it is not already being
+ // processed. If the flag has the popframe_processing bit set, it
+ // means that this code is called *during* popframe handling - we
+ // don't want to reenter.
+ andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
+ beq(CCR0, L);
+
+ andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
+ bne(CCR0, L);
+
+ // Call the Interpreter::remove_activation_preserving_args_entry()
+ // func to get the address of the same-named entrypoint in the
+ // generated interpreter code.
+ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
+ Interpreter::remove_activation_preserving_args_entry),
+ relocInfo::none);
+
+ // Jump to Interpreter::_remove_activation_preserving_args_entry.
+ mtctr(R3_RET);
+ bctr();
+
+ align(32, 12);
+ bind(L);
+ }
+}
+
+void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
+ const Register Rthr_state_addr = scratch_reg;
+ if (JvmtiExport::can_force_early_return()) {
+ Label Lno_early_ret;
+ ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
+ cmpdi(CCR0, Rthr_state_addr, 0);
+ beq(CCR0, Lno_early_ret);
+
+ lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
+ cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
+ bne(CCR0, Lno_early_ret);
+
+ // Jump to Interpreter::_earlyret_entry.
+ lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry));
+ mtlr(R3_RET);
+ blr();
+
+ align(32, 12);
+ bind(Lno_early_ret);
+ }
+}
+
+void InterpreterMacroAssembler::load_earlyret_value(TosState state, Register Rscratch1) {
+ const Register RjvmtiState = Rscratch1;
+ const Register Rscratch2 = R0;
+
+ ld(RjvmtiState, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
+ li(Rscratch2, 0);
+
+ switch (state) {
+ case atos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
+ std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
+ break;
+ case ltos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
+ break;
+ case btos: // fall through
+ case ctos: // fall through
+ case stos: // fall through
+ case itos: lwz(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
+ break;
+ case ftos: lfs(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
+ break;
+ case dtos: lfd(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
+ break;
+ case vtos: break;
+ default : ShouldNotReachHere();
+ }
+
+ // Clean up tos value in the jvmti thread state.
+ std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
+ // Set tos state field to illegal value.
+ li(Rscratch2, ilgl);
+ stw(Rscratch2, in_bytes(JvmtiThreadState::earlyret_tos_offset()), RjvmtiState);
+}
+
+// Common code for dispatch and dispatch_only:
+// load the dispatch table entry for the current bytecode and jump to it.
+
+void InterpreterMacroAssembler::load_dispatch_table(Register dst, address* table) {
+ address table_base = (address)Interpreter::dispatch_table((TosState)0);
+ intptr_t table_offs = (intptr_t)table - (intptr_t)table_base;
+ if (is_simm16(table_offs)) {
+ addi(dst, R25_templateTableBase, (int)table_offs);
+ } else {
+ load_const_optimized(dst, table, R0);
+ }
+}
+
+void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify) {
+ if (verify) {
+ unimplemented("dispatch_Lbyte_code: verify"); // See Sparc Implementation to implement this
+ }
+
+#ifdef FAST_DISPATCH
+ unimplemented("dispatch_Lbyte_code FAST_DISPATCH");
+#else
+ assert_different_registers(bytecode, R11_scratch1);
+
+ // Calc dispatch table address.
+ load_dispatch_table(R11_scratch1, table);
+
+ sldi(R12_scratch2, bytecode, LogBytesPerWord);
+ ldx(R11_scratch1, R11_scratch1, R12_scratch2);
+
+ // Jump off!
+ mtctr(R11_scratch1);
+ bctr();
+#endif
+}
+
+void InterpreterMacroAssembler::load_receiver(Register Rparam_count, Register Rrecv_dst) {
+ sldi(Rrecv_dst, Rparam_count, Interpreter::logStackElementSize);
+ ldx(Rrecv_dst, Rrecv_dst, R15_esp);
+}
+
+// helpers for expression stack
+
+void InterpreterMacroAssembler::pop_i(Register r) {
+ lwzu(r, Interpreter::stackElementSize, R15_esp);
+}
+
+void InterpreterMacroAssembler::pop_ptr(Register r) {
+ ldu(r, Interpreter::stackElementSize, R15_esp);
+}
+
+void InterpreterMacroAssembler::pop_l(Register r) {
+ ld(r, Interpreter::stackElementSize, R15_esp);
+ addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
+}
+
+void InterpreterMacroAssembler::pop_f(FloatRegister f) {
+ lfsu(f, Interpreter::stackElementSize, R15_esp);
+}
+
+void InterpreterMacroAssembler::pop_d(FloatRegister f) {
+ lfd(f, Interpreter::stackElementSize, R15_esp);
+ addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
+}
+
+void InterpreterMacroAssembler::push_i(Register r) {
+ stw(r, 0, R15_esp);
+ addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
+}
+
+void InterpreterMacroAssembler::push_ptr(Register r) {
+ std(r, 0, R15_esp);
+ addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
+}
+
+void InterpreterMacroAssembler::push_l(Register r) {
+ std(r, - Interpreter::stackElementSize, R15_esp);
+ addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
+}
+
+void InterpreterMacroAssembler::push_f(FloatRegister f) {
+ stfs(f, 0, R15_esp);
+ addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
+}
+
+void InterpreterMacroAssembler::push_d(FloatRegister f) {
+ stfd(f, - Interpreter::stackElementSize, R15_esp);
+ addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
+}
+
+void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
+ std(first, 0, R15_esp);
+ std(second, -Interpreter::stackElementSize, R15_esp);
+ addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
+}
+
+void InterpreterMacroAssembler::push_l_pop_d(Register l, FloatRegister d) {
+ std(l, 0, R15_esp);
+ lfd(d, 0, R15_esp);
+}
+
+void InterpreterMacroAssembler::push_d_pop_l(FloatRegister d, Register l) {
+ stfd(d, 0, R15_esp);
+ ld(l, 0, R15_esp);
+}
+
+void InterpreterMacroAssembler::push(TosState state) {
+ switch (state) {
+ case atos: push_ptr(); break;
+ case btos:
+ case ctos:
+ case stos:
+ case itos: push_i(); break;
+ case ltos: push_l(); break;
+ case ftos: push_f(); break;
+ case dtos: push_d(); break;
+ case vtos: /* nothing to do */ break;
+ default : ShouldNotReachHere();
+ }
+}
+
+void InterpreterMacroAssembler::pop(TosState state) {
+ switch (state) {
+ case atos: pop_ptr(); break;
+ case btos:
+ case ctos:
+ case stos:
+ case itos: pop_i(); break;
+ case ltos: pop_l(); break;
+ case ftos: pop_f(); break;
+ case dtos: pop_d(); break;
+ case vtos: /* nothing to do */ break;
+ default : ShouldNotReachHere();
+ }
+ verify_oop(R17_tos, state);
+}
+
+void InterpreterMacroAssembler::empty_expression_stack() {
+ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
+}
+
+void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int bcp_offset,
+ Register Rdst,
+ signedOrNot is_signed) {
+ // Read Java big endian format.
+ if (is_signed == Signed) {
+ lha(Rdst, bcp_offset, R14_bcp);
+ } else {
+ lhz(Rdst, bcp_offset, R14_bcp);
+ }
+#if 0
+ assert(Rtmp != Rdst, "need separate temp register");
+ Register Rfirst = Rtmp;
+ lbz(Rfirst, bcp_offset, R14_bcp); // first byte
+ lbz(Rdst, bcp_offset+1, R14_bcp); // second byte
+
+ // Rdst = ((Rfirst<<8) & 0xFF00) | (Rdst &~ 0xFF00)
+ rldimi(/*RA=*/Rdst, /*RS=*/Rfirst, /*sh=*/8, /*mb=*/48);
+ if (is_signed == Signed) {
+ extsh(Rdst, Rdst);
+ }
+#endif
+}
+
+void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int bcp_offset,
+ Register Rdst,
+ signedOrNot is_signed) {
+ // Read Java big endian format.
+ if (bcp_offset & 3) { // Offset unaligned?
+ load_const_optimized(Rdst, bcp_offset);
+ if (is_signed == Signed) {
+ lwax(Rdst, R14_bcp, Rdst);
+ } else {
+ lwzx(Rdst, R14_bcp, Rdst);
+ }
+ } else {
+ if (is_signed == Signed) {
+ lwa(Rdst, bcp_offset, R14_bcp);
+ } else {
+ lwz(Rdst, bcp_offset, R14_bcp);
+ }
+ }
+}
+
+// Load the constant pool cache index from the bytecode stream.
+//
+// Kills / writes:
+// - Rdst, Rscratch
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
+ assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+ if (index_size == sizeof(u2)) {
+ get_2_byte_integer_at_bcp(bcp_offset, Rdst, Unsigned);
+ } else if (index_size == sizeof(u4)) {
+ assert(EnableInvokeDynamic, "giant index used only for JSR 292");
+ get_4_byte_integer_at_bcp(bcp_offset, Rdst, Signed);
+ assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
+ nand(Rdst, Rdst, Rdst); // convert to plain index
+ } else if (index_size == sizeof(u1)) {
+ lbz(Rdst, bcp_offset, R14_bcp);
+ } else {
+ ShouldNotReachHere();
+ }
+ // Rdst now contains cp cache index.
+}
+
+void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size) {
+ get_cache_index_at_bcp(cache, bcp_offset, index_size);
+ sldi(cache, cache, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
+ add(cache, R27_constPoolCache, cache);
+}
+
+// Load object from cpool->resolved_references(index).
+void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
+ assert_different_registers(result, index);
+ get_constant_pool(result);
+
+ // Convert from field index to resolved_references() index and from
+ // word index to byte offset. Since this is a java object, it can be compressed.
+ Register tmp = index; // reuse
+ sldi(tmp, index, LogBytesPerHeapOop);
+ // Load pointer for resolved_references[] objArray.
+ ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
+ // JNIHandles::resolve(result)
+ ld(result, 0, result);
+#ifdef ASSERT
+ Label index_ok;
+ lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
+ sldi(R0, R0, LogBytesPerHeapOop);
+ cmpd(CCR0, tmp, R0);
+ blt(CCR0, index_ok);
+ stop("resolved reference index out of bounds", 0x09256);
+ bind(index_ok);
+#endif
+ // Add in the index.
+ add(result, tmp, result);
+ load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
+}
+
+// Generate a subtype check: branch to ok_is_subtype if sub_klass is
+// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
+void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
+ Register Rtmp2, Register Rtmp3, Label &ok_is_subtype) {
+ // Profile the not-null value's klass.
+ profile_typecheck(Rsub_klass, Rtmp1, Rtmp2);
+ check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);
+ profile_typecheck_failed(Rtmp1, Rtmp2);
+}
+
+void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
+ Label done;
+ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
+ ld(Rscratch1, thread_(stack_overflow_limit));
+ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
+ bgt(CCR0/*is_stack_overflow*/, done);
+
+ // Load target address of the runtime stub.
+ assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
+ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
+ mtctr(Rscratch1);
+ // Restore caller_sp.
+#ifdef ASSERT
+ ld(Rscratch1, 0, R1_SP);
+ ld(R0, 0, R21_sender_SP);
+ cmpd(CCR0, R0, Rscratch1);
+ asm_assert_eq("backlink", 0x547);
+#endif // ASSERT
+ mr(R1_SP, R21_sender_SP);
+ bctr();
+
+ align(32, 12);
+ bind(done);
+}
+
+// index_check() and index_check_without_pop() are kept separate: both test the
+// index and do a full jump to exception-throwing code on failure.
+
+void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Register Rindex, int index_shift, Register Rtmp, Register Rres) {
+ // Check that index is in range for array, then shift index by index_shift,
+ // and put arrayOop + shifted_index into res.
+ // Note: res is still shy of address by array offset into object.
+ // Kills:
+ // - Rindex
+ // Writes:
+ // - Rres: Address that corresponds to the array index if check was successful.
+ verify_oop(Rarray);
+ const Register Rlength = R0;
+ const Register RsxtIndex = Rtmp;
+ Label LisNull, LnotOOR;
+
+ // Array nullcheck
+ if (!ImplicitNullChecks) {
+ cmpdi(CCR0, Rarray, 0);
+ beq(CCR0, LisNull);
+ } else {
+ null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
+ }
+
+ // Rindex might contain garbage in its upper bits (remember that we don't
+ // sign extend during integer arithmetic operations). Kill them and put the
+ // value into the register where ArrayIndexOutOfBounds expects the index.
+ rldicl(RsxtIndex, Rindex, 0, 32); // zero extend 32 bit -> 64 bit
+
+ // Index check
+ lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
+ cmplw(CCR0, Rindex, Rlength);
+ sldi(RsxtIndex, RsxtIndex, index_shift);
+ blt(CCR0, LnotOOR);
+ load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
+ mtctr(Rtmp);
+ bctr();
+
+ if (!ImplicitNullChecks) {
+ bind(LisNull);
+ load_dispatch_table(Rtmp, (address*)Interpreter::_throw_NullPointerException_entry);
+ mtctr(Rtmp);
+ bctr();
+ }
+
+ align(32, 16);
+ bind(LnotOOR);
+
+ // Calc address
+ add(Rres, RsxtIndex, Rarray);
+}
+
+void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
+ // pop array
+ pop_ptr(array);
+
+ // check array
+ index_check_without_pop(array, index, index_shift, tmp, res);
+}
+
+void InterpreterMacroAssembler::get_const(Register Rdst) {
+ ld(Rdst, in_bytes(Method::const_offset()), R19_method);
+}
+
+void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
+ get_const(Rdst);
+ ld(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
+}
+
+void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
+ get_constant_pool(Rdst);
+ ld(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
+}
+
+void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
+ get_constant_pool(Rcpool);
+ ld(Rtags, ConstantPool::tags_offset_in_bytes(), Rcpool);
+}
+
+// Unlock if synchronized method.
+//
+// Unlock the receiver if this is a synchronized method.
+// Unlock any Java monitors from synchronized blocks.
+//
+// If there are locked Java monitors
+// If throw_monitor_exception
+// throws IllegalMonitorStateException
+// Else if install_monitor_exception
+// installs IllegalMonitorStateException
+// Else
+// no error processing
+void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
+ bool throw_monitor_exception,
+ bool install_monitor_exception) {
+ Label Lunlocked, Lno_unlock;
+ {
+ Register Rdo_not_unlock_flag = R11_scratch1;
+ Register Raccess_flags = R12_scratch2;
+
+ // Check if synchronized method or unlocking prevented by
+ // JavaThread::do_not_unlock_if_synchronized flag.
+ lbz(Rdo_not_unlock_flag, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+ lwz(Raccess_flags, in_bytes(Method::access_flags_offset()), R19_method);
+ li(R0, 0);
+ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); // reset flag
+
+ push(state);
+
+ // Skip if we don't have to unlock.
+ rldicl_(R0, Raccess_flags, 64-JVM_ACC_SYNCHRONIZED_BIT, 63); // Extract bit and compare to 0.
+ beq(CCR0, Lunlocked);
+
+ cmpwi(CCR0, Rdo_not_unlock_flag, 0);
+ bne(CCR0, Lno_unlock);
+ }
+
+ // Unlock
+ {
+ Register Rmonitor_base = R11_scratch1;
+
+ Label Lunlock;
+ // If it's still locked, everything is ok, unlock it.
+ ld(Rmonitor_base, 0, R1_SP);
+ addi(Rmonitor_base, Rmonitor_base, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
+
+ ld(R0, BasicObjectLock::obj_offset_in_bytes(), Rmonitor_base);
+ cmpdi(CCR0, R0, 0);
+ bne(CCR0, Lunlock);
+
+ // If it's already unlocked, throw exception.
+ if (throw_monitor_exception) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
+ should_not_reach_here();
+ } else {
+ if (install_monitor_exception) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
+ b(Lunlocked);
+ }
+ }
+
+ bind(Lunlock);
+ unlock_object(Rmonitor_base);
+ }
+
+ // Check that all other monitors are unlocked. Throw an IllegalMonitorState exception if not.
+ bind(Lunlocked);
+ {
+ Label Lexception, Lrestart;
+ Register Rcurrent_obj_addr = R11_scratch1;
+ const int delta = frame::interpreter_frame_monitor_size_in_bytes();
+ assert((delta & LongAlignmentMask) == 0, "sizeof BasicObjectLock must be even number of doublewords");
+
+ bind(Lrestart);
+ // Set up search loop: Calc num of iterations.
+ {
+ Register Riterations = R12_scratch2;
+ Register Rmonitor_base = Rcurrent_obj_addr;
+ ld(Rmonitor_base, 0, R1_SP);
+ addi(Rmonitor_base, Rmonitor_base, - frame::ijava_state_size); // Monitor base
+
+ // Check if any monitor is on the stack, bail out if not.
+ subf_(Riterations, R26_monitor, Rmonitor_base);
+ ble(CCR0, Lno_unlock);
+
+ addi(Rcurrent_obj_addr, Rmonitor_base, BasicObjectLock::obj_offset_in_bytes() - frame::interpreter_frame_monitor_size_in_bytes());
+ // Convert the byte distance into the number of monitors to inspect.
+ srdi(Riterations, Riterations, exact_log2(delta));
+ mtctr(Riterations);
+ }
+
+ // The search loop: Look for locked monitors.
+ {
+ const Register Rcurrent_obj = R0;
+ Label Lloop;
+
+ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
+ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
+ bind(Lloop);
+
+ // Check if current entry is used.
+ cmpdi(CCR0, Rcurrent_obj, 0);
+ bne(CCR0, Lexception);
+ // Preload next iteration's compare value.
+ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
+ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
+ bdnz(Lloop);
+ }
+ // Fell through: Everything's unlocked => finish.
+ b(Lno_unlock);
+
+ // An object is still locked => need to throw exception.
+ bind(Lexception);
+ if (throw_monitor_exception) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
+ should_not_reach_here();
+ } else {
+ // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
+ // Unlock does not block, so don't have to worry about the frame.
+ Register Rmonitor_addr = R11_scratch1;
+ addi(Rmonitor_addr, Rcurrent_obj_addr, -BasicObjectLock::obj_offset_in_bytes() + delta);
+ unlock_object(Rmonitor_addr);
+ if (install_monitor_exception) {
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
+ }
+ b(Lrestart);
+ }
+ }
+
+ align(32, 12);
+ bind(Lno_unlock);
+ pop(state);
+}
+
+// Support function for remove_activation & Co.
+void InterpreterMacroAssembler::merge_frames(Register Rsender_sp, Register return_pc, Register Rscratch1, Register Rscratch2) {
+ // Pop interpreter frame.
+ ld(Rscratch1, 0, R1_SP); // *SP
+ ld(Rsender_sp, _ijava_state_neg(sender_sp), Rscratch1); // top_frame_sp
+ ld(Rscratch2, 0, Rscratch1); // **SP
+#ifdef ASSERT
+ {
+ Label Lok;
+ ld(R0, _ijava_state_neg(ijava_reserved), Rscratch1);
+ cmpdi(CCR0, R0, 0x5afe);
+ beq(CCR0, Lok);
+ stop("frame corrupted (remove activation)", 0x5afe);
+ bind(Lok);
+ }
+#endif
+ if (return_pc != noreg) {
+ ld(return_pc, _abi(lr), Rscratch1); // LR
+ }
+
+ // Merge top frames.
+ subf(Rscratch1, R1_SP, Rsender_sp); // top_frame_sp - SP
+ stdux(Rscratch2, R1_SP, Rscratch1); // atomically set *(SP = top_frame_sp) = **SP
+}
+
+// Remove activation.
+//
+// Unlock the receiver if this is a synchronized method.
+// Unlock any Java monitors from synchronized blocks.
+// Remove the activation from the stack.
+//
+// If there are locked Java monitors
+// If throw_monitor_exception
+// throws IllegalMonitorStateException
+// Else if install_monitor_exception
+// installs IllegalMonitorStateException
+// Else
+// no error processing
+void InterpreterMacroAssembler::remove_activation(TosState state,
+ bool throw_monitor_exception,
+ bool install_monitor_exception) {
+ unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
+
+ // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
+ notify_method_exit(false, state, NotifyJVMTI, true);
+
+ verify_oop(R17_tos, state);
+ verify_thread();
+
+ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
+ mtlr(R0);
+}
+
+#endif // !CC_INTERP
+
// Lock object
//
// Registers alive
@@ -81,7 +767,6 @@
assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
-
// markOop displaced_header = obj->mark().set_unlocked();
// Load markOop from object into displaced_header.
@@ -94,7 +779,6 @@
// Set displaced_header to be (markOop of object | UNLOCK_VALUE).
ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
-
// monitor->lock()->set_displaced_header(displaced_header);
// Initialize the box (Must happen before we update the object mark!).
@@ -147,7 +831,6 @@
BasicLock::displaced_header_offset_in_bytes(), monitor);
b(done);
-
// } else {
// // Slow path.
// InterpreterRuntime::monitorenter(THREAD, monitor);
@@ -158,7 +841,7 @@
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
// }
-
+ align(32, 12);
bind(done);
}
}
@@ -173,13 +856,13 @@
void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_exceptions) {
if (UseHeavyMonitors) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
- monitor, /*check_for_exceptions=*/false);
+ monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
} else {
// template code:
//
// if ((displaced_header = monitor->displaced_header()) == NULL) {
- // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
+ // // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
// } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
@@ -221,7 +904,7 @@
// If we still have a lightweight lock, unlock the object and be done.
// The object address from the monitor is in object.
- if (!UseBiasedLocking) ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
+ if (!UseBiasedLocking) { ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor); }
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
// We have the displaced header in displaced_header. If the lock is still
@@ -261,6 +944,959 @@
}
}
+#ifndef CC_INTERP
+
+// Load compiled (i2c) or interpreter entry when calling from interpreted and
+// do the call. Centralized so that all interpreter calls will do the same actions.
+// If jvmti single stepping is on for a thread we must not call compiled code.
+//
+// Input:
+// - Rtarget_method: method to call
+// - Rret_addr: return address
+// - 2 scratch regs
+//
+void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2) {
+ assert_different_registers(Rscratch1, Rscratch2, Rtarget_method, Rret_addr);
+ // Assume we want to go compiled if available.
+ const Register Rtarget_addr = Rscratch1;
+ const Register Rinterp_only = Rscratch2;
+
+ ld(Rtarget_addr, in_bytes(Method::from_interpreted_offset()), Rtarget_method);
+
+ if (JvmtiExport::can_post_interpreter_events()) {
+ lwz(Rinterp_only, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
+
+ // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+ // compiled code in threads for which the event is enabled. Check here for
+ // interp_only_mode if these events CAN be enabled.
+ Label done;
+ verify_thread();
+ cmpwi(CCR0, Rinterp_only, 0);
+ beq(CCR0, done);
+ ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method);
+ align(32, 12);
+ bind(done);
+ }
+
+#ifdef ASSERT
+ {
+ Label Lok;
+ cmpdi(CCR0, Rtarget_addr, 0);
+ bne(CCR0, Lok);
+ stop("null entry point");
+ bind(Lok);
+ }
+#endif // ASSERT
+
+ mr(R21_sender_SP, R1_SP);
+
+ // Calc a precise SP for the call. The SP value we calculated in
+ // generate_fixed_frame() is based on the max_stack() value, so we would waste stack space
+ // if esp is not max. Also, the i2c adapter extends the stack space without restoring
+ // our pre-calced value, so repeating calls via i2c would result in stack overflow.
+ // Since esp already points to an empty slot, we just have to sub 1 additional slot
+ // to meet the abi scratch requirements.
+ // The max_stack pointer will get restored by means of the GR_Lmax_stack local in
+ // the return entry of the interpreter.
+ addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::abi_reg_args_size);
+ clrrdi(Rscratch2, Rscratch2, exact_log2(frame::alignment_in_bytes)); // round towards smaller address
+ resize_frame_absolute(Rscratch2, Rscratch2, R0);
+
+ mr_if_needed(R19_method, Rtarget_method);
+ mtctr(Rtarget_addr);
+ mtlr(Rret_addr);
+
+ save_interpreter_state(Rscratch2);
+#ifdef ASSERT
+ ld(Rscratch1, _ijava_state_neg(top_frame_sp), Rscratch2); // Rscratch2 contains fp
+ cmpd(CCR0, R21_sender_SP, Rscratch1);
+ asm_assert_eq("top_frame_sp incorrect", 0x951);
+#endif
+
+ bctr();
+}
+
+// Set the method data pointer for the current bcp.
+void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ Label get_continue;
+ ld(R28_mdx, in_bytes(Method::method_data_offset()), R19_method);
+ test_method_data_pointer(get_continue);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R19_method, R14_bcp);
+
+ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
+ add(R28_mdx, R28_mdx, R3_RET);
+ bind(get_continue);
+}
+
+// Test the method data pointer (R28_mdx). If it is null, continue at the specified label.
+void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ cmpdi(CCR0, R28_mdx, 0);
+ beq(CCR0, zero_continue);
+}
+
+void InterpreterMacroAssembler::verify_method_data_pointer() {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+#ifdef ASSERT
+ Label verify_continue;
+ test_method_data_pointer(verify_continue);
+
+ // If the mdp is valid, it will point to a DataLayout header which is
+ // consistent with the bcp. The converse is highly probable also.
+ lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
+ ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
+ addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
+ add(R11_scratch1, R12_scratch2, R11_scratch1);
+ cmpd(CCR0, R11_scratch1, R14_bcp);
+ beq(CCR0, verify_continue);
+
+ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+
+ bind(verify_continue);
+#endif
+}
+
+void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
+ Register Rscratch,
+ Label &profile_continue) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ // Control will flow to "profile_continue" if the counter is less than the
+ // limit or if we call profile_method().
+ Label done;
+
+ // If no method data exists, and the counter is high enough, make one.
+ int ipl_offs = load_const_optimized(Rscratch, &InvocationCounter::InterpreterProfileLimit, R0, true);
+ lwz(Rscratch, ipl_offs, Rscratch);
+
+ cmpdi(CCR0, R28_mdx, 0);
+ // Test to see if we should create a method data oop.
+ cmpd(CCR1, Rscratch /* InterpreterProfileLimit */, invocation_count);
+ bne(CCR0, done);
+ bge(CCR1, profile_continue);
+
+ // Build it now.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+ set_method_data_pointer_for_bcp();
+ b(profile_continue);
+
+ align(32, 12);
+ bind(done);
+}
+
+void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp) {
+ assert_different_registers(backedge_count, Rtmp, branch_bcp);
+ assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
+
+ Label did_not_overflow;
+ Label overflow_with_error;
+
+ int ibbl_offs = load_const_optimized(Rtmp, &InvocationCounter::InterpreterBackwardBranchLimit, R0, true);
+ lwz(Rtmp, ibbl_offs, Rtmp);
+ cmpw(CCR0, backedge_count, Rtmp);
+
+ blt(CCR0, did_not_overflow);
+
+ // When ProfileInterpreter is on, the backedge_count comes from the
+ // methodDataOop, whose value does not get reset on the call to
+ // frequency_counter_overflow(). To avoid excessive calls to the overflow
+ // routine while the method is being compiled, add a second test to make sure
+ // the overflow function is called only once every overflow_frequency.
+ if (ProfileInterpreter) {
+ const int overflow_frequency = 1024;
+ li(Rtmp, overflow_frequency-1);
+ andr(Rtmp, Rtmp, backedge_count);
+ cmpwi(CCR0, Rtmp, 0);
+ bne(CCR0, did_not_overflow);
+ }
+
+ // Overflow in loop, pass branch bytecode.
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, true);
+
+ // Was an OSR adapter generated?
+ // R3_RET = osr nmethod
+ cmpdi(CCR0, R3_RET, 0);
+ beq(CCR0, overflow_with_error);
+
+ // Has the nmethod been invalidated already?
+ lwz(Rtmp, nmethod::entry_bci_offset(), R3_RET);
+ cmpwi(CCR0, Rtmp, InvalidOSREntryBci);
+ beq(CCR0, overflow_with_error);
+
+ // Migrate the interpreter frame off of the stack.
+ // We can use all registers because we will not return to interpreter from this point.
+
+ // Save nmethod.
+ const Register osr_nmethod = R31;
+ mr(osr_nmethod, R3_RET);
+ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
+ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
+ reset_last_Java_frame();
+ // OSR buffer is in ARG1
+
+ // Remove the interpreter frame.
+ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
+
+ // Jump to the osr code.
+ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
+ mtlr(R0);
+ mtctr(R11_scratch1);
+ bctr();
+
+ align(32, 12);
+ bind(overflow_with_error);
+ bind(did_not_overflow);
+}
+
+// Store a value at some constant offset from the method data pointer.
+void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+
+ std(value, constant, R28_mdx);
+}
+
+// Increment the value at some constant offset from the method data pointer.
+void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
+ Register counter_addr,
+ Register Rbumped_count,
+ bool decrement) {
+ // Locate the counter at a fixed offset from the mdp:
+ addi(counter_addr, R28_mdx, constant);
+ increment_mdp_data_at(counter_addr, Rbumped_count, decrement);
+}
+
+// Increment the value at some non-fixed (reg + constant) offset from
+// the method data pointer.
+void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
+ int constant,
+ Register scratch,
+ Register Rbumped_count,
+ bool decrement) {
+ // Add the constant to reg to get the offset.
+ add(scratch, R28_mdx, reg);
+ // Then calculate the counter address.
+ addi(scratch, scratch, constant);
+ increment_mdp_data_at(scratch, Rbumped_count, decrement);
+}
+
+void InterpreterMacroAssembler::increment_mdp_data_at(Register counter_addr,
+ Register Rbumped_count,
+ bool decrement) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+
+ // Load the counter.
+ ld(Rbumped_count, 0, counter_addr);
+
+ if (decrement) {
+ // Decrement the register. Set condition codes.
+ addi(Rbumped_count, Rbumped_count, - DataLayout::counter_increment);
+ // Store the decremented counter.
+ std(Rbumped_count, 0, counter_addr);
+ // Note: add/sub overflow checks are not ported, since the 64-bit
+ // calculation should never overflow.
+ } else {
+ // Increment the register.
+ addi(Rbumped_count, Rbumped_count, DataLayout::counter_increment);
+ // Store the incremented counter.
+ std(Rbumped_count, 0, counter_addr);
+ }
+}
+
+// Set a flag value at the current method data pointer position.
+void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
+ Register scratch) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ // Load the data header.
+ lbz(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
+ // Set the flag.
+ ori(scratch, scratch, flag_constant);
+ // Store the modified header.
+ stb(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
+}
+
+// Test the location at some offset from the method data pointer.
+// If it is not equal to value, branch to the not_equal_continue Label.
+void InterpreterMacroAssembler::test_mdp_data_at(int offset,
+ Register value,
+ Label& not_equal_continue,
+ Register test_out) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+
+ ld(test_out, offset, R28_mdx);
+ cmpd(CCR0, value, test_out);
+ bne(CCR0, not_equal_continue);
+}
+
+// Update the method data pointer by the displacement located at some fixed
+// offset from the method data pointer.
+void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
+ Register scratch) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+
+ ld(scratch, offset_of_disp, R28_mdx);
+ add(R28_mdx, scratch, R28_mdx);
+}
+
+// Update the method data pointer by the displacement located at the
+// offset (reg + offset_of_disp).
+void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
+ int offset_of_disp,
+ Register scratch) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+
+ add(scratch, reg, R28_mdx);
+ ld(scratch, offset_of_disp, scratch);
+ add(R28_mdx, scratch, R28_mdx);
+}
+
+// Update the method data pointer by a simple constant displacement.
+void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+ addi(R28_mdx, R28_mdx, constant);
+}
+
+// Update the method data pointer for a _ret bytecode whose target
+// was not among our cached targets.
+void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
+ Register return_bci) {
+ assert(ProfileInterpreter, "must be profiling interpreter");
+
+ push(state);
+ assert(return_bci->is_nonvolatile(), "need to protect return_bci");
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
+ pop(state);
+}
+
+// Increments the backedge counter.
+// Returns backedge counter + invocation counter in Rdst.
+void InterpreterMacroAssembler::increment_backedge_counter(const Register Rcounters, const Register Rdst,
+ const Register Rtmp1, Register Rscratch) {
+ assert(UseCompiler, "incrementing must be useful");
+ assert_different_registers(Rdst, Rtmp1);
+ const Register invocation_counter = Rtmp1;
+ const Register counter = Rdst;
+ // TODO ppc port assert(4 == InvocationCounter::sz_counter(), "unexpected field size.");
+
+ // Load backedge counter.
+ lwz(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
+ in_bytes(InvocationCounter::counter_offset()), Rcounters);
+ // Load invocation counter.
+ lwz(invocation_counter, in_bytes(MethodCounters::invocation_counter_offset()) +
+ in_bytes(InvocationCounter::counter_offset()), Rcounters);
+
+ // Add the delta to the backedge counter.
+ addi(counter, counter, InvocationCounter::count_increment);
+
+ // Mask the invocation counter.
+ li(Rscratch, InvocationCounter::count_mask_value);
+ andr(invocation_counter, invocation_counter, Rscratch);
+
+ // Store new counter value.
+ stw(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
+ in_bytes(InvocationCounter::counter_offset()), Rcounters);
+ // Return invocation counter + backedge counter.
+ add(counter, counter, invocation_counter);
+}
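+// Net effect (rough sketch): the backedge counter is bumped by count_increment
+// and stored back; Rdst returns the bumped backedge counter plus the masked
+// invocation counter, so callers can compare the combined count against a
+// threshold (e.g. for OSR decisions).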
+
+// Count a taken branch in the bytecodes.
+void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // We are taking a branch. Increment the taken count.
+ increment_mdp_data_at(in_bytes(JumpData::taken_offset()), scratch, bumped_count);
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
+ bind (profile_continue);
+ }
+}
+
+// Count a not-taken branch in the bytecodes.
+void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch1, Register scratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // We are not taking the branch. Increment the not-taken count.
+ increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch1, scratch2);
+
+ // The method data pointer needs to be updated to correspond to the
+ // next bytecode.
+ update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
+ bind (profile_continue);
+ }
+}
+
+// Count a non-virtual call in the bytecodes.
+void InterpreterMacroAssembler::profile_call(Register scratch1, Register scratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // We are making a call. Increment the count.
+ increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
+ bind (profile_continue);
+ }
+}
+
+// Count a final call in the bytecodes.
+void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register scratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // We are making a call. Increment the count.
+ increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
+ bind (profile_continue);
+ }
+}
+
+// Count a virtual call in the bytecodes.
+void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
+ Register Rscratch1,
+ Register Rscratch2,
+ bool receiver_can_be_null) {
+ if (!ProfileInterpreter) { return; }
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ Label skip_receiver_profile;
+ if (receiver_can_be_null) {
+ Label not_null;
+ cmpdi(CCR0, Rreceiver, 0);
+ bne(CCR0, not_null);
+ // We are making a call. Increment the count for null receiver.
+ increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
+ b(skip_receiver_profile);
+ bind(not_null);
+ }
+
+ // Record the receiver type.
+ record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2, true);
+ bind(skip_receiver_profile);
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
+ bind (profile_continue);
+}
+
+void InterpreterMacroAssembler::profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ int mdp_delta = in_bytes(BitData::bit_data_size());
+ if (TypeProfileCasts) {
+ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
+
+ // Record the object type.
+ record_klass_in_profile(Rklass, Rscratch1, Rscratch2, false);
+ }
+
+ // The method data pointer needs to be updated.
+ update_mdp_by_constant(mdp_delta);
+
+ bind (profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_typecheck_failed(Register Rscratch1, Register Rscratch2) {
+ if (ProfileInterpreter && TypeProfileCasts) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ int count_offset = in_bytes(CounterData::count_offset());
+ // Back up the address, since we have already bumped the mdp.
+ count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
+
+ // *Decrement* the counter. We expect to see zero or small negatives.
+ increment_mdp_data_at(count_offset, Rscratch1, Rscratch2, true);
+
+ bind (profile_continue);
+ }
+}
+
+// Count a ret in the bytecodes.
+void InterpreterMacroAssembler::profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+ uint row;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // Update the total ret count.
+ increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
+
+ for (row = 0; row < RetData::row_limit(); row++) {
+ Label next_test;
+
+ // See if return_bci is equal to bci[n]:
+ test_mdp_data_at(in_bytes(RetData::bci_offset(row)), return_bci, next_test, scratch1);
+
+ // return_bci is equal to bci[n]. Increment the count.
+ increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch1, scratch2);
+
+ // The method data pointer needs to be updated to reflect the new target.
+ update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch1);
+ b(profile_continue);
+ bind(next_test);
+ }
+
+ update_mdp_for_ret(state, return_bci);
+
+ bind (profile_continue);
+ }
+}
+
+// Count the default case of a switch construct.
+void InterpreterMacroAssembler::profile_switch_default(Register scratch1, Register scratch2) {
+ if (ProfileInterpreter) {
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // Update the default case count
+ increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
+ scratch1, scratch2);
+
+ // The method data pointer needs to be updated.
+ update_mdp_by_offset(in_bytes(MultiBranchData::default_displacement_offset()),
+ scratch1);
+
+ bind (profile_continue);
+ }
+}
+
+// Count the index'th case of a switch construct.
+void InterpreterMacroAssembler::profile_switch_case(Register index,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ if (ProfileInterpreter) {
+ assert_different_registers(index, scratch1, scratch2, scratch3);
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes().
+ li(scratch3, in_bytes(MultiBranchData::case_array_offset()));
+
+ assert(in_bytes(MultiBranchData::per_case_size()) == 16, "so that sldi works");
+ sldi(scratch1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
+ add(scratch1, scratch1, scratch3);
+
+ // Update the case count.
+ increment_mdp_data_at(scratch1, in_bytes(MultiBranchData::relative_count_offset()), scratch2, scratch3);
+
+ // The method data pointer needs to be updated.
+ update_mdp_by_offset(scratch1, in_bytes(MultiBranchData::relative_displacement_offset()), scratch2);
+
+ bind (profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::profile_null_seen(Register Rscratch1, Register Rscratch2) {
+ if (ProfileInterpreter) {
+ assert_different_registers(Rscratch1, Rscratch2);
+ Label profile_continue;
+
+ // If no method data exists, go to profile_continue.
+ test_method_data_pointer(profile_continue);
+
+ set_mdp_flag_at(BitData::null_seen_byte_constant(), Rscratch1);
+
+ // The method data pointer needs to be updated.
+ int mdp_delta = in_bytes(BitData::bit_data_size());
+ if (TypeProfileCasts) {
+ mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
+ }
+ update_mdp_by_constant(mdp_delta);
+
+ bind (profile_continue);
+ }
+}
+
+void InterpreterMacroAssembler::record_klass_in_profile(Register Rreceiver,
+ Register Rscratch1, Register Rscratch2,
+ bool is_virtual_call) {
+ assert(ProfileInterpreter, "must be profiling");
+ assert_different_registers(Rreceiver, Rscratch1, Rscratch2);
+
+ Label done;
+ record_klass_in_profile_helper(Rreceiver, Rscratch1, Rscratch2, 0, done, is_virtual_call);
+ bind (done);
+}
+
+void InterpreterMacroAssembler::record_klass_in_profile_helper(
+ Register receiver, Register scratch1, Register scratch2,
+ int start_row, Label& done, bool is_virtual_call) {
+ if (TypeProfileWidth == 0) {
+ if (is_virtual_call) {
+ increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
+ }
+ return;
+ }
+
+ int last_row = VirtualCallData::row_limit() - 1;
+ assert(start_row <= last_row, "must be work left to do");
+ // Test this row for both the receiver and for null.
+ // Take any of three different outcomes:
+ // 1. found receiver => increment count and goto done
+ // 2. found null => keep looking for case 1, maybe allocate this cell
+ // 3. found something else => keep looking for cases 1 and 2
+ // Case 3 is handled by a recursive call.
+ for (int row = start_row; row <= last_row; row++) {
+ Label next_test;
+ bool test_for_null_also = (row == start_row);
+
+ // See if the receiver is receiver[n].
+ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
+ test_mdp_data_at(recvr_offset, receiver, next_test, scratch1);
+
+ // The receiver is receiver[n]. Increment count[n].
+ int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
+ increment_mdp_data_at(count_offset, scratch1, scratch2);
+ b(done);
+ bind(next_test);
+
+ if (test_for_null_also) {
+ Label found_null;
+ // Failed the equality check on receiver[n]... Test for null.
+ if (start_row == last_row) {
+ // The only thing left to do is handle the null case.
+ if (is_virtual_call) {
+ // Scratch1 contains test_out from test_mdp_data_at.
+ cmpdi(CCR0, scratch1, 0);
+ beq(CCR0, found_null);
+ // Receiver did not match any saved receiver and there is no empty row for it.
+ // Increment total counter to indicate polymorphic case.
+ increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
+ b(done);
+ bind(found_null);
+ } else {
+ cmpdi(CCR0, scratch1, 0);
+ bne(CCR0, done);
+ }
+ break;
+ }
+ // Since null is rare, make it the branch-taken case.
+ cmpdi(CCR0, scratch1, 0);
+ beq(CCR0, found_null);
+
+ // Put all the "Case 3" tests here.
+ record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done, is_virtual_call);
+
+ // Found a null. Keep searching for a matching receiver,
+ // but remember that this is an empty (unused) slot.
+ bind(found_null);
+ }
+ }
+
+ // In the fall-through case, we found no matching receiver, but we
+ // observed that receiver[start_row] is NULL.
+
+ // Fill in the receiver field and increment the count.
+ int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
+ set_mdp_data_at(recvr_offset, receiver);
+ int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
+ li(scratch1, DataLayout::counter_increment);
+ set_mdp_data_at(count_offset, scratch1);
+ if (start_row > 0) {
+ b(done);
+ }
+}
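+// Net effect of the generated code (rough summary): search the rows for a
+// matching receiver and bump its count; otherwise claim the first empty row
+// for this receiver with an initial count of counter_increment; if every row
+// is taken and this is a virtual call, bump the plain counter instead to
+// record the polymorphic case.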
+
+// Add an InterpMonitorElem to the stack (see frame_ppc.hpp).
+void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
+
+ // Very-local scratch registers.
+ const Register esp = Rtemp1;
+ const Register slot = Rtemp2;
+
+ // Extracted monitor_size.
+ int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
+ assert(Assembler::is_aligned((unsigned int)monitor_size,
+ (unsigned int)frame::alignment_in_bytes),
+ "size of a monitor must respect alignment of SP");
+
+ resize_frame(-monitor_size, /*temp*/esp); // Allocate space for new monitor
+ std(R1_SP, _ijava_state_neg(top_frame_sp), esp); // esp contains fp
+
+ // Shuffle the expression stack down. Recall that stack_base points
+ // just above the new expression stack bottom. esp is used to scan
+ // through the old and new expression stacks.
+ if (!stack_is_empty) {
+ Label copy_slot, copy_slot_finished;
+ const Register n_slots = slot;
+
+ addi(esp, R15_esp, Interpreter::stackElementSize); // Point to first element (pre-pushed stack).
+ subf(n_slots, esp, R26_monitor);
+ srdi_(n_slots, n_slots, LogBytesPerWord); // Compute number of slots to copy.
+ assert(LogBytesPerWord == 3, "conflicts assembler instructions");
+ beq(CCR0, copy_slot_finished); // Nothing to copy.
+
+ mtctr(n_slots);
+
+ // loop
+ bind(copy_slot);
+ ld(slot, 0, esp); // Move expression stack down.
+ std(slot, -monitor_size, esp); // distance = monitor_size
+ addi(esp, esp, BytesPerWord);
+ bdnz(copy_slot);
+
+ bind(copy_slot_finished);
+ }
+
+ addi(R15_esp, R15_esp, -monitor_size);
+ addi(R26_monitor, R26_monitor, -monitor_size);
+
+ // Restart interpreter
+}
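+// Frame layout note: the frame grows by monitor_size, and each expression
+// stack slot between esp and the monitor area is copied down by the same
+// amount, which opens up a fresh monitor slot just below the existing ones.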
+
+// ============================================================================
+// Java locals access
+
+// Load a local variable at index in Rindex into register Rdst_value.
+// Also puts address of local into Rdst_address as a service.
+// Kills:
+// - Rdst_value
+// - Rdst_address
+void InterpreterMacroAssembler::load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex) {
+ sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
+ subf(Rdst_address, Rdst_address, R18_locals);
+ lwz(Rdst_value, 0, Rdst_address);
+}
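+// Locals live below R18_locals and grow towards smaller addresses, so the
+// slot address above is R18_locals - (Rindex << logStackElementSize).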
+
+// Load a local variable at index in Rindex into register Rdst_value.
+// Also puts address of local into Rdst_address as a service.
+// Kills:
+// - Rdst_value
+// - Rdst_address
+void InterpreterMacroAssembler::load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex) {
+ sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
+ subf(Rdst_address, Rdst_address, R18_locals);
+ ld(Rdst_value, -8, Rdst_address);
+}
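+// Longs occupy two stack slots; Rindex names the first, and the 64-bit value
+// sits in the second (lower-addressed) slot, hence the -8 displacement.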
+
+// Load a local variable at index in Rindex into register Rdst_value.
+// Also puts address of local into Rdst_address as a service.
+// Input:
+// - Rindex: slot nr of local variable
+// Kills:
+// - Rdst_value
+// - Rdst_address
+void InterpreterMacroAssembler::load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex) {
+ sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
+ subf(Rdst_address, Rdst_address, R18_locals);
+ ld(Rdst_value, 0, Rdst_address);
+}
+
+// Load a local variable at index in Rindex into register Rdst_value.
+// Also puts address of local into Rdst_address as a service.
+// Kills:
+// - Rdst_value
+// - Rdst_address
+void InterpreterMacroAssembler::load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex) {
+ sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
+ subf(Rdst_address, Rdst_address, R18_locals);
+ lfs(Rdst_value, 0, Rdst_address);
+}
+
+// Load a local variable at index in Rindex into register Rdst_value.
+// Also puts address of local into Rdst_address as a service.
+// Kills:
+// - Rdst_value
+// - Rdst_address
+void InterpreterMacroAssembler::load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex) {
+ sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
+ subf(Rdst_address, Rdst_address, R18_locals);
+ lfd(Rdst_value, -8, Rdst_address);
+}
+
+// Store an int value at local variable slot Rindex.
+// Kills:
+// - Rindex
+void InterpreterMacroAssembler::store_local_int(Register Rvalue, Register Rindex) {
+ sldi(Rindex, Rindex, Interpreter::logStackElementSize);
+ subf(Rindex, Rindex, R18_locals);
+ stw(Rvalue, 0, Rindex);
+}
+
+// Store a long value at local variable slot Rindex.
+// Kills:
+// - Rindex
+void InterpreterMacroAssembler::store_local_long(Register Rvalue, Register Rindex) {
+ sldi(Rindex, Rindex, Interpreter::logStackElementSize);
+ subf(Rindex, Rindex, R18_locals);
+ std(Rvalue, -8, Rindex);
+}
+
+// Store an oop value at local variable slot Rindex.
+// Kills:
+// - Rindex
+void InterpreterMacroAssembler::store_local_ptr(Register Rvalue, Register Rindex) {
+ sldi(Rindex, Rindex, Interpreter::logStackElementSize);
+ subf(Rindex, Rindex, R18_locals);
+ std(Rvalue, 0, Rindex);
+}
+
+// Store a float value at local variable slot Rindex.
+// Kills:
+// - Rindex
+void InterpreterMacroAssembler::store_local_float(FloatRegister Rvalue, Register Rindex) {
+ sldi(Rindex, Rindex, Interpreter::logStackElementSize);
+ subf(Rindex, Rindex, R18_locals);
+ stfs(Rvalue, 0, Rindex);
+}
+
+// Store a double value at local variable slot Rindex.
+// Kills:
+// - Rindex
+void InterpreterMacroAssembler::store_local_double(FloatRegister Rvalue, Register Rindex) {
+ sldi(Rindex, Rindex, Interpreter::logStackElementSize);
+ subf(Rindex, Rindex, R18_locals);
+ stfd(Rvalue, -8, Rindex);
+}
+
+// Read the pending exception from the thread and jump to the interpreter's
+// rethrow-exception entry if one is pending. Fall through otherwise.
+void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1, Register Rscratch2) {
+ assert_different_registers(Rscratch1, Rscratch2, R3);
+ Register Rexception = Rscratch1;
+ Register Rtmp = Rscratch2;
+ Label Ldone;
+ // Get pending exception oop.
+ ld(Rexception, thread_(pending_exception));
+ cmpdi(CCR0, Rexception, 0);
+ beq(CCR0, Ldone);
+ li(Rtmp, 0);
+ mr_if_needed(R3, Rexception);
+ std(Rtmp, thread_(pending_exception)); // Clear exception in thread
+ if (Interpreter::rethrow_exception_entry() != NULL) {
+ // Already got entry address.
+ load_dispatch_table(Rtmp, (address*)Interpreter::rethrow_exception_entry());
+ } else {
+ // Dynamically load entry address.
+ int simm16_rest = load_const_optimized(Rtmp, &Interpreter::_rethrow_exception_entry, R0, true);
+ ld(Rtmp, simm16_rest, Rtmp);
+ }
+ mtctr(Rtmp);
+ save_interpreter_state(Rtmp);
+ bctr();
+
+ align(32, 12);
+ bind(Ldone);
+}
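+// Rough C equivalent of the sequence above:
+//   if (thread->pending_exception() != NULL) {
+//     R3 = pending_exception; thread->clear_pending_exception();
+//     goto rethrow_exception_entry;   // interpreter state is saved first
+//   }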
+
+void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
+ save_interpreter_state(R11_scratch1);
+
+ MacroAssembler::call_VM(oop_result, entry_point, false);
+
+ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+
+ check_and_handle_popframe(R11_scratch1);
+ check_and_handle_earlyret(R11_scratch1);
+ // Now check exceptions manually.
+ if (check_exceptions) {
+ check_and_forward_exception(R11_scratch1, R12_scratch2);
+ }
+}
+
+void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
+ // ARG1 is reserved for the thread.
+ mr_if_needed(R4_ARG2, arg_1);
+ call_VM(oop_result, entry_point, check_exceptions);
+}
+
+void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
+ // ARG1 is reserved for the thread.
+ mr_if_needed(R4_ARG2, arg_1);
+ assert(arg_2 != R4_ARG2, "smashed argument");
+ mr_if_needed(R5_ARG3, arg_2);
+ call_VM(oop_result, entry_point, check_exceptions);
+}
+
+void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
+ // ARG1 is reserved for the thread.
+ mr_if_needed(R4_ARG2, arg_1);
+ assert(arg_2 != R4_ARG2, "smashed argument");
+ mr_if_needed(R5_ARG3, arg_2);
+ assert(arg_3 != R4_ARG2 && arg_3 != R5_ARG3, "smashed argument");
+ mr_if_needed(R6_ARG4, arg_3);
+ call_VM(oop_result, entry_point, check_exceptions);
+}
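+// The asserts above guard the left-to-right filling of the argument
+// registers: e.g. arg_2 must not already sit in R4_ARG2, since the
+// preceding mr_if_needed(R4_ARG2, arg_1) would have clobbered it.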
+
+void InterpreterMacroAssembler::save_interpreter_state(Register scratch) {
+ ld(scratch, 0, R1_SP);
+ std(R15_esp, _ijava_state_neg(esp), scratch);
+ std(R14_bcp, _ijava_state_neg(bcp), scratch);
+ std(R26_monitor, _ijava_state_neg(monitors), scratch);
+ if (ProfileInterpreter) { std(R28_mdx, _ijava_state_neg(mdx), scratch); }
+ // Other entries should be unchanged.
+}
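+// The top frame is located via the back chain at *(R1_SP); esp, bcp, the
+// monitor top and (when profiling) mdx are spilled into its ijava_state
+// area so VM calls can inspect or modify the interpreter state.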
+
+void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool bcp_and_mdx_only) {
+ ld(scratch, 0, R1_SP);
+ ld(R14_bcp, _ijava_state_neg(bcp), scratch); // Changed by VM code (exception).
+ if (ProfileInterpreter) { ld(R28_mdx, _ijava_state_neg(mdx), scratch); } // Changed by VM code.
+ if (!bcp_and_mdx_only) {
+ // Following ones are Metadata.
+ ld(R19_method, _ijava_state_neg(method), scratch);
+ ld(R27_constPoolCache, _ijava_state_neg(cpoolCache), scratch);
+ // Following ones are stack addresses and don't require reload.
+ ld(R15_esp, _ijava_state_neg(esp), scratch);
+ ld(R18_locals, _ijava_state_neg(locals), scratch);
+ ld(R26_monitor, _ijava_state_neg(monitors), scratch);
+ }
+#ifdef ASSERT
+ {
+ Label Lok;
+ subf(R0, R1_SP, scratch);
+ cmpdi(CCR0, R0, frame::abi_reg_args_size + frame::ijava_state_size);
+ bge(CCR0, Lok);
+ stop("frame too small (restore istate)", 0x5432);
+ bind(Lok);
+ }
+ {
+ Label Lok;
+ ld(R0, _ijava_state_neg(ijava_reserved), scratch);
+ cmpdi(CCR0, R0, 0x5afe);
+ beq(CCR0, Lok);
+ stop("frame corrupted (restore istate)", 0x5afe);
+ bind(Lok);
+ }
+#endif
+}
+
+#endif // !CC_INTERP
+
void InterpreterMacroAssembler::get_method_counters(Register method,
Register Rcounters,
Label& skip) {
@@ -321,6 +1957,66 @@
if (state == atos) { MacroAssembler::verify_oop(reg); }
}
+#ifndef CC_INTERP
+// Local helper function for the verify_oop_or_return_address macro.
+static bool verify_return_address(Method* m, int bci) {
+#ifndef PRODUCT
+ address pc = (address)(m->constMethod()) + in_bytes(ConstMethod::codes_offset()) + bci;
+ // Assume it is a valid return address if it is inside m and is preceded by a jsr.
+ if (!m->contains(pc)) return false;
+ address jsr_pc;
+ jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
+ if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true;
+ jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
+ if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true;
+#endif // PRODUCT
+ return false;
+}
+
+void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
+ if (VerifyFPU) {
+ unimplemented("verfiyFPU");
+ }
+}
+
+void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
+ if (!VerifyOops) return;
+
+ // The VM documentation for the astore[_wide] bytecode allows
+ // the TOS to be not only an oop but also a return address.
+ Label test;
+ Label skip;
+ // See if it is an address (in the current method):
+
+ const int log2_bytecode_size_limit = 16;
+ srdi_(Rtmp, reg, log2_bytecode_size_limit);
+ bne(CCR0, test);
+
+ address fd = CAST_FROM_FN_PTR(address, verify_return_address);
+ unsigned int nbytes_save = 10*8; // 10 volatile gprs
+
+ save_LR_CR(Rtmp);
+ push_frame_reg_args(nbytes_save, Rtmp);
+ save_volatile_gprs(R1_SP, 112); // except R0
+
+ load_const_optimized(Rtmp, fd, R0);
+ mr_if_needed(R4_ARG2, reg);
+ mr(R3_ARG1, R19_method);
+ call_c(Rtmp); // call C
+
+ restore_volatile_gprs(R1_SP, 112); // except R0
+ pop_frame();
+ restore_LR_CR(Rtmp);
+ b(skip);
+
+ // Not an address; verify it with a more elaborate out-of-line call:
+ bind(test);
+ verify_oop(reg);
+ bind(skip);
+}
+#endif // !CC_INTERP
+
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
@@ -343,13 +2039,12 @@
cmpwi(CCR0, R0, 0);
beq(CCR0, jvmti_post_done);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry),
- /*check_exceptions=*/false);
+ /*check_exceptions=*/true CC_INTERP_ONLY(&& false));
bind(jvmti_post_done);
}
}
-
// Inline assembly for:
//
// if (thread is in interp_only_mode) {
@@ -365,26 +2060,33 @@
//
// Native methods have their result stored in d_tmp and l_tmp.
// Java methods have their result stored in the expression stack.
-void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state) {
+void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state,
+ NotifyMethodExitMode mode, bool check_exceptions) {
// JVMTI
// Whenever JVMTI puts a thread in interp_only_mode, method
// entry/exit events are sent for that thread to track stack
// depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
- if (JvmtiExport::can_post_interpreter_events()) {
+ if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
Label jvmti_post_done;
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
cmpwi(CCR0, R0, 0);
beq(CCR0, jvmti_post_done);
+ CC_INTERP_ONLY(assert(is_native_method && !check_exceptions, "must not push state"));
+ if (!is_native_method) push(state); // Expose tos to GC.
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit),
- /*check_exceptions=*/false);
+ /*check_exceptions=*/check_exceptions);
+ if (!is_native_method) pop(state);
align(32, 12);
bind(jvmti_post_done);
}
+
+ // DTrace support not implemented.
}
+#ifdef CC_INTERP
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
// (using parent_frame_resize) and push a new interpreter
// TOP_IJAVA_FRAME (using frame_size).
@@ -442,7 +2144,6 @@
std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP);
}
-#ifdef CC_INTERP
// Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) {
assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3);
@@ -471,7 +2172,6 @@
// Used for non-initial callers by unextended_sp().
std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
}
-#endif // CC_INTERP
// Set SP to initial caller's sp, but before fix the back chain.
void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) {
@@ -481,7 +2181,6 @@
mr(R1_SP, tmp1); // ... and resize to initial caller.
}
-#ifdef CC_INTERP
// Pop the current interpreter state (without popping the corresponding
// frame) and restore R14_state and R15_prev_state accordingly.
// Use prev_state_may_be_0 to indicate whether prev_state may be 0
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "assembler_ppc.inline.hpp"
#include "interpreter/invocationCounter.hpp"
-// This file specializes the assembler with interpreter-specific macros
+// This file specializes the assembler with interpreter-specific macros.
class InterpreterMacroAssembler: public MacroAssembler {
@@ -39,15 +39,176 @@
void null_check_throw(Register a, int offset, Register temp_reg);
- // Handy address generation macros
+ void branch_to_entry(address entry, Register Rscratch);
+
+ // Handy address generation macros.
#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
#ifdef CC_INTERP
#define state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R14_state
#define prev_state_(field_name) in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R15_prev_state
+ void pop (TosState state) {}; // Not needed.
+ void push(TosState state) {}; // Not needed.
#endif
+#ifndef CC_INTERP
+ virtual void check_and_handle_popframe(Register java_thread);
+ virtual void check_and_handle_earlyret(Register java_thread);
+
+ // Base routine for all dispatches.
+ void dispatch_base(TosState state, address* table);
+
+ void load_earlyret_value(TosState state, Register Rscratch1);
+
+ static const Address l_tmp;
+ static const Address d_tmp;
+
+ // dispatch routines
+ void dispatch_next(TosState state, int step = 0);
+ void dispatch_via (TosState state, address* table);
+ void load_dispatch_table(Register dst, address* table);
+ void dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify = false);
+
+ // Called by shared interpreter generator.
+ void dispatch_prolog(TosState state, int step = 0);
+ void dispatch_epilog(TosState state, int step = 0);
+
+ // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls.
+ void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
+ void super_call_VM(Register thread_cache, Register oop_result, Register last_java_sp,
+ address entry_point, Register arg_1, Register arg_2, bool check_exception = true);
+
+ // Generate a subtype check: branch to ok_is_subtype if sub_klass is
+ // a subtype of super_klass. Blows registers tmp1, tmp2 and tmp3.
+ void gen_subtype_check(Register sub_klass, Register super_klass,
+ Register tmp1, Register tmp2, Register tmp3, Label &ok_is_subtype);
+
+ // Load object from cpool->resolved_references(index).
+ void load_resolved_reference_at_index(Register result, Register index);
+
+ void generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1);
+ void load_receiver(Register Rparam_count, Register Rrecv_dst);
+
+ // helpers for expression stack
+ void pop_i( Register r = R17_tos);
+ void pop_ptr( Register r = R17_tos);
+ void pop_l( Register r = R17_tos);
+ void pop_f(FloatRegister f = F15_ftos);
+ void pop_d(FloatRegister f = F15_ftos);
+
+ void push_i( Register r = R17_tos);
+ void push_ptr( Register r = R17_tos);
+ void push_l( Register r = R17_tos);
+ void push_f(FloatRegister f = F15_ftos);
+ void push_d(FloatRegister f = F15_ftos);
+
+ void push_2ptrs(Register first, Register second);
+
+ void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
+ void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
+
+ void pop (TosState state); // transition vtos -> state
+ void push(TosState state); // transition state -> vtos
+ void empty_expression_stack(); // Resets both R15_esp and SP.
+
+ public:
+ // Load values from bytecode stream:
+
+ enum signedOrNot { Signed, Unsigned };
+ enum setCCOrNot { set_CC, dont_set_CC };
+
+ void get_2_byte_integer_at_bcp(int bcp_offset,
+ Register Rdst,
+ signedOrNot is_signed);
+
+ void get_4_byte_integer_at_bcp(int bcp_offset,
+ Register Rdst,
+ signedOrNot is_signed = Unsigned);
+
+ void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);
+
+ void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
+
+
+ // common code
+
+ void field_offset_at(int n, Register tmp, Register dest, Register base);
+ int field_offset_at(Register object, address bcp, int offset);
+ void fast_iaaccess(int n, address bcp);
+ void fast_iagetfield(address bcp);
+ void fast_iaputfield(address bcp, bool do_store_check);
+
+ void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
+ void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
+
+ void get_const(Register Rdst);
+ void get_constant_pool(Register Rdst);
+ void get_constant_pool_cache(Register Rdst);
+ void get_cpool_and_tags(Register Rcpool, Register Rtags);
+ void is_a(Label& L);
+
+ // Java Call Helpers
+ void call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2);
+
+ // --------------------------------------------------
+
+ void unlock_if_synchronized_method(TosState state, bool throw_monitor_exception = true,
+ bool install_monitor_exception = true);
+
+ // Removes the current activation (incl. unlocking of monitors).
+ // Additionally this code is used for earlyReturn, in which case we
+ // want to skip throwing and installing an exception.
+ void remove_activation(TosState state,
+ bool throw_monitor_exception = true,
+ bool install_monitor_exception = true);
+ void merge_frames(Register Rtop_frame_sp, Register return_pc, Register Rscratch1, Register Rscratch2); // merge top frames
+
+ void add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2);
+
+ // Local variable access helpers
+ void load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex);
+ void load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex);
+ void load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex);
+ void load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
+ void load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
+ void store_local_int(Register Rvalue, Register Rindex);
+ void store_local_long(Register Rvalue, Register Rindex);
+ void store_local_ptr(Register Rvalue, Register Rindex);
+ void store_local_float(FloatRegister Rvalue, Register Rindex);
+ void store_local_double(FloatRegister Rvalue, Register Rindex);
+
+ // Call VM for std frames
+ // Special call VM versions that check for exceptions and forward exception
+ // via short cut (not via expensive forward exception stub).
+ void check_and_forward_exception(Register Rscratch1, Register Rscratch2);
+ void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+ void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+ // Should not be used:
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true) {ShouldNotReachHere();}
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true) {ShouldNotReachHere();}
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true) {ShouldNotReachHere();}
+ void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true) {ShouldNotReachHere();}
+
+ Address first_local_in_stack();
+
+ enum LoadOrStore { load, store };
+ void static_iload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
+ void static_aload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
+ void static_dload_or_store(int which_local, LoadOrStore direction);
+
+ void save_interpreter_state(Register scratch);
+ void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);
+
+ void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
+ void test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp);
+
+ void record_static_call_in_profile(Register Rentry, Register Rtmp);
+ void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
+#endif // !CC_INTERP
+
void get_method_counters(Register method, Register Rcounters, Label& skip);
void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);
@@ -55,12 +216,59 @@
void lock_object (Register lock_reg, Register obj_reg);
void unlock_object(Register lock_reg, bool check_for_exceptions = true);
+#ifndef CC_INTERP
+
+ // Interpreter profiling operations
+ void set_method_data_pointer_for_bcp();
+ void test_method_data_pointer(Label& zero_continue);
+ void verify_method_data_pointer();
+ void test_invocation_counter_for_mdp(Register invocation_count, Register Rscratch, Label &profile_continue);
+
+ void set_mdp_data_at(int constant, Register value);
+
+ void increment_mdp_data_at(int constant, Register counter_addr, Register Rbumped_count, bool decrement = false);
+
+ void increment_mdp_data_at(Register counter_addr, Register Rbumped_count, bool decrement = false);
+ void increment_mdp_data_at(Register reg, int constant, Register scratch, Register Rbumped_count, bool decrement = false);
+
+ void set_mdp_flag_at(int flag_constant, Register scratch);
+ void test_mdp_data_at(int offset, Register value, Label& not_equal_continue, Register test_out);
+
+ void update_mdp_by_offset(int offset_of_disp, Register scratch);
+ void update_mdp_by_offset(Register reg, int offset_of_disp,
+ Register scratch);
+ void update_mdp_by_constant(int constant);
+ void update_mdp_for_ret(TosState state, Register return_bci);
+
+ void profile_taken_branch(Register scratch, Register bumped_count);
+ void profile_not_taken_branch(Register scratch1, Register scratch2);
+ void profile_call(Register scratch1, Register scratch2);
+ void profile_final_call(Register scratch1, Register scratch2);
+ void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2, bool receiver_can_be_null);
+ void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
+ void profile_typecheck_failed(Register Rscratch1, Register Rscratch2);
+ void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
+ void profile_switch_default(Register scratch1, Register scratch2);
+ void profile_switch_case(Register index, Register scratch1, Register scratch2, Register scratch3);
+ void profile_null_seen(Register Rscratch1, Register Rscratch2);
+ void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
+ void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
+
+#endif // !CC_INTERP
+
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
+#ifndef CC_INTERP
+ void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
+ void verify_FPU(int stack_depth, TosState state = ftos);
+#endif // !CC_INTERP
- // support for jvmdi/jvmpi
+ typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
+
+ // Support for jvmdi/jvmpi.
void notify_method_entry();
- void notify_method_exit(bool is_native_method, TosState state);
+ void notify_method_exit(bool is_native_method, TosState state,
+ NotifyMethodExitMode mode, bool check_exceptions);
#ifdef CC_INTERP
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
--- a/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/interpreterRT_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -109,8 +109,10 @@
}
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
+#if !defined(ABI_ELFv2)
// Emit fd for current codebuffer. Needs patching!
__ emit_fd();
+#endif
// Generate code to handle arguments.
iterate(fingerprint);
@@ -127,11 +129,13 @@
// Implementation of SignatureHandlerLibrary
void SignatureHandlerLibrary::pd_set_handler(address handler) {
+#if !defined(ABI_ELFv2)
// patch fd here.
FunctionDescriptor* fd = (FunctionDescriptor*) handler;
fd->set_entry(handler + (int)sizeof(FunctionDescriptor));
assert(fd->toc() == (address)0xcafe, "need to adjust TOC here");
+#endif
}
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,10 +51,6 @@
#include "c1/c1_Runtime1.hpp"
#endif
-#ifndef CC_INTERP
-#error "CC_INTERP must be defined on PPC"
-#endif
-
#define __ _masm->
#ifdef PRODUCT
@@ -128,13 +124,13 @@
const Register target_sp = R28_tmp8;
const FloatRegister floatSlot = F0;
- address entry = __ emit_fd();
+ address entry = __ function_entry();
__ save_LR_CR(R0);
__ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
// We use target_sp for storing arguments in the C frame.
__ mr(target_sp, R1_SP);
- __ push_frame_abi112_nonvolatiles(0, R11_scratch1);
+ __ push_frame_reg_args_nonvolatiles(0, R11_scratch1);
__ mr(arg_java, R3_ARG1);
@@ -147,7 +143,8 @@
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
- __ unimplemented("slow signature handler 1");
+ __ ld(R19_method, 0, target_sp);
+ __ ld(R19_method, _ijava_state_neg(method), R19_method);
#endif
// Get the result handler.
@@ -157,7 +154,8 @@
#ifdef CC_INTERP
__ ld(R19_method, state_(_method));
#else
- __ unimplemented("slow signature handler 2");
+ __ ld(R19_method, 0, target_sp);
+ __ ld(R19_method, _ijava_state_neg(method), R19_method);
#endif
{
@@ -453,7 +451,7 @@
//
// Registers alive
// R16_thread - JavaThread*
- // R19_method - callee's methodOop (method to be invoked)
+ // R19_method - callee's method (method to be invoked)
// R1_SP - SP prepared such that caller's outgoing args are near top
// LR - return address to caller
//
@@ -474,7 +472,7 @@
// Push a new C frame and save LR.
__ save_LR_CR(R0);
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
// This is not a leaf but we have a JavaFrameAnchor now and we will
// check (create) exceptions afterward so this is ok.
@@ -491,7 +489,12 @@
// Return to frame manager, it will handle the pending exception.
__ blr();
#else
- Unimplemented();
+ // We don't know our caller, so jump to the general forward exception stub,
+ // which will also pop our full frame off. Satisfy the interface of
+ // SharedRuntime::generate_forward_exception().
+ __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
+ __ mtctr(R11_scratch1);
+ __ bctr();
#endif
return entry;
@@ -500,8 +503,9 @@
// Call an accessor method (assuming it is resolved, otherwise drop into
// vanilla (slow path) entry.
address InterpreterGenerator::generate_accessor_entry(void) {
- if(!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
+ if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
return NULL;
+ }
Label Lslow_path, Lacquire;
@@ -586,10 +590,14 @@
// Load from branch table and dispatch (volatile case: one instruction ahead)
__ sldi(Rflags, Rflags, LogBytesPerWord);
__ cmpwi(CCR6, Rscratch, 1); // volatile?
- __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
+ }
__ ldx(Rbtable, Rbtable, Rflags);
- __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
+ }
__ mtctr(Rbtable);
__ bctr();
@@ -605,7 +613,7 @@
}
assert(all_uninitialized != all_initialized, "consistency"); // either or
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
@@ -614,7 +622,7 @@
if (branch_table[itos] == 0) { // generate only once
__ align(32, 28, 28); // align load
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[itos] = __ pc(); // non-volatile_entry point
__ lwax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@@ -623,7 +631,7 @@
if (branch_table[ltos] == 0) { // generate only once
__ align(32, 28, 28); // align load
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ltos] = __ pc(); // non-volatile_entry point
__ ldx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@@ -632,7 +640,7 @@
if (branch_table[btos] == 0) { // generate only once
__ align(32, 28, 28); // align load
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[btos] = __ pc(); // non-volatile_entry point
__ lbzx(R3_RET, Rclass_or_obj, Roffset);
__ extsb(R3_RET, R3_RET);
@@ -642,7 +650,7 @@
if (branch_table[ctos] == 0) { // generate only once
__ align(32, 28, 28); // align load
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[ctos] = __ pc(); // non-volatile_entry point
__ lhzx(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@@ -651,7 +659,7 @@
if (branch_table[stos] == 0) { // generate only once
__ align(32, 28, 28); // align load
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[stos] = __ pc(); // non-volatile_entry point
__ lhax(R3_RET, Rclass_or_obj, Roffset);
__ beq(CCR6, Lacquire);
@@ -660,7 +668,7 @@
if (branch_table[atos] == 0) { // generate only once
__ align(32, 28, 28); // align load
- __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+ __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
branch_table[atos] = __ pc(); // non-volatile_entry point
__ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
__ verify_oop(R3_RET);
@@ -683,10 +691,7 @@
#endif
__ bind(Lslow_path);
- assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
- __ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
- __ mtctr(Rscratch);
- __ bctr();
+ __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
__ flush();
return entry;
@@ -773,10 +778,7 @@
// Generate regular method entry.
__ bind(slow_path);
- assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
- __ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
- __ mtctr(R11_scratch1);
- __ bctr();
+ __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
__ flush();
return entry;
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,15 +28,23 @@
public:
- // Stack index relative to tos (which points at value)
+ // Stack index relative to tos (which points at value).
static int expr_index_at(int i) {
return stackElementWords * i;
}
- // Already negated by c++ interpreter
+ // Already negated by c++ interpreter.
static int local_index_at(int i) {
assert(i <= 0, "local direction already negated");
return stackElementWords * i;
}
+#ifndef CC_INTERP
+ // The offset in bytes to access an expression stack slot
+ // relative to the esp pointer.
+ static int expr_offset_in_bytes(int slot) {
+ return stackElementSize * slot + wordSize;
+ }
+#endif
+
#endif // CPU_PPC_VM_INTERPRETER_PPC_PP
--- a/hotspot/src/cpu/ppc/vm/javaFrameAnchor_ppc.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/javaFrameAnchor_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,6 @@
#ifndef CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
#define CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
-#ifndef CC_INTERP
-#error "CC_INTERP must be defined on PPC64"
-#endif
-
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -594,7 +594,13 @@
"can't identify emitted call");
} else {
// variant 1:
-
+#if defined(ABI_ELFv2)
+ nop();
+ calculate_address_from_global_toc(R12, dest, true, true, false);
+ mtctr(R12);
+ nop();
+ nop();
+#else
mr(R0, R11); // spill R11 -> R0.
// Load the destination address into CTR,
@@ -604,6 +610,7 @@
mtctr(R11);
mr(R11, R0); // spill R11 <- R0.
nop();
+#endif
// do the call/jump
if (link) {
@@ -912,16 +919,16 @@
}
}
-// Push a frame of size `bytes' plus abi112 on top.
-void MacroAssembler::push_frame_abi112(unsigned int bytes, Register tmp) {
- push_frame(bytes + frame::abi_112_size, tmp);
+// Push a frame of size `bytes' plus abi_reg_args on top.
+void MacroAssembler::push_frame_reg_args(unsigned int bytes, Register tmp) {
+ push_frame(bytes + frame::abi_reg_args_size, tmp);
}
// Set up a new C frame with a spill area for non-volatile GPRs and
// additional space for local variables.
-void MacroAssembler::push_frame_abi112_nonvolatiles(unsigned int bytes,
- Register tmp) {
- push_frame(bytes + frame::abi_112_size + frame::spill_nonvolatiles_size, tmp);
+void MacroAssembler::push_frame_reg_args_nonvolatiles(unsigned int bytes,
+ Register tmp) {
+ push_frame(bytes + frame::abi_reg_args_size + frame::spill_nonvolatiles_size, tmp);
}
// Pop current C frame.
@@ -929,6 +936,42 @@
ld(R1_SP, _abi(callers_sp), R1_SP);
}
+#if defined(ABI_ELFv2)
+address MacroAssembler::branch_to(Register r_function_entry, bool and_link) {
+ // TODO(asmundak): make sure the caller uses R12 as function descriptor
+ // most of the times.
+ if (R12 != r_function_entry) {
+ mr(R12, r_function_entry);
+ }
+ mtctr(R12);
+ // Do a call or a branch.
+ if (and_link) {
+ bctrl();
+ } else {
+ bctr();
+ }
+ _last_calls_return_pc = pc();
+
+ return _last_calls_return_pc;
+}
+
+// Call a C function via a function descriptor and use full C
+// calling conventions. Updates and returns _last_calls_return_pc.
+address MacroAssembler::call_c(Register r_function_entry) {
+ return branch_to(r_function_entry, /*and_link=*/true);
+}
+
+// For tail calls: only branch, don't link, so callee returns to caller of this function.
+address MacroAssembler::call_c_and_return_to_caller(Register r_function_entry) {
+ return branch_to(r_function_entry, /*and_link=*/false);
+}
+
+address MacroAssembler::call_c(address function_entry, relocInfo::relocType rt) {
+ load_const(R12, function_entry, R0);
+ return branch_to(R12, /*and_link=*/true);
+}
+
+#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// Updates and returns _last_calls_return_pc.
@@ -1077,6 +1120,7 @@
}
return _last_calls_return_pc;
}
+#endif
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
@@ -1091,8 +1135,11 @@
// ARG1 must hold thread address.
mr(R3_ARG1, R16_thread);
-
+#if defined(ABI_ELFv2)
+ address return_pc = call_c(entry_point, relocInfo::none);
+#else
address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
+#endif
reset_last_Java_frame();
@@ -1113,7 +1160,11 @@
void MacroAssembler::call_VM_leaf_base(address entry_point) {
BLOCK_COMMENT("call_VM_leaf {");
+#if defined(ABI_ELFv2)
+ call_c(entry_point, relocInfo::none);
+#else
call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
+#endif
BLOCK_COMMENT("} call_VM_leaf");
}
@@ -2227,7 +2278,7 @@
// VM call need frame to access(write) O register.
if (needs_frame) {
save_LR_CR(Rtmp1);
- push_frame_abi112(0, Rtmp2);
+ push_frame_reg_args(0, Rtmp2);
}
if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
@@ -2361,7 +2412,8 @@
#ifdef CC_INTERP
ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
#else
- Unimplemented();
+ address entry = pc();
+ load_const_optimized(tmp1, entry);
#endif
set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
@@ -2421,6 +2473,16 @@
}
}
+void MacroAssembler::store_klass_gap(Register dst_oop, Register val) {
+ if (UseCompressedClassPointers) {
+ if (val == noreg) {
+ val = R0;
+ li(val, 0);
+ }
+ stw(val, oopDesc::klass_gap_offset_in_bytes(), dst_oop); // klass gap if compressed
+ }
+}
+
int MacroAssembler::instr_size_for_decode_klass_not_null() {
if (!UseCompressedClassPointers) return 0;
int num_instrs = 1; // shift or move
@@ -3006,13 +3068,13 @@
mr(R0, tmp);
// kill tmp
save_LR_CR(tmp);
- push_frame_abi112(nbytes_save, tmp);
+ push_frame_reg_args(nbytes_save, tmp);
// restore tmp
mr(tmp, R0);
save_volatile_gprs(R1_SP, 112); // except R0
- // load FunctionDescriptor**
+ // load FunctionDescriptor** / entry_address *
load_const(tmp, fd);
- // load FunctionDescriptor*
+ // load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
mr(R4_ARG2, oop);
load_const(R3_ARG1, (address)msg);
@@ -3092,3 +3154,15 @@
}
#endif // !PRODUCT
+
+SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
+ int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
+ assert(sizeof(bool) == 1, "PowerPC ABI");
+ masm->lbz(temp, simm16_offset, temp);
+ masm->cmpwi(CCR0, temp, 0);
+ masm->beq(CCR0, _label);
+}
+
+SkipIfEqualZero::~SkipIfEqualZero() {
+ _masm->bind(_label);
+}
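+// Hypothetical usage sketch (SomeBoolFlag is an invented stand-in for any
+// bool VM flag):
+//
+//   {
+//     SkipIfEqualZero skip_if(masm, Rtmp, &SomeBoolFlag);
+//     // ...code emitted here only runs when SomeBoolFlag is true...
+//   } // The destructor binds the label; execution joins here either way.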
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -279,12 +279,12 @@
// Push a frame of size `bytes'. No abi space provided.
void push_frame(unsigned int bytes, Register tmp);
- // Push a frame of size `bytes' plus abi112 on top.
- void push_frame_abi112(unsigned int bytes, Register tmp);
+ // Push a frame of size `bytes' plus abi_reg_args on top.
+ void push_frame_reg_args(unsigned int bytes, Register tmp);
// Set up a new C frame with a spill area for non-volatile GPRs and additional
// space for local variables
- void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
+ void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);
// pop current C frame
void pop_frame();
@@ -296,17 +296,31 @@
private:
address _last_calls_return_pc;
+#if defined(ABI_ELFv2)
+ // Generic version of a call to C function.
+ // Updates and returns _last_calls_return_pc.
+ address branch_to(Register function_entry, bool and_link);
+#else
// Generic version of a call to C function via a function descriptor
// with variable support for C calling conventions (TOC, ENV, etc.).
// updates and returns _last_calls_return_pc.
address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
+#endif
public:
// Get the pc where the last call will return to. returns _last_calls_return_pc.
inline address last_calls_return_pc();
+#if defined(ABI_ELFv2)
+  // Call a C function directly (ELFv2 uses no function descriptors) and
+  // use full C calling conventions. Updates and returns _last_calls_return_pc.
+ address call_c(Register function_entry);
+ // For tail calls: only branch, don't link, so callee returns to caller of this function.
+ address call_c_and_return_to_caller(Register function_entry);
+ address call_c(address function_entry, relocInfo::relocType rt);
+#else
// Call a C function via a function descriptor and use full C
// calling conventions. Updates and returns _last_calls_return_pc.
address call_c(Register function_descriptor);
@@ -315,6 +329,7 @@
address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
Register toc);
+#endif
protected:
@@ -551,12 +566,14 @@
// Load heap oop and decompress. Loaded oop may not be null.
inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
+ inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
+                                      /*specify a tmp register if d must stay uncompressed*/ Register tmp = noreg);
// Null allowed.
inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
// Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
- inline void encode_heap_oop_not_null(Register d);
+ inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
inline void decode_heap_oop_not_null(Register d);
// Null allowed.
@@ -566,6 +583,7 @@
void load_klass(Register dst, Register src);
void load_klass_with_trap_null_check(Register dst, Register src);
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
+ void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);
void encode_klass_not_null(Register dst, Register src = noreg);
@@ -649,6 +667,11 @@
void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}
+  // Convenience method returning the function entry. For ELFv1, this
+  // creates a function descriptor at the current address and returns
+  // a pointer to it. For ELFv2, it returns the current address.
+ inline address function_entry();
+
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
@@ -673,4 +696,21 @@
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};
+// class SkipIfEqualZero:
+//
+// Instantiating this class emits assembly code that jumps around any code
+// generated between the creation of the instance and its automatic
+// destruction at the end of the scope block, depending on the run-time value
+// of the flag passed to the constructor.
+class SkipIfEqualZero : public StackObj {
+ private:
+ MacroAssembler* _masm;
+ Label _label;
+
+ public:
+ // 'Temp' is a temp register that this object can use (and trash).
+ explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
+ ~SkipIfEqualZero();
+};
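+//
+// Usage sketch (names are illustrative, not part of this change):
+//   {
+//     SkipIfEqualZero skip(masm, R11_scratch1, &SomeBoolFlag); // SomeBoolFlag is hypothetical
+//     // ... code emitted here is executed only when SomeBoolFlag is true ...
+//   } // The destructor binds the skip-target label here.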
+
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -321,6 +321,15 @@
}
}
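+
+// Store a (non-null) heap oop. A sketch of the contract: with
+// UseCompressedOops the oop is compressed first (into tmp when the caller
+// needs d preserved, otherwise d is clobbered) and stored as a 32-bit word;
+// without compressed oops the full 64-bit oop in d is stored.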
+inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
+ if (UseCompressedOops) {
+ Register compressedOop = encode_heap_oop_not_null((tmp != noreg) ? tmp : d, d);
+ stw(compressedOop, offs, s1);
+ } else {
+ std(d, offs, s1);
+ }
+}
+
inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
if (UseCompressedOops) {
lwz(d, offs, s1);
@@ -330,13 +339,17 @@
}
}
-inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
+inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register src) {
+  Register current = (src != noreg) ? src : d; // Oop to be encoded is in d if no src was provided.
if (Universe::narrow_oop_base() != NULL) {
- sub(d, d, R30);
+ sub(d, current, R30);
+ current = d;
}
if (Universe::narrow_oop_shift() != 0) {
- srdi(d, d, LogMinObjAlignmentInBytes);
+ srdi(d, current, LogMinObjAlignmentInBytes);
+ current = d;
}
+ return current; // Encoded oop is in this register.
}
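+
+// Encoding sketch: with both a non-NULL narrow-oop base and a non-zero shift,
+// the result is encoded = (oop - base) >> LogMinObjAlignmentInBytes; each
+// step above is emitted only when its component is actually in use, and the
+// returned register tells the caller where the encoded value ended up.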
inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
@@ -385,4 +398,10 @@
twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
}
+#if defined(ABI_ELFv2)
+inline address MacroAssembler::function_entry() { return pc(); }
+#else
+inline address MacroAssembler::function_entry() { return emit_fd(); }
+#endif
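+
+// Usage sketch: stub prologues can now be written ABI-neutrally as
+//   address start = __ function_entry();
+// yielding a plain code address on ELFv2 and a freshly emitted function
+// descriptor on ELFv1 (see the stubGenerator changes below).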
+
#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -453,11 +453,11 @@
if (Verbose) {
tty->print_cr("Registers:");
- const int abi_offset = frame::abi_112_size / 8;
+ const int abi_offset = frame::abi_reg_args_size / 8;
for (int i = R3->encoding(); i <= R12->encoding(); i++) {
Register r = as_Register(i);
int count = i - R3->encoding();
- // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)).
+ // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_reg_args_size)).
tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
if ((count + 1) % 4 == 0) {
tty->cr();
@@ -524,9 +524,9 @@
__ save_LR_CR(R0);
__ mr(R0, R1_SP); // saved_sp
assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
- // push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bit
- __ push_frame_abi112(nbytes_save, R0);
- __ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
+  // Push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bits.
+ __ push_frame_reg_args(nbytes_save, R0);
+ __ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.
__ load_const(R3_ARG1, (address)adaptername);
__ mr(R4_ARG2, R23_method_handle);
--- a/hotspot/src/cpu/ppc/vm/ppc.ad Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad Fri Mar 14 09:26:27 2014 +0100
@@ -1008,7 +1008,11 @@
}
int MachCallRuntimeNode::ret_addr_offset() {
+#if defined(ABI_ELFv2)
+ return 28;
+#else
return 40;
+#endif
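+  // The ELFv2 sequence is shorter because no function-descriptor handling
+  // (ENV/TOC loads) is emitted around the indirect call.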
}
//=============================================================================
@@ -3674,6 +3678,10 @@
MacroAssembler _masm(&cbuf);
const address start_pc = __ pc();
+#if defined(ABI_ELFv2)
+  address entry = !($meth$$method) ? NULL : (address)$meth$$method;
+ __ call_c(entry, relocInfo::runtime_call_type);
+#else
// The function we're going to call.
FunctionDescriptor fdtemp;
const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;
@@ -3684,6 +3692,7 @@
// Put entry, env, toc into the constant pool, this needs up to 3 constant
// pool entries; call_c_using_toc will optimize the call.
__ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
+#endif
// Check the ret_addr_offset.
assert(((MachCallRuntimeNode*)this)->ret_addr_offset() == __ last_calls_return_pc() - start_pc,
@@ -3699,20 +3708,25 @@
__ mtctr($src$$Register);
%}
- // postalloc expand emitter for runtime leaf calls.
+ // Postalloc expand emitter for runtime leaf calls.
enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{
+ loadConLNodesTuple loadConLNodes_Entry;
+#if defined(ABI_ELFv2)
+ jlong entry_address = (jlong) this->entry_point();
+ assert(entry_address, "need address here");
+ loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address),
+ OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
+#else
// Get the struct that describes the function we are about to call.
FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
assert(fd, "need fd here");
+ jlong entry_address = (jlong) fd->entry();
// new nodes
- loadConLNodesTuple loadConLNodes_Entry;
loadConLNodesTuple loadConLNodes_Env;
loadConLNodesTuple loadConLNodes_Toc;
- MachNode *mtctr = NULL;
- MachCallLeafNode *call = NULL;
// Create nodes and operands for loading the entry point.
- loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->entry()),
+ loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address),
OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
@@ -3733,8 +3747,9 @@
// Create nodes and operands for loading the Toc point.
loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->toc()),
OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
+#endif // ABI_ELFv2
// mtctr node
- mtctr = new (C) CallLeafDirect_mtctrNode();
+ MachNode *mtctr = new (C) CallLeafDirect_mtctrNode();
assert(loadConLNodes_Entry._last != NULL, "entry must exist");
mtctr->add_req(0, loadConLNodes_Entry._last);
@@ -3743,10 +3758,10 @@
mtctr->_opnds[1] = new (C) iRegLdstOper();
// call node
- call = new (C) CallLeafDirectNode();
+ MachCallLeafNode *call = new (C) CallLeafDirectNode();
call->_opnds[0] = _opnds[0];
- call->_opnds[1] = new (C) methodOper((intptr_t) fd->entry()); // may get set later
+ call->_opnds[1] = new (C) methodOper((intptr_t) entry_address); // May get set later.
// Make the new call node look like the old one.
call->_name = _name;
@@ -3773,8 +3788,10 @@
    // These must be required edges, as the registers are live up to
// the call. Else the constants are handled as kills.
call->add_req(mtctr);
+#if !defined(ABI_ELFv2)
call->add_req(loadConLNodes_Env._last);
call->add_req(loadConLNodes_Toc._last);
+#endif
// ...as well as prec
for (uint i = req(); i < len(); ++i) {
@@ -3787,10 +3804,12 @@
// Insert the new nodes.
if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
if (loadConLNodes_Entry._last) nodes->push(loadConLNodes_Entry._last);
+#if !defined(ABI_ELFv2)
if (loadConLNodes_Env._large_hi) nodes->push(loadConLNodes_Env._large_hi);
if (loadConLNodes_Env._last) nodes->push(loadConLNodes_Env._last);
if (loadConLNodes_Toc._large_hi) nodes->push(loadConLNodes_Toc._large_hi);
if (loadConLNodes_Toc._last) nodes->push(loadConLNodes_Toc._last);
+#endif
nodes->push(mtctr);
nodes->push(call);
%}
@@ -3837,7 +3856,7 @@
// out_preserve_stack_slots for calls to C. Supports the var-args
// backing area for register parms.
//
- varargs_C_out_slots_killed(((frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
+ varargs_C_out_slots_killed(((frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
// The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number
--- a/hotspot/src/cpu/ppc/vm/register_ppc.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/register_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -579,15 +579,27 @@
// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
+#ifdef CC_INTERP
REGISTER_DECLARATION(Register, R14_state, R14); // address of new cInterpreter.
REGISTER_DECLARATION(Register, R15_prev_state, R15); // address of old cInterpreter
+#else // CC_INTERP
+REGISTER_DECLARATION(Register, R14_bcp, R14);
+REGISTER_DECLARATION(Register, R15_esp, R15);
+REGISTER_DECLARATION(FloatRegister, F15_ftos, F15);
+#endif // CC_INTERP
REGISTER_DECLARATION(Register, R16_thread, R16); // address of current thread
REGISTER_DECLARATION(Register, R17_tos, R17); // address of Java tos (prepushed).
REGISTER_DECLARATION(Register, R18_locals, R18); // address of first param slot (receiver).
REGISTER_DECLARATION(Register, R19_method, R19); // address of current method
#ifndef DONT_USE_REGISTER_DEFINES
+#ifdef CC_INTERP
#define R14_state AS_REGISTER(Register, R14)
#define R15_prev_state AS_REGISTER(Register, R15)
+#else // CC_INTERP
+#define R14_bcp AS_REGISTER(Register, R14)
+#define R15_esp AS_REGISTER(Register, R15)
+#define F15_ftos AS_REGISTER(FloatRegister, F15)
+#endif // CC_INTERP
#define R16_thread AS_REGISTER(Register, R16)
#define R17_tos AS_REGISTER(Register, R17)
#define R18_locals AS_REGISTER(Register, R18)
@@ -608,6 +620,14 @@
REGISTER_DECLARATION(Register, R27_tmp7, R27);
REGISTER_DECLARATION(Register, R28_tmp8, R28);
REGISTER_DECLARATION(Register, R29_tmp9, R29);
+#ifndef CC_INTERP
+REGISTER_DECLARATION(Register, R24_dispatch_addr, R24);
+REGISTER_DECLARATION(Register, R25_templateTableBase, R25);
+REGISTER_DECLARATION(Register, R26_monitor, R26);
+REGISTER_DECLARATION(Register, R27_constPoolCache, R27);
+REGISTER_DECLARATION(Register, R28_mdx, R28);
+#endif // CC_INTERP
+
#ifndef DONT_USE_REGISTER_DEFINES
#define R21_tmp1 AS_REGISTER(Register, R21)
#define R22_tmp2 AS_REGISTER(Register, R22)
@@ -618,6 +638,16 @@
#define R27_tmp7 AS_REGISTER(Register, R27)
#define R28_tmp8 AS_REGISTER(Register, R28)
#define R29_tmp9 AS_REGISTER(Register, R29)
+#ifndef CC_INTERP
+// Lmonitors:   monitor pointer
+// LcpoolCache: constant pool cache
+// mdx:         method data index
+#define R24_dispatch_addr AS_REGISTER(Register, R24)
+#define R25_templateTableBase AS_REGISTER(Register, R25)
+#define R26_monitor AS_REGISTER(Register, R26)
+#define R27_constPoolCache AS_REGISTER(Register, R27)
+#define R28_mdx AS_REGISTER(Register, R28)
+#endif
#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
#endif
--- a/hotspot/src/cpu/ppc/vm/runtime_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/runtime_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -87,7 +87,7 @@
address start = __ pc();
- int frame_size_in_bytes = frame::abi_112_size;
+ int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
// Exception pc is 'return address' for stack walker.
@@ -99,7 +99,7 @@
// Save callee-saved registers.
// Push a C frame for the exception blob. It is needed for the C call later on.
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
// This call does all the hard work. It checks if an exception handler
// exists in the method.
@@ -109,8 +109,12 @@
__ set_last_Java_frame(/*sp=*/R1_SP, noreg);
__ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+ __ call_c((address) OptoRuntime::handle_exception_C, relocInfo::none);
+#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C),
relocInfo::none);
+#endif
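+  // (Pattern used throughout this change: on ELFv2 C entry points are plain
+  //  code addresses, on ELFv1 they are reached via a FunctionDescriptor*.)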
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CCR0, R3_RET, 0);
@@ -162,7 +166,11 @@
__ bind(mh_callsite);
__ mr(R31, R3_RET); // Save branch address.
__ mr(R3_ARG1, R16_thread);
+#if defined(ABI_ELFv2)
+ __ call_c((address) adjust_SP_for_methodhandle_callsite, relocInfo::none);
+#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none);
+#endif
// Returns unextended_sp in R3_RET.
__ mtctr(R31); // Move address of exception handler to SR_CTR.
--- a/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/sharedRuntime_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@
return_pc_is_thread_saved_exception_pc
};
- static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
+ static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
@@ -200,12 +200,12 @@
RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
};
-OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
+OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location) {
- // Push an abi112-frame and store all registers which may be live.
+ // Push an abi_reg_args-frame and store all registers which may be live.
// If requested, create an OopMap: Record volatile registers as
// callee-save values in an OopMap so their save locations will be
// propagated to the RegisterMap of the caller frame during
@@ -221,7 +221,7 @@
sizeof(RegisterSaver::LiveRegType);
const int register_save_size = regstosave_num * reg_size;
const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
- + frame::abi_112_size;
+ + frame::abi_reg_args_size;
*out_frame_size_in_bytes = frame_size_in_bytes;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
const int register_save_offset = frame_size_in_bytes - register_save_size;
@@ -229,7 +229,7 @@
// OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
- BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {");
+ BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
// Save r30 in the last slot of the not yet pushed frame so that we
// can use it as scratch reg.
@@ -294,7 +294,7 @@
offset += reg_size;
}
- BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers");
+ BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
// And we're done.
return map;
@@ -699,15 +699,19 @@
int i;
VMReg reg;
- // Leave room for C-compatible ABI_112.
- int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
+ // Leave room for C-compatible ABI_REG_ARGS.
+ int stk = (frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
int arg = 0;
int freg = 0;
// Avoid passing C arguments in the wrong stack slots.
+#if defined(ABI_ELFv2)
+ assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 96,
+ "passing C arguments in wrong stack slots");
+#else
assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
"passing C arguments in wrong stack slots");
-
+#endif
// We fill-out regs AND regs2 if an argument must be passed in a
// register AND in a stack slot. If regs2 is NULL in such a
// situation, we bail-out with a fatal error.
@@ -953,6 +957,9 @@
#ifdef CC_INTERP
const Register tos = R17_tos;
+#else
+ const Register tos = R15_esp;
+ __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
#endif
// load TOS
@@ -971,7 +978,7 @@
const BasicType *sig_bt,
const VMRegPair *regs) {
- // Load method's entry-point from methodOop.
+ // Load method's entry-point from method.
__ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(R12_scratch2);
@@ -992,7 +999,10 @@
#ifdef CC_INTERP
const Register ld_ptr = R17_tos;
+#else
+ const Register ld_ptr = R15_esp;
#endif
+
const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
const int num_value_regs = sizeof(value_regs) / sizeof(Register);
int value_regs_index = 0;
@@ -1083,8 +1093,8 @@
}
}
- BLOCK_COMMENT("Store method oop");
- // Store method oop into thread->callee_target.
+ BLOCK_COMMENT("Store method");
+ // Store method into thread->callee_target.
// We might end up in handle_wrong_method if the callee is
// deoptimized as we race thru here. If that happens we don't want
// to take a safepoint because the caller frame will look
@@ -1504,7 +1514,11 @@
__ block_comment("block_for_jni_critical");
address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
+#if defined(ABI_ELFv2)
+ __ call_c(entry_point, relocInfo::runtime_call_type);
+#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
+#endif
address start = __ pc() - __ offset(),
calls_return_pc = __ last_calls_return_pc();
oop_maps->add_gc_map(calls_return_pc - start, map);
@@ -1877,7 +1891,7 @@
// Layout of the native wrapper frame:
// (stack grows upwards, memory grows downwards)
//
- // NW [ABI_112] <-- 1) R1_SP
+ // NW [ABI_REG_ARGS] <-- 1) R1_SP
// [outgoing arguments] <-- 2) R1_SP + out_arg_slot_offset
// [oopHandle area] <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
// klass <-- 4) R1_SP + klass_offset
@@ -2211,8 +2225,8 @@
// slow case of monitor enter. Inline a special case of call_VM that
// disallows any pending_exception.
- // Save argument registers and leave room for C-compatible ABI_112.
- int frame_size = frame::abi_112_size +
+ // Save argument registers and leave room for C-compatible ABI_REG_ARGS.
+ int frame_size = frame::abi_reg_args_size +
round_to(total_c_args * wordSize, frame::alignment_in_bytes);
__ mr(R11_scratch1, R1_SP);
RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
@@ -2250,9 +2264,12 @@
// The JNI call
// --------------------------------------------------------------------------
-
+#if defined(ABI_ELFv2)
+ __ call_c(native_func, relocInfo::runtime_call_type);
+#else
FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
__ call_c(fd_native_method, relocInfo::runtime_call_type);
+#endif
// Now, we are back from the native code.
@@ -2606,8 +2623,12 @@
#ifdef CC_INTERP
__ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
#else
- Unimplemented();
+#ifdef ASSERT
+ __ load_const_optimized(pc_reg, 0x5afe);
+ __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
#endif
+ __ std(R1_SP, _ijava_state_neg(sender_sp), R1_SP);
+#endif // CC_INTERP
__ addi(number_of_frames_reg, number_of_frames_reg, -1);
__ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
__ addi(pcs_reg, pcs_reg, wordSize);
@@ -2679,7 +2700,15 @@
__ std(R12_scratch2, _abi(lr), R1_SP);
// Initialize initial_caller_sp.
+#ifdef CC_INTERP
__ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
+#else
+#ifdef ASSERT
+ __ load_const_optimized(pc_reg, 0x5afe);
+ __ std(pc_reg, _ijava_state_neg(ijava_reserved), R1_SP);
+#endif
+ __ std(frame_size_reg, _ijava_state_neg(sender_sp), R1_SP);
+#endif // CC_INTERP
#ifdef ASSERT
// Make sure that there is at least one entry in the array.
@@ -2724,7 +2753,7 @@
OopMapSet *oop_maps = new OopMapSet();
-  // size of ABI112 plus spill slots for R3_RET and F1_RET.
+  // Size of abi_reg_args plus spill slots for R3_RET and F1_RET.
- const int frame_size_in_bytes = frame::abi_112_spill_size;
+ const int frame_size_in_bytes = frame::abi_reg_args_spill_size;
const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
@@ -2757,11 +2786,11 @@
// Push the "unpack frame"
// Save everything in sight.
- map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
- &first_frame_size_in_bytes,
- /*generate_oop_map=*/ true,
- return_pc_adjustment_no_exception,
- RegisterSaver::return_pc_is_lr);
+ map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
+ &first_frame_size_in_bytes,
+ /*generate_oop_map=*/ true,
+ return_pc_adjustment_no_exception,
+ RegisterSaver::return_pc_is_lr);
assert(map != NULL, "OopMap must have been created");
__ li(exec_mode_reg, Deoptimization::Unpack_deopt);
@@ -2787,11 +2816,11 @@
// Push the "unpack frame".
// Save everything in sight.
assert(R4 == R4_ARG2, "exception pc must be in r4");
- RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
- &first_frame_size_in_bytes,
- /*generate_oop_map=*/ false,
- return_pc_adjustment_exception,
- RegisterSaver::return_pc_is_r4);
+ RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
+ &first_frame_size_in_bytes,
+ /*generate_oop_map=*/ false,
+ return_pc_adjustment_exception,
+ RegisterSaver::return_pc_is_r4);
// Deopt during an exception. Save exec mode for unpack_frames.
__ li(exec_mode_reg, Deoptimization::Unpack_exception);
@@ -2876,8 +2905,8 @@
// ...).
// Spill live volatile registers since we'll do a call.
- __ std( R3_RET, _abi_112_spill(spill_ret), R1_SP);
- __ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
+ __ std( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
+ __ stfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
// Let the unpacker layout information in the skeletal frames just
// allocated.
@@ -2889,8 +2918,8 @@
__ reset_last_Java_frame();
// Restore the volatiles saved above.
- __ ld( R3_RET, _abi_112_spill(spill_ret), R1_SP);
- __ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
+ __ ld( R3_RET, _abi_reg_args_spill(spill_ret), R1_SP);
+ __ lfd(F1_RET, _abi_reg_args_spill(spill_fret), R1_SP);
// Pop the unpack frame.
__ pop_frame();
@@ -2900,10 +2929,16 @@
// optional c2i, caller of deoptee, ...).
// Initialize R14_state.
+#ifdef CC_INTERP
__ ld(R14_state, 0, R1_SP);
__ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
// Also inititialize R15_prev_state.
__ restore_prev_state();
+#else
+ __ restore_interpreter_state(R11_scratch1);
+ __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
+#endif // CC_INTERP
+
// Return to the interpreter entry point.
__ blr();
@@ -2930,7 +2965,7 @@
Register unc_trap_reg = R23_tmp3;
OopMapSet* oop_maps = new OopMapSet();
- int frame_size_in_bytes = frame::abi_112_size;
+ int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
// stack: (deoptee, optional i2c, caller_of_deoptee, ...).
@@ -2943,7 +2978,7 @@
__ save_LR_CR(R11_scratch1);
// Push an "uncommon_trap" frame.
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
// stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
@@ -2996,7 +3031,7 @@
// interpreter frames just created.
// Push a simple "unpack frame" here.
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
// stack: (unpack frame, skeletal interpreter frame, ..., optional
// skeletal interpreter frame, optional c2i, caller of deoptee,
@@ -3022,11 +3057,17 @@
// stack: (top interpreter frame, ..., optional interpreter frame,
// optional c2i, caller of deoptee, ...).
+#ifdef CC_INTERP
// Initialize R14_state, ...
__ ld(R11_scratch1, 0, R1_SP);
__ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
// also initialize R15_prev_state.
__ restore_prev_state();
+#else
+ __ restore_interpreter_state(R11_scratch1);
+ __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
+#endif // CC_INTERP
+
// Return to the interpreter entry point.
__ blr();
@@ -3064,11 +3105,11 @@
}
// Save registers, fpu state, and flags.
- map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
- &frame_size_in_bytes,
- /*generate_oop_map=*/ true,
- /*return_pc_adjustment=*/0,
- return_pc_location);
+ map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
+ &frame_size_in_bytes,
+ /*generate_oop_map=*/ true,
+ /*return_pc_adjustment=*/0,
+ return_pc_location);
// The following is basically a call_VM. However, we need the precise
// address of the call in order to generate an oopmap. Hence, we do all the
@@ -3104,7 +3145,6 @@
frame_size_in_bytes,
/*restore_ctr=*/true);
-
BLOCK_COMMENT(" Jump to forward_exception_entry.");
// Jump to forward_exception_entry, with the issuing PC in LR
// so it looks like the original nmethod called forward_exception_entry.
@@ -3151,11 +3191,11 @@
address start = __ pc();
- map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
- &frame_size_in_bytes,
- /*generate_oop_map*/ true,
- /*return_pc_adjustment*/ 0,
- RegisterSaver::return_pc_is_lr);
+ map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
+ &frame_size_in_bytes,
+ /*generate_oop_map*/ true,
+ /*return_pc_adjustment*/ 0,
+ RegisterSaver::return_pc_is_lr);
// Use noreg as last_Java_pc, the return pc will be reconstructed
// from the physical frame.
@@ -3189,7 +3229,7 @@
RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
- // Get the returned methodOop.
+ // Get the returned method.
__ get_vm_result_2(R19_method);
__ bctr();
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,15 +39,10 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
-#ifdef TARGET_OS_FAMILY_aix
-# include "thread_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_linux
-# include "thread_linux.inline.hpp"
-#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
+#include "runtime/thread.inline.hpp"
#define __ _masm->
@@ -79,11 +74,11 @@
StubCodeMark mark(this, "StubRoutines", "call_stub");
- address start = __ emit_fd();
+ address start = __ function_entry();
// some sanity checks
- assert((sizeof(frame::abi_48) % 16) == 0, "unaligned");
- assert((sizeof(frame::abi_112) % 16) == 0, "unaligned");
+ assert((sizeof(frame::abi_minframe) % 16) == 0, "unaligned");
+ assert((sizeof(frame::abi_reg_args) % 16) == 0, "unaligned");
assert((sizeof(frame::spill_nonvolatiles) % 16) == 0, "unaligned");
assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
assert((sizeof(frame::entry_frame_locals) % 16) == 0, "unaligned");
@@ -221,7 +216,7 @@
{
BLOCK_COMMENT("Call frame manager or native entry.");
// Call frame manager or native entry.
- Register r_new_arg_entry = R14_state;
+ Register r_new_arg_entry = R14; // PPC_state;
assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
r_arg_method, r_arg_thread);
@@ -234,7 +229,11 @@
// R16_thread - JavaThread*
// Tos must point to last argument - element_size.
+#ifdef CC_INTERP
const Register tos = R17_tos;
+#else
+ const Register tos = R15_esp;
+#endif
__ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
// initialize call_stub locals (step 2)
@@ -248,8 +247,11 @@
assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
// Set R15_prev_state to 0 for simplifying checks in callee.
+#ifdef CC_INTERP
__ li(R15_prev_state, 0);
-
+#else
+ __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
+#endif
// Stack on entry to frame manager / native entry:
//
// F0 [TOP_IJAVA_FRAME_ABI]
@@ -444,7 +446,7 @@
// Save LR/CR and copy exception pc (LR) into R4_ARG2.
__ save_LR_CR(R4_ARG2);
- __ push_frame_abi112(0, R0);
+ __ push_frame_reg_args(0, R0);
// Find exception handler.
__ call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
@@ -519,7 +521,7 @@
MacroAssembler* masm = new MacroAssembler(&code);
OopMapSet* oop_maps = new OopMapSet();
- int frame_size_in_bytes = frame::abi_112_size;
+ int frame_size_in_bytes = frame::abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
StubCodeMark mark(this, "StubRoutines", "throw_exception");
@@ -529,7 +531,7 @@
__ save_LR_CR(R11_scratch1);
// Push a frame.
- __ push_frame_abi112(0, R11_scratch1);
+ __ push_frame_reg_args(0, R11_scratch1);
address frame_complete_pc = __ pc();
@@ -551,8 +553,11 @@
if (arg2 != noreg) {
__ mr(R5_ARG3, arg2);
}
- __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry),
- relocInfo::none);
+#if defined(ABI_ELFv2)
+ __ call_c(runtime_entry, relocInfo::none);
+#else
+ __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
+#endif
// Set an oopmap for the call site.
oop_maps->add_gc_map((int)(gc_map_pc - start), map);
@@ -614,7 +619,7 @@
    // With G1, don't generate the call if we statically know that the target is uninitialized.
if (!dest_uninitialized) {
const int spill_slots = 4 * wordSize;
- const int frame_size = frame::abi_112_size + spill_slots;
+ const int frame_size = frame::abi_reg_args_size + spill_slots;
Label filtered;
// Is marking active?
@@ -628,7 +633,7 @@
__ beq(CCR0, filtered);
__ save_LR_CR(R0);
- __ push_frame_abi112(spill_slots, R0);
+ __ push_frame_reg_args(spill_slots, R0);
__ std(from, frame_size - 1 * wordSize, R1_SP);
__ std(to, frame_size - 2 * wordSize, R1_SP);
__ std(count, frame_size - 3 * wordSize, R1_SP);
@@ -672,7 +677,7 @@
if (branchToEnd) {
__ save_LR_CR(R0);
// We need this frame only to spill LR.
- __ push_frame_abi112(0, R0);
+ __ push_frame_reg_args(0, R0);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
__ pop_frame();
__ restore_LR_CR(R0);
@@ -742,7 +747,7 @@
StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
// Implemented as in ClearArray.
- address start = __ emit_fd();
+ address start = __ function_entry();
Register base_ptr_reg = R3_ARG1; // tohw (needs to be 8b aligned)
Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
@@ -820,7 +825,7 @@
//
address generate_handler_for_unsafe_access() {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
- address start = __ emit_fd();
+ address start = __ function_entry();
__ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
return start;
}
@@ -861,7 +866,7 @@
// to read from the safepoint polling page.
address generate_load_from_poll() {
StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
- address start = __ emit_fd();
+ address start = __ function_entry();
__ unimplemented("StubRoutines::verify_oop", 95); // TODO PPC port
return start;
}
@@ -885,7 +890,7 @@
//
address generate_fill(BasicType t, bool aligned, const char* name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
const Register to = R3_ARG1; // source array address
const Register value = R4_ARG2; // fill value
@@ -1123,7 +1128,7 @@
//
address generate_disjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
@@ -1254,15 +1259,21 @@
//
address generate_conjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;
+#if defined(ABI_ELFv2)
+ address nooverlap_target = aligned ?
+ StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
+ StubRoutines::jbyte_disjoint_arraycopy();
+#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
+#endif
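+  // (On ELFv1 the StubRoutines entry points are function descriptors, so the
+  //  raw code entry must be read out of the descriptor; on ELFv2 the stored
+  //  address already is the code entry. The same pattern recurs below.)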
array_overlap_test(nooverlap_target, 0);
// Do reverse copy. We assume the case of actual overlap is rare enough
@@ -1345,7 +1356,7 @@
Register tmp3 = R8_ARG6;
Register tmp4 = R9_ARG7;
- address start = __ emit_fd();
+ address start = __ function_entry();
Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
// don't try anything fancy if arrays don't have many elements
@@ -1474,15 +1485,21 @@
//
address generate_conjoint_short_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
Register tmp1 = R6_ARG4;
Register tmp2 = R7_ARG5;
Register tmp3 = R8_ARG6;
+#if defined(ABI_ELFv2)
+ address nooverlap_target = aligned ?
+ StubRoutines::arrayof_jshort_disjoint_arraycopy() :
+ StubRoutines::jshort_disjoint_arraycopy();
+#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
+#endif
array_overlap_test(nooverlap_target, 1);
@@ -1597,7 +1614,7 @@
//
address generate_disjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
generate_disjoint_int_copy_core(aligned);
__ blr();
return start;
@@ -1681,11 +1698,17 @@
//
address generate_conjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
+#if defined(ABI_ELFv2)
+ address nooverlap_target = aligned ?
+ StubRoutines::arrayof_jint_disjoint_arraycopy() :
+ StubRoutines::jint_disjoint_arraycopy();
+#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
+#endif
array_overlap_test(nooverlap_target, 2);
@@ -1767,7 +1790,7 @@
//
address generate_disjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
generate_disjoint_long_copy_core(aligned);
__ blr();
@@ -1849,11 +1872,17 @@
//
address generate_conjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
+#if defined(ABI_ELFv2)
+ address nooverlap_target = aligned ?
+ StubRoutines::arrayof_jlong_disjoint_arraycopy() :
+ StubRoutines::jlong_disjoint_arraycopy();
+#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
+#endif
array_overlap_test(nooverlap_target, 3);
generate_conjoint_long_copy_core(aligned);
@@ -1875,11 +1904,17 @@
address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
+#if defined(ABI_ELFv2)
+ address nooverlap_target = aligned ?
+ StubRoutines::arrayof_oop_disjoint_arraycopy() :
+ StubRoutines::oop_disjoint_arraycopy();
+#else
address nooverlap_target = aligned ?
((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
+#endif
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
@@ -1910,7 +1945,7 @@
//
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);
- address start = __ emit_fd();
+ address start = __ function_entry();
gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
@@ -1991,7 +2026,7 @@
StubCodeMark mark(this, "StubRoutines", name);
// Entry point, pc or function descriptor.
- *entry = __ emit_fd();
+ *entry = __ function_entry();
// Load *adr into R4_ARG2, may fault.
*fault_pc = __ pc();
@@ -2056,7 +2091,7 @@
guarantee(!UseAESIntrinsics, "not yet implemented.");
}
- // PPC uses stubs for safefetch.
+ // Safefetch stubs.
generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
&StubRoutines::_safefetch32_fault_pc,
&StubRoutines::_safefetch32_continuation_pc);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
+#define CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
+
+ protected:
+ address generate_normal_entry(bool synchronized);
+ address generate_native_entry(bool synchronized);
+ address generate_math_entry(AbstractInterpreter::MethodKind kind);
+ address generate_empty_entry(void);
+
+ void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
+ void unlock_method(bool check_exceptions = true);
+
+ void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
+ void generate_counter_overflow(Label& continue_entry);
+
+ void generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals);
+ void generate_stack_overflow_check(Register Rframe_size, Register Rscratch1);
+
+#endif // CPU_PPC_VM_TEMPLATEINTERPRETERGENERATOR_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,1813 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#ifndef CC_INTERP
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+#undef __
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+//-----------------------------------------------------------------------------
+
+// Actually we should never reach here since we do stack overflow checks before pushing any frame.
+address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
+ address entry = __ pc();
+ __ unimplemented("generate_StackOverflowError_handler");
+ return entry;
+}
+
+address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
+ address entry = __ pc();
+ __ empty_expression_stack();
+ __ load_const_optimized(R4_ARG2, (address) name);
+ // Index is in R17_tos.
+ __ mr(R5_ARG3, R17_tos);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException));
+ return entry;
+}
+
+#if 0
+// Call special ClassCastException constructor taking object to cast
+// and target class as arguments.
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
+ address entry = __ pc();
+
+ // Target class oop is in register R6_ARG4 by convention!
+
+ // Expression stack must be empty before entering the VM if an
+ // exception happened.
+ __ empty_expression_stack();
+ // Setup parameters.
+ // Thread will be loaded to R3_ARG1.
+ __ load_const_optimized(R4_ARG2, (address) name);
+ __ mr(R5_ARG3, R17_tos);
+ // R6_ARG4 contains specified class.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
+#ifdef ASSERT
+ // Above call must not return here since exception pending.
+ __ should_not_reach_here();
+#endif
+ return entry;
+}
+#endif
+
+address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
+ address entry = __ pc();
+ // Expression stack must be empty before entering the VM if an
+ // exception happened.
+ __ empty_expression_stack();
+
+ // Load exception object.
+ // Thread will be loaded to R3_ARG1.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException), R17_tos);
+#ifdef ASSERT
+ // Above call must not return here since exception pending.
+ __ should_not_reach_here();
+#endif
+ return entry;
+}
+
+address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
+ address entry = __ pc();
+ //__ untested("generate_exception_handler_common");
+ Register Rexception = R17_tos;
+
+ // Expression stack must be empty before entering the VM if an exception happened.
+ __ empty_expression_stack();
+
+ __ load_const_optimized(R4_ARG2, (address) name, R11_scratch1);
+ if (pass_oop) {
+ __ mr(R5_ARG3, Rexception);
+ __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), false);
+ } else {
+ __ load_const_optimized(R5_ARG3, (address) message, R11_scratch1);
+ __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), false);
+ }
+
+ // Throw exception.
+ __ mr(R3_ARG1, Rexception);
+ __ load_const_optimized(R11_scratch1, Interpreter::throw_exception_entry(), R12_scratch2);
+ __ mtctr(R11_scratch1);
+ __ bctr();
+
+ return entry;
+}
+
+address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
+ address entry = __ pc();
+ __ unimplemented("generate_continuation_for");
+ return entry;
+}
+
+// This entry is returned to when a call returns to the interpreter.
+// When we arrive here, we expect that the callee stack frame is already popped.
+address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
+ address entry = __ pc();
+
+  // Move the value out of the return register back to the TOS cache of the current frame.
+ switch (state) {
+ case ltos:
+ case btos:
+ case ctos:
+ case stos:
+ case atos:
+ case itos: __ mr(R17_tos, R3_RET); break; // RET -> TOS cache
+ case ftos:
+  case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
+ case vtos: break; // Nothing to do, this was a void return.
+ default : ShouldNotReachHere();
+ }
+
+ __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
+ __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+ __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+
+ // Compiled code destroys templateTableBase, reload.
+ __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
+
+ const Register cache = R11_scratch1;
+ const Register size = R12_scratch2;
+ __ get_cache_and_index_at_bcp(cache, 1, index_size);
+
+ // Big Endian (get least significant byte of 64 bit value):
+ __ lbz(size, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()) + 7, cache);
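+  // (The low byte of the 8-byte flags field sits at offset 7 on big-endian
+  //  PPC64; it holds the parameter size in slots, shifted below to bytes and
+  //  used to pop the argument slots off the expression stack.)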
+ __ sldi(size, size, Interpreter::logStackElementSize);
+ __ add(R15_esp, R15_esp, size);
+ __ dispatch_next(state, step);
+ return entry;
+}
+
+address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
+ address entry = __ pc();
+  // If state != vtos, we're returning from a native method, which put its result
+  // into the result register. So move the value out of the return register back
+  // to the TOS cache of the current frame.
+
+ switch (state) {
+ case ltos:
+ case btos:
+ case ctos:
+ case stos:
+ case atos:
+ case itos: __ mr(R17_tos, R3_RET); break; // GR_RET -> TOS cache
+ case ftos:
+  case dtos: __ fmr(F15_ftos, F1_RET); break; // FRET -> TOS cache
+ case vtos: break; // Nothing to do, this was a void return.
+ default : ShouldNotReachHere();
+ }
+
+ // Load LcpoolCache @@@ should be already set!
+ __ get_constant_pool_cache(R27_constPoolCache);
+
+ // Handle a pending exception, fall through if none.
+ __ check_and_forward_exception(R11_scratch1, R12_scratch2);
+
+ // Start executing bytecodes.
+ __ dispatch_next(state, step);
+
+ return entry;
+}
+
+// A result handler converts the native result into java format.
+// Use the shared code between c++ and template interpreter.
+address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
+ return AbstractInterpreterGenerator::generate_result_handler_for(type);
+}
+
+address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
+ address entry = __ pc();
+
+ __ push(state);
+ __ call_VM(noreg, runtime_entry);
+ __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
+
+ return entry;
+}
+
+// Helpers for commoning out cases in the various type of method entries.
+
+// Increment invocation count & check for overflow.
+//
+// Note: checking for negative value instead of overflow
+// so we have a 'sticky' overflow test.
+//
+void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
+  // Note: In tiered compilation we increment either the counters in the method or in the MDO, depending on whether we're profiling.
+ Register Rscratch1 = R11_scratch1;
+ Register Rscratch2 = R12_scratch2;
+ Register R3_counters = R3_ARG1;
+ Label done;
+
+ if (TieredCompilation) {
+ const int increment = InvocationCounter::count_increment;
+ const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
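+    // The counter value is kept shifted left by count_shift, so and-ing with
+    // this mask is zero exactly once every 2^Tier0InvokeNotifyFreqLog
+    // increments; that is when the overflow path below is taken.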
+ Label no_mdo;
+ if (ProfileInterpreter) {
+ const Register Rmdo = Rscratch1;
+ // If no method data exists, go to profile_continue.
+ __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
+ __ cmpdi(CCR0, Rmdo, 0);
+ __ beq(CCR0, no_mdo);
+
+ // Increment backedge counter in the MDO.
+ const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+ __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
+ __ addi(Rscratch2, Rscratch2, increment);
+ __ stw(Rscratch2, mdo_bc_offs, Rmdo);
+ __ load_const_optimized(Rscratch1, mask, R0);
+ __ and_(Rscratch1, Rscratch2, Rscratch1);
+ __ bne(CCR0, done);
+ __ b(*overflow);
+ }
+
+ // Increment counter in MethodCounters*.
+ const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+ __ bind(no_mdo);
+ __ get_method_counters(R19_method, R3_counters, done);
+ __ lwz(Rscratch2, mo_bc_offs, R3_counters);
+ __ addi(Rscratch2, Rscratch2, increment);
+ __ stw(Rscratch2, mo_bc_offs, R3_counters);
+ __ load_const_optimized(Rscratch1, mask, R0);
+ __ and_(Rscratch1, Rscratch2, Rscratch1);
+ __ beq(CCR0, *overflow);
+
+ __ bind(done);
+
+ } else {
+
+ // Update standard invocation counters.
+ Register Rsum_ivc_bec = R4_ARG2;
+ __ get_method_counters(R19_method, R3_counters, done);
+ __ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
+ // Increment interpreter invocation counter.
+ if (ProfileInterpreter) { // %%% Merge this into methodDataOop.
+ __ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
+ __ addi(R12_scratch2, R12_scratch2, 1);
+ __ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
+ }
+ // Check if we must create a method data obj.
+ if (ProfileInterpreter && profile_method != NULL) {
+ const Register profile_limit = Rscratch1;
+ int pl_offs = __ load_const_optimized(profile_limit, &InvocationCounter::InterpreterProfileLimit, R0, true);
+ __ lwz(profile_limit, pl_offs, profile_limit);
+ // Test to see if we should create a method data oop.
+ __ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
+ __ blt(CCR0, *profile_method_continue);
+ // If no method data exists, go to profile_method.
+ __ test_method_data_pointer(*profile_method);
+ }
+ // Finally check for counter overflow.
+ if (overflow) {
+ const Register invocation_limit = Rscratch1;
+ int il_offs = __ load_const_optimized(invocation_limit, &InvocationCounter::InterpreterInvocationLimit, R0, true);
+ __ lwz(invocation_limit, il_offs, invocation_limit);
+ assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit), "unexpected field size");
+ __ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
+ __ bge(CCR0, *overflow);
+ }
+
+ __ bind(done);
+ }
+}
+
+// Generate code to initiate compilation on invocation counter overflow.
+void TemplateInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
+ // Generate code to initiate compilation on the counter overflow.
+
+  // InterpreterRuntime::frequency_counter_overflow takes one argument,
+  // which indicates whether the counter overflow occurs at a backward branch (NULL bcp).
+  // We pass zero in.
+ // The call returns the address of the verified entry point for the method or NULL
+ // if the compilation did not complete (either went background or bailed out).
+ //
+ // Unlike the C++ interpreter above: Check exceptions!
+  // Assumption: Caller must set the flag "do_not_unlock_if_synchronized" if the monitor of a sync'ed
+ // method has not yet been created. Thus, no unlocking of a non-existing monitor can occur.
+
+ __ li(R4_ARG2, 0);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
+
+ // Returns verified_entry_point or NULL.
+ // We ignore it in any case.
+ __ b(continue_entry);
+}
+
+void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_frame_size, Register Rscratch1) {
+ assert_different_registers(Rmem_frame_size, Rscratch1);
+ __ generate_stack_overflow_check_with_compare_and_throw(Rmem_frame_size, Rscratch1);
+}
+
+void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
+ __ unlock_object(R26_monitor, check_exceptions);
+}
+
+// Lock the current method, interpreter register window must be set up!
+void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
+ const Register Robj_to_lock = Rscratch2;
+
+ {
+ if (!flags_preloaded) {
+ __ lwz(Rflags, method_(access_flags));
+ }
+
+#ifdef ASSERT
+ // Check if the method needs synchronization.
+ {
+ Label Lok;
+ __ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
+ __ btrue(CCR0, Lok);
+ __ stop("method doesn't need synchronization");
+ __ bind(Lok);
+ }
+#endif // ASSERT
+ }
+
+ // Get synchronization object to Rscratch2.
+ {
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+ Label Lstatic;
+ Label Ldone;
+
+ __ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
+ __ btrue(CCR0, Lstatic);
+
+ // Non-static case: load receiver obj from stack and we're done.
+ __ ld(Robj_to_lock, R18_locals);
+ __ b(Ldone);
+
+ __ bind(Lstatic); // Static case: Lock the java mirror
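+ // The mirror is reached via Method* -> ConstMethod* -> ConstantPool* ->
+ // pool_holder (Klass*) -> java_mirror, matching the four loads below.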
+ __ ld(Robj_to_lock, in_bytes(Method::const_offset()), R19_method);
+ __ ld(Robj_to_lock, in_bytes(ConstMethod::constants_offset()), Robj_to_lock);
+ __ ld(Robj_to_lock, ConstantPool::pool_holder_offset_in_bytes(), Robj_to_lock);
+ __ ld(Robj_to_lock, mirror_offset, Robj_to_lock);
+
+ __ bind(Ldone);
+ __ verify_oop(Robj_to_lock);
+ }
+
+ // Got the oop to lock => execute!
+ __ add_monitor_to_stack(true, Rscratch1, R0);
+
+ __ std(Robj_to_lock, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
+ __ lock_object(R26_monitor, Robj_to_lock);
+}
+
+// Generate a fixed interpreter frame for pure interpreter
+// and I2N native transition frames.
+//
+// Before (stack grows downwards):
+//
+// | ... |
+// |------------- |
+// | java arg0 |
+// | ... |
+// | java argn |
+// | | <- R15_esp
+// | |
+// |--------------|
+// | abi_112 |
+// | | <- R1_SP
+// |==============|
+//
+//
+// After:
+//
+// | ... |
+// | java arg0 |<- R18_locals
+// | ... |
+// | java argn |
+// |--------------|
+// | |
+// | java locals |
+// | |
+// |--------------|
+// | abi_48 |
+// |==============|
+// | |
+// | istate |
+// | |
+// |--------------|
+// | monitor |<- R26_monitor
+// |--------------|
+// | |<- R15_esp
+// | expression |
+// | stack |
+// | |
+// |--------------|
+// | |
+// | abi_112 |<- R1_SP
+// |==============|
+//
+// The topmost frame needs an ABI space of 112 bytes. This space is needed
+// because we call into C; the C function may spill its arguments into the
+// caller's frame. When we call into Java we don't need these spill slots. In
+// order to save space on the stack, we resize the caller's frame. However, the
+// Java locals reside in the caller's frame, so that frame has to be grown. The
+// frame_size for the current frame was calculated based on max_stack as the
+// size of the expression stack. At the call, just a part of the expression
+// stack might be used. We don't want to waste this space and cut the frame
+// back accordingly. The resulting amount for resizing is calculated as follows:
+// resize = (number_of_locals - number_of_arguments) * slot_size
+// + (R1_SP - R15_esp) + 48
+//
+// The size for the callee frame is calculated:
+// framesize = 112 + max_stack + monitor + state_size
+//
+// max_stack: Max number of slots on the expression stack, loaded from the method.
+// monitor: We statically reserve room for one monitor object.
+// state_size: We save the current state of the interpreter to this area.
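+//
+// A worked example with hypothetical numbers: a method with 8 locals, 3 of
+// them parameters, and R1_SP - R15_esp == -24 (three stack element sizes)
+// yields resize = (8 - 3) * 8 + (-24) + 48 = 64 bytes, before rounding to
+// frame::alignment_in_bytes.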
+//
+void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Register Rsize_of_parameters, Register Rsize_of_locals) {
+ Register parent_frame_resize = R6_ARG4, // Frame will grow by this number of bytes.
+ top_frame_size = R7_ARG5,
+ Rconst_method = R8_ARG6;
+
+ assert_different_registers(Rsize_of_parameters, Rsize_of_locals, parent_frame_resize, top_frame_size);
+
+ __ ld(Rconst_method, method_(const));
+ __ lhz(Rsize_of_parameters /* number of params */,
+ in_bytes(ConstMethod::size_of_parameters_offset()), Rconst_method);
+ if (native_call) {
+ // If we're calling a native method, we reserve space for the worst-case signature
+ // handler varargs vector, which is max(Argument::n_register_parameters, parameter_count+2).
+ // We add two slots to the parameter_count, one for the jni
+ // environment and one for a possible native mirror.
+ Label skip_native_calculate_max_stack;
+ __ addi(top_frame_size, Rsize_of_parameters, 2);
+ __ cmpwi(CCR0, top_frame_size, Argument::n_register_parameters);
+ __ bge(CCR0, skip_native_calculate_max_stack);
+ __ li(top_frame_size, Argument::n_register_parameters);
+ __ bind(skip_native_calculate_max_stack);
+ __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
+ __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
+ __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
+ assert(Rsize_of_locals == noreg, "Rsize_of_locals not initialized"); // Only relevant value is Rsize_of_parameters.
+ } else {
+ __ lhz(Rsize_of_locals /* number of locals */, in_bytes(ConstMethod::size_of_locals_offset()), Rconst_method);
+ __ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
+ __ sldi(Rsize_of_locals, Rsize_of_locals, Interpreter::logStackElementSize);
+ __ lhz(top_frame_size, in_bytes(ConstMethod::max_stack_offset()), Rconst_method);
+ __ sub(R11_scratch1, Rsize_of_locals, Rsize_of_parameters); // >=0
+ __ sub(parent_frame_resize, R1_SP, R15_esp); // <0, off by Interpreter::stackElementSize!
+ __ sldi(top_frame_size, top_frame_size, Interpreter::logStackElementSize);
+ __ add(parent_frame_resize, parent_frame_resize, R11_scratch1);
+ }
+
+ // Compute top frame size.
+ __ addi(top_frame_size, top_frame_size, frame::abi_reg_args_size + frame::ijava_state_size);
+
+ // Cut back area between esp and max_stack.
+ __ addi(parent_frame_resize, parent_frame_resize, frame::abi_minframe_size - Interpreter::stackElementSize);
+
+ __ round_to(top_frame_size, frame::alignment_in_bytes);
+ __ round_to(parent_frame_resize, frame::alignment_in_bytes);
+ // parent_frame_resize = (locals-parameters) - (ESP-SP-ABI48) Rounded to frame alignment size.
+ // Enlarge by locals-parameters (not in case of native_call), shrink by ESP-SP-ABI48.
+
+ {
+ // --------------------------------------------------------------------------
+ // Stack overflow check
+
+ Label cont;
+ __ add(R11_scratch1, parent_frame_resize, top_frame_size);
+ generate_stack_overflow_check(R11_scratch1, R12_scratch2);
+ }
+
+ // Set up interpreter state registers.
+
+ __ add(R18_locals, R15_esp, Rsize_of_parameters);
+ __ ld(R27_constPoolCache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
+ __ ld(R27_constPoolCache, ConstantPool::cache_offset_in_bytes(), R27_constPoolCache);
+
+ // Set method data pointer.
+ if (ProfileInterpreter) {
+ Label zero_continue;
+ __ ld(R28_mdx, method_(method_data));
+ __ cmpdi(CCR0, R28_mdx, 0);
+ __ beq(CCR0, zero_continue);
+ __ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
+ __ bind(zero_continue);
+ }
+
+ if (native_call) {
+ __ li(R14_bcp, 0); // Must initialize.
+ } else {
+ __ add(R14_bcp, in_bytes(ConstMethod::codes_offset()), Rconst_method);
+ }
+
+ // Resize parent frame.
+ __ mflr(R12_scratch2);
+ __ neg(parent_frame_resize, parent_frame_resize);
+ __ resize_frame(parent_frame_resize, R11_scratch1);
+ __ std(R12_scratch2, _abi(lr), R1_SP);
+
+ __ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
+ __ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
+
+ // Store values.
+ // R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
+ // in InterpreterMacroAssembler::call_from_interpreter.
+ __ std(R19_method, _ijava_state_neg(method), R1_SP);
+ __ std(R21_sender_SP, _ijava_state_neg(sender_sp), R1_SP);
+ __ std(R27_constPoolCache, _ijava_state_neg(cpoolCache), R1_SP);
+ __ std(R18_locals, _ijava_state_neg(locals), R1_SP);
+
+ // Note: esp, bcp, monitor, mdx live in registers. Hence, the correct version can only
+ // be found in the frame after save_interpreter_state is done. This is always true
+ // for non-top frames. But when a signal occurs, dumping the top frame can go wrong,
+ // because e.g. frame::interpreter_frame_bcp() will not access the correct value
+ // (Enhanced Stack Trace).
+ // The signal handler does not save the interpreter state into the frame.
+ __ li(R0, 0);
+#ifdef ASSERT
+ // Fill remaining slots with constants.
+ __ load_const_optimized(R11_scratch1, 0x5afe);
+ __ load_const_optimized(R12_scratch2, 0xdead);
+#endif
+ // We have to initialize some frame slots for native calls (accessed by GC).
+ if (native_call) {
+ __ std(R26_monitor, _ijava_state_neg(monitors), R1_SP);
+ __ std(R14_bcp, _ijava_state_neg(bcp), R1_SP);
+ if (ProfileInterpreter) { __ std(R28_mdx, _ijava_state_neg(mdx), R1_SP); }
+ }
+#ifdef ASSERT
+ else {
+ __ std(R12_scratch2, _ijava_state_neg(monitors), R1_SP);
+ __ std(R12_scratch2, _ijava_state_neg(bcp), R1_SP);
+ __ std(R12_scratch2, _ijava_state_neg(mdx), R1_SP);
+ }
+ __ std(R11_scratch1, _ijava_state_neg(ijava_reserved), R1_SP);
+ __ std(R12_scratch2, _ijava_state_neg(esp), R1_SP);
+ __ std(R12_scratch2, _ijava_state_neg(lresult), R1_SP);
+ __ std(R12_scratch2, _ijava_state_neg(fresult), R1_SP);
+#endif
+ __ subf(R12_scratch2, top_frame_size, R1_SP);
+ __ std(R0, _ijava_state_neg(oop_tmp), R1_SP);
+ __ std(R12_scratch2, _ijava_state_neg(top_frame_sp), R1_SP);
+
+ // Push top frame.
+ __ push_frame(top_frame_size, R11_scratch1);
+}
+
+// End of helpers
+
+// ============================================================================
+// Various method entries
+//
+
+// Empty method, generate a very fast return. We must skip this entry if
+// someone's debugging, indicated by the flag
+// "interp_mode" in the Thread obj.
+// Note: empty methods are mostly generated by methods that do assertions, which are
+// disabled in the "java opt build".
+address TemplateInterpreterGenerator::generate_empty_entry(void) {
+ if (!UseFastEmptyMethods) {
+ NOT_PRODUCT(__ should_not_reach_here();)
+ return Interpreter::entry_for_kind(Interpreter::zerolocals);
+ }
+
+ Label Lslow_path;
+ const Register Rjvmti_mode = R11_scratch1;
+ address entry = __ pc();
+
+ __ lwz(Rjvmti_mode, thread_(interp_only_mode));
+ __ cmpwi(CCR0, Rjvmti_mode, 0);
+ __ bne(CCR0, Lslow_path); // jvmti_mode!=0
+
+ // No one's debugging: simply return.
+ // Pop c2i arguments (if any) off when we return.
+#ifdef ASSERT
+ __ ld(R9_ARG7, 0, R1_SP);
+ __ ld(R10_ARG8, 0, R21_sender_SP);
+ __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+ __ asm_assert_eq("backlink", 0x545);
+#endif // ASSERT
+ __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+ // And we're done.
+ __ blr();
+
+ __ bind(Lslow_path);
+ __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
+ __ flush();
+
+ return entry;
+}
+
+// Support abs and sqrt like in compiler.
+// For others we can use a normal (native) entry.
+
+inline bool math_entry_available(AbstractInterpreter::MethodKind kind) {
+ // Provide math entry with debugging on demand.
+ // Note: Debugging changes which code will get executed:
+ // Debugging or disabled InlineIntrinsics: java method will get interpreted and performs a native call.
+ // Not debugging and enabled InlineIntrinsics: processor instruction will get used.
+ // Result might differ slightly due to rounding etc.
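+ // For the two kinds supported below the results should match: fsqrt is
+ // IEEE-754 correctly rounded (as Math.sqrt requires) and fabs merely clears
+ // the sign bit.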
+ if (!InlineIntrinsics && (!FLAG_IS_ERGO(InlineIntrinsics))) return false; // Generate a vanilla entry.
+
+ return ((kind==Interpreter::java_lang_math_sqrt && VM_Version::has_fsqrt()) ||
+ (kind==Interpreter::java_lang_math_abs));
+}
+
+address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
+ if (!math_entry_available(kind)) {
+ NOT_PRODUCT(__ should_not_reach_here();)
+ return Interpreter::entry_for_kind(Interpreter::zerolocals);
+ }
+
+ Label Lslow_path;
+ const Register Rjvmti_mode = R11_scratch1;
+ address entry = __ pc();
+
+ // Provide math entry with debugging on demand.
+ __ lwz(Rjvmti_mode, thread_(interp_only_mode));
+ __ cmpwi(CCR0, Rjvmti_mode, 0);
+ __ bne(CCR0, Lslow_path); // jvmti_mode!=0
+
+ __ lfd(F1_RET, Interpreter::stackElementSize, R15_esp);
+
+ // Pop c2i arguments (if any) off when we return.
+#ifdef ASSERT
+ __ ld(R9_ARG7, 0, R1_SP);
+ __ ld(R10_ARG8, 0, R21_sender_SP);
+ __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+ __ asm_assert_eq("backlink", 0x545);
+#endif // ASSERT
+ __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+ if (kind == Interpreter::java_lang_math_sqrt) {
+ __ fsqrt(F1_RET, F1_RET);
+ } else if (kind == Interpreter::java_lang_math_abs) {
+ __ fabs(F1_RET, F1_RET);
+ } else {
+ ShouldNotReachHere();
+ }
+
+ // And we're done.
+ __ blr();
+
+ // Provide slow path for JVMTI case.
+ __ bind(Lslow_path);
+ __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R12_scratch2);
+ __ flush();
+
+ return entry;
+}
+
+// Interpreter stub for calling a native method. (asm interpreter)
+// This sets up a somewhat different looking stack for calling the
+// native method than the typical interpreter frame setup.
+//
+// On entry:
+// R19_method - method
+// R16_thread - JavaThread*
+// R15_esp - intptr_t* sender tos
+//
+// abstract stack (grows up)
+// [ IJava (caller of JNI callee) ] <-- ASP
+// ...
+address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
+
+ address entry = __ pc();
+
+ const bool inc_counter = UseCompiler || CountCompiledCalls;
+
+ // -----------------------------------------------------------------------------
+ // Allocate a new frame that represents the native callee (i2n frame).
+ // This is not a full-blown interpreter frame, but in particular, the
+ // following registers are valid after this:
+ // - R19_method
+ // - R18_locals (points to the start of the arguments to the native function)
+ //
+ // abstract stack (grows up)
+ // [ IJava (caller of JNI callee) ] <-- ASP
+ // ...
+
+ const Register signature_handler_fd = R11_scratch1;
+ const Register pending_exception = R0;
+ const Register result_handler_addr = R31;
+ const Register native_method_fd = R11_scratch1;
+ const Register access_flags = R22_tmp2;
+ const Register active_handles = R11_scratch1; // R26_monitor saved to state.
+ const Register sync_state = R12_scratch2;
+ const Register sync_state_addr = sync_state; // Address is dead after use.
+ const Register suspend_flags = R11_scratch1;
+
+ //=============================================================================
+ // Allocate new frame and initialize interpreter state.
+
+ Label exception_return;
+ Label exception_return_sync_check;
+ Label stack_overflow_return;
+
+ // Generate new interpreter state and jump to stack_overflow_return in case of
+ // a stack overflow.
+ //generate_compute_interpreter_state(stack_overflow_return);
+
+ Register size_of_parameters = R22_tmp2;
+
+ generate_fixed_frame(true, size_of_parameters, noreg /* unused */);
+
+ //=============================================================================
+ // Increment invocation counter. On overflow, entry to JNI method
+ // will be compiled.
+ Label invocation_counter_overflow, continue_after_compile;
+ if (inc_counter) {
+ if (synchronized) {
+ // Since at this point in the method invocation the exception handler
+ // would try to exit the monitor of a synchronized method which has not
+ // been entered yet, we set the thread-local variable
+ // _do_not_unlock_if_synchronized to true. If any exception is thrown by
+ // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
+ // checks this thread-local flag: it forces an unwind in the topmost
+ // interpreter frame without performing an unlock while doing so.
+ __ li(R0, 1);
+ __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+ }
+ generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+
+ __ BIND(continue_after_compile);
+ // Reset the _do_not_unlock_if_synchronized flag.
+ if (synchronized) {
+ __ li(R0, 0);
+ __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+ }
+ }
+
+ // access_flags = method->access_flags();
+ // Load access flags.
+ assert(access_flags->is_nonvolatile(),
+ "access_flags must be in a non-volatile register");
+ // Type check.
+ assert(4 == sizeof(AccessFlags), "unexpected field size");
+ __ lwz(access_flags, method_(access_flags));
+
+ // We don't want to reload R19_method and access_flags after calls
+ // to some helper functions.
+ assert(R19_method->is_nonvolatile(),
+ "R19_method must be a non-volatile register");
+
+ // Check for synchronized methods. Must happen AFTER invocation counter
+ // check, so method is not locked if counter overflows.
+
+ if (synchronized) {
+ lock_method(access_flags, R11_scratch1, R12_scratch2, true);
+
+ // Update monitor in state.
+ __ ld(R11_scratch1, 0, R1_SP);
+ __ std(R26_monitor, _ijava_state_neg(monitors), R11_scratch1);
+ }
+
+ // jvmti/jvmpi support
+ __ notify_method_entry();
+
+ //=============================================================================
+ // Get and call the signature handler.
+
+ __ ld(signature_handler_fd, method_(signature_handler));
+ Label call_signature_handler;
+
+ __ cmpdi(CCR0, signature_handler_fd, 0);
+ __ bne(CCR0, call_signature_handler);
+
+ // Method has never been called. Either generate a specialized
+ // handler or point to the slow one.
+ //
+ // Pass parameter 'false' to avoid exception check in call_VM.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
+
+ // Check for an exception while looking up the target method. If we
+ // incurred one, bail.
+ __ ld(pending_exception, thread_(pending_exception));
+ __ cmpdi(CCR0, pending_exception, 0);
+ __ bne(CCR0, exception_return_sync_check); // Has pending exception.
+
+ // Reload signature handler, it may have been created/assigned in the meanwhile.
+ __ ld(signature_handler_fd, method_(signature_handler));
+ __ twi_0(signature_handler_fd); // Order wrt. load of klass mirror and entry point (isync is below).
+
+ __ BIND(call_signature_handler);
+
+ // Before we call the signature handler we push a new frame to
+ // protect the interpreter frame volatile registers when we return
+ // from jni but before we can get back to Java.
+
+ // First set the frame anchor while the SP/FP registers are
+ // convenient and the slow signature handler can use this same frame
+ // anchor.
+
+ // We have a TOP_IJAVA_FRAME here, which belongs to us.
+ __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
+
+ // Now the interpreter frame (and its call chain) have been
+ // invalidated and flushed. We are now protected against eager
+ // being enabled in native code. Even if it goes eager the
+ // registers will be reloaded as clean and we will invalidate after
+ // the call so no spurious flush should be possible.
+
+ // Call signature handler and pass locals address.
+ //
+ // Our signature handlers copy required arguments to the C stack
+ // (outgoing C args), R3_ARG1 to R10_ARG8, and FARG1 to FARG13.
+ __ mr(R3_ARG1, R18_locals);
+ __ ld(signature_handler_fd, 0, signature_handler_fd);
+
+ __ call_stub(signature_handler_fd);
+
+ // Remove the register parameter varargs slots we allocated in
+ // compute_interpreter_state. SP+16 ends up pointing to the ABI
+ // outgoing argument area.
+ //
+ // Not needed on PPC64.
+ //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
+
+ assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
+ // Save across call to native method.
+ __ mr(result_handler_addr, R3_RET);
+
+ __ isync(); // Acquire signature handler before trying to fetch the native entry point and klass mirror.
+
+ // Set up fixed parameters and call the native method.
+ // If the method is static, get mirror into R4_ARG2.
+ {
+ Label method_is_not_static;
+ // access_flags is in a non-volatile register and still valid; no need to
+ // reload it.
+
+ // Test the static bit.
+ __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
+ __ bfalse(CCR0, method_is_not_static);
+
+ // constants = method->constants();
+ __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
+ __ ld(R11_scratch1, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
+ // pool_holder = method->constants()->pool_holder();
+ __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
+ R11_scratch1/*constants*/);
+
+ const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+
+ // mirror = pool_holder->klass_part()->java_mirror();
+ __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
+ // state->_native_mirror = mirror;
+
+ __ ld(R11_scratch1, 0, R1_SP);
+ __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
+ // R4_ARG2 = &state->_oop_temp;
+ __ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
+ __ BIND(method_is_not_static);
+ }
+
+ // At this point, arguments have been copied off the stack into
+ // their JNI positions. Oops are boxed in-place on the stack, with
+ // handles copied to arguments. The result handler address is in a
+ // register.
+
+ // Pass JNIEnv address as first parameter.
+ __ addir(R3_ARG1, thread_(jni_environment));
+
+ // Load the native_method entry before we change the thread state.
+ __ ld(native_method_fd, method_(native_function));
+
+ //=============================================================================
+ // Transition from _thread_in_Java to _thread_in_native. As soon as
+ // we make this change the safepoint code needs to be certain that
+ // the last Java frame we established is good. The pc in that frame
+ // just needs to be near here not an actual return address.
+
+ // We use release_store_fence to update values like the thread state, where
+ // we don't want the current thread to continue until all our prior memory
+ // accesses (including the new thread state) are visible to other threads.
+ __ li(R0, _thread_in_native);
+ __ release();
+
+ // TODO PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+ __ stw(R0, thread_(thread_state));
+
+ if (UseMembar) {
+ __ fence();
+ }
+
+ //=============================================================================
+ // Call the native method. Argument registers must not have been
+ // overwritten since "__ call_stub(signature_handler);" (except for
+ // ARG1 and ARG2 for static methods).
+ __ call_c(native_method_fd);
+
+ __ li(R0, 0);
+ __ ld(R11_scratch1, 0, R1_SP);
+ __ std(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
+ __ stfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
+ __ std(R0/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1); // reset
+
+ // Note: C++ interpreter needs the following here:
+ // The frame_manager_lr field, which we use for setting the last
+ // java frame, gets overwritten by the signature handler. Restore
+ // it now.
+ //__ get_PC_trash_LR(R11_scratch1);
+ //__ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+ // Because of GC R19_method may no longer be valid.
+
+ // Block, if necessary, before resuming in _thread_in_Java state.
+ // In order for GC to work, don't clear the last_Java_sp until after
+ // blocking.
+
+ //=============================================================================
+ // Switch thread to "native transition" state before reading the
+ // synchronization state. This additional state is necessary
+ // because reading and testing the synchronization state is not
+ // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
+ // in _thread_in_native state, loads _not_synchronized and is
+ // preempted. VM thread changes sync state to synchronizing and
+ // suspends threads for GC. Thread A is resumed to finish this
+ // native method, but doesn't block here since it didn't see any
+ // synchronization in progress, and escapes.
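+ //
+ // The _thread_in_native_trans state closes that window: the VM thread does
+ // not treat a thread in the trans state as safe, so a safepoint starting
+ // before the state change below becomes visible will wait for us, and the
+ // check below cannot miss it.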
+
+ // We use release_store_fence to update values like the thread state, where
+ // we don't want the current thread to continue until all our prior memory
+ // accesses (including the new thread state) are visible to other threads.
+ __ li(R0/*thread_state*/, _thread_in_native_trans);
+ __ release();
+ __ stw(R0/*thread_state*/, thread_(thread_state));
+ if (UseMembar) {
+ __ fence();
+ } else {
+ // Write serialization page so that the VM thread can do a pseudo remote
+ // membar. We use the current thread pointer to calculate a thread
+ // specific offset to write to within the page. This minimizes bus
+ // traffic due to cache line collision.
+ __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
+ }
+
+ // Now before we return to java we must look for a current safepoint
+ // (a new safepoint can not start since we entered native_trans).
+ // We must check here because a current safepoint could be modifying
+ // the callers registers right this moment.
+
+ // Acquire isn't strictly necessary here because of the fence, but
+ // sync_state is declared to be volatile, so we do it anyway
+ // (cmp-br-isync on one path, release (same as acquire on PPC64) on the other path).
+ int sync_state_offs = __ load_const_optimized(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/R0, true);
+
+ // TODO PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
+ __ lwz(sync_state, sync_state_offs, sync_state_addr);
+
+ // TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
+ __ lwz(suspend_flags, thread_(suspend_flags));
+
+ Label sync_check_done;
+ Label do_safepoint;
+ // No synchronization in progress nor yet synchronized.
+ __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+ // Not suspended.
+ __ cmpwi(CCR1, suspend_flags, 0);
+
+ __ bne(CCR0, do_safepoint);
+ __ beq(CCR1, sync_check_done);
+ __ bind(do_safepoint);
+ __ isync();
+ // Block. We do the call directly and leave the current
+ // last_Java_frame setup undisturbed. We must save any possible
+ // native result across the call. No oop is present.
+
+ __ mr(R3_ARG1, R16_thread);
+ __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
+ relocInfo::none);
+
+ __ bind(sync_check_done);
+
+ //=============================================================================
+ // <<<<<< Back in Interpreter Frame >>>>>
+
+ // We are in thread_in_native_trans here and back in the normal
+ // interpreter frame. We don't have to do anything special about
+ // safepoints and we can switch to Java mode anytime we are ready.
+
+ // Note: frame::interpreter_frame_result has a dependency on how the
+ // method result is saved across the call to post_method_exit. For
+ // native methods it assumes that the non-FPU/non-void result is
+ // saved in _native_lresult and a FPU result in _native_fresult. If
+ // this changes then the interpreter_frame_result implementation
+ // will need to be updated too.
+
+ // On PPC64, we have stored the result directly after the native call.
+
+ //=============================================================================
+ // Back in Java
+
+ // We use release_store_fence to update values like the thread state, where
+ // we don't want the current thread to continue until all our prior memory
+ // accesses (including the new thread state) are visible to other threads.
+ __ li(R0/*thread_state*/, _thread_in_Java);
+ __ release();
+ __ stw(R0/*thread_state*/, thread_(thread_state));
+ if (UseMembar) {
+ __ fence();
+ }
+
+ __ reset_last_Java_frame();
+
+ // Jvmdi/jvmpi support. Whether we've got an exception pending or
+ // not, and whether unlocking throws an exception or not, we notify
+ // on native method exit. If we do have an exception, we'll end up
+ // in the caller's context to handle it, so if we don't do the
+ // notify here, we'll drop it on the floor.
+ __ notify_method_exit(true/*native method*/,
+ ilgl /*illegal state (not used for native methods)*/,
+ InterpreterMacroAssembler::NotifyJVMTI,
+ false /*check_exceptions*/);
+
+ //=============================================================================
+ // Handle exceptions
+
+ if (synchronized) {
+ // Don't check for exceptions since we're still in the i2n frame. Do that
+ // manually afterwards.
+ unlock_method(false);
+ }
+
+ // Reset active handles after returning from native.
+ // thread->active_handles()->clear();
+ __ ld(active_handles, thread_(active_handles));
+ // TODO PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
+ __ li(R0, 0);
+ __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
+
+ Label exception_return_sync_check_already_unlocked;
+ __ ld(R0/*pending_exception*/, thread_(pending_exception));
+ __ cmpdi(CCR0, R0/*pending_exception*/, 0);
+ __ bne(CCR0, exception_return_sync_check_already_unlocked);
+
+ //-----------------------------------------------------------------------------
+ // No exception pending.
+
+ // Move native method result back into proper registers and return.
+ // Invoke result handler (may unbox/promote).
+ __ ld(R11_scratch1, 0, R1_SP);
+ __ ld(R3_RET, _ijava_state_neg(lresult), R11_scratch1);
+ __ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
+ __ call_stub(result_handler_addr);
+
+ __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
+
+ // Must use the return pc which was loaded from the caller's frame
+ // as the VM uses return-pc-patching for deoptimization.
+ __ mtlr(R0);
+ __ blr();
+
+ //-----------------------------------------------------------------------------
+ // An exception is pending. We call into the runtime only if the
+ // caller was not interpreted. If it was interpreted the
+ // interpreter will do the correct thing. If it isn't interpreted
+ // (call stub/compiled code) we will change our return and continue.
+
+ __ BIND(exception_return_sync_check);
+
+ if (synchronized) {
+ // Don't check for exceptions since we're still in the i2n frame. Do that
+ // manually afterwards.
+ unlock_method(false);
+ }
+ __ BIND(exception_return_sync_check_already_unlocked);
+
+ const Register return_pc = R31;
+
+ __ ld(return_pc, 0, R1_SP);
+ __ ld(return_pc, _abi(lr), return_pc);
+
+ // Get the address of the exception handler.
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+ R16_thread,
+ return_pc /* return pc */);
+ __ merge_frames(/*top_frame_sp*/ R21_sender_SP, noreg, R11_scratch1, R12_scratch2);
+
+ // Load the PC of the exception handler into LR.
+ __ mtlr(R3_RET);
+
+ // Load exception into R3_ARG1 and clear pending exception in thread.
+ __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
+ __ li(R4_ARG2, 0);
+ __ std(R4_ARG2, thread_(pending_exception));
+
+ // Load the original return pc into R4_ARG2.
+ __ mr(R4_ARG2/*issuing_pc*/, return_pc);
+
+ // Return to exception handler.
+ __ blr();
+
+ //=============================================================================
+ // Counter overflow.
+
+ if (inc_counter) {
+ // Handle invocation counter overflow.
+ __ bind(invocation_counter_overflow);
+
+ generate_counter_overflow(continue_after_compile);
+ }
+
+ return entry;
+}
+
+// Generic interpreted method entry to (asm) interpreter.
+//
+address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
+ bool inc_counter = UseCompiler || CountCompiledCalls;
+ address entry = __ pc();
+ // Generate the code to allocate the interpreter stack frame.
+ Register Rsize_of_parameters = R4_ARG2, // Written by generate_fixed_frame.
+ Rsize_of_locals = R5_ARG3; // Written by generate_fixed_frame.
+
+ generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
+
+#ifdef FAST_DISPATCH
+ __ unimplemented("Fast dispatch in generate_normal_entry");
+#if 0
+ __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
+ // Set bytecode dispatch table base.
+#endif
+#endif
+
+ // --------------------------------------------------------------------------
+ // Zero out non-parameter locals.
+ // Note: *Always* zero out non-parameter locals as Sparc does. It's not
+ // worth asking the flag; just do it.
+ Register Rslot_addr = R6_ARG4,
+ Rnum = R7_ARG5;
+ Label Lno_locals, Lzero_loop;
+
+ // Set up the zeroing loop.
+ __ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
+ __ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
+ __ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
+ __ beq(CCR0, Lno_locals);
+ __ li(R0, 0);
+ __ mtctr(Rnum);
+
+ // The zero locals loop.
+ __ bind(Lzero_loop);
+ __ std(R0, 0, Rslot_addr);
+ __ addi(Rslot_addr, Rslot_addr, -Interpreter::stackElementSize);
+ __ bdnz(Lzero_loop);
+
+ __ bind(Lno_locals);
+
+ // --------------------------------------------------------------------------
+ // Counter increment and overflow check.
+ Label invocation_counter_overflow,
+ profile_method,
+ profile_method_continue;
+ if (inc_counter || ProfileInterpreter) {
+
+ Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
+ if (synchronized) {
+ // Since at this point in the method invocation the exception handler
+ // would try to exit the monitor of a synchronized method which has not
+ // been entered yet, we set the thread-local variable
+ // _do_not_unlock_if_synchronized to true. If any exception is thrown by
+ // the runtime, the exception handling (i.e. unlock_if_synchronized_method)
+ // checks this thread-local flag: it forces an unwind in the topmost
+ // interpreter frame without performing an unlock while doing so.
+ __ li(R0, 1);
+ __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+ }
+ // Increment invocation counter and check for overflow.
+ if (inc_counter) {
+ generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
+ }
+
+ __ bind(profile_method_continue);
+
+ // Reset the _do_not_unlock_if_synchronized flag.
+ if (synchronized) {
+ __ li(R0, 0);
+ __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // Locking of synchronized methods. Must happen AFTER invocation_counter
+ // check and stack overflow check, so the method is not locked if the counter overflows.
+ if (synchronized) {
+ lock_method(R3_ARG1, R4_ARG2, R5_ARG3);
+ }
+#ifdef ASSERT
+ else {
+ Label Lok;
+ __ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
+ __ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
+ __ asm_assert_eq("method needs synchronization", 0x8521);
+ __ bind(Lok);
+ }
+#endif // ASSERT
+
+ __ verify_thread();
+
+ // --------------------------------------------------------------------------
+ // JVMTI support
+ __ notify_method_entry();
+
+ // --------------------------------------------------------------------------
+ // Start executing instructions.
+ __ dispatch_next(vtos);
+
+ // --------------------------------------------------------------------------
+ // Out of line counter overflow and MDO creation code.
+ if (ProfileInterpreter) {
+ // We have decided to profile this method in the interpreter.
+ __ bind(profile_method);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
+ __ set_method_data_pointer_for_bcp();
+ __ b(profile_method_continue);
+ }
+
+ if (inc_counter) {
+ // Handle invocation counter overflow.
+ __ bind(invocation_counter_overflow);
+ generate_counter_overflow(profile_method_continue);
+ }
+ return entry;
+}
+
+// =============================================================================
+// Entry points
+
+address AbstractInterpreterGenerator::generate_method_entry(
+ AbstractInterpreter::MethodKind kind) {
+ // Determine code generation flags.
+ bool synchronized = false;
+ address entry_point = NULL;
+
+ switch (kind) {
+ case Interpreter::zerolocals : break;
+ case Interpreter::zerolocals_synchronized: synchronized = true; break;
+ case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
+ case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
+ case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
+ case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
+ case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
+
+ case Interpreter::java_lang_math_sin : // fall thru
+ case Interpreter::java_lang_math_cos : // fall thru
+ case Interpreter::java_lang_math_tan : // fall thru
+ case Interpreter::java_lang_math_abs : // fall thru
+ case Interpreter::java_lang_math_log : // fall thru
+ case Interpreter::java_lang_math_log10 : // fall thru
+ case Interpreter::java_lang_math_sqrt : // fall thru
+ case Interpreter::java_lang_math_pow : // fall thru
+ case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
+ case Interpreter::java_lang_ref_reference_get
+ : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
+ default : ShouldNotReachHere(); break;
+ }
+
+ if (entry_point) {
+ return entry_point;
+ }
+
+ return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
+}
+
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+ return !math_entry_available(method_kind(m));
+}
+
+// How much stack a method activation needs in stack slots.
+// We must calculate this exactly as in generate_fixed_frame.
+// Note: This returns the conservative size assuming maximum alignment.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+ const int max_alignment_size = 2;
+ const int abi_scratch = frame::abi_reg_args_size;
+ return method->max_locals() + method->max_stack() + frame::interpreter_frame_monitor_size() + max_alignment_size + abi_scratch;
+}
+
+// Fills a skeletal interpreter frame generated during deoptimizations
+// and returns the frame size in slots.
+//
+// Parameters:
+//
+// interpreter_frame == NULL:
+// Only calculate the size of an interpreter activation, no actual layout.
+// Note: This calculation must exactly parallel the frame setup
+// in TemplateInterpreter::generate_normal_entry. But it does not
+// account for the SP alignment, that might further enhance the
+// frame size, depending on FP.
+//
+// interpreter_frame != NULL:
+// set up the method, locals, and monitors.
+// The frame interpreter_frame, if not NULL, is guaranteed to be the
+// right size, as determined by a previous call to this method.
+// It is also guaranteed to be walkable even though it is in a skeletal state.
+//
+// is_top_frame == true:
+// We're processing the *oldest* interpreter frame!
+//
+// popframe_extra_args:
+// If this is != 0 we are returning to a deoptimized frame by popping
+// off the callee frame. We want to re-execute the call that called the
+// callee (interpreted), but since the return to the interpreter would pop
+// the arguments off again, we advance the esp by popframe_extra_args dummy
+// slots. Popping those off re-establishes the stack layout as it was before the call.
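+// (Hypothetical example: re-executing an invokevirtual with a receiver and
+// two int arguments gives popframe_extra_args == 3; the esp is advanced by
+// three dummy slots, which the re-executed invoke pops off again.)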
+//
+int AbstractInterpreter::layout_activation(Method* method,
+ int tempcount,
+ int popframe_extra_args,
+ int moncount,
+ int caller_actual_parameters,
+ int callee_param_count,
+ int callee_locals,
+ frame* caller,
+ frame* interpreter_frame,
+ bool is_top_frame,
+ bool is_bottom_frame) {
+
+ const int max_alignment_space = 2;
+ const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
+ (frame::abi_minframe_size / Interpreter::stackElementSize) ;
+ const int conservative_framesize_in_slots =
+ method->max_stack() + callee_locals - callee_param_count +
+ (moncount * frame::interpreter_frame_monitor_size()) + max_alignment_space +
+ abi_scratch + frame::ijava_state_size / Interpreter::stackElementSize;
+
+ assert(!is_top_frame || conservative_framesize_in_slots * 8 > frame::abi_reg_args_size + frame::ijava_state_size, "frame too small");
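+ // Slots are Interpreter::stackElementSize == 8 bytes on PPC64, hence the
+ // multiplication by 8 when comparing slot counts against byte sizes.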
+
+ if (interpreter_frame == NULL) {
+ // Since we don't know the exact alignment, we return the conservative size.
+ return (conservative_framesize_in_slots & -2);
+ } else {
+ // Now we know our caller, calc the exact frame layout and size.
+ intptr_t* locals_base = (caller->is_interpreted_frame()) ?
+ caller->interpreter_frame_esp() + caller_actual_parameters :
+ caller->sp() + method->max_locals() - 1 + (frame::abi_minframe_size / Interpreter::stackElementSize) ;
+
+ intptr_t* monitor_base = caller->sp() - frame::ijava_state_size / Interpreter::stackElementSize ;
+ intptr_t* monitor = monitor_base - (moncount * frame::interpreter_frame_monitor_size());
+ intptr_t* esp_base = monitor - 1;
+ intptr_t* esp = esp_base - tempcount - popframe_extra_args;
+ intptr_t* sp = (intptr_t *) (((intptr_t) (esp_base - callee_locals + callee_param_count - method->max_stack() - abi_scratch)) & -StackAlignmentInBytes);
+ intptr_t* sender_sp = caller->sp() + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
+ intptr_t* top_frame_sp = is_top_frame ? sp : sp + (frame::abi_minframe_size - frame::abi_reg_args_size) / Interpreter::stackElementSize;
+
+ interpreter_frame->interpreter_frame_set_method(method);
+ interpreter_frame->interpreter_frame_set_locals(locals_base);
+ interpreter_frame->interpreter_frame_set_cpcache(method->constants()->cache());
+ interpreter_frame->interpreter_frame_set_esp(esp);
+ interpreter_frame->interpreter_frame_set_monitor_end((BasicObjectLock *)monitor);
+ interpreter_frame->interpreter_frame_set_top_frame_sp(top_frame_sp);
+ if (!is_bottom_frame) {
+ interpreter_frame->interpreter_frame_set_sender_sp(sender_sp);
+ }
+
+ int framesize_in_slots = caller->sp() - sp;
+ assert(!is_top_frame || framesize_in_slots >= (frame::abi_reg_args_size / Interpreter::stackElementSize) + frame::ijava_state_size / Interpreter::stackElementSize, "frame too small");
+ assert(framesize_in_slots <= conservative_framesize_in_slots, "exact frame size must be smaller than the conservative size!");
+ return framesize_in_slots;
+ }
+}
+
+// =============================================================================
+// Exceptions
+
+void TemplateInterpreterGenerator::generate_throw_exception() {
+ Register Rexception = R17_tos,
+ Rcontinuation = R3_RET;
+
+ // --------------------------------------------------------------------------
+ // Entry point if a method returns with a pending exception (rethrow).
+ Interpreter::_rethrow_exception_entry = __ pc();
+ {
+ __ restore_interpreter_state(R11_scratch1); // Sets R11_scratch1 = fp.
+ __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+ __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+
+ // Compiled code destroys templateTableBase, reload.
+ __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
+ }
+
+ // Entry point if an interpreted method throws an exception (throw).
+ Interpreter::_throw_exception_entry = __ pc();
+ {
+ __ mr(Rexception, R3_RET);
+
+ __ verify_thread();
+ __ verify_oop(Rexception);
+
+ // Expression stack must be empty before entering the VM in case of an exception.
+ __ empty_expression_stack();
+ // Find exception handler address and preserve exception oop.
+ // Call C routine to find handler and jump to it.
+ __ call_VM(Rexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Rexception);
+ __ mtctr(Rcontinuation);
+ // Push exception for exception handler bytecodes.
+ __ push_ptr(Rexception);
+
+ // Jump to the exception handler (may be the remove-activation entry!).
+ __ bctr();
+ }
+
+ // If the exception is not handled in the current frame the frame is
+ // removed and the exception is rethrown (i.e. exception
+ // continuation is _rethrow_exception).
+ //
+ // Note: At this point the bci is still the bci for the instruction
+ // which caused the exception and the expression stack is
+ // empty. Thus, for any VM calls at this point, GC will find a legal
+ // oop map (with empty expression stack).
+
+ // In current activation
+ // tos: exception
+ // bcp: exception bcp
+
+ // --------------------------------------------------------------------------
+ // JVMTI PopFrame support
+
+ Interpreter::_remove_activation_preserving_args_entry = __ pc();
+ {
+ // Set the popframe_processing bit in popframe_condition indicating that we are
+ // currently handling popframe, so that call_VMs that may happen later do not
+ // trigger new popframe handling cycles.
+ __ lwz(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+ __ ori(R11_scratch1, R11_scratch1, JavaThread::popframe_processing_bit);
+ __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+ // Empty the expression stack, as in normal exception handling.
+ __ empty_expression_stack();
+ __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
+
+ // Check to see whether we are returning to a deoptimized frame.
+ // (The PopFrame call ensures that the caller of the popped frame is
+ // either interpreted or compiled and deoptimizes it if compiled.)
+ // Note that we don't compare the return PC against the
+ // deoptimization blob's unpack entry because of the presence of
+ // adapter frames in C2.
+ Label Lcaller_not_deoptimized;
+ Register return_pc = R3_ARG1;
+ __ ld(return_pc, 0, R1_SP);
+ __ ld(return_pc, _abi(lr), return_pc);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
+ __ cmpdi(CCR0, R3_RET, 0);
+ __ bne(CCR0, Lcaller_not_deoptimized);
+
+ // The deoptimized case.
+ // In this case, we can't call dispatch_next() after the frame is
+ // popped, but instead must save the incoming arguments and restore
+ // them after deoptimization has occurred.
+ __ ld(R4_ARG2, in_bytes(Method::const_offset()), R19_method);
+ __ lhz(R4_ARG2 /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), R4_ARG2);
+ __ slwi(R4_ARG2, R4_ARG2, Interpreter::logStackElementSize);
+ __ addi(R5_ARG3, R18_locals, Interpreter::stackElementSize);
+ __ subf(R5_ARG3, R4_ARG2, R5_ARG3);
+ // Save these arguments.
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R16_thread, R4_ARG2, R5_ARG3);
+
+ // Inform deoptimization that it is responsible for restoring these arguments.
+ __ load_const_optimized(R11_scratch1, JavaThread::popframe_force_deopt_reexecution_bit);
+ __ stw(R11_scratch1, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+ // Return from the current method into the deoptimization blob. Will eventually
+ // end up in the deopt interpreter entry; deoptimization has prepared everything
+ // so that we re-execute the call that called us.
+ __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*reload return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+ __ mtlr(return_pc);
+ __ blr();
+
+ // The non-deoptimized case.
+ __ bind(Lcaller_not_deoptimized);
+
+ // Clear the popframe condition flag.
+ __ li(R0, 0);
+ __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
+
+ // Get out of the current method and re-execute the call that called us.
+ __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+ __ restore_interpreter_state(R11_scratch1);
+ __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+ __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+ __ mtlr(return_pc);
+ if (ProfileInterpreter) {
+ __ set_method_data_pointer_for_bcp();
+ }
+ __ dispatch_next(vtos);
+ }
+ // end of JVMTI PopFrame support
+
+ // --------------------------------------------------------------------------
+ // Remove activation exception entry.
+ // This is jumped to if an interpreted method can't handle an exception itself
+ // (we come from the throw/rethrow exception entry above). We're going to call
+ // into the VM to find the exception handler in the caller, pop the current
+ // frame and return the handler we calculated.
+ Interpreter::_remove_activation_entry = __ pc();
+ {
+ __ pop_ptr(Rexception);
+ __ verify_thread();
+ __ verify_oop(Rexception);
+ __ std(Rexception, in_bytes(JavaThread::vm_result_offset()), R16_thread);
+
+ __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, true);
+ __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI, false);
+
+ __ get_vm_result(Rexception);
+
+ // We are done with this activation frame; find out where to go next.
+ // The continuation point will be an exception handler, which expects
+ // the following registers set up:
+ //
+ // RET: exception oop
+ // ARG2: Issuing PC (see generate_exception_blob()), only used if the caller is compiled.
+
+ Register return_pc = R31; // Needs to survive the runtime call.
+ __ ld(return_pc, 0, R1_SP);
+ __ ld(return_pc, _abi(lr), return_pc);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, return_pc);
+
+ // Remove the current activation.
+ __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
+
+ __ mr(R4_ARG2, return_pc);
+ __ mtlr(R3_RET);
+ __ mr(R3_RET, Rexception);
+ __ blr();
+ }
+}
+
+// JVMTI ForceEarlyReturn support.
+// Returns "in the middle" of a method with a "fake" return value.
+address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
+
+ Register Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2;
+
+ address entry = __ pc();
+ __ empty_expression_stack();
+
+ __ load_earlyret_value(state, Rscratch1);
+
+ __ ld(Rscratch1, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
+ // Clear the earlyret state.
+ __ li(R0, 0);
+ __ stw(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rscratch1);
+
+ __ remove_activation(state, false, false);
+ // Copied from TemplateTable::_return.
+ // Restoration of lr done by remove_activation.
+ switch (state) {
+ case ltos:
+ case btos:
+ case ctos:
+ case stos:
+ case atos:
+ case itos: __ mr(R3_RET, R17_tos); break;
+ case ftos:
+ case dtos: __ fmr(F1_RET, F15_ftos); break;
+ case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
+ // to become visible before the reference to the object gets stored anywhere.
+ __ membar(Assembler::StoreStore); break;
+ default : ShouldNotReachHere();
+ }
+ __ blr();
+
+ return entry;
+} // end of ForceEarlyReturn support
+
+//-----------------------------------------------------------------------------
+// Helper for vtos entry point generation
+
+void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
+ address& bep,
+ address& cep,
+ address& sep,
+ address& aep,
+ address& iep,
+ address& lep,
+ address& fep,
+ address& dep,
+ address& vep) {
+ assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
+ Label L;
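+ // Each typed entry pushes the TOS-cached value onto the expression stack and
+ // falls through to the common vtos point L. bep/cep/sep share the itos entry
+ // because sub-int values are already widened to int in the TOS register.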
+
+ aep = __ pc(); __ push_ptr(); __ b(L);
+ fep = __ pc(); __ push_f(); __ b(L);
+ dep = __ pc(); __ push_d(); __ b(L);
+ lep = __ pc(); __ push_l(); __ b(L);
+ __ align(32, 12, 24); // align L
+ bep = cep = sep =
+ iep = __ pc(); __ push_i();
+ vep = __ pc();
+ __ bind(L);
+ generate_and_dispatch(t);
+}
+
+//-----------------------------------------------------------------------------
+// Generation of individual instructions
+
+// helpers for generate_and_dispatch
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+ : TemplateInterpreterGenerator(code) {
+ generate_all(); // Down here so it can be "virtual".
+}
+
+//-----------------------------------------------------------------------------
+
+// Non-product code
+#ifndef PRODUCT
+address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
+ //__ flush_bundle();
+ address entry = __ pc();
+
+ char *bname = NULL;
+ uint tsize = 0;
+ switch(state) {
+ case ftos:
+ bname = "trace_code_ftos {";
+ tsize = 2;
+ break;
+ case btos:
+ bname = "trace_code_btos {";
+ tsize = 2;
+ break;
+ case ctos:
+ bname = "trace_code_ctos {";
+ tsize = 2;
+ break;
+ case stos:
+ bname = "trace_code_stos {";
+ tsize = 2;
+ break;
+ case itos:
+ bname = "trace_code_itos {";
+ tsize = 2;
+ break;
+ case ltos:
+ bname = "trace_code_ltos {";
+ tsize = 3;
+ break;
+ case atos:
+ bname = "trace_code_atos {";
+ tsize = 2;
+ break;
+ case vtos:
+ // Note: In case of vtos, the topmost stack value could be an int or a double.
+ // In case of a double (2 slots) we won't see the 2nd stack value.
+ // Maybe we should simply print the topmost 3 stack slots to cope with the problem.
+ bname = "trace_code_vtos {";
+ tsize = 2;
+ break;
+ case dtos:
+ bname = "trace_code_dtos {";
+ tsize = 3;
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ BLOCK_COMMENT(bname);
+
+ // Support short-cut for TraceBytecodesAt.
+ // Don't call into the VM if we don't want to trace, to speed things up.
+ Label Lskip_vm_call;
+ if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
+ int offs1 = __ load_const_optimized(R11_scratch1, (address) &TraceBytecodesAt, R0, true);
+ int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
+ __ ld(R11_scratch1, offs1, R11_scratch1);
+ __ lwa(R12_scratch2, offs2, R12_scratch2);
+ __ cmpd(CCR0, R12_scratch2, R11_scratch1);
+ __ blt(CCR0, Lskip_vm_call);
+ }
+
+ __ push(state);
+ // Load 2 topmost expression stack values.
+ __ ld(R6_ARG4, tsize*Interpreter::stackElementSize, R15_esp);
+ __ ld(R5_ARG3, Interpreter::stackElementSize, R15_esp);
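+ // call_VM clobbers LR; preserve it in the non-volatile R31 across the call.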
+ __ mflr(R31);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), /* unused */ R4_ARG2, R5_ARG3, R6_ARG4, false);
+ __ mtlr(R31);
+ __ pop(state);
+
+ if (TraceBytecodesAt > 0 && TraceBytecodesAt < max_intx) {
+ __ bind(Lskip_vm_call);
+ }
+ __ blr();
+ BLOCK_COMMENT("} trace_code");
+ return entry;
+}
+
+void TemplateInterpreterGenerator::count_bytecode() {
+ int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeCounter::_counter_value, R12_scratch2, true);
+ __ lwz(R12_scratch2, offs, R11_scratch1);
+ __ addi(R12_scratch2, R12_scratch2, 1);
+ __ stw(R12_scratch2, offs, R11_scratch1);
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
+ int offs = __ load_const_optimized(R11_scratch1, (address) &BytecodeHistogram::_counters[t->bytecode()], R12_scratch2, true);
+ __ lwz(R12_scratch2, offs, R11_scratch1);
+ __ addi(R12_scratch2, R12_scratch2, 1);
+ __ stw(R12_scratch2, offs, R11_scratch1);
+}
+
+void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
+ const Register addr = R11_scratch1,
+ tmp = R12_scratch2;
+ // Get index, shift out old bytecode, bring in new bytecode, and store it.
+ // _index = (_index >> log2_number_of_codes) |
+ // (bytecode << log2_number_of_codes);
+ int offs1 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_index, tmp, true);
+ __ lwz(tmp, offs1, addr);
+ __ srwi(tmp, tmp, BytecodePairHistogram::log2_number_of_codes);
+ __ ori(tmp, tmp, ((int) t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
+ __ stw(tmp, offs1, addr);
+
+ // Bump bucket contents.
+ // _counters[_index] ++;
+ int offs2 = __ load_const_optimized(addr, (address)&BytecodePairHistogram::_counters, R0, true);
+ __ sldi(tmp, tmp, LogBytesPerInt);
+ __ add(addr, tmp, addr);
+ __ lwz(tmp, offs2, addr);
+ __ addi(tmp, tmp, 1);
+ __ stw(tmp, offs2, addr);
+}
+
+void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
+ // Call a little run-time stub to avoid blow-up for each bytecode.
+ // The run-time stub saves the right registers, depending on
+ // the tosca in-state for the given template.
+
+ assert(Interpreter::trace_code(t->tos_in()) != NULL,
+ "entry must have been generated");
+
+ // Note: we destroy LR here.
+ __ bl(Interpreter::trace_code(t->tos_in()));
+}
+
+void TemplateInterpreterGenerator::stop_interpreter_at() {
+ Label L;
+ int offs1 = __ load_const_optimized(R11_scratch1, (address) &StopInterpreterAt, R0, true);
+ int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
+ __ ld(R11_scratch1, offs1, R11_scratch1);
+ __ lwa(R12_scratch2, offs2, R12_scratch2);
+ __ cmpd(CCR0, R12_scratch2, R11_scratch1);
+ __ bne(CCR0, L);
+ __ illtrap();
+ __ bind(L);
+}
+
+#endif // !PRODUCT
+#endif // !CC_INTERP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
+#define CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
+
+ protected:
+
+ // Size of interpreter code. Increase if too small. Interpreter will
+  // fail with a guarantee ("not enough space for interpreter generation")
+  // if too small.
+  // Run with -XX:+PrintInterpreter to get the VM to print out the size.
+ // Max size with JVMTI
+
+ const static int InterpreterCodeSize = 210*K;
+
+#endif // CPU_PPC_VM_TEMPLATEINTERPRETER_PPC_HPP
+
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,4082 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateInterpreter.hpp"
+#include "interpreter/templateTable.hpp"
+#include "memory/universe.inline.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "utilities/macros.hpp"
+
+#ifndef CC_INTERP
+
+#undef __
+#define __ _masm->
+
+// ============================================================================
+// Misc helpers
+
+// Do an oop store like *(base + index) = val OR *(base + offset) = val
+// (only one of the two variants is possible at a time).
+// Index can be noreg.
+// Kills:
+// Rbase, Rtmp
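+// For G1 this emits the SATB pre-barrier (recording the value being
+// overwritten) and the post-barrier (logging a potential cross-region
+// reference); card-table collectors only dirty the card covering the
+// store address after the store.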
+static void do_oop_store(InterpreterMacroAssembler* _masm,
+ Register Rbase,
+ RegisterOrConstant offset,
+ Register Rval, // Noreg means always null.
+ Register Rtmp1,
+ Register Rtmp2,
+ Register Rtmp3,
+ BarrierSet::Name barrier,
+ bool precise,
+ bool check_null) {
+ assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);
+
+ switch (barrier) {
+#ifndef SERIALGC
+ case BarrierSet::G1SATBCT:
+ case BarrierSet::G1SATBCTLogging:
+ {
+ // Load and record the previous value.
+ __ g1_write_barrier_pre(Rbase, offset,
+ Rtmp3, /* holder of pre_val ? */
+ Rtmp1, Rtmp2, false /* frame */);
+
+ Label Lnull, Ldone;
+ if (Rval != noreg) {
+ if (check_null) {
+ __ cmpdi(CCR0, Rval, 0);
+ __ beq(CCR0, Lnull);
+ }
+ __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
+ // Mark the card.
+ if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
+ __ add(Rbase, offset, Rbase);
+ }
+ __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
+ if (check_null) { __ b(Ldone); }
+ }
+
+ if (Rval == noreg || check_null) { // Store null oop.
+ Register Rnull = Rval;
+ __ bind(Lnull);
+ if (Rval == noreg) {
+ Rnull = Rtmp1;
+ __ li(Rnull, 0);
+ }
+ if (UseCompressedOops) {
+ __ stw(Rnull, offset, Rbase);
+ } else {
+ __ std(Rnull, offset, Rbase);
+ }
+ }
+ __ bind(Ldone);
+ }
+ break;
+#endif // SERIALGC
+ case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableExtension:
+ {
+ Label Lnull, Ldone;
+ if (Rval != noreg) {
+ if (check_null) {
+ __ cmpdi(CCR0, Rval, 0);
+ __ beq(CCR0, Lnull);
+ }
+        __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
+ // Mark the card.
+ if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
+ __ add(Rbase, offset, Rbase);
+ }
+ __ card_write_barrier_post(Rbase, Rval, Rtmp1);
+ if (check_null) {
+ __ b(Ldone);
+ }
+ }
+
+ if (Rval == noreg || check_null) { // Store null oop.
+ Register Rnull = Rval;
+ __ bind(Lnull);
+ if (Rval == noreg) {
+ Rnull = Rtmp1;
+ __ li(Rnull, 0);
+ }
+ if (UseCompressedOops) {
+ __ stw(Rnull, offset, Rbase);
+ } else {
+ __ std(Rnull, offset, Rbase);
+ }
+ }
+ __ bind(Ldone);
+ }
+ break;
+ case BarrierSet::ModRef:
+ case BarrierSet::Other:
+ ShouldNotReachHere();
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+// ============================================================================
+// Platform-dependent initialization
+
+void TemplateTable::pd_initialize() {
+ // No ppc64 specific initialization.
+}
+
+Address TemplateTable::at_bcp(int offset) {
+ // Not used on ppc.
+ ShouldNotReachHere();
+ return Address();
+}
+
+// Patches the current bytecode (ptr to it located in bcp)
+// in the bytecode stream with a new one.
+void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
+ // With sharing on, may need to test method flag.
+ if (!RewriteBytecodes) return;
+ Label L_patch_done;
+
+ switch (new_bc) {
+ case Bytecodes::_fast_aputfield:
+ case Bytecodes::_fast_bputfield:
+ case Bytecodes::_fast_cputfield:
+ case Bytecodes::_fast_dputfield:
+ case Bytecodes::_fast_fputfield:
+ case Bytecodes::_fast_iputfield:
+ case Bytecodes::_fast_lputfield:
+ case Bytecodes::_fast_sputfield:
+ {
+ // We skip bytecode quickening for putfield instructions when
+ // the put_code written to the constant pool cache is zero.
+ // This is required so that every execution of this instruction
+ // calls out to InterpreterRuntime::resolve_get_put to do
+ // additional, required work.
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+ assert(load_bc_into_bc_reg, "we use bc_reg as temp");
+ __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
+ // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
+ __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
+ __ cmpwi(CCR0, Rnew_bc, 0);
+ __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
+ __ beq(CCR0, L_patch_done);
+ // __ isync(); // acquire not needed
+ break;
+ }
+
+ default:
+ assert(byte_no == -1, "sanity");
+ if (load_bc_into_bc_reg) {
+ __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
+ }
+ }
+
+ if (JvmtiExport::can_post_breakpoint()) {
+ Label L_fast_patch;
+ __ lbz(Rtemp, 0, R14_bcp);
+ __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
+ __ bne(CCR0, L_fast_patch);
+ // Perform the quickening, slowly, in the bowels of the breakpoint table.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
+ __ b(L_patch_done);
+ __ bind(L_fast_patch);
+ }
+
+ // Patch bytecode.
+ __ stb(Rnew_bc, 0, R14_bcp);
+
+ __ bind(L_patch_done);
+}
+
+// ============================================================================
+// Individual instructions
+
+void TemplateTable::nop() {
+ transition(vtos, vtos);
+ // Nothing to do.
+}
+
+void TemplateTable::shouldnotreachhere() {
+ transition(vtos, vtos);
+ __ stop("shouldnotreachhere bytecode");
+}
+
+void TemplateTable::aconst_null() {
+ transition(vtos, atos);
+ __ li(R17_tos, 0);
+}
+
+void TemplateTable::iconst(int value) {
+ transition(vtos, itos);
+  assert(value >= -1 && value <= 5, "sanity");
+ __ li(R17_tos, value);
+}
+
+void TemplateTable::lconst(int value) {
+ transition(vtos, ltos);
+  assert(value >= -1 && value <= 5, "sanity");
+ __ li(R17_tos, value);
+}
+
+void TemplateTable::fconst(int value) {
+ transition(vtos, ftos);
+ static float zero = 0.0;
+ static float one = 1.0;
+ static float two = 2.0;
+ switch (value) {
+ default: ShouldNotReachHere();
+ case 0: {
+ int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+ __ lfs(F15_ftos, simm16_offset, R11_scratch1);
+ break;
+ }
+ case 1: {
+ int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+ __ lfs(F15_ftos, simm16_offset, R11_scratch1);
+ break;
+ }
+ case 2: {
+ int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0);
+ __ lfs(F15_ftos, simm16_offset, R11_scratch1);
+ break;
+ }
+ }
+}
+
+void TemplateTable::dconst(int value) {
+ transition(vtos, dtos);
+ static double zero = 0.0;
+ static double one = 1.0;
+ switch (value) {
+ case 0: {
+ int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+ __ lfd(F15_ftos, simm16_offset, R11_scratch1);
+ break;
+ }
+ case 1: {
+ int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+ __ lfd(F15_ftos, simm16_offset, R11_scratch1);
+ break;
+ }
+ default: ShouldNotReachHere();
+ }
+}
+
+void TemplateTable::bipush() {
+ transition(vtos, itos);
+ __ lbz(R17_tos, 1, R14_bcp);
+ __ extsb(R17_tos, R17_tos);
+}
+
+void TemplateTable::sipush() {
+ transition(vtos, itos);
+ __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
+}
+
+void TemplateTable::ldc(bool wide) {
+ Register Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rcpool = R3_ARG1;
+
+ transition(vtos, vtos);
+ Label notInt, notClass, exit;
+
+ __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
+ if (wide) { // Read index.
+ __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
+ } else {
+ __ lbz(Rscratch1, 1, R14_bcp);
+ }
+
+ const int base_offset = ConstantPool::header_size() * wordSize;
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
+
+ // Get type from tags.
+ __ addi(Rscratch2, Rscratch2, tags_offset);
+ __ lbzx(Rscratch2, Rscratch2, Rscratch1);
+
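+  // CR bit numbering: bit = 4 * crX + (lt=0, gt=1, eq=2, so=3), so
+  // "CR0 eq" is bit 2 and "CR1 eq" is bit 4+2 in the cror/crnor below.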
+ __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
+ __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
+ __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+
+ // Resolved class - need to call vm to get java mirror of the class.
+ __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
+ __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
+ __ beq(CCR0, notClass);
+
+ __ li(R4, wide ? 1 : 0);
+ call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
+ __ push(atos);
+ __ b(exit);
+
+ __ align(32, 12);
+ __ bind(notClass);
+ __ addi(Rcpool, Rcpool, base_offset);
+ __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
+ __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
+ __ bne(CCR0, notInt);
+ __ isync(); // Order load of constant wrt. tags.
+ __ lwax(R17_tos, Rcpool, Rscratch1);
+ __ push(itos);
+ __ b(exit);
+
+ __ align(32, 12);
+ __ bind(notInt);
+#ifdef ASSERT
+ // String and Object are rewritten to fast_aldc
+ __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
+ __ asm_assert_eq("unexpected type", 0x8765);
+#endif
+ __ isync(); // Order load of constant wrt. tags.
+ __ lfsx(F15_ftos, Rcpool, Rscratch1);
+ __ push(ftos);
+
+ __ align(32, 12);
+ __ bind(exit);
+}
+
+// Fast path for caching oop constants.
+void TemplateTable::fast_aldc(bool wide) {
+ transition(vtos, atos);
+
+ int index_size = wide ? sizeof(u2) : sizeof(u1);
+ const Register Rscratch = R11_scratch1;
+ Label resolved;
+
+ // We are resolved if the resolved reference cache entry contains a
+ // non-null object (CallSite, etc.)
+ __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
+ __ load_resolved_reference_at_index(R17_tos, Rscratch);
+ __ cmpdi(CCR0, R17_tos, 0);
+ __ bne(CCR0, resolved);
+ __ load_const_optimized(R3_ARG1, (int)bytecode());
+
+ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
+
+ // First time invocation - must resolve first.
+ __ call_VM(R17_tos, entry, R3_ARG1);
+
+ __ align(32, 12);
+ __ bind(resolved);
+ __ verify_oop(R17_tos);
+}
+
+void TemplateTable::ldc2_w() {
+ transition(vtos, vtos);
+ Label Llong, Lexit;
+
+ Register Rindex = R11_scratch1,
+ Rcpool = R12_scratch2,
+ Rtag = R3_ARG1;
+ __ get_cpool_and_tags(Rcpool, Rtag);
+ __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
+
+ const int base_offset = ConstantPool::header_size() * wordSize;
+ const int tags_offset = Array<u1>::base_offset_in_bytes();
+ // Get type from tags.
+ __ addi(Rcpool, Rcpool, base_offset);
+ __ addi(Rtag, Rtag, tags_offset);
+
+ __ lbzx(Rtag, Rtag, Rindex);
+
+ __ sldi(Rindex, Rindex, LogBytesPerWord);
+ __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
+ __ bne(CCR0, Llong);
+ // A double can be placed at word-aligned locations in the constant pool.
+ // Check out Conversions.java for an example.
+ // Also ConstantPool::header_size() is 20, which makes it very difficult
+ // to double-align double on the constant pool. SG, 11/7/97
+ __ isync(); // Order load of constant wrt. tags.
+ __ lfdx(F15_ftos, Rcpool, Rindex);
+ __ push(dtos);
+ __ b(Lexit);
+
+ __ bind(Llong);
+ __ isync(); // Order load of constant wrt. tags.
+ __ ldx(R17_tos, Rcpool, Rindex);
+ __ push(ltos);
+
+ __ bind(Lexit);
+}
+
+// Get the locals index located in the bytecode stream at bcp + offset.
+void TemplateTable::locals_index(Register Rdst, int offset) {
+ __ lbz(Rdst, offset, R14_bcp);
+}
+
+void TemplateTable::iload() {
+ transition(vtos, itos);
+
+ // Get the local value into tos
+ const Register Rindex = R22_tmp2;
+ locals_index(Rindex);
+
+ // Rewrite iload,iload pair into fast_iload2
+ // iload,caload pair into fast_icaload
+ if (RewriteFrequentPairs) {
+ Label Lrewrite, Ldone;
+ Register Rnext_byte = R3_ARG1,
+ Rrewrite_to = R6_ARG4,
+ Rscratch = R11_scratch1;
+
+ // get next byte
+ __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
+
+    // If the next bytecode is _iload, wait to rewrite: we only want to
+    // rewrite the last two iloads of a run. Seeing _fast_iload means the
+    // bytecode after it is neither an iload nor a caload, so the current
+    // and the next iload form the final pair.
+ __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
+ __ beq(CCR0, Ldone);
+
+ __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
+ __ beq(CCR1, Lrewrite);
+
+ __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
+ __ beq(CCR0, Lrewrite);
+
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
+
+ __ bind(Lrewrite);
+ patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
+ __ bind(Ldone);
+ }
+
+ __ load_local_int(R17_tos, Rindex, Rindex);
+}
+
+// Load 2 integers in a row without dispatching
+void TemplateTable::fast_iload2() {
+ transition(vtos, itos);
+
+ __ lbz(R3_ARG1, 1, R14_bcp);
+ __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);
+
+ __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
+ __ load_local_int(R17_tos, R12_scratch2, R17_tos);
+ __ push_i(R3_ARG1);
+}
+
+void TemplateTable::fast_iload() {
+ transition(vtos, itos);
+ // Get the local value into tos
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ load_local_int(R17_tos, Rindex, Rindex);
+}
+
+// Load a local variable type long from locals area to TOS cache register.
+// Local index resides in bytecodestream.
+void TemplateTable::lload() {
+ transition(vtos, ltos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ load_local_long(R17_tos, Rindex, Rindex);
+}
+
+void TemplateTable::fload() {
+ transition(vtos, ftos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ load_local_float(F15_ftos, Rindex, Rindex);
+}
+
+void TemplateTable::dload() {
+ transition(vtos, dtos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ load_local_double(F15_ftos, Rindex, Rindex);
+}
+
+void TemplateTable::aload() {
+ transition(vtos, atos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ load_local_ptr(R17_tos, Rindex, Rindex);
+}
+
+void TemplateTable::locals_index_wide(Register Rdst) {
+ // Offset is 2, not 1, because Lbcp points to wide prefix code.
+ __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
+}
+
+void TemplateTable::wide_iload() {
+ // Get the local value into tos.
+
+ const Register Rindex = R11_scratch1;
+ locals_index_wide(Rindex);
+ __ load_local_int(R17_tos, Rindex, Rindex);
+}
+
+void TemplateTable::wide_lload() {
+ transition(vtos, ltos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index_wide(Rindex);
+ __ load_local_long(R17_tos, Rindex, Rindex);
+}
+
+void TemplateTable::wide_fload() {
+ transition(vtos, ftos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index_wide(Rindex);
+ __ load_local_float(F15_ftos, Rindex, Rindex);
+}
+
+void TemplateTable::wide_dload() {
+ transition(vtos, dtos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index_wide(Rindex);
+ __ load_local_double(F15_ftos, Rindex, Rindex);
+}
+
+void TemplateTable::wide_aload() {
+ transition(vtos, atos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index_wide(Rindex);
+ __ load_local_ptr(R17_tos, Rindex, Rindex);
+}
+
+void TemplateTable::iaload() {
+ transition(itos, itos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
+ __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
+}
+
+void TemplateTable::laload() {
+ transition(itos, ltos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
+ __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
+}
+
+void TemplateTable::faload() {
+ transition(itos, ftos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
+ __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
+}
+
+void TemplateTable::daload() {
+ transition(itos, dtos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
+ __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
+}
+
+void TemplateTable::aaload() {
+ transition(itos, atos);
+
+ // tos: index
+  // result tos: element
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
+ __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
+ __ verify_oop(R17_tos);
+ //__ dcbt(R17_tos); // prefetch
+}
+
+void TemplateTable::baload() {
+ transition(itos, itos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
+ __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
+ __ extsb(R17_tos, R17_tos);
+}
+
+void TemplateTable::caload() {
+ transition(itos, itos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R5_ARG3;
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
+ __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
+}
+
+// Iload followed by caload frequent pair.
+void TemplateTable::fast_icaload() {
+ transition(vtos, itos);
+
+ const Register Rload_addr = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rtemp = R11_scratch1;
+
+ locals_index(R17_tos);
+ __ load_local_int(R17_tos, Rtemp, R17_tos);
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
+ __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
+}
+
+void TemplateTable::saload() {
+ transition(itos, itos);
+
+ const Register Rload_addr = R11_scratch1,
+ Rarray = R12_scratch2,
+ Rtemp = R3_ARG1;
+ __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
+ __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
+}
+
+void TemplateTable::iload(int n) {
+ transition(vtos, itos);
+
+ __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
+}
+
+void TemplateTable::lload(int n) {
+ transition(vtos, ltos);
+
+ __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
+}
+
+void TemplateTable::fload(int n) {
+ transition(vtos, ftos);
+
+ __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
+}
+
+void TemplateTable::dload(int n) {
+ transition(vtos, dtos);
+
+ __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
+}
+
+void TemplateTable::aload(int n) {
+ transition(vtos, atos);
+
+ __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
+}
+
+void TemplateTable::aload_0() {
+ transition(vtos, atos);
+ // According to bytecode histograms, the pairs:
+ //
+ // _aload_0, _fast_igetfield
+ // _aload_0, _fast_agetfield
+ // _aload_0, _fast_fgetfield
+ //
+ // occur frequently. If RewriteFrequentPairs is set, the (slow)
+ // _aload_0 bytecode checks if the next bytecode is either
+ // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
+ // rewrites the current bytecode into a pair bytecode; otherwise it
+  // rewrites the current bytecode into _fast_aload_0, which doesn't do
+  // the pair check anymore.
+ //
+ // Note: If the next bytecode is _getfield, the rewrite must be
+ // delayed, otherwise we may miss an opportunity for a pair.
+ //
+ // Also rewrite frequent pairs
+ // aload_0, aload_1
+ // aload_0, iload_1
+  // These bytecode pairs need only a small amount of code, which makes
+  // them the most profitable to rewrite.
+
+ if (RewriteFrequentPairs) {
+
+ Label Lrewrite, Ldont_rewrite;
+ Register Rnext_byte = R3_ARG1,
+ Rrewrite_to = R6_ARG4,
+ Rscratch = R11_scratch1;
+
+ // Get next byte.
+ __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
+
+ // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
+ __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
+ __ beq(CCR0, Ldont_rewrite);
+
+ __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
+ __ beq(CCR1, Lrewrite);
+
+ __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
+ __ beq(CCR0, Lrewrite);
+
+ __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
+ __ beq(CCR1, Lrewrite);
+
+ __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);
+
+ __ bind(Lrewrite);
+ patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
+ __ bind(Ldont_rewrite);
+ }
+
+ // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
+ aload(0);
+}
+
+void TemplateTable::istore() {
+ transition(itos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ store_local_int(R17_tos, Rindex);
+}
+
+void TemplateTable::lstore() {
+ transition(ltos, vtos);
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ store_local_long(R17_tos, Rindex);
+}
+
+void TemplateTable::fstore() {
+ transition(ftos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ store_local_float(F15_ftos, Rindex);
+}
+
+void TemplateTable::dstore() {
+ transition(dtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ locals_index(Rindex);
+ __ store_local_double(F15_ftos, Rindex);
+}
+
+void TemplateTable::astore() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ __ pop_ptr();
+ __ verify_oop_or_return_address(R17_tos, Rindex);
+ locals_index(Rindex);
+ __ store_local_ptr(R17_tos, Rindex);
+}
+
+void TemplateTable::wide_istore() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ __ pop_i();
+ locals_index_wide(Rindex);
+ __ store_local_int(R17_tos, Rindex);
+}
+
+void TemplateTable::wide_lstore() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ __ pop_l();
+ locals_index_wide(Rindex);
+ __ store_local_long(R17_tos, Rindex);
+}
+
+void TemplateTable::wide_fstore() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ __ pop_f();
+ locals_index_wide(Rindex);
+ __ store_local_float(F15_ftos, Rindex);
+}
+
+void TemplateTable::wide_dstore() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ __ pop_d();
+ locals_index_wide(Rindex);
+ __ store_local_double(F15_ftos, Rindex);
+}
+
+void TemplateTable::wide_astore() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1;
+ __ pop_ptr();
+ __ verify_oop_or_return_address(R17_tos, Rindex);
+ locals_index_wide(Rindex);
+ __ store_local_ptr(R17_tos, Rindex);
+}
+
+void TemplateTable::iastore() {
+ transition(itos, vtos);
+
+ const Register Rindex = R3_ARG1,
+ Rstore_addr = R4_ARG2,
+ Rarray = R5_ARG3,
+ Rtemp = R6_ARG4;
+ __ pop_i(Rindex);
+ __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
+ __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
+}
+
+void TemplateTable::lastore() {
+ transition(ltos, vtos);
+
+ const Register Rindex = R3_ARG1,
+ Rstore_addr = R4_ARG2,
+ Rarray = R5_ARG3,
+ Rtemp = R6_ARG4;
+ __ pop_i(Rindex);
+ __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
+ __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
+}
+
+void TemplateTable::fastore() {
+ transition(ftos, vtos);
+
+ const Register Rindex = R3_ARG1,
+ Rstore_addr = R4_ARG2,
+ Rarray = R5_ARG3,
+ Rtemp = R6_ARG4;
+ __ pop_i(Rindex);
+ __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
+ __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
+}
+
+void TemplateTable::dastore() {
+ transition(dtos, vtos);
+
+ const Register Rindex = R3_ARG1,
+ Rstore_addr = R4_ARG2,
+ Rarray = R5_ARG3,
+ Rtemp = R6_ARG4;
+ __ pop_i(Rindex);
+ __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
+ __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
+}
+
+// Pop 3 values from the stack and...
+void TemplateTable::aastore() {
+ transition(vtos, vtos);
+
+ Label Lstore_ok, Lis_null, Ldone;
+ const Register Rindex = R3_ARG1,
+ Rarray = R4_ARG2,
+ Rscratch = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rarray_klass = R5_ARG3,
+ Rarray_element_klass = Rarray_klass,
+ Rvalue_klass = R6_ARG4,
+ Rstore_addr = R31; // Use register which survives VM call.
+
+ __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
+ __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
+ __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp); // Get array.
+
+ __ verify_oop(R17_tos);
+ __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
+ // Rindex is dead!
+ Register Rscratch3 = Rindex;
+
+ // Do array store check - check for NULL value first.
+ __ cmpdi(CCR0, R17_tos, 0);
+ __ beq(CCR0, Lis_null);
+
+ __ load_klass(Rarray_klass, Rarray);
+ __ load_klass(Rvalue_klass, R17_tos);
+
+ // Do fast instanceof cache test.
+ __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);
+
+ // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
+ __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);
+
+ // Fell through: subtype check failed => throw an exception.
+ __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
+ __ mtctr(R11_scratch1);
+ __ bctr();
+
+ __ bind(Lis_null);
+ do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
+ Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
+ __ profile_null_seen(Rscratch, Rscratch2);
+ __ b(Ldone);
+
+ // Store is OK.
+ __ bind(Lstore_ok);
+ do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
+ Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
+
+ __ bind(Ldone);
+ // Adjust sp (pops array, index and value).
+ __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
+}
+
+void TemplateTable::bastore() {
+ transition(itos, vtos);
+
+ const Register Rindex = R11_scratch1,
+ Rarray = R12_scratch2,
+ Rscratch = R3_ARG1;
+ __ pop_i(Rindex);
+ // tos: val
+ // Rarray: array ptr (popped by index_check)
+ __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
+ __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
+}
+
+void TemplateTable::castore() {
+ transition(itos, vtos);
+
+ const Register Rindex = R11_scratch1,
+ Rarray = R12_scratch2,
+ Rscratch = R3_ARG1;
+ __ pop_i(Rindex);
+ // tos: val
+ // Rarray: array ptr (popped by index_check)
+ __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
+ __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
+}
+
+void TemplateTable::sastore() {
+ castore();
+}
+
+void TemplateTable::istore(int n) {
+ transition(itos, vtos);
+ __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
+}
+
+void TemplateTable::lstore(int n) {
+ transition(ltos, vtos);
+ __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
+}
+
+void TemplateTable::fstore(int n) {
+ transition(ftos, vtos);
+ __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
+}
+
+void TemplateTable::dstore(int n) {
+ transition(dtos, vtos);
+ __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
+}
+
+void TemplateTable::astore(int n) {
+ transition(vtos, vtos);
+
+ __ pop_ptr();
+ __ verify_oop_or_return_address(R17_tos, R11_scratch1);
+ __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
+}
+
+void TemplateTable::pop() {
+ transition(vtos, vtos);
+
+ __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
+}
+
+void TemplateTable::pop2() {
+ transition(vtos, vtos);
+
+ __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
+}
+
+void TemplateTable::dup() {
+ transition(vtos, vtos);
+
+ __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
+ __ push_ptr(R11_scratch1);
+}
+
+void TemplateTable::dup_x1() {
+ transition(vtos, vtos);
+
+ Register Ra = R11_scratch1,
+ Rb = R12_scratch2;
+ // stack: ..., a, b
+ __ ld(Rb, Interpreter::stackElementSize, R15_esp);
+ __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
+ __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
+ __ std(Ra, Interpreter::stackElementSize, R15_esp);
+ __ push_ptr(Rb);
+ // stack: ..., b, a, b
+}
+
+void TemplateTable::dup_x2() {
+ transition(vtos, vtos);
+
+ Register Ra = R11_scratch1,
+ Rb = R12_scratch2,
+ Rc = R3_ARG1;
+
+ // stack: ..., a, b, c
+ __ ld(Rc, Interpreter::stackElementSize, R15_esp); // load c
+ __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
+ __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
+ __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); // load b
+ // stack: ..., c, b, c
+ __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
+ // stack: ..., c, a, c
+ __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in c
+ __ push_ptr(Rc); // push c
+ // stack: ..., c, a, b, c
+}
+
+void TemplateTable::dup2() {
+ transition(vtos, vtos);
+
+ Register Ra = R11_scratch1,
+ Rb = R12_scratch2;
+ // stack: ..., a, b
+ __ ld(Rb, Interpreter::stackElementSize, R15_esp);
+ __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
+ __ push_2ptrs(Ra, Rb);
+ // stack: ..., a, b, a, b
+}
+
+void TemplateTable::dup2_x1() {
+ transition(vtos, vtos);
+
+ Register Ra = R11_scratch1,
+ Rb = R12_scratch2,
+ Rc = R3_ARG1;
+ // stack: ..., a, b, c
+ __ ld(Rc, Interpreter::stackElementSize, R15_esp);
+ __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
+ __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
+ __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
+ __ std(Ra, Interpreter::stackElementSize, R15_esp);
+ __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
+ // stack: ..., b, c, a
+ __ push_2ptrs(Rb, Rc);
+ // stack: ..., b, c, a, b, c
+}
+
+void TemplateTable::dup2_x2() {
+ transition(vtos, vtos);
+
+ Register Ra = R11_scratch1,
+ Rb = R12_scratch2,
+ Rc = R3_ARG1,
+ Rd = R4_ARG2;
+ // stack: ..., a, b, c, d
+ __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
+ __ ld(Rd, Interpreter::stackElementSize, R15_esp);
+ __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in d
+ __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
+ __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
+ __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
+ __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
+ __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
+ // stack: ..., c, d, a, b
+ __ push_2ptrs(Rc, Rd);
+ // stack: ..., c, d, a, b, c, d
+}
+
+void TemplateTable::swap() {
+ transition(vtos, vtos);
+ // stack: ..., a, b
+
+ Register Ra = R11_scratch1,
+ Rb = R12_scratch2;
+ // stack: ..., a, b
+ __ ld(Rb, Interpreter::stackElementSize, R15_esp);
+ __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
+ __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
+ __ std(Ra, Interpreter::stackElementSize, R15_esp);
+ // stack: ..., b, a
+}
+
+void TemplateTable::iop2(Operation op) {
+ transition(itos, itos);
+
+ Register Rscratch = R11_scratch1;
+
+ __ pop_i(Rscratch);
+ // tos = number of bits to shift
+ // Rscratch = value to shift
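+  // For the shifts, only the low 5 bits of the count are significant
+  // (JLS 15.19): rldicl(tos, tos, 0, 64-5) clears all higher bits.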
+ switch (op) {
+ case add: __ add(R17_tos, Rscratch, R17_tos); break;
+ case sub: __ sub(R17_tos, Rscratch, R17_tos); break;
+ case mul: __ mullw(R17_tos, Rscratch, R17_tos); break;
+ case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
+ case _or: __ orr(R17_tos, Rscratch, R17_tos); break;
+ case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
+ case shl: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
+ case shr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
+ case ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
+ default: ShouldNotReachHere();
+ }
+}
+
+void TemplateTable::lop2(Operation op) {
+ transition(ltos, ltos);
+
+ Register Rscratch = R11_scratch1;
+ __ pop_l(Rscratch);
+ switch (op) {
+ case add: __ add(R17_tos, Rscratch, R17_tos); break;
+ case sub: __ sub(R17_tos, Rscratch, R17_tos); break;
+ case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
+ case _or: __ orr(R17_tos, Rscratch, R17_tos); break;
+ case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
+ default: ShouldNotReachHere();
+ }
+}
+
+void TemplateTable::idiv() {
+ transition(itos, itos);
+
+ Label Lnormal, Lexception, Ldone;
+ Register Rdividend = R11_scratch1; // Used by irem.
+
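+  // divisor + 1 maps {-1, 0, 1} to {0, 1, 2}, so one unsigned compare
+  // with 2 filters out all three special-case divisors at once.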
+ __ addi(R0, R17_tos, 1);
+ __ cmplwi(CCR0, R0, 2);
+ __ bgt(CCR0, Lnormal); // divisor <-1 or >1
+
+ __ cmpwi(CCR1, R17_tos, 0);
+ __ beq(CCR1, Lexception); // divisor == 0
+
+ __ pop_i(Rdividend);
+ __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
+ __ b(Ldone);
+
+ __ bind(Lexception);
+ __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
+ __ mtctr(R11_scratch1);
+ __ bctr();
+
+ __ align(32, 12);
+ __ bind(Lnormal);
+ __ pop_i(Rdividend);
+ __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
+ __ bind(Ldone);
+}
+
+void TemplateTable::irem() {
+ transition(itos, itos);
+
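+  // rem = dividend - (dividend / divisor) * divisor. idiv() pops the
+  // dividend into R11_scratch1 and leaves the quotient in R17_tos.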
+ __ mr(R12_scratch2, R17_tos);
+ idiv();
+ __ mullw(R17_tos, R17_tos, R12_scratch2);
+ __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
+}
+
+void TemplateTable::lmul() {
+ transition(ltos, ltos);
+
+ __ pop_l(R11_scratch1);
+ __ mulld(R17_tos, R11_scratch1, R17_tos);
+}
+
+void TemplateTable::ldiv() {
+ transition(ltos, ltos);
+
+ Label Lnormal, Lexception, Ldone;
+ Register Rdividend = R11_scratch1; // Used by lrem.
+
+ __ addi(R0, R17_tos, 1);
+ __ cmpldi(CCR0, R0, 2);
+ __ bgt(CCR0, Lnormal); // divisor <-1 or >1
+
+ __ cmpdi(CCR1, R17_tos, 0);
+ __ beq(CCR1, Lexception); // divisor == 0
+
+ __ pop_l(Rdividend);
+ __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
+ __ b(Ldone);
+
+ __ bind(Lexception);
+ __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
+ __ mtctr(R11_scratch1);
+ __ bctr();
+
+ __ align(32, 12);
+ __ bind(Lnormal);
+ __ pop_l(Rdividend);
+ __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
+ __ bind(Ldone);
+}
+
+void TemplateTable::lrem() {
+ transition(ltos, ltos);
+
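+  // Same identity as in irem, on 64-bit operands; ldiv() leaves the
+  // dividend in R11_scratch1 and the quotient in R17_tos.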
+ __ mr(R12_scratch2, R17_tos);
+ ldiv();
+ __ mulld(R17_tos, R17_tos, R12_scratch2);
+ __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
+}
+
+void TemplateTable::lshl() {
+ transition(itos, ltos);
+
+  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 low-order bits of the shift count (JLS 15.19).
+ __ pop_l(R11_scratch1);
+ __ sld(R17_tos, R11_scratch1, R17_tos);
+}
+
+void TemplateTable::lshr() {
+ transition(itos, ltos);
+
+  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 low-order bits of the shift count (JLS 15.19).
+ __ pop_l(R11_scratch1);
+ __ srad(R17_tos, R11_scratch1, R17_tos);
+}
+
+void TemplateTable::lushr() {
+ transition(itos, ltos);
+
+  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 low-order bits of the shift count (JLS 15.19).
+ __ pop_l(R11_scratch1);
+ __ srd(R17_tos, R11_scratch1, R17_tos);
+}
+
+void TemplateTable::fop2(Operation op) {
+ transition(ftos, ftos);
+
+ switch (op) {
+ case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case rem:
+ __ pop_f(F1_ARG1);
+ __ fmr(F2_ARG2, F15_ftos);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
+ __ fmr(F15_ftos, F1_RET);
+ break;
+
+ default: ShouldNotReachHere();
+ }
+}
+
+void TemplateTable::dop2(Operation op) {
+ transition(dtos, dtos);
+
+ switch (op) {
+ case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
+ case rem:
+ __ pop_d(F1_ARG1);
+ __ fmr(F2_ARG2, F15_ftos);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
+ __ fmr(F15_ftos, F1_RET);
+ break;
+
+ default: ShouldNotReachHere();
+ }
+}
+
+// Negate the value in the TOS cache.
+void TemplateTable::ineg() {
+ transition(itos, itos);
+
+ __ neg(R17_tos, R17_tos);
+}
+
+// Negate the value in the TOS cache.
+void TemplateTable::lneg() {
+ transition(ltos, ltos);
+
+ __ neg(R17_tos, R17_tos);
+}
+
+void TemplateTable::fneg() {
+ transition(ftos, ftos);
+
+ __ fneg(F15_ftos, F15_ftos);
+}
+
+void TemplateTable::dneg() {
+ transition(dtos, dtos);
+
+ __ fneg(F15_ftos, F15_ftos);
+}
+
+// Increments a local variable in place.
+void TemplateTable::iinc() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R11_scratch1,
+ Rincrement = R0,
+ Rvalue = R12_scratch2;
+
+ locals_index(Rindex); // Load locals index from bytecode stream.
+ __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
+ __ extsb(Rincrement, Rincrement);
+
+ __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
+
+ __ add(Rvalue, Rincrement, Rvalue);
+ __ stw(Rvalue, 0, Rindex);
+}
+
+void TemplateTable::wide_iinc() {
+ transition(vtos, vtos);
+
+ Register Rindex = R11_scratch1,
+ Rlocals_addr = Rindex,
+ Rincr = R12_scratch2;
+ locals_index_wide(Rindex);
+ __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
+ __ load_local_int(R17_tos, Rlocals_addr, Rindex);
+ __ add(R17_tos, Rincr, R17_tos);
+ __ stw(R17_tos, 0, Rlocals_addr);
+}
+
+void TemplateTable::convert() {
+  // %%%%% Factor this first part across platforms
+#ifdef ASSERT
+ TosState tos_in = ilgl;
+ TosState tos_out = ilgl;
+ switch (bytecode()) {
+ case Bytecodes::_i2l: // fall through
+ case Bytecodes::_i2f: // fall through
+ case Bytecodes::_i2d: // fall through
+ case Bytecodes::_i2b: // fall through
+ case Bytecodes::_i2c: // fall through
+ case Bytecodes::_i2s: tos_in = itos; break;
+ case Bytecodes::_l2i: // fall through
+ case Bytecodes::_l2f: // fall through
+ case Bytecodes::_l2d: tos_in = ltos; break;
+ case Bytecodes::_f2i: // fall through
+ case Bytecodes::_f2l: // fall through
+ case Bytecodes::_f2d: tos_in = ftos; break;
+ case Bytecodes::_d2i: // fall through
+ case Bytecodes::_d2l: // fall through
+ case Bytecodes::_d2f: tos_in = dtos; break;
+ default : ShouldNotReachHere();
+ }
+ switch (bytecode()) {
+ case Bytecodes::_l2i: // fall through
+ case Bytecodes::_f2i: // fall through
+ case Bytecodes::_d2i: // fall through
+ case Bytecodes::_i2b: // fall through
+ case Bytecodes::_i2c: // fall through
+ case Bytecodes::_i2s: tos_out = itos; break;
+ case Bytecodes::_i2l: // fall through
+ case Bytecodes::_f2l: // fall through
+ case Bytecodes::_d2l: tos_out = ltos; break;
+ case Bytecodes::_i2f: // fall through
+ case Bytecodes::_l2f: // fall through
+ case Bytecodes::_d2f: tos_out = ftos; break;
+ case Bytecodes::_i2d: // fall through
+ case Bytecodes::_l2d: // fall through
+ case Bytecodes::_f2d: tos_out = dtos; break;
+ default : ShouldNotReachHere();
+ }
+ transition(tos_in, tos_out);
+#endif
+
+ // Conversion
+ Label done;
+ switch (bytecode()) {
+ case Bytecodes::_i2l:
+ __ extsw(R17_tos, R17_tos);
+ break;
+
+ case Bytecodes::_l2i:
+ // Nothing to do, we'll continue to work with the lower bits.
+ break;
+
+ case Bytecodes::_i2b:
+ __ extsb(R17_tos, R17_tos);
+ break;
+
+ case Bytecodes::_i2c:
+ __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
+ break;
+
+ case Bytecodes::_i2s:
+ __ extsh(R17_tos, R17_tos);
+ break;
+
+ case Bytecodes::_i2d:
+      __ extsw(R17_tos, R17_tos);
+      // Fall through to _l2d.
+ case Bytecodes::_l2d:
+ __ push_l_pop_d();
+ __ fcfid(F15_ftos, F15_ftos);
+ break;
+
+ case Bytecodes::_i2f:
+ __ extsw(R17_tos, R17_tos);
+ __ push_l_pop_d();
+ if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
+ // Comment: alternatively, load with sign extend could be done by lfiwax.
+ __ fcfids(F15_ftos, F15_ftos);
+ } else {
+ __ fcfid(F15_ftos, F15_ftos);
+ __ frsp(F15_ftos, F15_ftos);
+ }
+ break;
+
+ case Bytecodes::_l2f:
+ if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
+ __ push_l_pop_d();
+ __ fcfids(F15_ftos, F15_ftos);
+ } else {
+ // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
+ __ mr(R3_ARG1, R17_tos);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
+ __ fmr(F15_ftos, F1_RET);
+ }
+ break;
+
+ case Bytecodes::_f2d:
+ // empty
+ break;
+
+ case Bytecodes::_d2f:
+ __ frsp(F15_ftos, F15_ftos);
+ break;
+
+ case Bytecodes::_d2i:
+ case Bytecodes::_f2i:
+ __ fcmpu(CCR0, F15_ftos, F15_ftos);
+      __ li(R17_tos, 0); // 0 in case of NaN.
+ __ bso(CCR0, done);
+ __ fctiwz(F15_ftos, F15_ftos);
+ __ push_d_pop_l();
+ break;
+
+ case Bytecodes::_d2l:
+ case Bytecodes::_f2l:
+ __ fcmpu(CCR0, F15_ftos, F15_ftos);
+      __ li(R17_tos, 0); // 0 in case of NaN.
+ __ bso(CCR0, done);
+ __ fctidz(F15_ftos, F15_ftos);
+ __ push_d_pop_l();
+ break;
+
+ default: ShouldNotReachHere();
+ }
+ __ bind(done);
+}
+
+// Long compare
+void TemplateTable::lcmp() {
+ transition(ltos, itos);
+
+ const Register Rscratch = R11_scratch1;
+ __ pop_l(Rscratch); // first operand, deeper in stack
+
+ __ cmpd(CCR0, Rscratch, R17_tos); // compare
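+  // After mfcr, CR0's lt and gt flags are the two most significant bits
+  // of the low 32-bit word: srwi by 30 moves them into the two low bits,
+  // srawi by 31 sign-extends the lt bit to -1/0, and or-ing both yields
+  // -1, 0 or 1.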
+ __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
+ __ srwi(Rscratch, R17_tos, 30);
+ __ srawi(R17_tos, R17_tos, 31);
+ __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
+}
+
+// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
+// unordered_result == -1 => fcmpl or dcmpl
+// unordered_result == 1 => fcmpg or dcmpg
+void TemplateTable::float_cmp(bool is_float, int unordered_result) {
+ const FloatRegister Rfirst = F0_SCRATCH,
+ Rsecond = F15_ftos;
+ const Register Rscratch = R11_scratch1;
+
+ if (is_float) {
+ __ pop_f(Rfirst);
+ } else {
+ __ pop_d(Rfirst);
+ }
+
+ Label Lunordered, Ldone;
+ __ fcmpu(CCR0, Rfirst, Rsecond); // compare
+ if (unordered_result) {
+ __ bso(CCR0, Lunordered);
+ }
+ __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
+ __ srwi(Rscratch, R17_tos, 30);
+ __ srawi(R17_tos, R17_tos, 31);
+ __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
+ if (unordered_result) {
+ __ b(Ldone);
+ __ bind(Lunordered);
+ __ load_const_optimized(R17_tos, unordered_result);
+ }
+ __ bind(Ldone);
+}
+
+// Branch_conditional which takes TemplateTable::Condition.
+void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
+ bool positive = false;
+ Assembler::Condition cond = Assembler::equal;
+ switch (cc) {
+ case TemplateTable::equal: positive = true ; cond = Assembler::equal ; break;
+ case TemplateTable::not_equal: positive = false; cond = Assembler::equal ; break;
+ case TemplateTable::less: positive = true ; cond = Assembler::less ; break;
+ case TemplateTable::less_equal: positive = false; cond = Assembler::greater; break;
+ case TemplateTable::greater: positive = true ; cond = Assembler::greater; break;
+ case TemplateTable::greater_equal: positive = false; cond = Assembler::less ; break;
+ default: ShouldNotReachHere();
+ }
+  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0; // Branch if the CR bit is 1 resp. 0.
+  int bi = Assembler::bi0(crx, cond); // Index of the CR bit to test.
+ __ bc(bo, bi, L);
+}
+
+void TemplateTable::branch(bool is_jsr, bool is_wide) {
+
+ // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
+ __ verify_thread();
+
+ const Register Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rscratch3 = R3_ARG1,
+ R4_counters = R4_ARG2,
+ bumped_count = R31,
+ Rdisp = R22_tmp2;
+
+ __ profile_taken_branch(Rscratch1, bumped_count);
+
+ // Get (wide) offset.
+ if (is_wide) {
+ __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
+ } else {
+ __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
+ }
+
+ // --------------------------------------------------------------------------
+ // Handle all the JSR stuff here, then exit.
+ // It's much shorter and cleaner than intermingling with the
+ // non-JSR normal-branch stuff occurring below.
+ if (is_jsr) {
+    // Compute return address as bci in R17_tos.
+ __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
+ __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
+ __ subf(R17_tos, Rscratch1, Rscratch2);
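+    // R17_tos = bcp + len - (ConstMethod* + codes_offset()), i.e. the
+    // bci of the bytecode following the jsr (len is 3, or 5 for jsr_w).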
+
+ // Bump bcp to target of JSR.
+ __ add(R14_bcp, Rdisp, R14_bcp);
+ // Push returnAddress for "ret" on stack.
+ __ push_ptr(R17_tos);
+ // And away we go!
+ __ dispatch_next(vtos);
+ return;
+ }
+
+ // --------------------------------------------------------------------------
+ // Normal (non-jsr) branch handling
+
+ const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
+ if (increment_invocation_counter_for_backward_branches) {
+
+ Label Lforward;
+ __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
+
+ // Check branch direction.
+ __ cmpdi(CCR0, Rdisp, 0);
+ __ bgt(CCR0, Lforward);
+
+ __ get_method_counters(R19_method, R4_counters, Lforward);
+
+ if (TieredCompilation) {
+ Label Lno_mdo, Loverflow;
+ const int increment = InvocationCounter::count_increment;
+ const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
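+      // Notify the VM only on every 2^Tier0BackedgeNotifyFreqLog-th
+      // backedge: the branches to Lforward below are taken unless the
+      // masked counter value is zero.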
+ if (ProfileInterpreter) {
+ Register Rmdo = Rscratch1;
+
+ // If no method data exists, go to profile_continue.
+ __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
+ __ cmpdi(CCR0, Rmdo, 0);
+ __ beq(CCR0, Lno_mdo);
+
+ // Increment backedge counter in the MDO.
+ const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+ __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
+ __ load_const_optimized(Rscratch3, mask, R0);
+ __ addi(Rscratch2, Rscratch2, increment);
+ __ stw(Rscratch2, mdo_bc_offs, Rmdo);
+ __ and_(Rscratch3, Rscratch2, Rscratch3);
+ __ bne(CCR0, Lforward);
+ __ b(Loverflow);
+ }
+
+      // If there's no MDO, increment the counter in the MethodCounters.
+ const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
+ __ bind(Lno_mdo);
+ __ lwz(Rscratch2, mo_bc_offs, R4_counters);
+ __ load_const_optimized(Rscratch3, mask, R0);
+ __ addi(Rscratch2, Rscratch2, increment);
+      __ stw(Rscratch2, mo_bc_offs, R4_counters); // Write back to the MethodCounters it was loaded from.
+ __ and_(Rscratch3, Rscratch2, Rscratch3);
+ __ bne(CCR0, Lforward);
+
+ __ bind(Loverflow);
+
+ // Notify point for loop, pass branch bytecode.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);
+
+ // Was an OSR adapter generated?
+      // R3_RET = osr nmethod
+ __ cmpdi(CCR0, R3_RET, 0);
+ __ beq(CCR0, Lforward);
+
+ // Has the nmethod been invalidated already?
+ __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
+ __ cmpwi(CCR0, R0, InvalidOSREntryBci);
+ __ beq(CCR0, Lforward);
+
+ // Migrate the interpreter frame off of the stack.
+ // We can use all registers because we will not return to interpreter from this point.
+
+ // Save nmethod.
+ const Register osr_nmethod = R31;
+ __ mr(osr_nmethod, R3_RET);
+ __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
+ __ reset_last_Java_frame();
+ // OSR buffer is in ARG1.
+
+ // Remove the interpreter frame.
+ __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
+
+ // Jump to the osr code.
+ __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
+ __ mtlr(R0);
+ __ mtctr(R11_scratch1);
+ __ bctr();
+
+ } else {
+
+ const Register invoke_ctr = Rscratch1;
+ // Update Backedge branch separately from invocations.
+ __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
+
+ if (ProfileInterpreter) {
+ __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
+ if (UseOnStackReplacement) {
+ __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
+ }
+ } else {
+ if (UseOnStackReplacement) {
+ __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
+ }
+ }
+ }
+
+ __ bind(Lforward);
+
+ } else {
+ // Bump bytecode pointer by displacement (take the branch).
+ __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
+ }
+ // Continue with bytecode @ target.
+ // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
+ // %%%%% and changing dispatch_next to dispatch_only.
+ __ dispatch_next(vtos);
+}
+
+// Helper function for if_cmp* methods below.
+// Factored out common compare and branch code.
+void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
+ Label Lnot_taken;
+ // Note: The condition code we get is the condition under which we
+  // *fall through*! So we have to invert the CC here.
+
+ if (is_jint) {
+ if (cmp0) {
+ __ cmpwi(CCR0, Rfirst, 0);
+ } else {
+ __ cmpw(CCR0, Rfirst, Rsecond);
+ }
+ } else {
+ if (cmp0) {
+ __ cmpdi(CCR0, Rfirst, 0);
+ } else {
+ __ cmpd(CCR0, Rfirst, Rsecond);
+ }
+ }
+ branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
+
+  // Condition is false => Jump!
+ branch(false, false);
+
+ // Condition is not true => Continue.
+ __ align(32, 12);
+ __ bind(Lnot_taken);
+ __ profile_not_taken_branch(Rscratch1, Rscratch2);
+}
+
+// Compare integer values with zero and fall through if CC holds, branch away otherwise.
+void TemplateTable::if_0cmp(Condition cc) {
+ transition(itos, vtos);
+
+ if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
+}
+
+// Compare integer values and fall through if CC holds, branch away otherwise.
+//
+// Interface:
+// - Rfirst: First operand (older stack value)
+// - tos: Second operand (younger stack value)
+void TemplateTable::if_icmp(Condition cc) {
+ transition(itos, vtos);
+
+ const Register Rfirst = R0,
+ Rsecond = R17_tos;
+
+ __ pop_i(Rfirst);
+ if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
+}
+
+void TemplateTable::if_nullcmp(Condition cc) {
+ transition(atos, vtos);
+
+ if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
+}
+
+void TemplateTable::if_acmp(Condition cc) {
+ transition(atos, vtos);
+
+ const Register Rfirst = R0,
+ Rsecond = R17_tos;
+
+ __ pop_ptr(Rfirst);
+ if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
+}
+
+void TemplateTable::ret() {
+ locals_index(R11_scratch1);
+ __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
+
+ __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
+
+ __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
+ __ add(R11_scratch1, R17_tos, R11_scratch1);
+ __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
+ __ dispatch_next(vtos);
+}
+
+void TemplateTable::wide_ret() {
+ transition(vtos, vtos);
+
+ const Register Rindex = R3_ARG1,
+ Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2;
+
+ locals_index_wide(Rindex);
+ __ load_local_ptr(R17_tos, R17_tos, Rindex);
+  __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
+ // Tos now contains the bci, compute the bcp from that.
+ __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
+ __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
+ __ add(R14_bcp, Rscratch1, Rscratch2);
+ __ dispatch_next(vtos);
+}
+
+void TemplateTable::tableswitch() {
+ transition(itos, vtos);
+
+ Label Ldispatch, Ldefault_case;
+ Register Rlow_byte = R3_ARG1,
+ Rindex = Rlow_byte,
+ Rhigh_byte = R4_ARG2,
+ Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
+ Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Roffset = R6_ARG4;
+
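+  // tableswitch layout after the align: default offset (4) | low (4) |
+  // high (4) | jump offsets (4 bytes each, indexed by value - low).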
+ // Align bcp.
+ __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
+ __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
+
+ // Load lo & hi.
+ __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
+ __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);
+
+ // Check for default case (=index outside [low,high]).
+ __ cmpw(CCR0, R17_tos, Rlow_byte);
+ __ cmpw(CCR1, R17_tos, Rhigh_byte);
+ __ blt(CCR0, Ldefault_case);
+ __ bgt(CCR1, Ldefault_case);
+
+ // Lookup dispatch offset.
+ __ sub(Rindex, R17_tos, Rlow_byte);
+ __ extsw(Rindex, Rindex);
+ __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
+ __ sldi(Rindex, Rindex, LogBytesPerInt);
+ __ addi(Rindex, Rindex, 3 * BytesPerInt);
+ __ lwax(Roffset, Rdef_offset_addr, Rindex);
+ __ b(Ldispatch);
+
+ __ bind(Ldefault_case);
+ __ profile_switch_default(Rhigh_byte, Rscratch1);
+ __ lwa(Roffset, 0, Rdef_offset_addr);
+
+ __ bind(Ldispatch);
+
+ __ add(R14_bcp, Roffset, R14_bcp);
+ __ dispatch_next(vtos);
+}
+
+void TemplateTable::lookupswitch() {
+ transition(itos, itos);
+ __ stop("lookupswitch bytecode should have been rewritten");
+}
+
+// Table switch using linear search through cases.
+// Bytecode stream format:
+// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
+// Note: Everything is in big-endian format here. So on little-endian machines, we have to reverse offset, count, and compare value.
+void TemplateTable::fast_linearswitch() {
+ transition(itos, vtos);
+
+ Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
+
+ Register Rcount = R3_ARG1,
+ Rcurrent_pair = R4_ARG2,
+ Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
+ Roffset = R31, // Might need to survive C call.
+ Rvalue = R12_scratch2,
+ Rscratch = R11_scratch1,
+ Rcmp_value = R17_tos;
+
+ // Align bcp.
+ __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
+ __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
+
+ // Setup loop counter and limit.
+ __ lwz(Rcount, BytesPerInt, Rdef_offset_addr); // Load count.
+ __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
+
+ // Set up search loop.
+ __ cmpwi(CCR0, Rcount, 0);
+ __ beq(CCR0, Ldefault_case);
+
+ __ mtctr(Rcount);
+
+ // linear table search
+ __ bind(Lsearch_loop);
+
+ __ lwz(Rvalue, 0, Rcurrent_pair);
+ __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);
+
+ __ cmpw(CCR0, Rvalue, Rcmp_value);
+ __ beq(CCR0, Lfound);
+
+ __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
+ __ bdnz(Lsearch_loop);
+
+ // default case
+ __ bind(Ldefault_case);
+
+ __ lwa(Roffset, 0, Rdef_offset_addr);
+ if (ProfileInterpreter) {
+ __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
+ __ b(Lcontinue_execution);
+ }
+
+ // Entry found, skip Roffset bytecodes and continue.
+ __ bind(Lfound);
+ if (ProfileInterpreter) {
+    // Calc the number of the pair we hit. Careful: Rcurrent_pair still points
+    // at the matching pair; the first pair starts 2 ints beyond
+    // Rdef_offset_addr, hence the sub/addi below before dividing by the pair size.
+ __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
+ __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
+ __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
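+    // Net effect (sketch): index = (pair_addr - (Rdef_offset_addr + 2*BytesPerInt)) / 8,
+    // since each value/offset pair occupies 2 ints = 8 bytes.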
+ __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
+ __ bind(Lcontinue_execution);
+ }
+ __ add(R14_bcp, Roffset, R14_bcp);
+ __ dispatch_next(vtos);
+}
+
+// Table switch using binary search (value/offset pairs are ordered).
+// Bytecode stream format:
+// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
+// Note: Everything is in big-endian format here. So on little-endian machines, we have to reverse offset, count, and compare value.
+void TemplateTable::fast_binaryswitch() {
+
+ transition(itos, vtos);
+ // Implementation using the following core algorithm: (copied from Intel)
+ //
+ // int binary_search(int key, LookupswitchPair* array, int n) {
+ // // Binary search according to "Methodik des Programmierens" by
+ // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
+ // int i = 0;
+ // int j = n;
+ // while (i+1 < j) {
+ // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
+ // // with Q: for all i: 0 <= i < n: key < a[i]
+ // // where a stands for the array and assuming that the (inexisting)
+ // // element a[n] is infinitely big.
+ // int h = (i + j) >> 1;
+ // // i < h < j
+ // if (key < array[h].fast_match()) {
+ // j = h;
+ // } else {
+ // i = h;
+ // }
+ // }
+ // // R: a[i] <= key < a[i+1] or Q
+ // // (i.e., if key is within array, i is the correct index)
+ // return i;
+ // }
+
+ // register allocation
+ const Register Rkey = R17_tos; // already set (tosca)
+ const Register Rarray = R3_ARG1;
+ const Register Ri = R4_ARG2;
+ const Register Rj = R5_ARG3;
+ const Register Rh = R6_ARG4;
+ const Register Rscratch = R11_scratch1;
+
+ const int log_entry_size = 3;
+ const int entry_size = 1 << log_entry_size;
+
+ Label found;
+
+  // Find array start.
+ __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
+ __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
+
+ // initialize i & j
+ __ li(Ri,0);
+ __ lwz(Rj, -BytesPerInt, Rarray);
+
+ // and start.
+ Label entry;
+ __ b(entry);
+
+ // binary search loop
+ { Label loop;
+ __ bind(loop);
+ // int h = (i + j) >> 1;
+ __ srdi(Rh, Rh, 1);
+ // if (key < array[h].fast_match()) {
+ // j = h;
+ // } else {
+ // i = h;
+ // }
+ __ sldi(Rscratch, Rh, log_entry_size);
+ __ lwzx(Rscratch, Rscratch, Rarray);
+
+    // if (key < current value)
+    //   Rj = Rh
+    // else
+    //   Ri = Rh
+ Label Lgreater;
+ __ cmpw(CCR0, Rkey, Rscratch);
+ __ bge(CCR0, Lgreater);
+ __ mr(Rj, Rh);
+ __ b(entry);
+ __ bind(Lgreater);
+ __ mr(Ri, Rh);
+
+ // while (i+1 < j)
+ __ bind(entry);
+ __ addi(Rscratch, Ri, 1);
+ __ cmpw(CCR0, Rscratch, Rj);
+    __ add(Rh, Ri, Rj); // Compute h = i + j; the shift (>> 1) happens at the loop head.
+
+ __ blt(CCR0, loop);
+ }
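+  // Note: the add at the loop tail computes h = i + j ahead of the backward
+  // branch, presumably so that only the shift (>> 1) remains on the critical
+  // path at the loop head.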
+
+ // End of binary search, result index is i (must check again!).
+ Label default_case;
+ Label continue_execution;
+ if (ProfileInterpreter) {
+ __ mr(Rh, Ri); // Save index in i for profiling.
+ }
+  // Compute address of array[i] and load its match value.
+ __ sldi(Ri, Ri, log_entry_size);
+ __ add(Ri, Ri, Rarray);
+ __ lwz(Rscratch, 0, Ri);
+
+  // Compare key with array[i].match (in Rscratch).
+  __ cmpw(CCR0, Rkey, Rscratch);
+  __ beq(CCR0, found);
+  // Entry not found -> j = default offset.
+  __ lwz(Rj, -2 * BytesPerInt, Rarray);
+  __ b(default_case);
+
+  __ bind(found);
+  // Entry found -> j = offset.
+ __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
+ __ lwz(Rj, BytesPerInt, Ri);
+
+ if (ProfileInterpreter) {
+ __ b(continue_execution);
+ }
+
+ __ bind(default_case); // fall through (if not profiling)
+ __ profile_switch_default(Ri, Rscratch);
+
+ __ bind(continue_execution);
+
+ __ extsw(Rj, Rj);
+ __ add(R14_bcp, Rj, R14_bcp);
+ __ dispatch_next(vtos);
+}
+
+void TemplateTable::_return(TosState state) {
+ transition(state, state);
+ assert(_desc->calls_vm(),
+ "inconsistent calls_vm information"); // call in remove_activation
+
+ if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
+
+ Register Rscratch = R11_scratch1,
+ Rklass = R12_scratch2,
+ Rklass_flags = Rklass;
+ Label Lskip_register_finalizer;
+
+    // Check if the object's klass has the HAS_FINALIZER flag set and call into the VM to register the finalizer in this case.
+ assert(state == vtos, "only valid state");
+ __ ld(R17_tos, 0, R18_locals);
+
+ // Load klass of this obj.
+ __ load_klass(Rklass, R17_tos);
+ __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
+ __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
+ __ bfalse(CCR0, Lskip_register_finalizer);
+
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
+
+ __ align(32, 12);
+ __ bind(Lskip_register_finalizer);
+ }
+
+ // Move the result value into the correct register and remove memory stack frame.
+ __ remove_activation(state, /* throw_monitor_exception */ true);
+ // Restoration of lr done by remove_activation.
+ switch (state) {
+ case ltos:
+ case btos:
+ case ctos:
+ case stos:
+ case atos:
+ case itos: __ mr(R3_RET, R17_tos); break;
+ case ftos:
+ case dtos: __ fmr(F1_RET, F15_ftos); break;
+ case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
+ // to get visible before the reference to the object gets stored anywhere.
+ __ membar(Assembler::StoreStore); break;
+ default : ShouldNotReachHere();
+ }
+ __ blr();
+}
+
+// ============================================================================
+// Constant pool cache access
+//
+// Memory ordering:
+//
+// As done in the C++ interpreter, we load the fields
+//   - _indices
+//   - _f12_oop
+// with acquire semantics, because they are inspected to decide whether the
+// cache entry is already resolved. We don't want loads to float above this
+// check.
+// See also the comments in ConstantPoolCacheEntry::bytecode_1(),
+// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().
+
+// Call into the VM if call site is not yet resolved
+//
+// Input regs:
+// - None, all passed regs are outputs.
+//
+// Returns:
+//   - Rcache: The constant pool cache entry that contains the resolved result.
+//
+// Kills:
+// - Rscratch
+void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
+
+ __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
+ Label Lresolved, Ldone;
+
+ assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
+  // We are resolved if the indices field contains the current bytecode.
+ // Big Endian:
+ __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
+ // Acquire by cmp-br-isync (see below).
+ __ cmpdi(CCR0, Rscratch, (int)bytecode());
+ __ beq(CCR0, Lresolved);
+
+ address entry = NULL;
+ switch (bytecode()) {
+ case Bytecodes::_getstatic : // fall through
+ case Bytecodes::_putstatic : // fall through
+ case Bytecodes::_getfield : // fall through
+ case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
+ case Bytecodes::_invokevirtual : // fall through
+ case Bytecodes::_invokespecial : // fall through
+ case Bytecodes::_invokestatic : // fall through
+ case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
+ case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
+ case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
+ default : ShouldNotReachHere(); break;
+ }
+ __ li(R4_ARG2, (int)bytecode());
+ __ call_VM(noreg, entry, R4_ARG2, true);
+
+ // Update registers with resolved info.
+ __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
+ __ b(Ldone);
+
+ __ bind(Lresolved);
+ __ isync(); // Order load wrt. succeeding loads.
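+  // Load-acquire idiom on PPC (sketch): load; cmp; branch; isync. The
+  // conditional branch consumes the loaded value and the isync keeps younger
+  // loads from being performed before it.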
+ __ bind(Ldone);
+}
+
+// Load the constant pool cache entry at field accesses into registers.
+// The Rcache and Rindex registers must be set before call.
+// Input:
+// - Rcache, Rindex
+// Output:
+// - Robj, Roffset, Rflags
+void TemplateTable::load_field_cp_cache_entry(Register Robj,
+ Register Rcache,
+ Register Rindex /* unused on PPC64 */,
+ Register Roffset,
+ Register Rflags,
+ bool is_static = false) {
+ assert_different_registers(Rcache, Rflags, Roffset);
+ // assert(Rindex == noreg, "parameter not used on PPC64");
+
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+ __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
+ __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
+ if (is_static) {
+ __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
+ __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
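+    // Static fields live in the Klass' java mirror (the java.lang.Class
+    // instance), so the mirror serves as object base for static accesses.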
+ // Acquire not needed here. Following access has an address dependency on this value.
+ }
+}
+
+// Load the constant pool cache entry at invokes into registers.
+// Resolve if necessary.
+
+// Input Registers:
+// - None, bcp is used, though
+//
+// Return registers:
+// - Rmethod (f1 field or f2 if invokevirtual)
+// - Ritable_index (f2 field)
+// - Rflags (flags field)
+//
+// Kills:
+// - R21
+//
+void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
+ Register Rmethod,
+ Register Ritable_index,
+ Register Rflags,
+ bool is_invokevirtual,
+ bool is_invokevfinal,
+ bool is_invokedynamic) {
+
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+ // Determine constant pool cache field offsets.
+ assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
+ const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
+ const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
+ // Access constant pool cache fields.
+ const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
+
+ Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
+
+ if (is_invokevfinal) {
+ assert(Ritable_index == noreg, "register not used");
+ // Already resolved.
+ __ get_cache_and_index_at_bcp(Rcache, 1);
+ } else {
+ resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
+ }
+
+ __ ld(Rmethod, method_offset, Rcache);
+ __ ld(Rflags, flags_offset, Rcache);
+
+ if (Ritable_index != noreg) {
+ __ ld(Ritable_index, index_offset, Rcache);
+ }
+}
+
+// ============================================================================
+// Field access
+
+// Volatile variables demand their effects be made known to all CPU's
+// in order. Store buffers on most chips allow reads & writes to
+// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
+// without some kind of memory barrier (i.e., it's not sufficient that
+// the interpreter does not reorder volatile references, the hardware
+// also must not reorder them).
+//
+// According to the new Java Memory Model (JMM):
+// (1) All volatiles are serialized with respect to each other. ALSO reads &
+// writes act as acquire & release, so:
+// (2) A read cannot let unrelated NON-volatile memory refs that
+// happen after the read float up to before the read. It's OK for
+// non-volatile memory refs that happen before the volatile read to
+// float down below it.
+// (3) Similarly, a volatile write cannot let unrelated NON-volatile
+// memory refs that happen BEFORE the write float down to after the
+// write. It's OK for non-volatile memory refs that happen after the
+// volatile write to float up before it.
+//
+// We only put in barriers around volatile refs (they are expensive),
+// not _between_ memory refs (that would require us to track the
+// flavor of the previous memory refs). Requirements (2) and (3)
+// require some barriers before volatile stores and after volatile
+// loads. These nearly cover requirement (1) but miss the
+// volatile-store-volatile-load case. This final case is placed after
+// volatile-stores although it could just as well go before
+// volatile-loads.
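+//
+// On PPC64 this maps roughly to: volatile load = fence (sync); load;
+// cmp-br-isync (acquire); volatile store = release (lwsync); store; trailing
+// fence (sync) to cover the volatile-store-volatile-load case (see
+// getfield_or_static() and putfield_or_static() below).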
+
+// The registers cache and index are expected to be set before the call.
+// Correct values of the cache and index registers are preserved.
+// Kills:
+// Rcache (if has_tos)
+// Rscratch
+void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
+
+ assert_different_registers(Rcache, Rscratch);
+
+ if (JvmtiExport::can_post_field_access()) {
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+ Label Lno_field_access_post;
+
+    // Check if posting of field access events is enabled.
+ int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
+ __ lwz(Rscratch, offs, Rscratch);
+
+ __ cmpwi(CCR0, Rscratch, 0);
+ __ beq(CCR0, Lno_field_access_post);
+
+ // Post access enabled - do it!
+ __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
+ if (is_static) {
+ __ li(R17_tos, 0);
+ } else {
+ if (has_tos) {
+        // The fast bytecode versions have the obj ptr in a register.
+        // Thus, save the object pointer before call_VM() clobbers it
+        // by putting the object on the tos, where GC wants it.
+ __ push_ptr(R17_tos);
+ } else {
+ // Load top of stack (do not pop the value off the stack).
+ __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
+ }
+ __ verify_oop(R17_tos);
+ }
+ // tos: object pointer or NULL if static
+ // cache: cache entry pointer
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
+ if (!is_static && has_tos) {
+ // Restore object pointer.
+ __ pop_ptr(R17_tos);
+ __ verify_oop(R17_tos);
+ } else {
+ // Cache is still needed to get class or obj.
+ __ get_cache_and_index_at_bcp(Rcache, 1);
+ }
+
+ __ align(32, 12);
+ __ bind(Lno_field_access_post);
+ }
+}
+
+// kills R11_scratch1
+void TemplateTable::pop_and_check_object(Register Roop) {
+ Register Rtmp = R11_scratch1;
+
+ assert_different_registers(Rtmp, Roop);
+ __ pop_ptr(Roop);
+ // For field access must check obj.
+ __ null_check_throw(Roop, -1, Rtmp);
+ __ verify_oop(Roop);
+}
+
+// PPC64: implement volatile loads as fence-load-acquire.
+void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
+ transition(vtos, vtos);
+
+ Label Lacquire, Lisync;
+
+ const Register Rcache = R3_ARG1,
+ Rclass_or_obj = R22_tmp2,
+ Roffset = R23_tmp3,
+ Rflags = R31,
+ Rbtable = R5_ARG3,
+ Rbc = R6_ARG4,
+ Rscratch = R12_scratch2;
+
+ static address field_branch_table[number_of_states],
+ static_branch_table[number_of_states];
+
+ address* branch_table = is_static ? static_branch_table : field_branch_table;
+
+ // Get field offset.
+ resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
+
+ // JVMTI support
+ jvmti_post_field_access(Rcache, Rscratch, is_static, false);
+
+ // Load after possible GC.
+ load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
+
+ // Load pointer to branch table.
+ __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
+
+ // Get volatile flag.
+ __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
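+  // rldicl(d, s, 64-shift, 63) rotates the is_volatile bit into the LSB and
+  // clears all other bits, so Rscratch is now 0 (non-volatile) or 1 (volatile).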
+ // Note: sync is needed before volatile load on PPC64.
+
+ // Check field type.
+ __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+
+#ifdef ASSERT
+ Label LFlagInvalid;
+ __ cmpldi(CCR0, Rflags, number_of_states);
+ __ bge(CCR0, LFlagInvalid);
+#endif
+
+ // Load from branch table and dispatch (volatile case: one instruction ahead).
+ __ sldi(Rflags, Rflags, LogBytesPerWord);
+ __ cmpwi(CCR6, Rscratch, 1); // Volatile?
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
+ }
+ __ ldx(Rbtable, Rbtable, Rflags);
+
+ // Get the obj from stack.
+ if (!is_static) {
+ pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
+ } else {
+ __ verify_oop(Rclass_or_obj);
+ }
+
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
+ }
+ __ mtctr(Rbtable);
+ __ bctr();
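+  // Dispatch sketch: branch_table[tos_state] is the non-volatile entry point;
+  // the volatile variant starts one instruction earlier, at the fence. That
+  // earlier entry is only selected (subf above) when
+  // support_IRIW_for_not_multiple_copy_atomic_cpu; the acquire after a
+  // volatile load is handled separately via CCR6 and Lacquire.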
+
+#ifdef ASSERT
+ __ bind(LFlagInvalid);
+ __ stop("got invalid flag", 0x654);
+
+ // __ bind(Lvtos);
+ address pc_before_fence = __ pc();
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
+ assert(branch_table[vtos] == 0, "can't compute twice");
+ branch_table[vtos] = __ pc(); // non-volatile_entry point
+ __ stop("vtos unexpected", 0x655);
+#endif
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Ldtos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[dtos] == 0, "can't compute twice");
+ branch_table[dtos] = __ pc(); // non-volatile_entry point
+ __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
+ __ push(dtos);
+ if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
+ {
+ Label acquire_double;
+ __ beq(CCR6, acquire_double); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ bind(acquire_double);
+ __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
+ __ beq_predict_taken(CCR0, Lisync);
+    __ b(Lisync); // In case of NaN.
+ }
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Lftos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[ftos] == 0, "can't compute twice");
+ branch_table[ftos] = __ pc(); // non-volatile_entry point
+ __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
+ __ push(ftos);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
+ {
+ Label acquire_float;
+ __ beq(CCR6, acquire_float); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ bind(acquire_float);
+ __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
+ __ beq_predict_taken(CCR0, Lisync);
+    __ b(Lisync); // In case of NaN.
+ }
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Litos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[itos] == 0, "can't compute twice");
+ branch_table[itos] = __ pc(); // non-volatile_entry point
+ __ lwax(R17_tos, Rclass_or_obj, Roffset);
+ __ push(itos);
+ if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
+ __ beq(CCR6, Lacquire); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Lltos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[ltos] == 0, "can't compute twice");
+ branch_table[ltos] = __ pc(); // non-volatile_entry point
+ __ ldx(R17_tos, Rclass_or_obj, Roffset);
+ __ push(ltos);
+ if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
+ __ beq(CCR6, Lacquire); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Lbtos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[btos] == 0, "can't compute twice");
+ branch_table[btos] = __ pc(); // non-volatile_entry point
+ __ lbzx(R17_tos, Rclass_or_obj, Roffset);
+ __ extsb(R17_tos, R17_tos);
+ __ push(btos);
+ if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
+ __ beq(CCR6, Lacquire); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Lctos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[ctos] == 0, "can't compute twice");
+ branch_table[ctos] = __ pc(); // non-volatile_entry point
+ __ lhzx(R17_tos, Rclass_or_obj, Roffset);
+ __ push(ctos);
+ if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
+ __ beq(CCR6, Lacquire); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Lstos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[stos] == 0, "can't compute twice");
+ branch_table[stos] = __ pc(); // non-volatile_entry point
+ __ lhax(R17_tos, Rclass_or_obj, Roffset);
+ __ push(stos);
+ if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
+ __ beq(CCR6, Lacquire); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align load.
+ // __ bind(Latos);
+ __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[atos] == 0, "can't compute twice");
+ branch_table[atos] = __ pc(); // non-volatile_entry point
+ __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
+ __ verify_oop(R17_tos);
+ __ push(atos);
+ //__ dcbt(R17_tos); // prefetch
+ if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
+ __ beq(CCR6, Lacquire); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 12);
+ __ bind(Lacquire);
+ __ twi_0(R17_tos);
+ __ bind(Lisync);
+ __ isync(); // acquire
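+  // twi_0 is a never-taken trap on the loaded value; together with the isync
+  // it forms the load-acquire for integer loads. FP loads use the
+  // fcmpu-branch-isync variant above instead, since twi cannot read an FP
+  // register.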
+
+#ifdef ASSERT
+ for (int i = 0; i<number_of_states; ++i) {
+ assert(branch_table[i], "get initialization");
+ //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
+ // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
+ }
+#endif
+}
+
+void TemplateTable::getfield(int byte_no) {
+ getfield_or_static(byte_no, false);
+}
+
+void TemplateTable::getstatic(int byte_no) {
+ getfield_or_static(byte_no, true);
+}
+
+// The registers cache and index are expected to be set before the call.
+// The function may destroy various registers, just not the cache and index registers.
+void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
+
+ assert_different_registers(Rcache, Rscratch, R6_ARG4);
+
+ if (JvmtiExport::can_post_field_modification()) {
+ Label Lno_field_mod_post;
+
+    // Check if posting of field modification events is enabled.
+ int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
+ __ lwz(Rscratch, offs, Rscratch);
+
+ __ cmpwi(CCR0, Rscratch, 0);
+ __ beq(CCR0, Lno_field_mod_post);
+
+ // Do the post
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+ const Register Robj = Rscratch;
+
+ __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
+ if (is_static) {
+ // Life is simple. Null out the object pointer.
+ __ li(Robj, 0);
+ } else {
+ // In case of the fast versions, value lives in registers => put it back on tos.
+ int offs = Interpreter::expr_offset_in_bytes(0);
+ Register base = R15_esp;
+ switch(bytecode()) {
+ case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
+ case Bytecodes::_fast_iputfield: // Fall through
+ case Bytecodes::_fast_bputfield: // Fall through
+ case Bytecodes::_fast_cputfield: // Fall through
+ case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break;
+ case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
+ case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break;
+ case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
+ default: {
+ offs = 0;
+ base = Robj;
+ const Register Rflags = Robj;
+ Label is_one_slot;
+ // Life is harder. The stack holds the value on top, followed by the
+ // object. We don't know the size of the value, though; it could be
+ // one or two words depending on its type. As a result, we must find
+ // the type to determine where the object is.
+ __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
+ __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+
+ __ cmpwi(CCR0, Rflags, ltos);
+ __ cmpwi(CCR1, Rflags, dtos);
+ __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
+ __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
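+        // crnor sets CCR0.eq = !(CCR0.eq | CCR1.eq), so the following beq is
+        // taken iff the value is neither ltos nor dtos, i.e. occupies one slot.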
+ __ beq(CCR0, is_one_slot);
+ __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
+ __ bind(is_one_slot);
+ break;
+ }
+ }
+ __ ld(Robj, offs, base);
+ __ verify_oop(Robj);
+ }
+
+ __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
+ __ get_cache_and_index_at_bcp(Rcache, 1);
+
+ // In case of the fast versions, value lives in registers => put it back on tos.
+ switch(bytecode()) {
+ case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
+ case Bytecodes::_fast_iputfield: // Fall through
+ case Bytecodes::_fast_bputfield: // Fall through
+ case Bytecodes::_fast_cputfield: // Fall through
+ case Bytecodes::_fast_sputfield: __ pop_i(); break;
+ case Bytecodes::_fast_lputfield: __ pop_l(); break;
+ case Bytecodes::_fast_fputfield: __ pop_f(); break;
+ case Bytecodes::_fast_dputfield: __ pop_d(); break;
+ default: break; // Nothin' to do.
+ }
+
+ __ align(32, 12);
+ __ bind(Lno_field_mod_post);
+ }
+}
+
+// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
+void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
+ Label Lvolatile;
+
+ const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
+ Rclass_or_obj = R31, // Needs to survive C call.
+ Roffset = R22_tmp2, // Needs to survive C call.
+ Rflags = R3_ARG1,
+ Rbtable = R4_ARG2,
+ Rscratch = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rscratch3 = R6_ARG4,
+ Rbc = Rscratch3;
+ const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
+
+ static address field_branch_table[number_of_states],
+ static_branch_table[number_of_states];
+
+ address* branch_table = is_static ? static_branch_table : field_branch_table;
+
+ // Stack (grows up):
+ // value
+ // obj
+
+ // Load the field offset.
+ resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
+ jvmti_post_field_mod(Rcache, Rscratch, is_static);
+ load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
+
+ // Load pointer to branch table.
+ __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
+
+ // Get volatile flag.
+ __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
+
+ // Check the field type.
+ __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+
+#ifdef ASSERT
+ Label LFlagInvalid;
+ __ cmpldi(CCR0, Rflags, number_of_states);
+ __ bge(CCR0, LFlagInvalid);
+#endif
+
+ // Load from branch table and dispatch (volatile case: one instruction ahead).
+ __ sldi(Rflags, Rflags, LogBytesPerWord);
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
+ __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0.
+ __ ldx(Rbtable, Rbtable, Rflags);
+
+ __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
+ __ mtctr(Rbtable);
+ __ bctr();
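+  // Here the volatile entry point (the release, one instruction earlier) is
+  // always selected via the subf above; the trailing sync for volatile stores
+  // is emitted at Lvolatile and reached via CR_is_vol when
+  // !support_IRIW_for_not_multiple_copy_atomic_cpu.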
+
+#ifdef ASSERT
+ __ bind(LFlagInvalid);
+ __ stop("got invalid flag", 0x656);
+
+ // __ bind(Lvtos);
+ address pc_before_release = __ pc();
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
+ assert(branch_table[vtos] == 0, "can't compute twice");
+ branch_table[vtos] = __ pc(); // non-volatile_entry point
+ __ stop("vtos unexpected", 0x657);
+#endif
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Ldtos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[dtos] == 0, "can't compute twice");
+ branch_table[dtos] = __ pc(); // non-volatile_entry point
+ __ pop(dtos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Lftos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[ftos] == 0, "can't compute twice");
+ branch_table[ftos] = __ pc(); // non-volatile_entry point
+ __ pop(ftos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Litos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[itos] == 0, "can't compute twice");
+ branch_table[itos] = __ pc(); // non-volatile_entry point
+ __ pop(itos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ stwx(R17_tos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Lltos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[ltos] == 0, "can't compute twice");
+ branch_table[ltos] = __ pc(); // non-volatile_entry point
+ __ pop(ltos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ stdx(R17_tos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Lbtos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[btos] == 0, "can't compute twice");
+ branch_table[btos] = __ pc(); // non-volatile_entry point
+ __ pop(btos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ stbx(R17_tos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Lctos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[ctos] == 0, "can't compute twice");
+ branch_table[ctos] = __ pc(); // non-volatile_entry point
+ __ pop(ctos);
+  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ sthx(R17_tos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Lstos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[stos] == 0, "can't compute twice");
+ branch_table[stos] = __ pc(); // non-volatile_entry point
+ __ pop(stos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
+ __ sthx(R17_tos, Rclass_or_obj, Roffset);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ }
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 28, 28); // Align pop.
+ // __ bind(Latos);
+ __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
+ assert(branch_table[atos] == 0, "can't compute twice");
+ branch_table[atos] = __ pc(); // non-volatile_entry point
+ __ pop(atos);
+ if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
+ do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
+ if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ beq(CR_is_vol, Lvolatile); // Volatile?
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 12);
+ __ bind(Lvolatile);
+ __ fence();
+ }
+ // fallthru: __ b(Lexit);
+
+#ifdef ASSERT
+ for (int i = 0; i<number_of_states; ++i) {
+ assert(branch_table[i], "put initialization");
+ //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
+ // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
+ }
+#endif
+}
+
+void TemplateTable::putfield(int byte_no) {
+ putfield_or_static(byte_no, false);
+}
+
+void TemplateTable::putstatic(int byte_no) {
+ putfield_or_static(byte_no, true);
+}
+
+// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
+void TemplateTable::jvmti_post_fast_field_mod() {
+ __ should_not_reach_here();
+}
+
+void TemplateTable::fast_storefield(TosState state) {
+ transition(state, vtos);
+
+ const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
+ Rclass_or_obj = R31, // Needs to survive C call.
+ Roffset = R22_tmp2, // Needs to survive C call.
+ Rflags = R3_ARG1,
+ Rscratch = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rscratch3 = R4_ARG2;
+ const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
+
+ // Constant pool already resolved => Load flags and offset of field.
+ __ get_cache_and_index_at_bcp(Rcache, 1);
+ jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
+ load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
+
+ // Get the obj and the final store addr.
+ pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
+
+ // Get volatile flag.
+ __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
+ {
+ Label LnotVolatile;
+ __ beq(CCR0, LnotVolatile);
+ __ release();
+ __ align(32, 12);
+ __ bind(LnotVolatile);
+ }
+
+ // Do the store and fencing.
+ switch(bytecode()) {
+ case Bytecodes::_fast_aputfield:
+ // Store into the field.
+ do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
+ break;
+
+ case Bytecodes::_fast_iputfield:
+ __ stwx(R17_tos, Rclass_or_obj, Roffset);
+ break;
+
+ case Bytecodes::_fast_lputfield:
+ __ stdx(R17_tos, Rclass_or_obj, Roffset);
+ break;
+
+ case Bytecodes::_fast_bputfield:
+ __ stbx(R17_tos, Rclass_or_obj, Roffset);
+ break;
+
+ case Bytecodes::_fast_cputfield:
+ case Bytecodes::_fast_sputfield:
+ __ sthx(R17_tos, Rclass_or_obj, Roffset);
+ break;
+
+ case Bytecodes::_fast_fputfield:
+ __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
+ break;
+
+ case Bytecodes::_fast_dputfield:
+ __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
+ break;
+
+ default: ShouldNotReachHere();
+ }
+
+ if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ Label LVolatile;
+ __ beq(CR_is_vol, LVolatile);
+ __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
+
+ __ align(32, 12);
+ __ bind(LVolatile);
+ __ fence();
+ }
+}
+
+void TemplateTable::fast_accessfield(TosState state) {
+ transition(atos, state);
+
+ Label LisVolatile;
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+
+ const Register Rcache = R3_ARG1,
+ Rclass_or_obj = R17_tos,
+ Roffset = R22_tmp2,
+ Rflags = R23_tmp3,
+ Rscratch = R12_scratch2;
+
+ // Constant pool already resolved. Get the field offset.
+ __ get_cache_and_index_at_bcp(Rcache, 1);
+ load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
+
+ // JVMTI support
+ jvmti_post_field_access(Rcache, Rscratch, false, true);
+
+ // Get the load address.
+ __ null_check_throw(Rclass_or_obj, -1, Rscratch);
+
+ // Get volatile flag.
+ __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
+ __ bne(CCR0, LisVolatile);
+
+ switch(bytecode()) {
+ case Bytecodes::_fast_agetfield:
+ {
+ __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
+ __ verify_oop(R17_tos);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
+ __ verify_oop(R17_tos);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_igetfield:
+ {
+ __ lwax(R17_tos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lwax(R17_tos, Rclass_or_obj, Roffset);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_lgetfield:
+ {
+ __ ldx(R17_tos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ ldx(R17_tos, Rclass_or_obj, Roffset);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_bgetfield:
+ {
+ __ lbzx(R17_tos, Rclass_or_obj, Roffset);
+ __ extsb(R17_tos, R17_tos);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lbzx(R17_tos, Rclass_or_obj, Roffset);
+ __ twi_0(R17_tos);
+ __ extsb(R17_tos, R17_tos);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_cgetfield:
+ {
+ __ lhzx(R17_tos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lhzx(R17_tos, Rclass_or_obj, Roffset);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_sgetfield:
+ {
+ __ lhax(R17_tos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lhax(R17_tos, Rclass_or_obj, Roffset);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_fgetfield:
+ {
+ __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ Label Ldummy;
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
+ __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
+ __ bne_predict_not_taken(CCR0, Ldummy);
+ __ bind(Ldummy);
+ __ isync();
+ break;
+ }
+ case Bytecodes::_fast_dgetfield:
+ {
+ __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
+
+ __ bind(LisVolatile);
+ Label Ldummy;
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
+ __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
+ __ bne_predict_not_taken(CCR0, Ldummy);
+ __ bind(Ldummy);
+ __ isync();
+ break;
+ }
+ default: ShouldNotReachHere();
+ }
+}
+
+void TemplateTable::fast_xaccess(TosState state) {
+ transition(vtos, state);
+
+ Label LisVolatile;
+ ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+ const Register Rcache = R3_ARG1,
+ Rclass_or_obj = R17_tos,
+ Roffset = R22_tmp2,
+ Rflags = R23_tmp3,
+ Rscratch = R12_scratch2;
+
+ __ ld(Rclass_or_obj, 0, R18_locals);
+
+ // Constant pool already resolved. Get the field offset.
+ __ get_cache_and_index_at_bcp(Rcache, 2);
+ load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
+
+  // JVMTI support is not needed here, since we switch back to the single bytecodes as soon as a debugger attaches.
+
+ // Needed to report exception at the correct bcp.
+ __ addi(R14_bcp, R14_bcp, 1);
+
+ // Get the load address.
+ __ null_check_throw(Rclass_or_obj, -1, Rscratch);
+
+ // Get volatile flag.
+ __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
+ __ bne(CCR0, LisVolatile);
+
+ switch(state) {
+ case atos:
+ {
+ __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
+ __ verify_oop(R17_tos);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
+ __ verify_oop(R17_tos);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case itos:
+ {
+ __ lwax(R17_tos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
+
+ __ bind(LisVolatile);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lwax(R17_tos, Rclass_or_obj, Roffset);
+ __ twi_0(R17_tos);
+ __ isync();
+ break;
+ }
+ case ftos:
+ {
+ __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
+ __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
+
+ __ bind(LisVolatile);
+ Label Ldummy;
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
+ __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
+ __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
+ __ bne_predict_not_taken(CCR0, Ldummy);
+ __ bind(Ldummy);
+ __ isync();
+ break;
+ }
+ default: ShouldNotReachHere();
+ }
+ __ addi(R14_bcp, R14_bcp, -1);
+}
+
+// ============================================================================
+// Calls
+
+// Common code for invoke
+//
+// Input:
+// - byte_no
+//
+// Output:
+// - Rmethod: The method to invoke next.
+// - Rret_addr: The return address to return to.
+// - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic)
+// - Rrecv: Cache for "this" pointer, might be noreg if static call.
+// - Rflags: Method flags from const pool cache.
+//
+// Kills:
+// - Rscratch1
+//
+void TemplateTable::prepare_invoke(int byte_no,
+ Register Rmethod, // linked method (or i-klass)
+ Register Rret_addr,// return address
+ Register Rindex, // itable index, MethodType, etc.
+ Register Rrecv, // If caller wants to see it.
+ Register Rflags, // If caller wants to test it.
+ Register Rscratch
+ ) {
+ // Determine flags.
+ const Bytecodes::Code code = bytecode();
+ const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
+ const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
+ const bool is_invokehandle = code == Bytecodes::_invokehandle;
+ const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
+ const bool is_invokespecial = code == Bytecodes::_invokespecial;
+ const bool load_receiver = (Rrecv != noreg);
+ assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
+
+ assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
+ assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
+ assert_different_registers(Rret_addr, Rscratch);
+
+ load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
+
+ // Saving of SP done in call_from_interpreter.
+
+ // Maybe push "appendix" to arguments.
+ if (is_invokedynamic || is_invokehandle) {
+ Label Ldone;
+ __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
+ __ beq(CCR0, Ldone);
+ // Push "appendix" (MethodType, CallSite, etc.).
+ // This must be done before we get the receiver,
+ // since the parameter_size includes it.
+ __ load_resolved_reference_at_index(Rscratch, Rindex);
+ __ verify_oop(Rscratch);
+ __ push_ptr(Rscratch);
+ __ bind(Ldone);
+ }
+
+ // Load receiver if needed (after appendix is pushed so parameter size is correct).
+ if (load_receiver) {
+ const Register Rparam_count = Rscratch;
+ __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
+ __ load_receiver(Rparam_count, Rrecv);
+ __ verify_oop(Rrecv);
+ }
+
+ // Get return address.
+ {
+ Register Rtable_addr = Rscratch;
+ Register Rret_type = Rret_addr;
+ address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
+
+ // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
+ __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+ __ load_dispatch_table(Rtable_addr, (address*)table_addr);
+ __ sldi(Rret_type, Rret_type, LogBytesPerWord);
+ // Get return address.
+ __ ldx(Rret_addr, Rtable_addr, Rret_type);
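+    // Sketch: Rret_addr = invoke_return_entry_table_for(code)[result_tos_state];
+    // the return entry is selected by the callee's result type.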
+ }
+}
+
+// Helper for virtual calls. Load target out of vtable and jump off!
+// Kills all passed registers.
+void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
+
+ assert_different_registers(Rrecv_klass, Rtemp, Rret);
+ const Register Rtarget_method = Rindex;
+
+ // Get target method & entry point.
+ const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  // Compute the vtable slot offset: scale the vtable index by the entry size (8 bytes).
+ __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
+ // Load target.
+ __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
+ __ ldx(Rtarget_method, Rindex, Rrecv_klass);
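+  // Net address loaded above (sketch): recv_klass + vtable_start_offset()*wordSize
+  //   + index*vtableEntry::size()*wordSize + vtableEntry::method_offset_in_bytes().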
+ __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
+}
+
+// Virtual or final call. Final calls are rewritten on the fly to run through "fast_invokevfinal" next time.
+void TemplateTable::invokevirtual(int byte_no) {
+ transition(vtos, vtos);
+
+ Register Rtable_addr = R11_scratch1,
+ Rret_type = R12_scratch2,
+ Rret_addr = R5_ARG3,
+ Rflags = R22_tmp2, // Should survive C call.
+ Rrecv = R3_ARG1,
+ Rrecv_klass = Rrecv,
+ Rvtableindex_or_method = R31, // Should survive C call.
+ Rnum_params = R4_ARG2,
+ Rnew_bc = R6_ARG4;
+
+ Label LnotFinal;
+
+ load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
+
+ __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
+ __ bfalse(CCR0, LnotFinal);
+
+ patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
+ invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
+
+ __ align(32, 12);
+ __ bind(LnotFinal);
+ // Load "this" pointer (receiver).
+ __ rldicl(Rnum_params, Rflags, 64, 48);
+ __ load_receiver(Rnum_params, Rrecv);
+ __ verify_oop(Rrecv);
+
+ // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
+ __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+ __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
+ __ sldi(Rret_type, Rret_type, LogBytesPerWord);
+ __ ldx(Rret_addr, Rret_type, Rtable_addr);
+ __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
+ __ load_klass(Rrecv_klass, Rrecv);
+ __ verify_klass_ptr(Rrecv_klass);
+ __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
+
+ generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
+}
+
+void TemplateTable::fast_invokevfinal(int byte_no) {
+ transition(vtos, vtos);
+
+ assert(byte_no == f2_byte, "use this argument");
+ Register Rflags = R22_tmp2,
+ Rmethod = R31;
+ load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
+ invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
+}
+
+void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
+
+ assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
+
+ // Load receiver from stack slot.
+ Register Rrecv = Rscratch2;
+ Register Rnum_params = Rrecv;
+
+ __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
+ __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
+
+ // Get return address.
+ Register Rtable_addr = Rscratch1,
+ Rret_addr = Rflags,
+ Rret_type = Rret_addr;
+ // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
+ __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+ __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
+ __ sldi(Rret_type, Rret_type, LogBytesPerWord);
+ __ ldx(Rret_addr, Rret_type, Rtable_addr);
+
+ // Load receiver and receiver NULL check.
+ __ load_receiver(Rnum_params, Rrecv);
+ __ null_check_throw(Rrecv, -1, Rscratch1);
+
+ __ profile_final_call(Rrecv, Rscratch1);
+
+ // Do the call.
+ __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
+}
+
+void TemplateTable::invokespecial(int byte_no) {
+ assert(byte_no == f1_byte, "use this argument");
+ transition(vtos, vtos);
+
+ Register Rtable_addr = R3_ARG1,
+ Rret_addr = R4_ARG2,
+ Rflags = R5_ARG3,
+ Rreceiver = R6_ARG4,
+ Rmethod = R31;
+
+ prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
+
+ // Receiver NULL check.
+ __ null_check_throw(Rreceiver, -1, R11_scratch1);
+
+ __ profile_call(R11_scratch1, R12_scratch2);
+ __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
+}
+
+void TemplateTable::invokestatic(int byte_no) {
+ assert(byte_no == f1_byte, "use this argument");
+ transition(vtos, vtos);
+
+ Register Rtable_addr = R3_ARG1,
+ Rret_addr = R4_ARG2,
+ Rflags = R5_ARG3;
+
+ prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
+
+ __ profile_call(R11_scratch1, R12_scratch2);
+ __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
+}
+
+void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
+ Register Rret,
+ Register Rflags,
+ Register Rindex,
+ Register Rtemp1,
+ Register Rtemp2) {
+
+ assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
+ Label LnotFinal;
+
+ // Check for vfinal.
+ __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
+ __ bfalse(CCR0, LnotFinal);
+
+ Register Rscratch = Rflags; // Rflags is dead now.
+
+ // Final call case.
+ __ profile_final_call(Rtemp1, Rscratch);
+ // Do the final call - the index (f2) contains the method.
+ __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
+
+  // Non-final call case.
+ __ bind(LnotFinal);
+ __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
+ generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
+}
+
+void TemplateTable::invokeinterface(int byte_no) {
+ assert(byte_no == f1_byte, "use this argument");
+ transition(vtos, vtos);
+
+ const Register Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2,
+ Rscratch3 = R9_ARG7,
+ Rscratch4 = R10_ARG8,
+ Rtable_addr = Rscratch2,
+ Rinterface_klass = R5_ARG3,
+ Rret_type = R8_ARG6,
+ Rret_addr = Rret_type,
+ Rindex = R6_ARG4,
+ Rreceiver = R4_ARG2,
+ Rrecv_klass = Rreceiver,
+ Rflags = R7_ARG5;
+
+ prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
+
+ // Get receiver klass.
+ __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
+ __ load_klass(Rrecv_klass, Rreceiver);
+
+ // Check corner case object method.
+ Label LobjectMethod;
+
+ __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
+ __ btrue(CCR0, LobjectMethod);
+
+ // Fallthrough: The normal invokeinterface case.
+ __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
+
+ // Find entry point to call.
+ Label Lthrow_icc, Lthrow_ame;
+ // Result will be returned in Rindex.
+ __ mr(Rscratch4, Rrecv_klass);
+ __ mr(Rscratch3, Rindex);
+ __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
+
+ __ cmpdi(CCR0, Rindex, 0);
+ __ beq(CCR0, Lthrow_ame);
+ // Found entry. Jump off!
+ __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
+
+ // Vtable entry was NULL => Throw abstract method error.
+ __ bind(Lthrow_ame);
+ __ mr(Rrecv_klass, Rscratch4);
+ __ mr(Rindex, Rscratch3);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+
+ // Interface was not found => Throw incompatible class change error.
+ __ bind(Lthrow_icc);
+ __ mr(Rrecv_klass, Rscratch4);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+
+ __ should_not_reach_here();
+
+  // Special case of invokeinterface called for a virtual method of
+  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
+  // The invokeinterface was rewritten to an invokevirtual, hence we have to
+  // handle this corner case. This code isn't produced by javac, but could
+  // be produced by another compliant Java compiler.
+ __ bind(LobjectMethod);
+ invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
+}
+
+void TemplateTable::invokedynamic(int byte_no) {
+ transition(vtos, vtos);
+
+ const Register Rret_addr = R3_ARG1,
+ Rflags = R4_ARG2,
+ Rmethod = R22_tmp2,
+ Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2;
+
+ if (!EnableInvokeDynamic) {
+ // We should not encounter this bytecode if !EnableInvokeDynamic.
+ // The verifier will stop it. However, if we get past the verifier,
+ // this will stop the thread in a reasonable way, without crashing the JVM.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
+ // The call_VM checks for exception, so we should never return here.
+ __ should_not_reach_here();
+ return;
+ }
+
+ prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
+
+ // Profile this call.
+ __ profile_call(Rscratch1, Rscratch2);
+
+  // Off we go. With the new method handles, we don't jump to a method handle
+  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which
+  // happens to be the call site object the bootstrap method returned. This is
+  // passed to a "link" method which does the dispatch (most likely just grabs
+  // the MH stored inside the call site and does an invokehandle).
+ __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
+}
+
+void TemplateTable::invokehandle(int byte_no) {
+ transition(vtos, vtos);
+
+ const Register Rret_addr = R3_ARG1,
+ Rflags = R4_ARG2,
+ Rrecv = R5_ARG3,
+ Rmethod = R22_tmp2,
+ Rscratch1 = R11_scratch1,
+ Rscratch2 = R12_scratch2;
+
+ if (!EnableInvokeDynamic) {
+ // Rewriter does not generate this bytecode.
+ __ should_not_reach_here();
+ return;
+ }
+
+ prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
+ __ verify_method_ptr(Rmethod);
+ __ null_check_throw(Rrecv, -1, Rscratch2);
+
+ __ profile_final_call(Rrecv, Rscratch1);
+
+ // Still no call from handle => We call the method handle interpreter here.
+ __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
+}
+
+// =============================================================================
+// Allocation
+
+// Puts allocated obj ref onto the expression stack.
+void TemplateTable::_new() {
+ transition(vtos, atos);
+
+ Label Lslow_case,
+ Ldone,
+ Linitialize_header,
+ Lallocate_shared,
+ Linitialize_object; // Including clearing the fields.
+
+ const Register RallocatedObject = R17_tos,
+ RinstanceKlass = R9_ARG7,
+ Rscratch = R11_scratch1,
+ Roffset = R8_ARG6,
+ Rinstance_size = Roffset,
+ Rcpool = R4_ARG2,
+ Rtags = R3_ARG1,
+ Rindex = R5_ARG3;
+
+ const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+
+ // --------------------------------------------------------------------------
+ // Check if fast case is possible.
+
+ // Load pointers to const pool and const pool's tags array.
+ __ get_cpool_and_tags(Rcpool, Rtags);
+ // Load index of constant pool entry.
+ __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
+
+ if (UseTLAB) {
+    // Make sure the class we're about to instantiate has been resolved.
+    // This is done before loading InstanceKlass to be consistent with the order
+    // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
+ __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
+ __ lbzx(Rtags, Rindex, Rtags);
+
+ __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
+ __ bne(CCR0, Lslow_case);
+
+ // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
+ __ sldi(Roffset, Rindex, LogBytesPerWord);
+ __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
+ __ isync(); // Order load of instance Klass wrt. tags.
+ __ ldx(RinstanceKlass, Roffset, Rscratch);
+
+ // Make sure klass is fully initialized and get instance_size.
+ __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
+ __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
+
+ __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
+    // Make sure klass does not have a finalizer and is not abstract, an interface, or java/lang/Class.
+ __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
+
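+    // CR0.eq is set iff the slow-path bit is clear and CR1.eq iff the klass is
+    // fully initialized; crnand inverts the conjunction, so a single beq below
+    // covers both slow cases.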
+ __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
+ __ beq(CCR0, Lslow_case);
+
+ // --------------------------------------------------------------------------
+ // Fast case:
+ // Allocate the instance.
+ // 1) Try to allocate in the TLAB.
+  // 2) If that fails and the TLAB is not yet full enough to be discarded, allocate in the shared Eden.
+ // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
+
+ Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
+ Register RnewTopValue = R6_ARG4;
+ Register RendValue = R7_ARG5;
+
+ // Check if we can allocate in the TLAB.
+ __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
+ __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
+
+ __ add(RnewTopValue, Rinstance_size, RoldTopValue);
+
+ // If there is enough space, we do not CAS and do not clear.
+ __ cmpld(CCR0, RnewTopValue, RendValue);
+ __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
+
+ __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
+
+ if (ZeroTLAB) {
+ // The fields have already been cleared.
+ __ b(Linitialize_header);
+ } else {
+ // Initialize both the header and fields.
+ __ b(Linitialize_object);
+ }
+
+ // Fall through: TLAB was too small.
+ if (allow_shared_alloc) {
+ Register RtlabWasteLimitValue = R10_ARG8;
+ Register RfreeValue = RnewTopValue;
+
+ __ bind(Lallocate_shared);
+ // Check if tlab should be discarded (refill_waste_limit >= free).
+ __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
+ __ subf(RfreeValue, RoldTopValue, RendValue);
+ __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
+ __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
+ __ bge(CCR0, Lslow_case);
+
+ // Increment waste limit to prevent getting stuck on this slow path.
+ __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
+ __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
+ }
+ // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
+ }
+ // else: Always go the slow path.
+
+ // --------------------------------------------------------------------------
+ // slow case
+ __ bind(Lslow_case);
+ call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
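+  // The VM call returns a fully initialized object in R17_tos, so the
+  // fast-path initialization code below can be skipped.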
+
+ if (UseTLAB) {
+ __ b(Ldone);
+ // --------------------------------------------------------------------------
+ // Init1: Zero out newly allocated memory.
+
+ if (!ZeroTLAB || allow_shared_alloc) {
+ // Clear object fields.
+ __ bind(Linitialize_object);
+
+ // Initialize remaining object fields.
+ Register Rbase = Rtags;
+ __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
+ __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
+ __ srdi(Rinstance_size, Rinstance_size, 3);
+
+      // Clear out the object, skipping the header. Also takes care of the zero-length case.
+ __ clear_memory_doubleword(Rbase, Rinstance_size);
+ // fallthru: __ b(Linitialize_header);
+ }
+
+ // --------------------------------------------------------------------------
+ // Init2: Initialize the header: mark, klass
+ __ bind(Linitialize_header);
+
+ // Init mark.
+ if (UseBiasedLocking) {
+ __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
+ } else {
+ __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
+ }
+ __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
+
+ // Init klass.
+ __ store_klass_gap(RallocatedObject);
+ __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
+
+ // Check and trigger dtrace event.
+ {
+ SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
+ __ push(atos);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
+ __ pop(atos);
+ }
+ }
+
+ // continue
+ __ bind(Ldone);
+
+ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+ __ membar(Assembler::StoreStore);
+}
+
+void TemplateTable::newarray() {
+ transition(itos, atos);
+
+ __ lbz(R4, 1, R14_bcp);
+ __ extsw(R5, R17_tos);
+ call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
+
+ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+ __ membar(Assembler::StoreStore);
+}
+
+void TemplateTable::anewarray() {
+ transition(itos, atos);
+
+ __ get_constant_pool(R4);
+ __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
+ __ extsw(R6, R17_tos); // size
+ call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
+
+ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+ __ membar(Assembler::StoreStore);
+}
+
+// Allocate a multi dimensional array
+void TemplateTable::multianewarray() {
+ transition(vtos, atos);
+
+ Register Rptr = R31; // Needs to survive C call.
+
+  // Load the number of dimensions and scale it to ndims * wordSize.
+ __ lbz(Rptr, 3, R14_bcp);
+ __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
+  // Esp points past the last_dim, so set R4 to the first_dim address.
+ __ add(R4, Rptr, R15_esp);
+ call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
+ // Pop all dimensions off the stack.
+ __ add(R15_esp, Rptr, R15_esp);
+
+ // Must prevent reordering of stores for object initialization with stores that publish the new object.
+ __ membar(Assembler::StoreStore);
+}
+
+void TemplateTable::arraylength() {
+ transition(atos, itos);
+
+ Label LnoException;
+ __ verify_oop(R17_tos);
+ __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
+ __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
+}
+
+// ============================================================================
+// Typechecks
+
+void TemplateTable::checkcast() {
+ transition(atos, atos);
+
+ Label Ldone, Lis_null, Lquicked, Lresolved;
+ Register Roffset = R5_ARG3,
+ RobjKlass = R4_ARG2,
+ RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect this register.
+ Rcpool = R11_scratch1,
+ Rtags = R12_scratch2;
+
+ // Null does not pass.
+ __ cmpdi(CCR0, R17_tos, 0);
+ __ beq(CCR0, Lis_null);
+
+ // Get constant pool tag to find out if the bytecode has already been "quickened".
+ __ get_cpool_and_tags(Rcpool, Rtags);
+
+ __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
+
+ __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
+ __ lbzx(Rtags, Rtags, Roffset);
+
+ __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
+ __ beq(CCR0, Lquicked);
+
+  // Call into the VM to "quicken" checkcast (quicken_io_cc handles both instanceof and checkcast).
+ __ push_ptr(); // for GC
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ __ get_vm_result_2(RspecifiedKlass);
+ __ pop_ptr(); // Restore receiver.
+ __ b(Lresolved);
+
+ // Extract target class from constant pool.
+ __ bind(Lquicked);
+ __ sldi(Roffset, Roffset, LogBytesPerWord);
+ __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
+ __ isync(); // Order load of specified Klass wrt. tags.
+ __ ldx(RspecifiedKlass, Rcpool, Roffset);
+
+ // Do the checkcast.
+ __ bind(Lresolved);
+ // Get value klass in RobjKlass.
+ __ load_klass(RobjKlass, R17_tos);
+  // Generate a fast subtype check. Branches to Ldone if RobjKlass is a subtype of RspecifiedKlass; falls through on failure.
+ __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
+
+  // Not a subtype => throw a ClassCastException.
+ // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
+ __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
+ __ mtctr(R11_scratch1);
+ __ bctr();
+
+ // Profile the null case.
+ __ align(32, 12);
+ __ bind(Lis_null);
+ __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
+
+ __ align(32, 12);
+ __ bind(Ldone);
+}
+
+// Output:
+// - tos == 0: Obj was null or not an instance of class.
+// - tos == 1: Obj was an instance of class.
+void TemplateTable::instanceof() {
+ transition(atos, itos);
+
+ Label Ldone, Lis_null, Lquicked, Lresolved;
+ Register Roffset = R5_ARG3,
+ RobjKlass = R4_ARG2,
+ RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
+ Rcpool = R11_scratch1,
+ Rtags = R12_scratch2;
+
+ // Null does not pass.
+ __ cmpdi(CCR0, R17_tos, 0);
+ __ beq(CCR0, Lis_null);
+
+ // Get constant pool tag to find out if the bytecode has already been "quickened".
+ __ get_cpool_and_tags(Rcpool, Rtags);
+
+ __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
+
+ __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
+ __ lbzx(Rtags, Rtags, Roffset);
+
+ __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
+ __ beq(CCR0, Lquicked);
+
+ // Call into the VM to "quicken" instanceof.
+ __ push_ptr(); // for GC
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
+ __ get_vm_result_2(RspecifiedKlass);
+ __ pop_ptr(); // Restore receiver.
+ __ b(Lresolved);
+
+ // Extract target class from constant pool.
+ __ bind(Lquicked);
+ __ sldi(Roffset, Roffset, LogBytesPerWord);
+ __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
+ __ isync(); // Order load of specified Klass wrt. tags.
+ __ ldx(RspecifiedKlass, Rcpool, Roffset);
+
+ // Do the checkcast.
+ __ bind(Lresolved);
+ // Get value klass in RobjKlass.
+ __ load_klass(RobjKlass, R17_tos);
+  // Generate a fast subtype check. Branches to Ldone if RobjKlass is a subtype of RspecifiedKlass; falls through on failure.
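+  // Preset the result to 1; gen_subtype_check branches to Ldone on success,
+  // so only the failure path falls through and overwrites it with 0.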
+ __ li(R17_tos, 1);
+ __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
+ __ li(R17_tos, 0);
+
+ if (ProfileInterpreter) {
+ __ b(Ldone);
+ }
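+  // Without ProfileInterpreter the branch is unnecessary: profile_null_seen
+  // expands to nothing and R17_tos already holds 0 (the null reference).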
+
+ // Profile the null case.
+ __ align(32, 12);
+ __ bind(Lis_null);
+ __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
+
+ __ align(32, 12);
+ __ bind(Ldone);
+}
+
+// =============================================================================
+// Breakpoints
+
+void TemplateTable::_breakpoint() {
+ transition(vtos, vtos);
+
+ // Get the unpatched byte code.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
+ __ mr(R31, R3_RET);
+
+ // Post the breakpoint event.
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
+
+ // Complete the execution of original bytecode.
+ __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
+}
+
+// =============================================================================
+// Exceptions
+
+void TemplateTable::athrow() {
+ transition(atos, vtos);
+
+ // Exception oop is in tos
+ __ verify_oop(R17_tos);
+
+ __ null_check_throw(R17_tos, -1, R11_scratch1);
+
+ // Throw exception interpreter entry expects exception oop to be in R3.
+ __ mr(R3_RET, R17_tos);
+ __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
+ __ mtctr(R11_scratch1);
+ __ bctr();
+}
+
+// =============================================================================
+// Synchronization
+// Searches the basic object lock list on the stack for a free slot
+// and uses it to lock the object in tos.
+//
+// Recursive locking is enabled by exiting the search if the same
+// object is already found in the list. Thus, a new BasicObjectLock
+// is allocated "higher up" in the stack and is therefore found first
+// at the next monitor exit.
+void TemplateTable::monitorenter() {
+ transition(atos, vtos);
+
+ __ verify_oop(R17_tos);
+
+ Register Rcurrent_monitor = R11_scratch1,
+ Rcurrent_obj = R12_scratch2,
+ Robj_to_lock = R17_tos,
+ Rscratch1 = R3_ARG1,
+ Rscratch2 = R4_ARG2,
+ Rscratch3 = R5_ARG3,
+ Rcurrent_obj_addr = R6_ARG4;
+
+ // ------------------------------------------------------------------------------
+ // Null pointer exception.
+ __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
+
+ // Try to acquire a lock on the object.
+ // Repeat until succeeded (i.e., until monitorenter returns true).
+
+ // ------------------------------------------------------------------------------
+ // Find a free slot in the monitor block.
+ Label Lfound, Lexit, Lallocate_new;
+ ConditionRegister found_free_slot = CCR0,
+ found_same_obj = CCR1,
+ reached_limit = CCR6;
+ {
+ Label Lloop, Lentry;
+ Register Rlimit = Rcurrent_monitor;
+
+ // Set up search loop - start with topmost monitor.
+ __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
+
+ __ ld(Rlimit, 0, R1_SP);
+ __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
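+    // Rlimit now points to the obj slot of the last monitor in the frame's
+    // monitor area; the search below stops once Rcurrent_obj_addr walks past it.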
+
+ // Check if any slot is present => short cut to allocation if not.
+ __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
+ __ bgt(reached_limit, Lallocate_new);
+
+ // Pre-load topmost slot.
+ __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
+ __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
+ // The search loop.
+ __ bind(Lloop);
+ // Found free slot?
+ __ cmpdi(found_free_slot, Rcurrent_obj, 0);
+ // Is this entry for same obj? If so, stop the search and take the found
+ // free slot or allocate a new one to enable recursive locking.
+ __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
+ __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
+ __ beq(found_free_slot, Lexit);
+ __ beq(found_same_obj, Lallocate_new);
+ __ bgt(reached_limit, Lallocate_new);
+    // Check if the last allocated BasicObjectLock has been reached.
+ __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
+ __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
+ // Next iteration if unchecked BasicObjectLocks exist on the stack.
+ __ b(Lloop);
+ }
+
+ // ------------------------------------------------------------------------------
+ // Check if we found a free slot.
+ __ bind(Lexit);
+
+ __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
+ __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
+ __ b(Lfound);
+
+  // We didn't find a free BasicObjectLock => allocate one.
+ __ align(32, 12);
+ __ bind(Lallocate_new);
+ __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
+ __ mr(Rcurrent_monitor, R26_monitor);
+ __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
+
+ // ------------------------------------------------------------------------------
+ // We now have a slot to lock.
+ __ bind(Lfound);
+
+  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
+  // The object has already been popped from the stack, so the expression stack looks correct.
+ __ addi(R14_bcp, R14_bcp, 1);
+
+ __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
+ __ lock_object(Rcurrent_monitor, Robj_to_lock);
+
+ // Check if there's enough space on the stack for the monitors after locking.
+ Label Lskip_stack_check;
+  // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
+  // the stack check; there should be enough shadow pages to cover it.
+ __ ld(Rscratch3, 0, R1_SP);
+ __ sub(Rscratch3, Rscratch3, R26_monitor);
+ __ cmpdi(CCR0, Rscratch3, 4*K);
+ __ blt(CCR0, Lskip_stack_check);
+
+ DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
+ __ li(Rscratch1, 0);
+ __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
+
+ __ align(32, 12);
+ __ bind(Lskip_stack_check);
+
+ // The bcp has already been incremented. Just need to dispatch to next instruction.
+ __ dispatch_next(vtos);
+}
+
+void TemplateTable::monitorexit() {
+ transition(atos, vtos);
+ __ verify_oop(R17_tos);
+
+ Register Rcurrent_monitor = R11_scratch1,
+ Rcurrent_obj = R12_scratch2,
+ Robj_to_lock = R17_tos,
+ Rcurrent_obj_addr = R3_ARG1,
+ Rlimit = R4_ARG2;
+ Label Lfound, Lillegal_monitor_state;
+
+  // Check corner case: unbalanced monitorenter / monitorexit.
+ __ ld(Rlimit, 0, R1_SP);
+ __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
+
+ // Null pointer check.
+ __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
+
+ __ cmpld(CCR0, R26_monitor, Rlimit);
+ __ bgt(CCR0, Lillegal_monitor_state);
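+  // R26_monitor above the monitor base means there are no monitors in this
+  // frame, i.e., the monitorexit is unbalanced.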
+
+ // Find the corresponding slot in the monitors stack section.
+ {
+ Label Lloop;
+
+ // Start with topmost monitor.
+ __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
+ __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
+ __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
+ __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
+
+ __ bind(Lloop);
+ // Is this entry for same obj?
+ __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
+ __ beq(CCR0, Lfound);
+
+    // Check if the last allocated BasicObjectLock has been reached.
+
+ __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
+ __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
+ __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
+
+ // Next iteration if unchecked BasicObjectLocks exist on the stack.
+ __ ble(CCR0, Lloop);
+ }
+
+  // Fell through without finding the BasicObjectLock => throw IllegalMonitorStateException.
+ __ bind(Lillegal_monitor_state);
+ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
+ __ should_not_reach_here();
+
+ __ align(32, 12);
+ __ bind(Lfound);
+ __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
+ -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
+ __ unlock_object(Rcurrent_monitor);
+}
+
+// ============================================================================
+// Wide bytecodes
+
+// Wide instructions. Simply redirects to the wide entry point for that instruction.
+void TemplateTable::wide() {
+ transition(vtos, vtos);
+
+ const Register Rtable = R11_scratch1,
+ Rindex = R12_scratch2,
+ Rtmp = R0;
+
+ __ lbz(Rindex, 1, R14_bcp);
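+  // Rindex now holds the opcode of the instruction following the wide prefix.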
+
+ __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
+
+ __ slwi(Rindex, Rindex, LogBytesPerWord);
+ __ ldx(Rtmp, Rtable, Rindex);
+ __ mtctr(Rtmp);
+ __ bctr();
+ // Note: the bcp increment step is part of the individual wide bytecode implementations.
+}
+#endif // !CC_INTERP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
+#define CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
+
+ static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags, Register Rscratch);
+ static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
+ static void generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp);
+ static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rindex, Register Rtemp, Register Rtemp2);
+
+ // Branch_conditional which takes TemplateTable::Condition.
+ static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);
+ static void if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0);
+
+#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -24,7 +24,8 @@
*/
#include "precompiled.hpp"
-#include "assembler_ppc.inline.hpp"
+#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
@@ -168,7 +169,7 @@
uint32_t *code = (uint32_t *)a->pc();
// Emit code.
- void (*test1)() = (void(*)())(void *)a->emit_fd();
+ void (*test1)() = (void(*)())(void *)a->function_entry();
Label l1;
@@ -242,7 +243,7 @@
a->blr();
// Emit code.
- void (*test2)() = (void(*)())(void *)a->emit_fd();
+ void (*test2)() = (void(*)())(void *)a->function_entry();
// uint32_t *code = (uint32_t *)a->pc();
Label l2;
@@ -383,8 +384,12 @@
#endif // COMPILER2
void VM_Version::determine_features() {
+#if defined(ABI_ELFv2)
+ const int code_size = (num_features+1+2*7)*BytesPerInstWord; // TODO(asmundak): calculation is incorrect.
+#else
// 7 InstWords for each call (function descriptor + blr instruction).
const int code_size = (num_features+1+2*7)*BytesPerInstWord;
+#endif
int features = 0;
// create test area
@@ -398,7 +403,7 @@
MacroAssembler* a = new MacroAssembler(&cb);
// Emit code.
- void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->emit_fd();
+ void (*test)(address addr, uint64_t offset)=(void(*)(address addr, uint64_t offset))(void *)a->function_entry();
uint32_t *code = (uint32_t *)a->pc();
// Don't use R0 in ldarx.
// Keep R3_ARG1 unmodified, it contains &field (see below).
@@ -415,7 +420,7 @@
a->blr();
// Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
- void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->emit_fd();
+ void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->function_entry();
a->dcbz(R3_ARG1); // R3_ARG1 = addr
a->blr();
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -413,16 +413,15 @@
// Update standard invocation counters
__ increment_invocation_counter(Rcounters, O0, G4_scratch);
if (ProfileInterpreter) {
- Address interpreter_invocation_counter(Rcounters, 0,
+ Address interpreter_invocation_counter(Rcounters,
in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
__ ld(interpreter_invocation_counter, G4_scratch);
__ inc(G4_scratch);
__ st(G4_scratch, interpreter_invocation_counter);
}
- Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit);
- __ sethi(invocation_limit);
- __ ld(invocation_limit, G3_scratch);
+ AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
+ __ load_contents(invocation_limit, G3_scratch);
__ cmp(O0, G3_scratch);
__ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
__ delayed()->nop();
@@ -439,7 +438,7 @@
// do nothing for empty methods (do not even increment invocation counter)
if ( UseFastEmptyMethods) {
// If we need a safepoint check, generate full interpreter entry.
- Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
+ AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, frame_manager_entry);
@@ -471,7 +470,7 @@
if ( UseFastAccessorMethods) {
// Check if we need to reach a safepoint and generate full interpreter
// frame if so.
- Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
+ AddressLiteral sync_state(SafepointSynchronize::address_of_state());
__ load_contents(sync_state, G3_scratch);
__ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
__ br(Assembler::notEqual, false, Assembler::pn, slow_path);
@@ -486,8 +485,8 @@
// read first instruction word and extract bytecode @ 1 and index @ 2
// get first 4 bytes of the bytecodes (big endian!)
- __ ld_ptr(Address(G5_method, 0, in_bytes(Method::const_offset())), G1_scratch);
- __ ld(Address(G1_scratch, 0, in_bytes(ConstMethod::codes_offset())), G1_scratch);
+ __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
+ __ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
// move index @ 2 far left then to the right most two bytes.
__ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
@@ -590,15 +589,15 @@
const Register Gtmp1 = G3_scratch ;
const Register Gtmp2 = G1_scratch;
const Register RconstMethod = Gtmp1;
- const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
- const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+ const Address constMethod(G5_method, in_bytes(Method::const_offset()));
+ const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
bool inc_counter = UseCompiler || CountCompiledCalls;
// make sure registers are different!
assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
- const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
+ const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
Label Lentry;
__ bind(Lentry);
@@ -643,7 +642,7 @@
// At this point Lstate points to new interpreter state
//
- const Address do_not_unlock_if_synchronized(G2_thread, 0,
+ const Address do_not_unlock_if_synchronized(G2_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
// Since at this point in the method invocation the exception handler
// would try to exit the monitor of synchronized methods which hasn't
@@ -717,17 +716,17 @@
{ Label L;
__ ld_ptr(STATE(_method), G5_method);
- __ ld_ptr(Address(G5_method, 0, in_bytes(Method::signature_handler_offset())), G3_scratch);
+ __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ tst(G3_scratch);
__ brx(Assembler::notZero, false, Assembler::pt, L);
__ delayed()->nop();
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), G5_method, false);
__ ld_ptr(STATE(_method), G5_method);
- Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
+ Address exception_addr(G2_thread, in_bytes(Thread::pending_exception_offset()));
__ ld_ptr(exception_addr, G3_scratch);
__ br_notnull_short(G3_scratch, Assembler::pn, pending_exception_present);
- __ ld_ptr(Address(G5_method, 0, in_bytes(Method::signature_handler_offset())), G3_scratch);
+ __ ld_ptr(Address(G5_method, in_bytes(Method::signature_handler_offset())), G3_scratch);
__ bind(L);
}
@@ -771,13 +770,13 @@
__ br( Assembler::zero, false, Assembler::pt, not_static);
__ delayed()->
// get native function entry point(O0 is a good temp until the very end)
- ld_ptr(Address(G5_method, 0, in_bytes(Method::native_function_offset())), O0);
+ ld_ptr(Address(G5_method, in_bytes(Method::native_function_offset())), O0);
// for static methods insert the mirror argument
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
- __ ld_ptr(Address(G5_method, 0, in_bytes(Method:: const_offset())), O1);
- __ ld_ptr(Address(O1, 0, in_bytes(ConstMethod::constants_offset())), O1);
- __ ld_ptr(Address(O1, 0, ConstantPool::pool_holder_offset_in_bytes()), O1);
+ __ ld_ptr(Address(G5_method, in_bytes(Method:: const_offset())), O1);
+ __ ld_ptr(Address(O1, in_bytes(ConstMethod::constants_offset())), O1);
+ __ ld_ptr(Address(O1, ConstantPool::pool_holder_offset_in_bytes()), O1);
__ ld_ptr(O1, mirror_offset, O1);
// where the mirror handle body is allocated:
#ifdef ASSERT
@@ -831,18 +830,17 @@
// flush the windows now. We don't care about the current (protection) frame
// only the outer frames
- __ flush_windows();
+ __ flushw();
// mark windows as flushed
Address flags(G2_thread,
- 0,
in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset()));
__ set(JavaFrameAnchor::flushed, G3_scratch);
__ st(G3_scratch, flags);
// Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.
- Address thread_state(G2_thread, 0, in_bytes(JavaThread::thread_state_offset()));
+ Address thread_state(G2_thread, in_bytes(JavaThread::thread_state_offset()));
#ifdef ASSERT
{ Label L;
__ ld(thread_state, G3_scratch);
@@ -867,7 +865,7 @@
// Block, if necessary, before resuming in _thread_in_Java state.
// In order for GC to work, don't clear the last_Java_sp until after blocking.
{ Label no_block;
- Address sync_state(G3_scratch, SafepointSynchronize::address_of_state());
+ AddressLiteral sync_state(SafepointSynchronize::address_of_state());
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
@@ -890,7 +888,7 @@
Label L;
- Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset()));
+ Address suspend_state(G2_thread, in_bytes(JavaThread::suspend_flags_offset()));
__ br(Assembler::notEqual, false, Assembler::pn, L);
__ delayed()->
ld(suspend_state, G3_scratch);
@@ -965,7 +963,7 @@
// handle exceptions (exception handling will handle unlocking!)
{ Label L;
- Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
+ Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
__ ld_ptr(exception_addr, Gtemp);
__ tst(Gtemp);
@@ -1055,8 +1053,8 @@
assert_different_registers(state, prev_state);
assert_different_registers(prev_state, G3_scratch);
const Register Gtmp = G3_scratch;
- const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
- const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
+ const Address constMethod (G5_method, in_bytes(Method::const_offset()));
+ const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
// slop factor is two extra slots on the expression stack so that
// we always have room to store a result when returning from a call without parameters
@@ -1075,7 +1073,7 @@
if (native) {
const Register RconstMethod = Gtmp;
- const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
+ const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh( size_of_parameters, Gtmp );
__ calc_mem_param_words(Gtmp, Gtmp); // space for native call parameters passed on the stack in words
@@ -1246,8 +1244,8 @@
if (init_value != noreg) {
Label clear_loop;
const Register RconstMethod = O1;
- const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
- const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
+ const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
+ const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
// NOTE: If you change the frame layout, this code will need to
// be updated!
@@ -1496,11 +1494,11 @@
//
// assert_different_registers(state, prev_state);
const Register Gtmp = G3_scratch;
- const RconstMethod = G3_scratch;
+ const Register RconstMethod = G3_scratch;
const Register tmp = O2;
- const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
- const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
- const Address size_of_locals (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
+ const Address constMethod(G5_method, in_bytes(Method::const_offset()));
+ const Address size_of_parameters(RconstMethod, in_bytes(ConstMethod::size_of_parameters_offset()));
+ const Address size_of_locals (RconstMethod, in_bytes(ConstMethod::size_of_locals_offset()));
__ ld_ptr(constMethod, RconstMethod);
__ lduh(size_of_parameters, tmp);
@@ -1555,8 +1553,8 @@
const Register Gtmp1 = G3_scratch;
// const Register Lmirror = L1; // native mirror (native calls only)
- const Address constMethod (G5_method, 0, in_bytes(Method::const_offset()));
- const Address access_flags (G5_method, 0, in_bytes(Method::access_flags_offset()));
+ const Address constMethod (G5_method, in_bytes(Method::const_offset()));
+ const Address access_flags (G5_method, in_bytes(Method::access_flags_offset()));
address entry_point = __ pc();
__ mov(G0, prevState); // no current activation
@@ -1709,7 +1707,7 @@
// We want exception in the thread no matter what we ultimately decide about frame type.
- Address exception_addr (G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
+ Address exception_addr (G2_thread, in_bytes(Thread::pending_exception_offset()));
__ verify_thread();
__ st_ptr(O0, exception_addr);
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -827,6 +827,7 @@
}
if (is_interpreted_frame()) {
+#ifndef CC_INTERP
DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
DESCRIBE_FP_OFFSET(interpreter_frame_padding);
@@ -837,6 +838,7 @@
if ((esp >= sp()) && (esp < fp())) {
values.describe(-1, esp, "*Lesp");
}
+#endif
}
if (!is_compiled_frame()) {
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -2497,6 +2497,24 @@
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
+
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+ int increment, int mask,
+ Register scratch1, Register scratch2,
+ Condition cond, Label *where) {
+ ld(counter_addr, scratch1);
+ add(scratch1, increment, scratch1);
+ if (is_simm13(mask)) {
+ andcc(scratch1, mask, G0);
+ } else {
+ set(mask, scratch2);
+ andcc(scratch1, scratch2, G0);
+ }
+ br(cond, false, Assembler::pn, *where);
+ delayed()->st(scratch1, counter_addr);
+}
#endif /* CC_INTERP */
// Inline assembly for:
@@ -2646,20 +2664,3 @@
}
#endif // CC_INTERP
}
-
-// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
-void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
- Register scratch1, Register scratch2,
- Condition cond, Label *where) {
- ld(counter_addr, scratch1);
- add(scratch1, increment, scratch1);
- if (is_simm13(mask)) {
- andcc(scratch1, mask, G0);
- } else {
- set(mask, scratch2);
- andcc(scratch1, scratch2, G0);
- }
- br(cond, false, Assembler::pn, *where);
- delayed()->st(scratch1, counter_addr);
-}
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/oop.inline.hpp"
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1089,6 +1089,21 @@
emit_arith(0x23, 0xC0, dst, src);
}
+void Assembler::andnl(Register dst, Register src1, Register src2) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode(dst, src1, src2);
+ emit_int8((unsigned char)0xF2);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::andnl(Register dst, Register src1, Address src2) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38(dst, src1, src2);
+ emit_int8((unsigned char)0xF2);
+ emit_operand(dst, src2);
+}
+
void Assembler::bsfl(Register dst, Register src) {
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
@@ -1110,6 +1125,51 @@
emit_int8((unsigned char)(0xC8 | encode));
}
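+// BLSI, BLSMSK, and BLSR share opcode 0xF3 in the VEX.0F38 space and are
+// distinguished by an opcode extension in the ModRM reg field (/3, /2, /1);
+// the rbx, rdx, and rcx arguments below supply those extension values.
+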
+void Assembler::blsil(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode(rbx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::blsil(Register dst, Address src) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38(rbx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_operand(rbx, src);
+}
+
+void Assembler::blsmskl(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode(rdx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::blsmskl(Register dst, Address src) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38(rdx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_operand(rdx, src);
+}
+
+void Assembler::blsrl(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode(rcx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::blsrl(Register dst, Address src) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38(rcx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_operand(rcx, src);
+}
+
void Assembler::call(Label& L, relocInfo::relocType rtype) {
// suspect disp32 is always good
int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
@@ -2878,6 +2938,24 @@
emit_operand(dst, src);
}
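+// TZCNT is encoded as BSF with an F3 prefix (F3 0F BC); on CPUs without BMI1
+// support the prefix is ignored and the instruction executes as plain BSF.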
+void Assembler::tzcntl(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
+ emit_int8((unsigned char)0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_int8(0x0F);
+ emit_int8((unsigned char)0xBC);
+ emit_int8((unsigned char)0xC0 | encode);
+}
+
+void Assembler::tzcntq(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
+ emit_int8((unsigned char)0xF3);
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_int8(0x0F);
+ emit_int8((unsigned char)0xBC);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
void Assembler::ucomisd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
@@ -4837,6 +4915,21 @@
emit_arith(0x23, 0xC0, dst, src);
}
+void Assembler::andnq(Register dst, Register src1, Register src2) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode_q(dst, src1, src2);
+ emit_int8((unsigned char)0xF2);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::andnq(Register dst, Register src1, Address src2) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38_q(dst, src1, src2);
+ emit_int8((unsigned char)0xF2);
+ emit_operand(dst, src2);
+}
+
void Assembler::bsfq(Register dst, Register src) {
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_int8(0x0F);
@@ -4858,6 +4951,51 @@
emit_int8((unsigned char)(0xC8 | encode));
}
+void Assembler::blsiq(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode_q(rbx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::blsiq(Register dst, Address src) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38_q(rbx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_operand(rbx, src);
+}
+
+void Assembler::blsmskq(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode_q(rdx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::blsmskq(Register dst, Address src) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38_q(rdx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_operand(rdx, src);
+}
+
+void Assembler::blsrq(Register dst, Register src) {
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ int encode = vex_prefix_0F38_and_encode_q(rcx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::blsrq(Register dst, Address src) {
+ InstructionMark im(this);
+ assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
+ vex_prefix_0F38_q(rcx, dst, src);
+ emit_int8((unsigned char)0xF3);
+ emit_operand(rcx, src);
+}
+
void Assembler::cdqq() {
prefix(REX_W);
emit_int8((unsigned char)0x99);
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -590,10 +590,35 @@
vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256);
}
+ void vex_prefix_0F38(Register dst, Register nds, Address src) {
+ bool vex_w = false;
+ bool vector256 = false;
+ vex_prefix(src, nds->encoding(), dst->encoding(),
+ VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
+ }
+
+ void vex_prefix_0F38_q(Register dst, Register nds, Address src) {
+ bool vex_w = true;
+ bool vector256 = false;
+ vex_prefix(src, nds->encoding(), dst->encoding(),
+ VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
+ }
int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
VexSimdPrefix pre, VexOpcode opc,
bool vex_w, bool vector256);
+ int vex_prefix_0F38_and_encode(Register dst, Register nds, Register src) {
+ bool vex_w = false;
+ bool vector256 = false;
+ return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
+ VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
+ }
+ int vex_prefix_0F38_and_encode_q(Register dst, Register nds, Register src) {
+ bool vex_w = true;
+ bool vector256 = false;
+ return vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(),
+ VEX_SIMD_NONE, VEX_OPCODE_0F_38, vex_w, vector256);
+ }
int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src,
VexSimdPrefix pre, bool vector256 = false,
VexOpcode opc = VEX_OPCODE_0F) {
@@ -897,6 +922,27 @@
void andq(Register dst, Address src);
void andq(Register dst, Register src);
+ // BMI instructions
+ void andnl(Register dst, Register src1, Register src2);
+ void andnl(Register dst, Register src1, Address src2);
+ void andnq(Register dst, Register src1, Register src2);
+ void andnq(Register dst, Register src1, Address src2);
+
+ void blsil(Register dst, Register src);
+ void blsil(Register dst, Address src);
+ void blsiq(Register dst, Register src);
+ void blsiq(Register dst, Address src);
+
+ void blsmskl(Register dst, Register src);
+ void blsmskl(Register dst, Address src);
+ void blsmskq(Register dst, Register src);
+ void blsmskq(Register dst, Address src);
+
+ void blsrl(Register dst, Register src);
+ void blsrl(Register dst, Address src);
+ void blsrq(Register dst, Register src);
+ void blsrq(Register dst, Address src);
+
void bsfl(Register dst, Register src);
void bsrl(Register dst, Register src);
@@ -1574,6 +1620,9 @@
void testq(Register dst, int32_t imm32);
void testq(Register dst, Register src);
+ // BMI - count trailing zeros
+ void tzcntl(Register dst, Register src);
+ void tzcntq(Register dst, Register src);
// Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
void ucomisd(XMMRegister dst, Address src);
--- a/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -250,7 +250,7 @@
return op1 - op2;
}
-inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
+inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
return ((juint) op1) >> (op2 & 0x1f);
}
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -574,7 +574,7 @@
MethodCounters::invocation_counter_offset() +
InvocationCounter::counter_offset());
const Address backedge_counter (rax,
- MethodCounter::backedge_counter_offset() +
+ MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset());
__ get_method_counters(rbx, rax, done);
@@ -982,16 +982,18 @@
// to save/restore.
address entry_point = __ pc();
- const Address constMethod (rbx, Method::const_offset());
const Address access_flags (rbx, Method::access_flags_offset());
- const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
// rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
// get parameter size (always needed)
- __ movptr(rcx, constMethod);
- __ load_unsigned_short(rcx, size_of_parameters);
+ {
+ const Address constMethod (rbx, Method::const_offset());
+ const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
+ __ movptr(rcx, constMethod);
+ __ load_unsigned_short(rcx, size_of_parameters);
+ }
// rbx: Method*
// rcx: size of parameters
@@ -1111,14 +1113,16 @@
const Register method = rbx;
const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
- const Address constMethod (method, Method::const_offset());
- const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
-
- // allocate space for parameters
+
+ // allocate space for parameters
__ movptr(method, STATE(_method));
__ verify_method_ptr(method);
- __ movptr(t, constMethod);
- __ load_unsigned_short(t, size_of_parameters);
+ {
+ const Address constMethod (method, Method::const_offset());
+ const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
+ __ movptr(t, constMethod);
+ __ load_unsigned_short(t, size_of_parameters);
+ }
__ shll(t, 2);
#ifdef _LP64
__ subptr(rsp, t);
@@ -2221,7 +2225,6 @@
case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
- case Interpreter::method_handle : entry_point = ((InterpreterGenerator*)this)->generate_method_handle_entry(); break;
case Interpreter::java_lang_math_sin : // fall thru
case Interpreter::java_lang_math_cos : // fall thru
@@ -2229,7 +2232,10 @@
case Interpreter::java_lang_math_abs : // fall thru
case Interpreter::java_lang_math_log : // fall thru
case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
+ case Interpreter::java_lang_math_sqrt : // fall thru
+ case Interpreter::java_lang_math_pow : // fall thru
+ case Interpreter::java_lang_math_exp : // fall thru
+ entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
case Interpreter::java_lang_ref_reference_get
: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
default : ShouldNotReachHere(); break;
@@ -2451,4 +2457,22 @@
return frame_size/BytesPerWord;
}
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+ switch (method_kind(m)) {
+ case Interpreter::java_lang_math_sin : // fall thru
+ case Interpreter::java_lang_math_cos : // fall thru
+ case Interpreter::java_lang_math_tan : // fall thru
+ case Interpreter::java_lang_math_abs : // fall thru
+ case Interpreter::java_lang_math_log : // fall thru
+ case Interpreter::java_lang_math_log10 : // fall thru
+ case Interpreter::java_lang_math_sqrt : // fall thru
+ case Interpreter::java_lang_math_pow : // fall thru
+ case Interpreter::java_lang_math_exp :
+ return false;
+ default:
+ return true;
+ }
+}
+
+
#endif // CC_INTERP (all)
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -687,6 +687,7 @@
void frame::describe_pd(FrameValues& values, int frame_no) {
if (is_interpreted_frame()) {
+#ifndef CC_INTERP
DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
DESCRIBE_FP_OFFSET(interpreter_frame_method);
@@ -695,6 +696,7 @@
DESCRIBE_FP_OFFSET(interpreter_frame_locals);
DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
+#endif
}
}
#endif
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -135,5 +135,11 @@
\
product(bool, UseCountLeadingZerosInstruction, false, \
"Use count leading zeros instruction") \
+ \
+ product(bool, UseCountTrailingZerosInstruction, false, \
+ "Use count trailing zeros instruction") \
+ \
+ product(bool, UseBMI1Instructions, false, \
+ "Use BMI instructions")
#endif // CPU_X86_VM_GLOBALS_X86_HPP
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -266,20 +266,6 @@
addptr(cache, tmp); // construct pointer to cache entry
}
-void InterpreterMacroAssembler::get_method_counters(Register method,
- Register mcs, Label& skip) {
- Label has_counters;
- movptr(mcs, Address(method, Method::method_counters_offset()));
- testptr(mcs, mcs);
- jcc(Assembler::notZero, has_counters);
- call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::build_method_counters), method);
- movptr(mcs, Address(method,Method::method_counters_offset()));
- testptr(mcs, mcs);
- jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
- bind(has_counters);
-}
-
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index) {
@@ -678,6 +664,20 @@
#endif /* !CC_INTERP */
+void InterpreterMacroAssembler::get_method_counters(Register method,
+ Register mcs, Label& skip) {
+ Label has_counters;
+ movptr(mcs, Address(method, Method::method_counters_offset()));
+ testptr(mcs, mcs);
+ jcc(Assembler::notZero, has_counters);
+ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::build_method_counters), method);
+ movptr(mcs, Address(method,Method::method_counters_offset()));
+ testptr(mcs, mcs);
+ jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
+ bind(has_counters);
+}
+
// Lock object
//
@@ -1359,6 +1359,19 @@
if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+ int increment, int mask,
+ Register scratch, bool preloaded,
+ Condition cond, Label* where) {
+ if (!preloaded) {
+ movl(scratch, counter_addr);
+ }
+ incrementl(scratch, increment);
+ movl(counter_addr, scratch);
+ andl(scratch, mask);
+ jcc(cond, *where);
+}
#endif /* CC_INTERP */
@@ -1430,17 +1443,3 @@
NOT_CC_INTERP(pop(state));
}
}
-
-// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
-void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
- Register scratch, bool preloaded,
- Condition cond, Label* where) {
- if (!preloaded) {
- movl(scratch, counter_addr);
- }
- incrementl(scratch, increment);
- movl(counter_addr, scratch);
- andl(scratch, mask);
- jcc(cond, *where);
-}
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -77,7 +77,6 @@
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
- void get_method_counters(Register method, Register mcs, Label& skip);
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
@@ -156,6 +155,7 @@
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif /* !CC_INTERP */
+ void get_method_counters(Register method, Register mcs, Label& skip);
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -271,20 +271,6 @@
addptr(cache, tmp); // construct pointer to cache entry
}
-void InterpreterMacroAssembler::get_method_counters(Register method,
- Register mcs, Label& skip) {
- Label has_counters;
- movptr(mcs, Address(method, Method::method_counters_offset()));
- testptr(mcs, mcs);
- jcc(Assembler::notZero, has_counters);
- call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::build_method_counters), method);
- movptr(mcs, Address(method,Method::method_counters_offset()));
- testptr(mcs, mcs);
- jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
- bind(has_counters);
-}
-
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index) {
@@ -676,6 +662,21 @@
#endif // CC_INTERP
+void InterpreterMacroAssembler::get_method_counters(Register method,
+ Register mcs, Label& skip) {
+ Label has_counters;
+ movptr(mcs, Address(method, Method::method_counters_offset()));
+ testptr(mcs, mcs);
+ jcc(Assembler::notZero, has_counters);
+ call_VM(noreg, CAST_FROM_FN_PTR(address,
+ InterpreterRuntime::build_method_counters), method);
+ movptr(mcs, Address(method,Method::method_counters_offset()));
+ testptr(mcs, mcs);
+ jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory
+ bind(has_counters);
+}
+
+
// Lock object
//
// Args:
@@ -1423,6 +1424,20 @@
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
}
+
+// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
+void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
+ int increment, int mask,
+ Register scratch, bool preloaded,
+ Condition cond, Label* where) {
+ if (!preloaded) {
+ movl(scratch, counter_addr);
+ }
+ incrementl(scratch, increment);
+ movl(counter_addr, scratch);
+ andl(scratch, mask);
+ jcc(cond, *where);
+}
#endif // !CC_INTERP
@@ -1491,16 +1506,3 @@
}
}
-// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
-void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
- int increment, int mask,
- Register scratch, bool preloaded,
- Condition cond, Label* where) {
- if (!preloaded) {
- movl(scratch, counter_addr);
- }
- incrementl(scratch, increment);
- movl(counter_addr, scratch);
- andl(scratch, mask);
- jcc(cond, *where);
-}
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -99,7 +99,6 @@
void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2));
void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2));
- void get_method_counters(Register method, Register mcs, Label& skip);
// load cpool->resolved_references(index);
void load_resolved_reference_at_index(Register result, Register index);
@@ -172,6 +171,7 @@
bool install_monitor_exception = true,
bool notify_jvmdi = true);
#endif // CC_INTERP
+ void get_method_counters(Register method, Register mcs, Label& skip);
// Object locking
void lock_object (Register lock_reg);
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -229,10 +229,12 @@
// abstract method entry
+#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
+#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -310,10 +310,12 @@
// abstract method entry
+#ifndef CC_INTERP
// pop return address, reset last_sp to NULL
__ empty_expression_stack();
__ restore_bcp(); // r13 must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
+#endif
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -429,7 +429,7 @@
}
char buf[256];
- jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
cores_per_cpu(), threads_per_core(),
cpu_family(), _model, _stepping,
(supports_cmov() ? ", cmov" : ""),
@@ -455,7 +455,9 @@
(supports_ht() ? ", ht": ""),
(supports_tsc() ? ", tsc": ""),
(supports_tscinv_bit() ? ", tscinvbit": ""),
- (supports_tscinv() ? ", tscinv": ""));
+ (supports_tscinv() ? ", tscinv": ""),
+ (supports_bmi1() ? ", bmi1" : ""),
+ (supports_bmi2() ? ", bmi2" : ""));
_features_str = strdup(buf);
// UseSSE is set to the smaller of what hardware supports and what
@@ -600,13 +602,6 @@
}
}
- // Use count leading zeros count instruction if available.
- if (supports_lzcnt()) {
- if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
- UseCountLeadingZerosInstruction = true;
- }
- }
-
// some defaults for AMD family 15h
if ( cpu_family() == 0x15 ) {
// On family 15h processors default is no sw prefetch
@@ -692,6 +687,35 @@
}
#endif // COMPILER2
+ // Use count leading zeros instruction if available.
+ if (supports_lzcnt()) {
+ if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
+ UseCountLeadingZerosInstruction = true;
+ }
+ } else if (UseCountLeadingZerosInstruction) {
+ warning("lzcnt instruction is not available on this CPU");
+ FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
+ }
+
+ if (supports_bmi1()) {
+ if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
+ UseBMI1Instructions = true;
+ }
+ } else if (UseBMI1Instructions) {
+ warning("BMI1 instructions are not available on this CPU");
+ FLAG_SET_DEFAULT(UseBMI1Instructions, false);
+ }
+
+ // Use count trailing zeros instruction if available
+ if (supports_bmi1()) {
+ if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
+ UseCountTrailingZerosInstruction = UseBMI1Instructions;
+ }
+ } else if (UseCountTrailingZerosInstruction) {
+ warning("tzcnt instruction is not available on this CPU");
+ FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
+ }
+
// Use population count instruction if available.
if (supports_popcnt()) {
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
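
For context on why the new lowerings are guarded by flags instead of being emitted unconditionally: the BSF/BSR fallbacks used when the flags are off leave the destination register undefined for a zero input, while TZCNT/LZCNT return the operand width and report a zero source via CF. A minimal sketch in plain C++ (illustrative only, not VM code) of the zero check a BSF-based fallback has to make explicit:

    #include <cstdint>

    // Explicit zero handling that a BSF-based fallback needs; TZCNT encodes
    // this case in hardware by returning 32 and setting the carry flag.
    static int count_trailing_zeros_fallback(uint32_t x) {
      if (x == 0) return 32;                    // BSF(0) leaves dst undefined
      int n = 0;
      while ((x & 1u) == 0) { x >>= 1; ++n; }   // stands in for the BSF result
      return n;
    }
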
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -141,7 +141,8 @@
struct {
uint32_t LahfSahf : 1,
CmpLegacy : 1,
- : 4,
+ : 3,
+ lzcnt_intel : 1,
lzcnt : 1,
sse4a : 1,
misalignsse : 1,
@@ -251,7 +252,9 @@
CPU_AVX2 = (1 << 18),
CPU_AES = (1 << 19),
CPU_ERMS = (1 << 20), // enhanced 'rep movsb/stosb' instructions
- CPU_CLMUL = (1 << 21) // carryless multiply for CRC
+ CPU_CLMUL = (1 << 21), // carryless multiply for CRC
+ CPU_BMI1 = (1 << 22),
+ CPU_BMI2 = (1 << 23)
} cpuFeatureFlags;
enum {
@@ -423,6 +426,8 @@
if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
result |= CPU_AVX2;
}
+ if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
+ result |= CPU_BMI1;
if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
result |= CPU_TSC;
if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
@@ -444,6 +449,13 @@
if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
result |= CPU_SSE4A;
}
+ // Intel features.
+ if(is_intel()) {
+ if(_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
+ result |= CPU_BMI2;
+ if(_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
+ result |= CPU_LZCNT;
+ }
return result;
}
@@ -560,7 +572,8 @@
static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; }
static bool supports_erms() { return (_cpuFeatures & CPU_ERMS) != 0; }
static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; }
-
+ static bool supports_bmi1() { return (_cpuFeatures & CPU_BMI1) != 0; }
+ static bool supports_bmi2() { return (_cpuFeatures & CPU_BMI2) != 0; }
// Intel features
static bool is_intel_family_core() { return is_intel() &&
extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
--- a/hotspot/src/cpu/x86/vm/x86_32.ad Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad Fri Mar 14 09:26:27 2014 +0100
@@ -5163,6 +5163,19 @@
%}
instruct countTrailingZerosI(rRegI dst, rRegI src, eFlagsReg cr) %{
+ predicate(UseCountTrailingZerosInstruction);
+ match(Set dst (CountTrailingZerosI src));
+ effect(KILL cr);
+
+ format %{ "TZCNT $dst, $src\t# count trailing zeros (int)" %}
+ ins_encode %{
+ __ tzcntl($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, eFlagsReg cr) %{
+ predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosI src));
effect(KILL cr);
@@ -5182,6 +5195,30 @@
%}
instruct countTrailingZerosL(rRegI dst, eRegL src, eFlagsReg cr) %{
+ predicate(UseCountTrailingZerosInstruction);
+ match(Set dst (CountTrailingZerosL src));
+ effect(TEMP dst, KILL cr);
+
+ format %{ "TZCNT $dst, $src.lo\t# count trailing zeros (long) \n\t"
+ "JNC done\n\t"
+ "TZCNT $dst, $src.hi\n\t"
+ "ADD $dst, 32\n"
+ "done:" %}
+ ins_encode %{
+ Register Rdst = $dst$$Register;
+ Register Rsrc = $src$$Register;
+ Label done;
+ __ tzcntl(Rdst, Rsrc);
+ __ jccb(Assembler::carryClear, done);
+ __ tzcntl(Rdst, HIGH_FROM_LOW(Rsrc));
+ __ addl(Rdst, BitsPerInt);
+ __ bind(done);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
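
The format string above relies on TZCNT setting CF exactly when its source is zero, so the JNC skips the high-word step whenever the low word already contained a set bit. A portable sketch of the same composition (plain C++ with GCC/Clang's __builtin_ctz assumed, not VM code):

    #include <cstdint>

    // Mirrors the TZCNT / JNC / TZCNT / ADD sequence for a 64-bit value
    // held in two 32-bit halves.
    static int tzcnt64_from_halves(uint32_t lo, uint32_t hi) {
      if (lo != 0) return __builtin_ctz(lo);       // TZCNT $dst, $src.lo ; JNC done
      if (hi != 0) return 32 + __builtin_ctz(hi);  // TZCNT $dst, $src.hi ; ADD $dst, 32
      return 64;                                   // both halves zero
    }
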
+instruct countTrailingZerosL_bsf(rRegI dst, eRegL src, eFlagsReg cr) %{
+ predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosL src));
effect(TEMP dst, KILL cr);
@@ -8027,6 +8064,123 @@
ins_pipe( ialu_mem_imm );
%}
+// BMI1 instructions
+instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, eFlagsReg cr) %{
+ match(Set dst (AndI (XorI src1 minus_1) src2));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "ANDNL $dst, $src1, $src2" %}
+
+ ins_encode %{
+ __ andnl($dst$$Register, $src1$$Register, $src2$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, eFlagsReg cr) %{
+ match(Set dst (AndI (XorI src1 minus_1) (LoadI src2) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "ANDNL $dst, $src1, $src2" %}
+
+ ins_encode %{
+ __ andnl($dst$$Register, $src1$$Register, $src2$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, eFlagsReg cr) %{
+ match(Set dst (AndI (SubI imm_zero src) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "BLSIL $dst, $src" %}
+
+ ins_encode %{
+ __ blsil($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, eFlagsReg cr) %{
+ match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "BLSIL $dst, $src" %}
+
+ ins_encode %{
+ __ blsil($dst$$Register, $src$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (XorI (AddI src minus_1) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "BLSMSKL $dst, $src" %}
+
+ ins_encode %{
+ __ blsmskl($dst$$Register, $src$$Register);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "BLSMSKL $dst, $src" %}
+
+ ins_encode %{
+ __ blsmskl($dst$$Register, $src$$Address);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (AndI (AddI src minus_1) src) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "BLSRL $dst, $src" %}
+
+ ins_encode %{
+ __ blsrl($dst$$Register, $src$$Register);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "BLSRL $dst, $src" %}
+
+ ins_encode %{
+ __ blsrl($dst$$Register, $src$$Address);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
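
The BMI1 rule families above pattern-match well-known bit tricks in the ideal graph; a plain C++ sketch (illustrative only, not VM code) of the source-level idioms they recognize:

    #include <cstdint>

    uint32_t andn_idiom  (uint32_t a, uint32_t b) { return ~a & b; }  // -> ANDN
    uint32_t blsi_idiom  (uint32_t x) { return x & (0 - x); }  // isolate lowest set bit
    uint32_t blsmsk_idiom(uint32_t x) { return x ^ (x - 1); }  // mask through lowest set bit
    uint32_t blsr_idiom  (uint32_t x) { return x & (x - 1); }  // clear lowest set bit

On this 32-bit port the long variants additionally split each operation across the low and high register halves, using the flags produced by the low half (the JNZ/JNC in the formats above) to decide whether the high half still needs work.
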
// Or Instructions
// Or Register with Register
instruct orI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
@@ -8649,6 +8803,210 @@
ins_pipe( ialu_reg_long_mem );
%}
+// BMI1 instructions
+instruct andnL_eReg_eReg_eReg(eRegL dst, eRegL src1, eRegL src2, immL_M1 minus_1, eFlagsReg cr) %{
+ match(Set dst (AndL (XorL src1 minus_1) src2));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ format %{ "ANDNL $dst.lo, $src1.lo, $src2.lo\n\t"
+ "ANDNL $dst.hi, $src1.hi, $src2.hi"
+ %}
+
+ ins_encode %{
+ Register Rdst = $dst$$Register;
+ Register Rsrc1 = $src1$$Register;
+ Register Rsrc2 = $src2$$Register;
+ __ andnl(Rdst, Rsrc1, Rsrc2);
+ __ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), HIGH_FROM_LOW(Rsrc2));
+ %}
+ ins_pipe(ialu_reg_reg_long);
+%}
+
+instruct andnL_eReg_eReg_mem(eRegL dst, eRegL src1, memory src2, immL_M1 minus_1, eFlagsReg cr) %{
+ match(Set dst (AndL (XorL src1 minus_1) (LoadL src2) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ ins_cost(125);
+ format %{ "ANDNL $dst.lo, $src1.lo, $src2\n\t"
+ "ANDNL $dst.hi, $src1.hi, $src2+4"
+ %}
+
+ ins_encode %{
+ Register Rdst = $dst$$Register;
+ Register Rsrc1 = $src1$$Register;
+ Address src2_hi = Address::make_raw($src2$$base, $src2$$index, $src2$$scale, $src2$$disp + 4, relocInfo::none);
+
+ __ andnl(Rdst, Rsrc1, $src2$$Address);
+ __ andnl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc1), src2_hi);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsiL_eReg_eReg(eRegL dst, eRegL src, immL0 imm_zero, eFlagsReg cr) %{
+ match(Set dst (AndL (SubL imm_zero src) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ format %{ "MOVL $dst.hi, 0\n\t"
+ "BLSIL $dst.lo, $src.lo\n\t"
+ "JNZ done\n\t"
+ "BLSIL $dst.hi, $src.hi\n"
+ "done:"
+ %}
+
+ ins_encode %{
+ Label done;
+ Register Rdst = $dst$$Register;
+ Register Rsrc = $src$$Register;
+ __ movl(HIGH_FROM_LOW(Rdst), 0);
+ __ blsil(Rdst, Rsrc);
+ __ jccb(Assembler::notZero, done);
+ __ blsil(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
+ __ bind(done);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsiL_eReg_mem(eRegL dst, memory src, immL0 imm_zero, eFlagsReg cr) %{
+ match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ ins_cost(125);
+ format %{ "MOVL $dst.hi, 0\n\t"
+ "BLSIL $dst.lo, $src\n\t"
+ "JNZ done\n\t"
+ "BLSIL $dst.hi, $src+4\n"
+ "done:"
+ %}
+
+ ins_encode %{
+ Label done;
+ Register Rdst = $dst$$Register;
+ Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
+
+ __ movl(HIGH_FROM_LOW(Rdst), 0);
+ __ blsil(Rdst, $src$$Address);
+ __ jccb(Assembler::notZero, done);
+ __ blsil(HIGH_FROM_LOW(Rdst), src_hi);
+ __ bind(done);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsmskL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (XorL (AddL src minus_1) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ format %{ "MOVL $dst.hi, 0\n\t"
+ "BLSMSKL $dst.lo, $src.lo\n\t"
+ "JNC done\n\t"
+ "BLSMSKL $dst.hi, $src.hi\n"
+ "done:"
+ %}
+
+ ins_encode %{
+ Label done;
+ Register Rdst = $dst$$Register;
+ Register Rsrc = $src$$Register;
+ __ movl(HIGH_FROM_LOW(Rdst), 0);
+ __ blsmskl(Rdst, Rsrc);
+ __ jccb(Assembler::carryClear, done);
+ __ blsmskl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
+ __ bind(done);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsmskL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ ins_cost(125);
+ format %{ "MOVL $dst.hi, 0\n\t"
+ "BLSMSKL $dst.lo, $src\n\t"
+ "JNC done\n\t"
+ "BLSMSKL $dst.hi, $src+4\n"
+ "done:"
+ %}
+
+ ins_encode %{
+ Label done;
+ Register Rdst = $dst$$Register;
+ Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
+
+ __ movl(HIGH_FROM_LOW(Rdst), 0);
+ __ blsmskl(Rdst, $src$$Address);
+ __ jccb(Assembler::carryClear, done);
+ __ blsmskl(HIGH_FROM_LOW(Rdst), src_hi);
+ __ bind(done);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsrL_eReg_eReg(eRegL dst, eRegL src, immL_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (AndL (AddL src minus_1) src) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ format %{ "MOVL $dst.hi, $src.hi\n\t"
+ "BLSRL $dst.lo, $src.lo\n\t"
+ "JNC done\n\t"
+ "BLSRL $dst.hi, $src.hi\n"
+ "done:"
+ %}
+
+ ins_encode %{
+ Label done;
+ Register Rdst = $dst$$Register;
+ Register Rsrc = $src$$Register;
+ __ movl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
+ __ blsrl(Rdst, Rsrc);
+ __ jccb(Assembler::carryClear, done);
+ __ blsrl(HIGH_FROM_LOW(Rdst), HIGH_FROM_LOW(Rsrc));
+ __ bind(done);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsrL_eReg_mem(eRegL dst, memory src, immL_M1 minus_1, eFlagsReg cr)
+%{
+ match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr, TEMP dst);
+
+ ins_cost(125);
+ format %{ "MOVL $dst.hi, $src+4\n\t"
+ "BLSRL $dst.lo, $src\n\t"
+ "JNC done\n\t"
+ "BLSRL $dst.hi, $src+4\n"
+ "done:"
+ %}
+
+ ins_encode %{
+ Label done;
+ Register Rdst = $dst$$Register;
+ Address src_hi = Address::make_raw($src$$base, $src$$index, $src$$scale, $src$$disp + 4, relocInfo::none);
+ __ movl(HIGH_FROM_LOW(Rdst), src_hi);
+ __ blsrl(Rdst, $src$$Address);
+ __ jccb(Assembler::carryClear, done);
+ __ blsrl(HIGH_FROM_LOW(Rdst), src_hi);
+ __ bind(done);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
// Or Long Register with Register
instruct orl_eReg(eRegL dst, eRegL src, eFlagsReg cr) %{
match(Set dst (OrL dst src));
--- a/hotspot/src/cpu/x86/vm/x86_64.ad Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad Fri Mar 14 09:26:27 2014 +0100
@@ -6022,6 +6022,19 @@
%}
instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
+ predicate(UseCountTrailingZerosInstruction);
+ match(Set dst (CountTrailingZerosI src));
+ effect(KILL cr);
+
+ format %{ "tzcntl $dst, $src\t# count trailing zeros (int)" %}
+ ins_encode %{
+ __ tzcntl($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct countTrailingZerosI_bsf(rRegI dst, rRegI src, rFlagsReg cr) %{
+ predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosI src));
effect(KILL cr);
@@ -6041,6 +6054,19 @@
%}
instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
+ predicate(UseCountTrailingZerosInstruction);
+ match(Set dst (CountTrailingZerosL src));
+ effect(KILL cr);
+
+ format %{ "tzcntq $dst, $src\t# count trailing zeros (long)" %}
+ ins_encode %{
+ __ tzcntq($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct countTrailingZerosL_bsf(rRegI dst, rRegL src, rFlagsReg cr) %{
+ predicate(!UseCountTrailingZerosInstruction);
match(Set dst (CountTrailingZerosL src));
effect(KILL cr);
@@ -8622,6 +8648,122 @@
ins_pipe(ialu_mem_imm);
%}
+// BMI1 instructions
+instruct andnI_rReg_rReg_mem(rRegI dst, rRegI src1, memory src2, immI_M1 minus_1, rFlagsReg cr) %{
+ match(Set dst (AndI (XorI src1 minus_1) (LoadI src2)));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "andnl $dst, $src1, $src2" %}
+
+ ins_encode %{
+ __ andnl($dst$$Register, $src1$$Register, $src2$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct andnI_rReg_rReg_rReg(rRegI dst, rRegI src1, rRegI src2, immI_M1 minus_1, rFlagsReg cr) %{
+ match(Set dst (AndI (XorI src1 minus_1) src2));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "andnl $dst, $src1, $src2" %}
+
+ ins_encode %{
+ __ andnl($dst$$Register, $src1$$Register, $src2$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsiI_rReg_rReg(rRegI dst, rRegI src, immI0 imm_zero, rFlagsReg cr) %{
+ match(Set dst (AndI (SubI imm_zero src) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "blsil $dst, $src" %}
+
+ ins_encode %{
+ __ blsil($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsiI_rReg_mem(rRegI dst, memory src, immI0 imm_zero, rFlagsReg cr) %{
+ match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "blsil $dst, $src" %}
+
+ ins_encode %{
+ __ blsil($dst$$Register, $src$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsmskI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "blsmskl $dst, $src" %}
+
+ ins_encode %{
+ __ blsmskl($dst$$Register, $src$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsmskI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (XorI (AddI src minus_1) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "blsmskl $dst, $src" %}
+
+ ins_encode %{
+ __ blsmskl($dst$$Register, $src$$Register);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsrI_rReg_rReg(rRegI dst, rRegI src, immI_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (AndI (AddI src minus_1) src) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "blsrl $dst, $src" %}
+
+ ins_encode %{
+ __ blsrl($dst$$Register, $src$$Register);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsrI_rReg_mem(rRegI dst, memory src, immI_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "blsrl $dst, $src" %}
+
+ ins_encode %{
+ __ blsrl($dst$$Register, $src$$Address);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
// Or Instructions
// Or Register with Register
instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
@@ -8853,6 +8995,122 @@
ins_pipe(ialu_mem_imm);
%}
+// BMI1 instructions
+instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{
+ match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "andnq $dst, $src1, $src2" %}
+
+ ins_encode %{
+ __ andnq($dst$$Register, $src1$$Register, $src2$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct andnL_rReg_rReg_rReg(rRegL dst, rRegL src1, rRegL src2, immL_M1 minus_1, rFlagsReg cr) %{
+ match(Set dst (AndL (XorL src1 minus_1) src2));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "andnq $dst, $src1, $src2" %}
+
+ ins_encode %{
+ __ andnq($dst$$Register, $src1$$Register, $src2$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsiL_rReg_rReg(rRegL dst, rRegL src, immL0 imm_zero, rFlagsReg cr) %{
+ match(Set dst (AndL (SubL imm_zero src) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "blsiq $dst, $src" %}
+
+ ins_encode %{
+ __ blsiq($dst$$Register, $src$$Register);
+ %}
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsiL_rReg_mem(rRegL dst, memory src, immL0 imm_zero, rFlagsReg cr) %{
+ match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "blsiq $dst, $src" %}
+
+ ins_encode %{
+ __ blsiq($dst$$Register, $src$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsmskL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "blsmskq $dst, $src" %}
+
+ ins_encode %{
+ __ blsmskq($dst$$Register, $src$$Address);
+ %}
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct blsmskL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (XorL (AddL src minus_1) src));
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "blsmskq $dst, $src" %}
+
+ ins_encode %{
+ __ blsmskq($dst$$Register, $src$$Register);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsrL_rReg_rReg(rRegL dst, rRegL src, immL_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (AndL (AddL src minus_1) src) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ format %{ "blsrq $dst, $src" %}
+
+ ins_encode %{
+ __ blsrq($dst$$Register, $src$$Register);
+ %}
+
+ ins_pipe(ialu_reg);
+%}
+
+instruct blsrL_rReg_mem(rRegL dst, memory src, immL_M1 minus_1, rFlagsReg cr)
+%{
+ match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src)) );
+ predicate(UseBMI1Instructions);
+ effect(KILL cr);
+
+ ins_cost(125);
+ format %{ "blsrq $dst, $src" %}
+
+ ins_encode %{
+ __ blsrq($dst$$Register, $src$$Address);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
// Or Instructions
// Or Register with Register
instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
--- a/hotspot/src/os/aix/vm/os_aix.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/os/aix/vm/os_aix.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1135,15 +1135,10 @@
}
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
- {
- // gettimeofday - based on time in seconds since the Epoch thus does not wrap
- info_ptr->max_value = ALL_64_BITS;
-
- // gettimeofday is a real time clock so it skips
- info_ptr->may_skip_backward = true;
- info_ptr->may_skip_forward = true;
- }
-
+ info_ptr->max_value = ALL_64_BITS;
+ // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
+ info_ptr->may_skip_backward = false;
+ info_ptr->may_skip_forward = false;
info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
@@ -2799,105 +2794,6 @@
return ::read(fd, buf, nBytes);
}
-#define NANOSECS_PER_MILLISEC 1000000
-
-int os::sleep(Thread* thread, jlong millis, bool interruptible) {
- assert(thread == Thread::current(), "thread consistency check");
-
- // Prevent nasty overflow in deadline calculation
- // by handling long sleeps similar to solaris or windows.
- const jlong limit = INT_MAX;
- int result;
- while (millis > limit) {
- if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
- return result;
- }
- millis -= limit;
- }
-
- ParkEvent * const slp = thread->_SleepEvent;
- slp->reset();
- OrderAccess::fence();
-
- if (interruptible) {
- jlong prevtime = javaTimeNanos();
-
- // Prevent precision loss and too long sleeps
- jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
-
- for (;;) {
- if (os::is_interrupted(thread, true)) {
- return OS_INTRPT;
- }
-
- jlong newtime = javaTimeNanos();
-
- assert(newtime >= prevtime, "time moving backwards");
- // Doing prevtime and newtime in microseconds doesn't help precision,
- // and trying to round up to avoid lost milliseconds can result in a
- // too-short delay.
- millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
-
- if (millis <= 0) {
- return OS_OK;
- }
-
- // Stop sleeping if we passed the deadline
- if (newtime >= deadline) {
- return OS_OK;
- }
-
- prevtime = newtime;
-
- {
- assert(thread->is_Java_thread(), "sanity check");
- JavaThread *jt = (JavaThread *) thread;
- ThreadBlockInVM tbivm(jt);
- OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
-
- jt->set_suspend_equivalent();
-
- slp->park(millis);
-
- // were we externally suspended while we were waiting?
- jt->check_and_wait_while_suspended();
- }
- }
- } else {
- OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
- jlong prevtime = javaTimeNanos();
-
- // Prevent precision loss and too long sleeps
- jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
-
- for (;;) {
- // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
- // the 1st iteration ...
- jlong newtime = javaTimeNanos();
-
- if (newtime - prevtime < 0) {
- // time moving backwards, should only happen if no monotonic clock
- // not a guarantee() because JVM should not abort on kernel/glibc bugs
- // - HS14 Commented out as not implemented.
- // - TODO Maybe we should implement it?
- //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
- } else {
- millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
- }
-
- if (millis <= 0) break;
-
- if (newtime >= deadline) {
- break;
- }
-
- prevtime = newtime;
- slp->park(millis);
- }
- return OS_OK;
- }
-}
-
void os::naked_short_sleep(jlong ms) {
struct timespec req;
@@ -3246,50 +3142,6 @@
guarantee(osthread->sr.is_running(), "Must be running!");
}
-////////////////////////////////////////////////////////////////////////////////
-// interrupt support
-
-void os::interrupt(Thread* thread) {
- assert(Thread::current() == thread || Threads_lock->owned_by_self(),
- "possibility of dangling Thread pointer");
-
- OSThread* osthread = thread->osthread();
-
- if (!osthread->interrupted()) {
- osthread->set_interrupted(true);
- // More than one thread can get here with the same value of osthread,
- // resulting in multiple notifications. We do, however, want the store
- // to interrupted() to be visible to other threads before we execute unpark().
- OrderAccess::fence();
- ParkEvent * const slp = thread->_SleepEvent;
- if (slp != NULL) slp->unpark();
- }
-
- // For JSR166. Unpark even if interrupt status already was set
- if (thread->is_Java_thread())
- ((JavaThread*)thread)->parker()->unpark();
-
- ParkEvent * ev = thread->_ParkEvent;
- if (ev != NULL) ev->unpark();
-
-}
-
-bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
- assert(Thread::current() == thread || Threads_lock->owned_by_self(),
- "possibility of dangling Thread pointer");
-
- OSThread* osthread = thread->osthread();
-
- bool interrupted = osthread->interrupted();
-
- if (interrupted && clear_interrupted) {
- osthread->set_interrupted(false);
- // consider thread->_SleepEvent->reset() ... optional optimization
- }
-
- return interrupted;
-}
-
///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)
--- a/hotspot/src/os/aix/vm/os_aix.inline.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/os/aix/vm/os_aix.inline.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -283,4 +283,10 @@
const char* optval, socklen_t optlen) {
return ::setsockopt(fd, level, optname, optval, optlen);
}
+
+inline bool os::supports_monotonic_clock() {
+ // mread_real_time() is monotonic on AIX (see os::javaTimeNanos() comments)
+ return true;
+}
+
#endif // OS_AIX_VM_OS_AIX_INLINE_HPP
--- a/hotspot/src/share/vm/adlc/formssel.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/adlc/formssel.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -660,6 +660,7 @@
int USE_of_memory = 0;
int DEF_of_memory = 0;
const char* last_memory_DEF = NULL; // to test DEF/USE pairing in asserts
+ const char* last_memory_USE = NULL;
Component *unique = NULL;
Component *comp = NULL;
ComponentList &components = (ComponentList &)_components;
@@ -681,7 +682,16 @@
assert(0 == strcmp(last_memory_DEF, comp->_name), "every memory DEF is followed by a USE of the same name");
last_memory_DEF = NULL;
}
- USE_of_memory++;
+ // Handles the case where the same memory operand is used multiple times, as in the BMI1 instructions.
+ if (last_memory_USE != NULL) {
+ if (strcmp(comp->_name, last_memory_USE) != 0) {
+ USE_of_memory++;
+ }
+ } else {
+ USE_of_memory++;
+ }
+ last_memory_USE = comp->_name;
+
if (DEF_of_memory == 0) // defs take precedence
unique = comp;
} else {
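
The de-duplication added above exists because several of the new BMI1 memory rules name the same memory operand twice in a single match expression, e.g.

    match(Set dst (AndI (SubI imm_zero (LoadI src)) (LoadI src)));

Both LoadI nodes read the one operand 'src', so counting them as two memory USEs would make the instruction appear to touch two distinct memory locations and confuse the DEF/USE accounting that follows.
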
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1436,7 +1436,7 @@
bool need_mem_bar = false;
if (method()->name() == ciSymbol::object_initializer_name() &&
- scope()->wrote_final()) {
+ (scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()))) {
need_mem_bar = true;
}
@@ -1550,6 +1550,10 @@
scope()->set_wrote_final();
}
+ if (code == Bytecodes::_putfield) {
+ scope()->set_wrote_fields();
+ }
+
const int offset = !needs_patching ? field->offset() : -1;
switch (code) {
case Bytecodes::_getstatic: {
@@ -3767,11 +3771,14 @@
}
// now perform tests that are based on flag settings
- if (callee->force_inline()) {
- if (inline_level() > MaxForceInlineLevel) INLINE_BAILOUT("MaxForceInlineLevel");
- print_inlining(callee, "force inline by annotation");
- } else if (callee->should_inline()) {
- print_inlining(callee, "force inline by CompileOracle");
+ if (callee->force_inline() || callee->should_inline()) {
+ if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel");
+ if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep");
+
+ const char* msg = "";
+ if (callee->force_inline()) msg = "force inline by annotation";
+ if (callee->should_inline()) msg = "force inline by CompileOracle";
+ print_inlining(callee, msg);
} else {
// use heuristic controls on inlining
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep");
--- a/hotspot/src/share/vm/c1/c1_IR.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/c1/c1_IR.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -142,6 +142,7 @@
_number_of_locks = 0;
_monitor_pairing_ok = method->has_balanced_monitors();
_wrote_final = false;
+ _wrote_fields = false;
_start = NULL;
if (osr_bci == -1) {
--- a/hotspot/src/share/vm/c1/c1_IR.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -150,6 +150,7 @@
int _number_of_locks; // the number of monitor lock slots needed
bool _monitor_pairing_ok; // the monitor pairing info
bool _wrote_final; // has written final field
+ bool _wrote_fields; // has written fields
BlockBegin* _start; // the start block, successsors are method entries
BitMap _requires_phi_function; // bit is set if phi functions at loop headers are necessary for a local variable
@@ -184,6 +185,9 @@
BlockBegin* start() const { return _start; }
void set_wrote_final() { _wrote_final = true; }
bool wrote_final () const { return _wrote_final; }
+ void set_wrote_fields() { _wrote_fields = true; }
+ bool wrote_fields () const { return _wrote_fields; }
+
};
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1734,7 +1734,8 @@
(info ? new CodeEmitInfo(info) : NULL));
}
- if (is_volatile && !needs_patching) {
+ bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
+ if (needs_atomic_access && !needs_patching) {
volatile_field_store(value.result(), address, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@@ -1807,7 +1808,8 @@
address = generate_address(object.result(), x->offset(), field_type);
}
- if (is_volatile && !needs_patching) {
+ bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
+ if (needs_atomic_access && !needs_patching) {
volatile_field_load(address, reg, info);
} else {
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -809,11 +809,10 @@
int bci = vfst.bci();
Bytecodes::Code code = caller_method()->java_code_at(bci);
-#ifndef PRODUCT
// this is used by assertions in the access_field_patching_id
BasicType patch_field_type = T_ILLEGAL;
-#endif // PRODUCT
bool deoptimize_for_volatile = false;
+ bool deoptimize_for_atomic = false;
int patch_field_offset = -1;
KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
@@ -839,11 +838,24 @@
// is the path for patching field offsets. load_klass is only
// used for patching references to oops which don't need special
// handling in the volatile case.
+
deoptimize_for_volatile = result.access_flags().is_volatile();
-#ifndef PRODUCT
+ // If we are patching a field which should be accessed atomically, then
+ // the generated code is not correct either, so force deoptimization.
+ // We need to only cover T_LONG and T_DOUBLE fields, as we can
+ // break access atomicity only for them.
+
+ // Strictly speaking, the deoptimizaation on 64-bit platforms
+ // is unnecessary, and T_LONG stores on 32-bit platforms need
+ // to be handled by special patching code when AlwaysAtomicAccesses
+ // becomes product feature. At this point, we are still going
+ // for the deoptimization for consistency against volatile
+ // accesses.
+
patch_field_type = result.field_type();
-#endif
+ deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
+
} else if (load_klass_or_mirror_patch_id) {
Klass* k = NULL;
switch (code) {
@@ -918,13 +930,19 @@
ShouldNotReachHere();
}
- if (deoptimize_for_volatile) {
- // At compile time we assumed the field wasn't volatile but after
- // loading it turns out it was volatile so we have to throw the
+ if (deoptimize_for_volatile || deoptimize_for_atomic) {
+ // At compile time we assumed the field wasn't volatile/atomic but after
+ // loading it turns out it was volatile/atomic so we have to throw the
// compiled code out and let it be regenerated.
if (TracePatching) {
- tty->print_cr("Deoptimizing for patching volatile field reference");
+ if (deoptimize_for_volatile) {
+ tty->print_cr("Deoptimizing for patching volatile field reference");
+ }
+ if (deoptimize_for_atomic) {
+ tty->print_cr("Deoptimizing for patching atomic field reference");
+ }
}
+
// It's possible the nmethod was invalidated in the last
// safepoint, but if it's still alive then make it not_entrant.
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
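
For background on the deoptimize_for_atomic path added above: on a 32-bit VM a non-atomic 64-bit field access is split into two 32-bit memory operations, so a concurrent reader can observe a 'torn' value; this is what -XX:+AlwaysAtomicAccesses is meant to rule out. A minimal illustration in plain C++ (hypothetical types, not VM code):

    #include <cstdint>

    struct Fields { uint32_t lo; uint32_t hi; };  // stands in for a T_LONG field

    // Two separate 32-bit stores: a reader running between them can see a
    // half-old, half-new value -- the tear the patch deoptimizes to avoid.
    static void non_atomic_store64(Fields* f, uint64_t v) {
      f->lo = (uint32_t) v;
      f->hi = (uint32_t)(v >> 32);
    }
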
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -724,6 +724,11 @@
VM_ENTRY_MARK;
+ // Disable CHA for default methods for now
+ if (root_m->get_Method()->is_default_method()) {
+ return NULL;
+ }
+
methodHandle target;
{
MutexLocker locker(Compile_lock);
--- a/hotspot/src/share/vm/ci/ciMethodData.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/ci/ciMethodData.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -87,8 +87,9 @@
DataLayout* dp_dst = extra_data_base();
for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
assert(dp_src < end_src, "moved past end of extra data");
- assert(dp_src->tag() == dp_dst->tag(), err_msg("should be same tags %d != %d", dp_src->tag(), dp_dst->tag()));
- switch(dp_src->tag()) {
+ // New traps can be added to the MDO while we translate the copy, so
+ // look at the entries in the copy.
+ switch(dp_dst->tag()) {
case DataLayout::speculative_trap_data_tag: {
ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
@@ -102,7 +103,7 @@
// An empty slot or ArgInfoData entry marks the end of the trap data
return;
default:
- fatal(err_msg("bad tag = %d", dp_src->tag()));
+ fatal(err_msg("bad tag = %d", dp_dst->tag()));
}
}
}
--- a/hotspot/src/share/vm/code/codeCache.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/code/codeCache.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -198,14 +198,12 @@
}
maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
(address)_heap->low_boundary()) - unallocated_capacity());
- verify_if_often();
print_trace("allocation", cb, size);
return cb;
}
void CodeCache::free(CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock);
- verify_if_often();
print_trace("free", cb);
if (cb->is_nmethod()) {
@@ -221,7 +219,6 @@
_heap->deallocate(cb);
- verify_if_often();
assert(_number_of_blobs >= 0, "sanity check");
}
@@ -244,12 +241,6 @@
}
-void CodeCache::flush() {
- assert_locked_or_safepoint(CodeCache_lock);
- Unimplemented();
-}
-
-
// Iteration over CodeBlobs
#define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )
@@ -269,7 +260,7 @@
CodeBlob* CodeCache::find_blob(void* start) {
CodeBlob* result = find_blob_unsafe(start);
if (result == NULL) return NULL;
- // We could potientially look up non_entrant methods
+ // We could potentially look up non_entrant methods
guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
return result;
}
@@ -741,17 +732,26 @@
}
}
+void CodeCache::print_memory_overhead() {
+ size_t wasted_bytes = 0;
+ CodeBlob *cb;
+ for (cb = first(); cb != NULL; cb = next(cb)) {
+ HeapBlock* heap_block = ((HeapBlock*)cb) - 1;
+ wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size();
+ }
+ // Print bytes that are allocated in the freelist
+ ttyLocker ttl;
+ tty->print_cr("Number of elements in freelist: %d", freelist_length());
+ tty->print_cr("Allocated in freelist: %dkB", bytes_allocated_in_freelist()/K);
+ tty->print_cr("Unused bytes in CodeBlobs: %dkB", (int)(wasted_bytes/K));
+ tty->print_cr("Segment map size: %dkB", allocated_segments()/K); // 1 byte per segment
+}
+
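
Per CodeBlob, the waste summed above is the padding between the blob's actual size and the whole number of segments its HeapBlock occupies:

    wasted(cb) = heap_block->length() * CodeCacheSegmentSize - cb->size()

to which the printout adds the bytes parked on the freelist and the segment map itself, at one byte per allocated segment.
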
//------------------------------------------------------------------------------------------------
// Non-product version
#ifndef PRODUCT
-void CodeCache::verify_if_often() {
- if (VerifyCodeCacheOften) {
- _heap->verify();
- }
-}
-
void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
if (PrintCodeCache2) { // Need to add a new flag
ResourceMark rm;
@@ -774,7 +774,7 @@
int nmethodUnloaded = 0;
int nmethodJava = 0;
int nmethodNative = 0;
- int maxCodeSize = 0;
+ int max_nm_size = 0;
ResourceMark rm;
CodeBlob *cb;
@@ -798,13 +798,11 @@
if(nm->is_not_entrant()) { nmethodNotEntrant++; }
if(nm->is_zombie()) { nmethodZombie++; }
if(nm->is_unloaded()) { nmethodUnloaded++; }
- if(nm->is_native_method()) { nmethodNative++; }
+ if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; }
if(nm->method() != NULL && nm->is_java_method()) {
nmethodJava++;
- if (nm->insts_size() > maxCodeSize) {
- maxCodeSize = nm->insts_size();
- }
+ max_nm_size = MAX2(max_nm_size, nm->size());
}
} else if (cb->is_runtime_stub()) {
runtimeStubCount++;
@@ -820,18 +818,19 @@
}
int bucketSize = 512;
- int bucketLimit = maxCodeSize / bucketSize + 1;
+ int bucketLimit = max_nm_size / bucketSize + 1;
int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
- memset(buckets,0,sizeof(int) * bucketLimit);
+ memset(buckets, 0, sizeof(int) * bucketLimit);
for (cb = first(); cb != NULL; cb = next(cb)) {
if (cb->is_nmethod()) {
nmethod* nm = (nmethod*)cb;
if(nm->is_java_method()) {
- buckets[nm->insts_size() / bucketSize]++;
- }
+ buckets[nm->size() / bucketSize]++;
+ }
}
}
+
tty->print_cr("Code Cache Entries (total of %d)",total);
tty->print_cr("-------------------------------------------------");
tty->print_cr("nmethods: %d",nmethodCount);
@@ -858,6 +857,7 @@
}
FREE_C_HEAP_ARRAY(int, buckets, mtCode);
+ print_memory_overhead();
}
#endif // !PRODUCT
--- a/hotspot/src/share/vm/code/codeCache.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/code/codeCache.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -58,12 +58,13 @@
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
- static void verify_if_often() PRODUCT_RETURN;
-
static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
static int _codemem_full_count;
+ static size_t bytes_allocated_in_freelist() { return _heap->allocated_in_freelist(); }
+ static int allocated_segments() { return _heap->allocated_segments(); }
+ static size_t freelist_length() { return _heap->freelist_length(); }
public:
@@ -78,7 +79,6 @@
static int alignment_unit(); // guaranteed alignment of all CodeBlobs
static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
static void free(CodeBlob* cb); // frees a CodeBlob
- static void flush(); // flushes all CodeBlobs
static bool contains(void *p); // returns whether p is included
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
@@ -150,6 +150,7 @@
// Printing/debugging
static void print(); // prints summary
static void print_internals();
+ static void print_memory_overhead();
static void verify(); // verifies the code cache
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
--- a/hotspot/src/share/vm/code/dependencies.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/code/dependencies.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -725,13 +725,13 @@
}
// ----------------- DependencySignature --------------------------------------
-bool DependencySignature::equals(DependencySignature* sig) const {
- if ((type() != sig->type()) || (args_count() != sig->args_count())) {
+bool DependencySignature::equals(DependencySignature const& s1, DependencySignature const& s2) {
+ if ((s1.type() != s2.type()) || (s1.args_count() != s2.args_count())) {
return false;
}
- for (int i = 0; i < sig->args_count(); i++) {
- if (arg(i) != sig->arg(i)) {
+ for (int i = 0; i < s1.args_count(); i++) {
+ if (s1.arg(i) != s2.arg(i)) {
return false;
}
}
--- a/hotspot/src/share/vm/code/dependencies.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/code/dependencies.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -527,7 +527,7 @@
};
-class DependencySignature : public GenericHashtableEntry<DependencySignature, ResourceObj> {
+class DependencySignature : public ResourceObj {
private:
int _args_count;
uintptr_t _argument_hash[Dependencies::max_arg_count];
@@ -542,12 +542,13 @@
}
}
- bool equals(DependencySignature* sig) const;
- uintptr_t key() const { return _argument_hash[0] >> 2; }
+ static bool equals(DependencySignature const& s1, DependencySignature const& s2);
+ static unsigned hash (DependencySignature const& s1) { return s1.arg(0) >> 2; }
int args_count() const { return _args_count; }
uintptr_t arg(int idx) const { return _argument_hash[idx]; }
Dependencies::DepType type() const { return _type; }
+
};
--- a/hotspot/src/share/vm/code/nmethod.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/code/nmethod.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -39,6 +39,7 @@
#include "prims/jvmtiImpl.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
+#include "utilities/resourceHash.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
@@ -2135,7 +2136,11 @@
// Turn off dependency tracing while actually testing dependencies.
NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
- GenericHashtable<DependencySignature, ResourceObj>* table = new GenericHashtable<DependencySignature, ResourceObj>(11027);
+ typedef ResourceHashtable<DependencySignature, int, &DependencySignature::hash,
+ &DependencySignature::equals, 11027> DepTable;
+
+ DepTable* table = new DepTable();
+
// Iterate over live nmethods and check dependencies of all nmethods that are not
// marked for deoptimization. A particular dependency is only checked once.
for(nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
@@ -2143,9 +2148,10 @@
for (Dependencies::DepStream deps(nm); deps.next(); ) {
// Construct abstraction of a dependency.
DependencySignature* current_sig = new DependencySignature(deps);
- // Determine if 'deps' is already checked. table->add() returns
- // 'true' if the dependency was added (i.e., was not in the hashtable).
- if (table->add(current_sig)) {
+
+ // Determine if the dependency has already been checked. table->put(...)
+ // returns 'true' if the dependency was added (i.e., was not yet in the hashtable).
+ if (table->put(*current_sig, 1)) {
if (deps.check_dependency() != NULL) {
// Dependency checking failed. Print out information about the failed
// dependency and finally fail with an assert. We can fail here, since
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -3475,7 +3475,7 @@
tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
-#if !defined(ZERO)
+#if !defined(ZERO) && defined(PPC)
tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO && PPC
tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -376,6 +376,9 @@
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "templateTable_ppc_32.hpp"
#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "templateTable_ppc_64.hpp"
+#endif
};
#endif /* !CC_INTERP */
--- a/hotspot/src/share/vm/memory/heap.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/memory/heap.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -43,6 +43,7 @@
_next_segment = 0;
_freelist = NULL;
_freelist_segments = 0;
+ _freelist_length = 0;
}
@@ -53,7 +54,7 @@
address p = (address)_segmap.low() + beg;
address q = (address)_segmap.low() + end;
// initialize interval
- while (p < q) *p++ = 0xFF;
+ while (p < q) *p++ = free_sentinel;
}
@@ -67,7 +68,7 @@
int i = 0;
while (p < q) {
*p++ = i++;
- if (i == 0xFF) i = 1;
+ if (i == free_sentinel) i = 1;
}
}
@@ -139,11 +140,6 @@
}
-void CodeHeap::release() {
- Unimplemented();
-}
-
-
bool CodeHeap::expand_by(size_t size) {
// expand _memory space
size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
@@ -157,8 +153,8 @@
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
// expand _segmap space
size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
- if (ds > 0) {
- if (!_segmap.expand_by(ds)) return false;
+ if ((ds > 0) && !_segmap.expand_by(ds)) {
+ return false;
}
assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
// initialize additional segmap entries
@@ -167,12 +163,6 @@
return true;
}
-
-void CodeHeap::shrink_by(size_t size) {
- Unimplemented();
-}
-
-
void CodeHeap::clear() {
_next_segment = 0;
mark_segmap_as_free(0, _number_of_committed_segments);
@@ -180,26 +170,23 @@
void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
- size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
+ size_t number_of_segments = size_to_segments(instance_size + header_size());
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
// First check if we can satisfy request from freelist
- debug_only(verify());
+ NOT_PRODUCT(verify());
HeapBlock* block = search_freelist(number_of_segments, is_critical);
- debug_only(if (VerifyCodeCacheOften) verify());
+ NOT_PRODUCT(verify());
+
if (block != NULL) {
assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
assert(!block->free(), "must be marked free");
-#ifdef ASSERT
- memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
-#endif
+ DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, instance_size));
return block->allocated_space();
}
// Ensure minimum size for allocation to the heap.
- if (number_of_segments < CodeCacheMinBlockLength) {
- number_of_segments = CodeCacheMinBlockLength;
- }
+ number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);
if (!is_critical) {
// Make sure the allocation fits in the unallocated heap without using
@@ -215,9 +202,7 @@
HeapBlock* b = block_at(_next_segment);
b->initialize(number_of_segments);
_next_segment += number_of_segments;
-#ifdef ASSERT
- memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
-#endif
+ DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size));
return b->allocated_space();
} else {
return NULL;
@@ -230,28 +215,56 @@
// Find start of HeapBlock
HeapBlock* b = (((HeapBlock *)p) - 1);
assert(b->allocated_space() == p, "sanity check");
-#ifdef ASSERT
- memset((void *)b->allocated_space(),
- badCodeHeapFreeVal,
- segments_to_size(b->length()) - sizeof(HeapBlock));
-#endif
+ DEBUG_ONLY(memset((void *)b->allocated_space(), badCodeHeapFreeVal,
+ segments_to_size(b->length()) - sizeof(HeapBlock)));
add_to_freelist(b);
-
- debug_only(if (VerifyCodeCacheOften) verify());
+ NOT_PRODUCT(verify());
}
-
+/**
+ * Uses the segment map to find the start (header) of an nmethod. This works as follows:
+ * The memory of the code cache is divided into 'segments'. The size of a segment is
+ * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
+ * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
+ * by calling segment_for(addr). Each time memory is requested from the code cache,
+ * the segmap is updated accordingly. See the following example, which illustrates the
+ * state of the code cache and the segment map (seg -> segment, nm -> nmethod):
+ *
+ * code cache segmap
+ * ----------- ---------
+ * seg 1 | nm 1 | -> | 0 |
+ * seg 2 | nm 1 | -> | 1 |
+ * ... | nm 1 | -> | .. |
+ * seg m | nm 2 | -> | 0 |
+ * seg m+1 | nm 2 | -> | 1 |
+ * ... | nm 2 | -> | 2 |
+ * ... | nm 2 | -> | .. |
+ * ... | nm 2 | -> | 0xFE |
+ * seg m+n | nm 2 | -> | 1 |
+ * ... | nm 2 | -> | |
+ *
+ * A value of '0' in the segmap indicates that this segment contains the beginning of
+ * an nmethod. Let's walk through a simple example: If we want to find the start of
+ * an nmethod that falls into seg 2, we read the value of segmap[2]. The value
+ * is an offset, in segments, back toward the segment that contains the start of the nmethod.
+ * Another example: If we want to get the start of nm 2, and we happen to get a pointer
+ * that points to seg m+n, we first read seg[n+m], which returns '1'. So we have to
+ * do one more read of the segmap[m+n-1] to finally get the segment header.
+ */
void* CodeHeap::find_start(void* p) const {
if (!contains(p)) {
return NULL;
}
- size_t i = segment_for(p);
- address b = (address)_segmap.low();
- if (b[i] == 0xFF) {
+ size_t seg_idx = segment_for(p);
+ address seg_map = (address)_segmap.low();
+ if (is_segment_unused(seg_map[seg_idx])) {
return NULL;
}
- while (b[i] > 0) i -= (int)b[i];
- HeapBlock* h = block_at(i);
+ while (seg_map[seg_idx] > 0) {
+ seg_idx -= (int)seg_map[seg_idx];
+ }
+
+ HeapBlock* h = block_at(seg_idx);
if (h->free()) {
return NULL;
}
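
For readers new to the scheme, here is a minimal standalone sketch of the backward walk that the comment and find_start() above describe. The byte array and the 0xFF sentinel are illustrative stand-ins for the real _segmap and free_sentinel; this is not HotSpot code.

#include <cstdio>
#include <cstddef>

static const unsigned char kFree = 0xFF; // stand-in for free_sentinel

// Walks backward through a toy segment map until it reaches an entry
// containing 0, which marks the segment where the block header starts.
static long find_header_segment(const unsigned char* seg_map, size_t seg_idx) {
  if (seg_map[seg_idx] == kFree) return -1; // segment not in use
  while (seg_map[seg_idx] > 0) {            // non-zero entry = backward offset
    seg_idx -= seg_map[seg_idx];
  }
  return (long)seg_idx;                     // segment holding the header
}

int main() {
  // Mirrors the comment's example: nm 1 spans segs 0..2, nm 2 starts at seg 3.
  unsigned char seg_map[] = { 0, 1, 2, 0, 1, 2, kFree };
  printf("%ld\n", find_header_segment(seg_map, 5)); // prints 3
  return 0;
}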
@@ -272,7 +285,7 @@
}
// Finds the next free heapblock. If the current one is free, it is returned.
-void* CodeHeap::next_free(HeapBlock *b) const {
+void* CodeHeap::next_free(HeapBlock* b) const {
// Since free blocks are merged, there is at most one free block
// between two used ones
if (b != NULL && b->free()) b = next_block(b);
@@ -287,7 +300,7 @@
return NULL;
}
-HeapBlock *CodeHeap::block_start(void *q) const {
+HeapBlock* CodeHeap::block_start(void* q) const {
HeapBlock* b = (HeapBlock*)find_start(q);
if (b == NULL) return NULL;
return b - 1;
@@ -312,6 +325,10 @@
return _memory.reserved_size();
}
+int CodeHeap::allocated_segments() const {
+ return (int)_next_segment;
+}
+
size_t CodeHeap::allocated_capacity() const {
// size of used heap - size on freelist
return segments_to_size(_next_segment - _freelist_segments);
@@ -325,7 +342,7 @@
// Free list management
-FreeBlock *CodeHeap::following_block(FreeBlock *b) {
+FreeBlock* CodeHeap::following_block(FreeBlock *b) {
return (FreeBlock*)(((address)b) + _segment_size * b->length());
}
@@ -343,7 +360,7 @@
}
// Try to merge this block with the following block
-void CodeHeap::merge_right(FreeBlock *a) {
+bool CodeHeap::merge_right(FreeBlock* a) {
assert(a->free(), "must be a free block");
if (following_block(a) == a->link()) {
assert(a->link() != NULL && a->link()->free(), "must be free too");
@@ -353,13 +370,20 @@
// Update find_start map
size_t beg = segment_for(a);
mark_segmap_as_used(beg, beg + a->length());
+ _freelist_length--;
+ return true;
}
+ return false;
}
-void CodeHeap::add_to_freelist(HeapBlock *a) {
+
+void CodeHeap::add_to_freelist(HeapBlock* a) {
FreeBlock* b = (FreeBlock*)a;
+ _freelist_length++;
+
assert(b != _freelist, "cannot be removed twice");
+
// Mark as free and update free space count
_freelist_segments += b->length();
b->set_free();
@@ -371,95 +395,96 @@
return;
}
- // Scan for right place to put into list. List
- // is sorted by increasing addresses
- FreeBlock* prev = NULL;
- FreeBlock* cur = _freelist;
- while(cur != NULL && cur < b) {
- assert(prev == NULL || prev < cur, "must be ordered");
- prev = cur;
- cur = cur->link();
- }
-
- assert( (prev == NULL && b < _freelist) ||
- (prev < b && (cur == NULL || b < cur)), "list must be ordered");
-
- if (prev == NULL) {
+ // Since the freelist is ordered (smaller addresses -> larger addresses) and the
+ // element we want to insert into the freelist has a smaller address than the first
+ // element, we can simply add 'b' as the first element and we are done.
+ if (b < _freelist) {
// Insert first in list
b->set_link(_freelist);
_freelist = b;
merge_right(_freelist);
- } else {
- insert_after(prev, b);
+ return;
}
+
+  // Scan for the right place to insert into the list.
+  // The list is sorted by increasing addresses.
+ FreeBlock* prev = _freelist;
+ FreeBlock* cur = _freelist->link();
+ while(cur != NULL && cur < b) {
+ assert(prev < cur, "Freelist must be ordered");
+ prev = cur;
+ cur = cur->link();
+ }
+ assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
+ insert_after(prev, b);
}
-// Search freelist for an entry on the list with the best fit
-// Return NULL if no one was found
+/**
+ * Search freelist for an entry on the list with the best fit.
+ * @return NULL if none was found
+ */
FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
- FreeBlock *best_block = NULL;
- FreeBlock *best_prev = NULL;
- size_t best_length = 0;
+ FreeBlock* found_block = NULL;
+ FreeBlock* found_prev = NULL;
+ size_t found_length = 0;
- // Search for smallest block which is bigger than length
- FreeBlock *prev = NULL;
- FreeBlock *cur = _freelist;
+ FreeBlock* prev = NULL;
+ FreeBlock* cur = _freelist;
+ const size_t critical_boundary = (size_t)high_boundary() - CodeCacheMinimumFreeSpace;
+
+ // Search for first block that fits
while(cur != NULL) {
- size_t l = cur->length();
- if (l >= length && (best_block == NULL || best_length > l)) {
-
+ if (cur->length() >= length) {
// Non critical allocations are not allowed to use the last part of the code heap.
- if (!is_critical) {
- // Make sure the end of the allocation doesn't cross into the last part of the code heap
- if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
- // the freelist is sorted by address - if one fails, all consecutive will also fail.
- break;
- }
+ // Make sure the end of the allocation doesn't cross into the last part of the code heap.
+ if (!is_critical && (((size_t)cur + length) > critical_boundary)) {
+ // The freelist is sorted by address - if one fails, all consecutive will also fail.
+ break;
}
+ // Remember block, its previous element, and its length
+ found_block = cur;
+ found_prev = prev;
+ found_length = found_block->length();
- // Remember best block, its previous element, and its length
- best_block = cur;
- best_prev = prev;
- best_length = best_block->length();
+ break;
}
-
// Next element in list
prev = cur;
cur = cur->link();
}
- if (best_block == NULL) {
+ if (found_block == NULL) {
// None found
return NULL;
}
- assert((best_prev == NULL && _freelist == best_block ) ||
- (best_prev != NULL && best_prev->link() == best_block), "sanity check");
-
// Exact (or at least good enough) fit. Remove from list.
// Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
- if (best_length < length + CodeCacheMinBlockLength) {
- length = best_length;
- if (best_prev == NULL) {
- assert(_freelist == best_block, "sanity check");
+ if (found_length - length < CodeCacheMinBlockLength) {
+ _freelist_length--;
+ length = found_length;
+ if (found_prev == NULL) {
+ assert(_freelist == found_block, "sanity check");
_freelist = _freelist->link();
} else {
+ assert((found_prev->link() == found_block), "sanity check");
// Unmap element
- best_prev->set_link(best_block->link());
+ found_prev->set_link(found_block->link());
}
} else {
// Truncate block and return a pointer to the following block
- best_block->set_length(best_length - length);
- best_block = following_block(best_block);
// Set used bit and length on new block
- size_t beg = segment_for(best_block);
+ found_block->set_length(found_length - length);
+ found_block = following_block(found_block);
+
+ size_t beg = segment_for(found_block);
mark_segmap_as_used(beg, beg + length);
- best_block->set_length(length);
+ found_block->set_length(length);
}
- best_block->set_used();
+ found_block->set_used();
_freelist_segments -= length;
- return best_block;
+ return found_block;
}
//----------------------------------------------------------------------------
@@ -471,33 +496,34 @@
tty->print_cr("The Heap");
}
-#endif
-
void CodeHeap::verify() {
- // Count the number of blocks on the freelist, and the amount of space
- // represented.
- int count = 0;
- size_t len = 0;
- for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
- len += b->length();
- count++;
- }
-
- // Verify that freelist contains the right amount of free space
- // guarantee(len == _freelist_segments, "wrong freelist");
+ if (VerifyCodeCache) {
+ size_t len = 0;
+ int count = 0;
+ for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
+ len += b->length();
+ count++;
+ // Check if we have merged all free blocks
+ assert(merge_right(b) == false, "Missed merging opportunity");
+ }
+ // Verify that freelist contains the right amount of free space
+ assert(len == _freelist_segments, "wrong freelist");
- // Verify that the number of free blocks is not out of hand.
- static int free_block_threshold = 10000;
- if (count > free_block_threshold) {
- warning("CodeHeap: # of free blocks > %d", free_block_threshold);
- // Double the warning limit
- free_block_threshold *= 2;
- }
+ for(HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
+ if (h->free()) count--;
+ }
+    // Verify that the freelist contains the same number of blocks
+    // as the free blocks found on the full list.
+ assert(count == 0, "missing free blocks");
- // Verify that the freelist contains the same number of free blocks that is
- // found on the full list.
- for(HeapBlock *h = first_block(); h != NULL; h = next_block(h)) {
- if (h->free()) count--;
+ // Verify that the number of free blocks is not out of hand.
+ static int free_block_threshold = 10000;
+ if (count > free_block_threshold) {
+ warning("CodeHeap: # of free blocks > %d", free_block_threshold);
+ // Double the warning limit
+ free_block_threshold *= 2;
+ }
}
- // guarantee(count == 0, "missing free blocks");
}
+
+#endif
--- a/hotspot/src/share/vm/memory/heap.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/memory/heap.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -92,24 +92,28 @@
FreeBlock* _freelist;
size_t _freelist_segments; // No. of segments in freelist
+ int _freelist_length;
+
+ enum { free_sentinel = 0xFF };
// Helper functions
size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
size_t segment_for(void* p) const { return ((char*)p - _memory.low()) >> _log2_segment_size; }
+ bool is_segment_unused(int val) const { return val == free_sentinel; }
HeapBlock* block_at(size_t i) const { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); }
void mark_segmap_as_free(size_t beg, size_t end);
void mark_segmap_as_used(size_t beg, size_t end);
// Freelist management helpers
- FreeBlock* following_block(FreeBlock *b);
+ FreeBlock* following_block(FreeBlock* b);
void insert_after(FreeBlock* a, FreeBlock* b);
- void merge_right (FreeBlock* a);
+ bool merge_right (FreeBlock* a);
// Toplevel freelist management
- void add_to_freelist(HeapBlock *b);
+ void add_to_freelist(HeapBlock* b);
FreeBlock* search_freelist(size_t length, bool is_critical);
// Iteration helpers
@@ -120,20 +124,18 @@
// to perform additional actions on creation of executable code
void on_code_mapping(char* base, size_t size);
+ void clear(); // clears all heap contents
public:
CodeHeap();
// Heap extents
bool reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
- void release(); // releases all allocated memory
bool expand_by(size_t size); // expands committed memory by size
- void shrink_by(size_t size); // shrinks committed memory by size
- void clear(); // clears all heap contents
// Memory allocation
void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL
- void deallocate(void* p); // deallocates a block
+ void deallocate(void* p); // deallocates a block
// Attributes
char* low_boundary() const { return _memory.low_boundary (); }
@@ -141,12 +143,13 @@
char* high_boundary() const { return _memory.high_boundary(); }
bool contains(const void* p) const { return low_boundary() <= p && p < high(); }
- void* find_start(void* p) const; // returns the block containing p or NULL
- size_t alignment_unit() const; // alignment of any block
- size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit
- static size_t header_size(); // returns the header size for each heap block
+ void* find_start(void* p) const; // returns the block containing p or NULL
+ size_t alignment_unit() const; // alignment of any block
+ size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit
+ static size_t header_size(); // returns the header size for each heap block
- // Iteration
+ size_t allocated_in_freelist() const { return _freelist_segments * CodeCacheSegmentSize; }
+ int freelist_length() const { return _freelist_length; } // number of elements in the freelist
// returns the first block or NULL
void* first() const { return next_free(first_block()); }
@@ -156,6 +159,7 @@
// Statistics
size_t capacity() const;
size_t max_capacity() const;
+ int allocated_segments() const;
size_t allocated_capacity() const;
size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); }
@@ -164,7 +168,7 @@
public:
// Debugging
- void verify();
+ void verify() PRODUCT_RETURN;
void print() PRODUCT_RETURN;
};
--- a/hotspot/src/share/vm/oops/method.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/oops/method.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -108,12 +108,16 @@
#endif
u2 _method_size; // size of this object
u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none)
- u1 _jfr_towrite : 1, // Flags
- _caller_sensitive : 1,
- _force_inline : 1,
- _hidden : 1,
- _dont_inline : 1,
- : 3;
+
+ // Flags
+ enum Flags {
+ _jfr_towrite = 1 << 0,
+ _caller_sensitive = 1 << 1,
+ _force_inline = 1 << 2,
+ _dont_inline = 1 << 3,
+ _hidden = 1 << 4
+ };
+ u1 _flags;
#ifndef PRODUCT
int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging)
@@ -759,16 +763,41 @@
void init_intrinsic_id(); // updates from _none if a match
static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);
- bool jfr_towrite() { return _jfr_towrite; }
- void set_jfr_towrite(bool x) { _jfr_towrite = x; }
- bool caller_sensitive() { return _caller_sensitive; }
- void set_caller_sensitive(bool x) { _caller_sensitive = x; }
- bool force_inline() { return _force_inline; }
- void set_force_inline(bool x) { _force_inline = x; }
- bool dont_inline() { return _dont_inline; }
- void set_dont_inline(bool x) { _dont_inline = x; }
- bool is_hidden() { return _hidden; }
- void set_hidden(bool x) { _hidden = x; }
+ bool jfr_towrite() {
+ return (_flags & _jfr_towrite) != 0;
+ }
+ void set_jfr_towrite(bool x) {
+ _flags = x ? (_flags | _jfr_towrite) : (_flags & ~_jfr_towrite);
+ }
+
+ bool caller_sensitive() {
+ return (_flags & _caller_sensitive) != 0;
+ }
+ void set_caller_sensitive(bool x) {
+ _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
+ }
+
+ bool force_inline() {
+ return (_flags & _force_inline) != 0;
+ }
+ void set_force_inline(bool x) {
+ _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
+ }
+
+ bool dont_inline() {
+ return (_flags & _dont_inline) != 0;
+ }
+ void set_dont_inline(bool x) {
+ _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
+ }
+
+ bool is_hidden() {
+ return (_flags & _hidden) != 0;
+ }
+ void set_hidden(bool x) {
+ _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
+ }
+
ConstMethod::MethodType method_type() const {
return _constMethod->method_type();
}
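
The five accessor pairs above all repeat the same mask-based set/clear idiom on _flags. As a hedged illustration of the underlying pattern only (not part of the change; set_flag and get_flag are hypothetical helpers):

// Hypothetical helpers, for illustration; 'mask' would be one of the
// Flags enum values and '_flags' the u1 field introduced above.
void set_flag(int mask, bool z) {
  _flags = z ? (_flags | mask) : (_flags & ~mask);
}
bool get_flag(int mask) const {
  return (_flags & mask) != 0;
}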
--- a/hotspot/src/share/vm/oops/methodData.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/oops/methodData.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1071,7 +1071,8 @@
}
// Initialize the MethodData* corresponding to a given method.
-MethodData::MethodData(methodHandle method, int size, TRAPS) {
+MethodData::MethodData(methodHandle method, int size, TRAPS)
+ : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {
No_Safepoint_Verifier no_safepoint; // init function atomic wrt GC
ResourceMark rm;
// Set the method back-pointer.
@@ -1235,7 +1236,7 @@
return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}
-ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp) {
+ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
DataLayout* end = extra_data_limit();
for (;; dp = next_extra(dp)) {
@@ -1257,10 +1258,11 @@
if (m != NULL) {
SpeculativeTrapData* data = new SpeculativeTrapData(dp);
// data->method() may be null in case of a concurrent
- // allocation. Assume it's for the same method and use that
+ // allocation. Maybe it's for the same method. Try to use that
// entry in that case.
if (dp->bci() == bci) {
if (data->method() == NULL) {
+ assert(concurrent, "impossible because no concurrent allocation");
return NULL;
} else if (data->method() == m) {
return data;
@@ -1289,40 +1291,40 @@
// Allocation in the extra data space has to be atomic because not
// all entries have the same size and non atomic concurrent
// allocation would result in a corrupted extra data space.
- while (true) {
- ProfileData* result = bci_to_extra_data_helper(bci, m, dp);
- if (result != NULL) {
+ ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
+ if (result != NULL) {
+ return result;
+ }
+
+ if (create_if_missing && dp < end) {
+ MutexLocker ml(&_extra_data_lock);
+ // Check again now that we have the lock. Another thread may
+ // have added extra data entries.
+ ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
+ if (result != NULL || dp >= end) {
return result;
}
- if (create_if_missing && dp < end) {
- assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
- assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
- u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
- // SpeculativeTrapData is 2 slots. Make sure we have room.
- if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
- return NULL;
- }
- DataLayout temp;
- temp.initialize(tag, bci, 0);
- // May have been set concurrently
- if (dp->header() != temp.header() && !dp->atomic_set_header(temp.header())) {
- // Allocation failure because of concurrent allocation. Try
- // again.
- continue;
- }
- assert(dp->tag() == tag, "sane");
- assert(dp->bci() == bci, "no concurrent allocation");
- if (tag == DataLayout::bit_data_tag) {
- return new BitData(dp);
- } else {
- // If being allocated concurrently, one trap may be lost
- SpeculativeTrapData* data = new SpeculativeTrapData(dp);
- data->set_method(m);
- return data;
- }
+ assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
+ assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
+ u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
+ // SpeculativeTrapData is 2 slots. Make sure we have room.
+ if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
+ return NULL;
}
- return NULL;
+ DataLayout temp;
+ temp.initialize(tag, bci, 0);
+
+ dp->set_header(temp.header());
+ assert(dp->tag() == tag, "sane");
+ assert(dp->bci() == bci, "no concurrent allocation");
+ if (tag == DataLayout::bit_data_tag) {
+ return new BitData(dp);
+ } else {
+ SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+ data->set_method(m);
+ return data;
+ }
}
return NULL;
}
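
The restructuring above replaces the old CAS retry loop with a lock-free fast path plus a locked slow path that re-checks before allocating. A generic, self-contained sketch of that check/lock/re-check shape follows; all names are hypothetical, and the fixed slot array stands in for the pre-sized extra-data region (which is what makes the unlocked read safe here):

#include <atomic>
#include <mutex>

struct Entry { int key; explicit Entry(int k) : key(k) {} };

class Table {
  static const int kSlots = 16;
  std::atomic<Entry*> _slots[kSlots];
  std::mutex _lock;

 public:
  Table() { for (int i = 0; i < kSlots; i++) _slots[i] = nullptr; }

  Entry* find_or_create(int key) {
    int idx = key % kSlots;
    // 1. Lock-free fast path: the entry may already be published.
    Entry* e = _slots[idx].load(std::memory_order_acquire);
    if (e != nullptr) return e;
    // 2. Slow path: take the lock, then re-check -- another thread may
    //    have created the entry between the two loads.
    std::lock_guard<std::mutex> g(_lock);
    e = _slots[idx].load(std::memory_order_relaxed);
    if (e != nullptr) return e;
    e = new Entry(key);
    _slots[idx].store(e, std::memory_order_release); // publish
    return e;
  }
};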
--- a/hotspot/src/share/vm/oops/methodData.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/oops/methodData.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -190,12 +190,6 @@
void set_header(intptr_t value) {
_header._bits = value;
}
- bool atomic_set_header(intptr_t value) {
- if (Atomic::cmpxchg_ptr(value, (volatile intptr_t*)&_header._bits, 0) == 0) {
- return true;
- }
- return false;
- }
intptr_t header() {
return _header._bits;
}
@@ -2047,10 +2041,12 @@
// Cached hint for bci_to_dp and bci_to_data
int _hint_di;
+ Mutex _extra_data_lock;
+
MethodData(methodHandle method, int size, TRAPS);
public:
static MethodData* allocate(ClassLoaderData* loader_data, methodHandle method, TRAPS);
- MethodData() {}; // For ciMethodData
+ MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData
bool is_methodData() const volatile { return true; }
@@ -2155,7 +2151,7 @@
// What is the index of the first data entry?
int first_di() const { return 0; }
- ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp);
+ ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
// Find or create an extra ProfileData:
ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
--- a/hotspot/src/share/vm/opto/c2_globals.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -452,7 +452,7 @@
product(bool, EliminateAutoBox, true, \
"Control optimizations for autobox elimination") \
\
- experimental(bool, UseImplicitStableValues, false, \
+ diagnostic(bool, UseImplicitStableValues, true, \
"Mark well-known stable fields as such (e.g. String.value)") \
\
product(intx, AutoBoxCacheMax, 128, \
@@ -650,7 +650,7 @@
experimental(bool, ReplaceInParentMaps, false, \
"Propagate type improvements in callers of inlinee if possible") \
\
- experimental(bool, UseTypeSpeculation, false, \
+ product(bool, UseTypeSpeculation, true, \
"Speculatively propagate types from profiles") \
\
diagnostic(bool, UseInlineDepthForSpeculativeTypes, true, \
--- a/hotspot/src/share/vm/opto/graphKit.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -3007,22 +3007,28 @@
}
Node* cast_obj = NULL;
- const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
- // We may not have profiling here or it may not help us. If we have
- // a speculative type use it to perform an exact cast.
- ciKlass* spec_obj_type = obj_type->speculative_type();
- if (spec_obj_type != NULL ||
- (data != NULL &&
- // Counter has never been decremented (due to cast failure).
- // ...This is a reasonable thing to expect. It is true of
- // all casts inserted by javac to implement generic types.
- data->as_CounterData()->count() >= 0)) {
- cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
- if (cast_obj != NULL) {
- if (failure_control != NULL) // failure is now impossible
- (*failure_control) = top();
- // adjust the type of the phi to the exact klass:
- phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
+ if (tk->klass_is_exact()) {
+ // The following optimization tries to statically cast the speculative type of the object
+ // (for example obtained during profiling) to the type of the superklass and then do a
+ // dynamic check that the type of the object is what we expect. To work correctly
+      // for checkcast and aastore, the type of the superklass should be exact.
+ const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+ // We may not have profiling here or it may not help us. If we have
+ // a speculative type use it to perform an exact cast.
+ ciKlass* spec_obj_type = obj_type->speculative_type();
+ if (spec_obj_type != NULL ||
+ (data != NULL &&
+ // Counter has never been decremented (due to cast failure).
+ // ...This is a reasonable thing to expect. It is true of
+ // all casts inserted by javac to implement generic types.
+ data->as_CounterData()->count() >= 0)) {
+ cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
+ if (cast_obj != NULL) {
+ if (failure_control != NULL) // failure is now impossible
+ (*failure_control) = top();
+ // adjust the type of the phi to the exact klass:
+ phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
+ }
}
}
--- a/hotspot/src/share/vm/opto/matcher.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/matcher.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1922,6 +1922,105 @@
return OptoReg::as_OptoReg(regs.first());
}
+// This function identifies sub-graphs in which a 'load' node is
+// input to two different nodes, such that the sub-graph can be matched
+// with BMI instructions like blsi, blsr, etc.
+// Example: b = -a[i] & a[i] can be matched to blsi r32, m32.
+// The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL*
+// refers to the same node.
+#ifdef X86
+// Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop)
+// This is a temporary solution until we make DAGs expressible in ADL.
+template<typename ConType>
+class FusedPatternMatcher {
+ Node* _op1_node;
+ Node* _mop_node;
+ int _con_op;
+
+ static int match_next(Node* n, int next_op, int next_op_idx) {
+ if (n->in(1) == NULL || n->in(2) == NULL) {
+ return -1;
+ }
+
+ if (next_op_idx == -1) { // n is commutative, try rotations
+ if (n->in(1)->Opcode() == next_op) {
+ return 1;
+ } else if (n->in(2)->Opcode() == next_op) {
+ return 2;
+ }
+ } else {
+ assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
+ if (n->in(next_op_idx)->Opcode() == next_op) {
+ return next_op_idx;
+ }
+ }
+ return -1;
+ }
+public:
+ FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
+ _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
+
+ bool match(int op1, int op1_op2_idx, // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
+ int op2, int op2_con_idx, // op2 and the index of the op2->con edge, -1 if op2 is commutative
+ typename ConType::NativeType con_value) {
+ if (_op1_node->Opcode() != op1) {
+ return false;
+ }
+ if (_mop_node->outcnt() > 2) {
+ return false;
+ }
+ op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
+ if (op1_op2_idx == -1) {
+ return false;
+ }
+ // Memory operation must be the other edge
+ int op1_mop_idx = (op1_op2_idx & 1) + 1;
+
+ // Check that the mop node is really what we want
+ if (_op1_node->in(op1_mop_idx) == _mop_node) {
+ Node *op2_node = _op1_node->in(op1_op2_idx);
+ if (op2_node->outcnt() > 1) {
+ return false;
+ }
+ assert(op2_node->Opcode() == op2, "Should be");
+ op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
+ if (op2_con_idx == -1) {
+ return false;
+ }
+ // Memory operation must be the other edge
+ int op2_mop_idx = (op2_con_idx & 1) + 1;
+ // Check that the memory operation is the same node
+ if (op2_node->in(op2_mop_idx) == _mop_node) {
+ // Now check the constant
+ const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
+ if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+};
+
+
+bool Matcher::is_bmi_pattern(Node *n, Node *m) {
+ if (n != NULL && m != NULL) {
+ if (m->Opcode() == Op_LoadI) {
+ FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
+ return bmii.match(Op_AndI, -1, Op_SubI, 1, 0) ||
+ bmii.match(Op_AndI, -1, Op_AddI, -1, -1) ||
+ bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
+ } else if (m->Opcode() == Op_LoadL) {
+ FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
+ return bmil.match(Op_AndL, -1, Op_SubL, 1, 0) ||
+ bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
+ bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
+ }
+ }
+ return false;
+}
+#endif // X86
+
// A method-klass-holder may be passed in the inline_cache_reg
// and then expanded into the inline_cache_reg and a method_oop register
// defined in ad_<arch>.cpp
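
For orientation, the three shapes accepted per type above correspond to standard BMI1 identities: blsi isolates the lowest set bit (x & -x), blsr clears it (x & (x-1)), and blsmsk builds a mask up to and including it (x ^ (x-1)). blsi is named in the comment above; the blsr/blsmsk correspondence is inferred from the AddL/XorL pattern shapes. A tiny self-contained check of those identities, independent of the matcher:

#include <cstdio>

int main() {
  unsigned x = 0x2Cu;              // 0b101100
  unsigned blsi   = x & (0u - x);  // isolate lowest set bit  -> 0x04
  unsigned blsr   = x & (x - 1u);  // clear lowest set bit    -> 0x28
  unsigned blsmsk = x ^ (x - 1u);  // mask through lowest bit -> 0x07
  printf("%x %x %x\n", blsi, blsr, blsmsk);
  return 0;
}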
@@ -2077,6 +2176,14 @@
set_shared(m->in(AddPNode::Base)->in(1));
}
+  // If 'n' and 'm' are part of a graph for a BMI instruction, clone this node.
+#ifdef X86
+ if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
+ mstack.push(m, Visit);
+ continue;
+ }
+#endif
+
// Clone addressing expressions as they are "free" in memory access instructions
if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
// Some inputs for address expression are not put on stack
--- a/hotspot/src/share/vm/opto/matcher.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/matcher.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -79,6 +79,9 @@
// Find shared Nodes, or Nodes that otherwise are Matcher roots
void find_shared( Node *n );
+#ifdef X86
+ bool is_bmi_pattern(Node *n, Node *m);
+#endif
// Debug and profile information for nodes in old space:
GrowableArray<Node_Notes*>* _old_node_note_array;
--- a/hotspot/src/share/vm/opto/memnode.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/memnode.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1593,35 +1593,33 @@
// Try to constant-fold a stable array element.
static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+ assert(ary->const_oop(), "array should be constant");
assert(ary->is_stable(), "array should be stable");
- if (ary->const_oop() != NULL) {
- // Decode the results of GraphKit::array_element_address.
- ciArray* aobj = ary->const_oop()->as_array();
- ciConstant con = aobj->element_value_by_offset(off);
-
- if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
- const Type* con_type = Type::make_from_constant(con);
- if (con_type != NULL) {
- if (con_type->isa_aryptr()) {
- // Join with the array element type, in case it is also stable.
- int dim = ary->stable_dimension();
- con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
- }
- if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
- con_type = con_type->make_narrowoop();
- }
+ // Decode the results of GraphKit::array_element_address.
+ ciArray* aobj = ary->const_oop()->as_array();
+ ciConstant con = aobj->element_value_by_offset(off);
+
+ if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+ const Type* con_type = Type::make_from_constant(con);
+ if (con_type != NULL) {
+ if (con_type->isa_aryptr()) {
+ // Join with the array element type, in case it is also stable.
+ int dim = ary->stable_dimension();
+ con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+ }
+ if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+ con_type = con_type->make_narrowoop();
+ }
#ifndef PRODUCT
- if (TraceIterativeGVN) {
- tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
- con_type->dump(); tty->cr();
- }
+ if (TraceIterativeGVN) {
+ tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+ con_type->dump(); tty->cr();
+ }
#endif //PRODUCT
- return con_type;
- }
+ return con_type;
}
}
-
return NULL;
}
@@ -1641,7 +1639,7 @@
// Try to guess loaded type from pointer type
if (tp->isa_aryptr()) {
const TypeAryPtr* ary = tp->is_aryptr();
- const Type *t = ary->elem();
+ const Type* t = ary->elem();
// Determine whether the reference is beyond the header or not, by comparing
// the offset against the offset of the start of the array's data.
@@ -1653,10 +1651,9 @@
const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
// Try to constant-fold a stable array element.
- if (FoldStableValues && ary->is_stable()) {
- // Make sure the reference is not into the header
- if (off_beyond_header && off != Type::OffsetBot) {
- assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+ if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) {
+ // Make sure the reference is not into the header and the offset is constant
+ if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) {
const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
if (con_type != NULL) {
return con_type;
--- a/hotspot/src/share/vm/opto/multnode.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/multnode.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -194,7 +194,9 @@
}
}
- ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj();
+ ProjNode* other_proj = iff->proj_out(1-_con);
+ if (other_proj == NULL) // Should never happen, but make Parfait happy.
+ return false;
if (other_proj->is_uncommon_trap_proj(reason)) {
assert(reason == Deoptimization::Reason_none ||
Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
--- a/hotspot/src/share/vm/opto/parse.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/parse.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -338,6 +338,8 @@
GraphKit _exits; // Record all normal returns and throws here.
bool _wrote_final; // Did we write a final field?
bool _wrote_volatile; // Did we write a volatile field?
+ bool _wrote_stable; // Did we write a @Stable field?
+ bool _wrote_fields; // Did we write any field?
bool _count_invocations; // update and test invocation counter
bool _method_data_update; // update method data oop
Node* _alloc_with_final; // An allocation node with final field
@@ -383,6 +385,10 @@
void set_wrote_final(bool z) { _wrote_final = z; }
bool wrote_volatile() const { return _wrote_volatile; }
void set_wrote_volatile(bool z) { _wrote_volatile = z; }
+ bool wrote_stable() const { return _wrote_stable; }
+ void set_wrote_stable(bool z) { _wrote_stable = z; }
+ bool wrote_fields() const { return _wrote_fields; }
+ void set_wrote_fields(bool z) { _wrote_fields = z; }
bool count_invocations() const { return _count_invocations; }
bool method_data_update() const { return _method_data_update; }
Node* alloc_with_final() const { return _alloc_with_final; }
--- a/hotspot/src/share/vm/opto/parse1.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/parse1.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -391,6 +391,8 @@
_depth = 1 + (caller->has_method() ? caller->depth() : 0);
_wrote_final = false;
_wrote_volatile = false;
+ _wrote_stable = false;
+ _wrote_fields = false;
_alloc_with_final = NULL;
_entry_bci = InvocationEntryBci;
_tf = NULL;
@@ -908,26 +910,35 @@
Node* iophi = _exits.i_o();
_exits.set_i_o(gvn().transform(iophi));
- // On PPC64, also add MemBarRelease for constructors which write
- // volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
- // is set on PPC64, no sync instruction is issued after volatile
- // stores. We want to quarantee the same behaviour as on platforms
- // with total store order, although this is not required by the Java
- // memory model. So as with finals, we add a barrier here.
- if (wrote_final() PPC64_ONLY(|| (wrote_volatile() && method()->is_initializer()))) {
- // This method (which must be a constructor by the rules of Java)
- // wrote a final. The effects of all initializations must be
- // committed to memory before any code after the constructor
- // publishes the reference to the newly constructor object.
- // Rather than wait for the publication, we simply block the
- // writes here. Rather than put a barrier on only those writes
- // which are required to complete, we force all writes to complete.
- //
- // "All bets are off" unless the first publication occurs after a
- // normal return from the constructor. We do not attempt to detect
- // such unusual early publications. But no barrier is needed on
- // exceptional returns, since they cannot publish normally.
- //
+  // Figure out if we need to emit the trailing barrier. The barrier is only
+  // needed in constructors, and only in three cases:
+ //
+ // 1. The constructor wrote a final. The effects of all initializations
+ // must be committed to memory before any code after the constructor
+ // publishes the reference to the newly constructed object. Rather
+ // than wait for the publication, we simply block the writes here.
+ // Rather than put a barrier on only those writes which are required
+ // to complete, we force all writes to complete.
+ //
+ // 2. On PPC64, also add MemBarRelease for constructors which write
+ // volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
+ // is set on PPC64, no sync instruction is issued after volatile
+ // stores. We want to guarantee the same behavior as on platforms
+ // with total store order, although this is not required by the Java
+ // memory model. So as with finals, we add a barrier here.
+ //
+  //    3. The experimental flag AlwaysSafeConstructors is used to force the
+  //       barrier if any field was written in the constructor.
+ //
+ // "All bets are off" unless the first publication occurs after a
+ // normal return from the constructor. We do not attempt to detect
+ // such unusual early publications. But no barrier is needed on
+ // exceptional returns, since they cannot publish normally.
+ //
+ if (method()->is_initializer() &&
+ (wrote_final() ||
+ PPC64_ONLY(wrote_volatile() ||)
+ (AlwaysSafeConstructors && wrote_fields()))) {
_exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
#ifndef PRODUCT
if (PrintOpto && (Verbose || WizardMode)) {
@@ -937,6 +948,19 @@
#endif
}
+ // Any method can write a @Stable field; insert memory barriers after
+ // those also. If there is a predecessor allocation node, bind the
+ // barrier there.
+ if (wrote_stable()) {
+ _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
+#ifndef PRODUCT
+ if (PrintOpto && (Verbose || WizardMode)) {
+ method()->print_name();
+ tty->print_cr(" writes @Stable and needs a memory barrier");
+ }
+#endif
+ }
+
for (MergeMemStream mms(_exits.merged_memory()); mms.next_non_empty(); ) {
// transform each slice of the original memphi:
mms.set_memory(_gvn.transform(mms.memory()));
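
The barrier reasoning above is the VM-internal counterpart of the classic release-publication idiom. As a loose C++ analogue only (the VM emits an Op_MemBarRelease node in its IR rather than using std::atomic; all names here are hypothetical):

#include <atomic>

struct Widget {
  int final_field;
  Widget() : final_field(42) {} // all field writes happen in the constructor
};

std::atomic<Widget*> g_published(nullptr);

void construct_and_publish() {
  Widget* w = new Widget();
  // Release store: the field writes above must commit to memory before
  // the reference becomes visible to other threads (cf. MemBarRelease).
  g_published.store(w, std::memory_order_release);
}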
--- a/hotspot/src/share/vm/opto/parse3.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/opto/parse3.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -233,7 +233,8 @@
// Build the load.
//
MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
- Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+ bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
+ Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, needs_atomic_access);
// Adjust Java stack
if (type2size[bt] == 1)
@@ -314,7 +315,8 @@
}
store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
} else {
- store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
+ bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
+ store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
}
// If reference is volatile, prevent following volatiles ops from
@@ -332,13 +334,23 @@
}
}
+ if (is_field) {
+ set_wrote_fields(true);
+ }
+
// If the field is final, the rules of Java say we are in <init> or <clinit>.
// Note the presence of writes to final non-static fields, so that we
// can insert a memory barrier later on to keep the writes from floating
// out of the constructor.
// Any method can write a @Stable field; insert memory barriers after those also.
if (is_field && (field->is_final() || field->is_stable())) {
- set_wrote_final(true);
+ if (field->is_final()) {
+ set_wrote_final(true);
+ }
+ if (field->is_stable()) {
+ set_wrote_stable(true);
+ }
+
// Preserve allocation ptr to create precedent edge to it in membar
// generated on exit from constructor.
if (C->eliminate_boxing() &&
--- a/hotspot/src/share/vm/runtime/arguments.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -2408,9 +2408,11 @@
status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
+ status &= verify_interval(CodeCacheMinBlockLength, 1, 100, "CodeCacheMinBlockLength");
+ status &= verify_interval(CodeCacheSegmentSize, 1, 1024, "CodeCacheSegmentSize");
// TieredCompilation needs at least 2 compiler threads.
- const int num_min_compiler_threads = (TieredCompilation) ? 2 : 1;
+ const int num_min_compiler_threads = (TieredCompilation && (TieredStopAtLevel >= CompLevel_full_optimization)) ? 2 : 1;
status &=verify_min_value(CICompilerCount, num_min_compiler_threads, "CICompilerCount");
return status;
--- a/hotspot/src/share/vm/runtime/globals.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -535,6 +535,9 @@
develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
"Clean the chunk pool asynchronously") \
\
+ experimental(bool, AlwaysSafeConstructors, false, \
+ "Force safe construction, as if all fields are final.") \
+ \
/* Temporary: See 6948537 */ \
experimental(bool, UseMemSetInBOT, true, \
"(Unstable) uses memset in BOT updates in GC code") \
@@ -811,8 +814,8 @@
product(bool, PrintOopAddress, false, \
"Always print the location of the oop") \
\
- notproduct(bool, VerifyCodeCacheOften, false, \
- "Verify compiled-code cache often") \
+ notproduct(bool, VerifyCodeCache, false, \
+ "Verify code cache on memory allocation/deallocation") \
\
develop(bool, ZapDeadCompiledLocals, false, \
"Zap dead locals in compiler frames") \
@@ -2984,7 +2987,8 @@
"maximum number of nested recursive calls that are inlined") \
\
develop(intx, MaxForceInlineLevel, 100, \
- "maximum number of nested @ForceInline calls that are inlined") \
+ "maximum number of nested calls that are forced for inlining " \
+ "(using CompilerOracle or marked w/ @ForceInline)") \
\
product_pd(intx, InlineSmallCode, \
"Only inline already compiled methods if their code size is " \
@@ -3292,8 +3296,8 @@
"disable this feature") \
\
/* code cache parameters */ \
- /* ppc64 has large code-entry alignment. */ \
- develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64), \
+ /* ppc64/tiered compilation has large code-entry alignment. */ \
+ develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),\
"Code cache segment size (in bytes) - smallest unit of " \
"allocation") \
\
@@ -3795,8 +3799,8 @@
experimental(bool, TrustFinalNonStaticFields, false, \
"trust final non-static declarations for constant folding") \
\
- experimental(bool, FoldStableValues, false, \
- "Private flag to control optimizations for stable variables") \
+ diagnostic(bool, FoldStableValues, true, \
+ "Optimize loads from stable fields (marked w/ @Stable)") \
\
develop(bool, TraceInvokeDynamic, false, \
"trace internal invoke dynamic operations") \
@@ -3864,6 +3868,9 @@
"Allocation less than this value will be allocated " \
"using malloc. Larger allocations will use mmap.") \
\
+ experimental(bool, AlwaysAtomicAccesses, false, \
+ "Accesses to all variables should always be atomic") \
+ \
product(bool, EnableTracing, false, \
"Enable event-based tracing") \
\
--- a/hotspot/src/share/vm/runtime/thread.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -910,7 +910,7 @@
cur != VMOperationRequest_lock &&
cur != VMOperationQueue_lock) ||
cur->rank() == Mutex::special) {
- warning("Thread holding lock at safepoint that vm can block on: %s", cur->name());
+ fatal(err_msg("Thread holding lock at safepoint that vm can block on: %s", cur->name()));
}
}
}
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -2336,6 +2336,12 @@
/* ConstMethod anon-enum */ \
/********************************/ \
\
+ declare_constant(Method::_jfr_towrite) \
+ declare_constant(Method::_caller_sensitive) \
+ declare_constant(Method::_force_inline) \
+ declare_constant(Method::_dont_inline) \
+ declare_constant(Method::_hidden) \
+ \
declare_constant(ConstMethod::_has_linenumber_table) \
declare_constant(ConstMethod::_has_checked_exceptions) \
declare_constant(ConstMethod::_has_localvariable_table) \
--- a/hotspot/src/share/vm/shark/llvmHeaders.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/shark/llvmHeaders.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -36,21 +36,43 @@
#endif
#include <llvm/Analysis/Verifier.h>
+#include <llvm/ExecutionEngine/ExecutionEngine.h>
+
+// includes specific to each version
+#if SHARK_LLVM_VERSION <= 31
+#include <llvm/Support/IRBuilder.h>
+#include <llvm/Type.h>
#include <llvm/Argument.h>
#include <llvm/Constants.h>
#include <llvm/DerivedTypes.h>
-#include <llvm/ExecutionEngine/ExecutionEngine.h>
#include <llvm/Instructions.h>
#include <llvm/LLVMContext.h>
#include <llvm/Module.h>
-#if SHARK_LLVM_VERSION <= 31
-#include <llvm/Support/IRBuilder.h>
-#else
+#elif SHARK_LLVM_VERSION <= 32
#include <llvm/IRBuilder.h>
+#include <llvm/Type.h>
+#include <llvm/Argument.h>
+#include <llvm/Constants.h>
+#include <llvm/DerivedTypes.h>
+#include <llvm/Instructions.h>
+#include <llvm/LLVMContext.h>
+#include <llvm/Module.h>
+#else // SHARK_LLVM_VERSION <= 34
+#include <llvm/IR/IRBuilder.h>
+#include <llvm/IR/Argument.h>
+#include <llvm/IR/Constants.h>
+#include <llvm/IR/DerivedTypes.h>
+#include <llvm/ExecutionEngine/ExecutionEngine.h>
+#include <llvm/IR/Instructions.h>
+#include <llvm/IR/LLVMContext.h>
+#include <llvm/IR/Module.h>
+#include <llvm/ADT/StringRef.h>
+#include <llvm/IR/Type.h>
#endif
+
+// common includes
#include <llvm/Support/Threading.h>
#include <llvm/Support/TargetSelect.h>
-#include <llvm/Type.h>
#include <llvm/ExecutionEngine/JITMemoryManager.h>
#include <llvm/Support/CommandLine.h>
#include <llvm/ExecutionEngine/MCJIT.h>
--- a/hotspot/src/share/vm/shark/sharkCompiler.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/shark/sharkCompiler.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -364,3 +364,7 @@
*(dst++) = '\0';
return buf;
}
+
+void SharkCompiler::print_timers() {
+ // do nothing
+}
--- a/hotspot/src/share/vm/shark/sharkCompiler.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/shark/sharkCompiler.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -56,6 +56,9 @@
// Compile a normal (bytecode) method and install it in the VM
void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
+ // Print compilation timers and statistics
+ void print_timers();
+
// Generate a wrapper for a native (JNI) method
nmethod* generate_native_wrapper(MacroAssembler* masm,
methodHandle target,
--- a/hotspot/src/share/vm/shark/sharkInliner.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/shark/sharkInliner.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -744,6 +744,10 @@
}
bool SharkInliner::attempt_inline(ciMethod *target, SharkState *state) {
+ if (!Inline) {
+ return false;
+ }
+
if (SharkIntrinsics::is_intrinsic(target)) {
SharkIntrinsics::inline_intrinsic(target, state);
return true;
--- a/hotspot/src/share/vm/shark/sharkMemoryManager.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/shark/sharkMemoryManager.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -59,18 +59,6 @@
entry->set_code_limit(FunctionEnd);
}
-unsigned char* SharkMemoryManager::startExceptionTable(const Function* F,
- uintptr_t& ActualSize) {
- return mm()->startExceptionTable(F, ActualSize);
-}
-
-void SharkMemoryManager::endExceptionTable(const Function* F,
- unsigned char* TableStart,
- unsigned char* TableEnd,
- unsigned char* FrameRegister) {
- mm()->endExceptionTable(F, TableStart, TableEnd, FrameRegister);
-}
-
void SharkMemoryManager::setMemoryWritable() {
mm()->setMemoryWritable();
}
@@ -79,10 +67,6 @@
mm()->setMemoryExecutable();
}
-void SharkMemoryManager::deallocateExceptionTable(void *ptr) {
- mm()->deallocateExceptionTable(ptr);
-}
-
void SharkMemoryManager::deallocateFunctionBody(void *ptr) {
mm()->deallocateFunctionBody(ptr);
}
@@ -96,6 +80,17 @@
return mm()->getPointerToNamedFunction(Name, AbortOnFailure);
}
+void SharkMemoryManager::setPoisonMemory(bool poison) {
+ mm()->setPoisonMemory(poison);
+}
+
+unsigned char *SharkMemoryManager::allocateSpace(intptr_t Size,
+ unsigned int Alignment) {
+ return mm()->allocateSpace(Size, Alignment);
+}
+
+#if SHARK_LLVM_VERSION <= 32
+
uint8_t* SharkMemoryManager::allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) {
return mm()->allocateCodeSection(Size, Alignment, SectionID);
}
@@ -104,11 +99,34 @@
return mm()->allocateDataSection(Size, Alignment, SectionID);
}
-void SharkMemoryManager::setPoisonMemory(bool poison) {
- mm()->setPoisonMemory(poison);
+void SharkMemoryManager::deallocateExceptionTable(void *ptr) {
+ mm()->deallocateExceptionTable(ptr);
+}
+
+unsigned char* SharkMemoryManager::startExceptionTable(const Function* F,
+ uintptr_t& ActualSize) {
+ return mm()->startExceptionTable(F, ActualSize);
+}
+
+void SharkMemoryManager::endExceptionTable(const Function* F,
+ unsigned char* TableStart,
+ unsigned char* TableEnd,
+ unsigned char* FrameRegister) {
+ mm()->endExceptionTable(F, TableStart, TableEnd, FrameRegister);
}
-unsigned char *SharkMemoryManager::allocateSpace(intptr_t Size,
- unsigned int Alignment) {
- return mm()->allocateSpace(Size, Alignment);
+#else
+
+uint8_t *SharkMemoryManager::allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName) {
+ return mm()->allocateCodeSection(Size, Alignment, SectionID, SectionName);
}
+
+uint8_t* SharkMemoryManager::allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, bool IsReadOnly) {
+ return mm()->allocateDataSection(Size, Alignment, SectionID, SectionName, IsReadOnly);
+}
+
+bool SharkMemoryManager::finalizeMemory(std::string *ErrMsg) {
+ return mm()->finalizeMemory(ErrMsg);
+}
+
+#endif
--- a/hotspot/src/share/vm/shark/sharkMemoryManager.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/shark/sharkMemoryManager.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -69,23 +69,32 @@
void endFunctionBody(const llvm::Function* F,
unsigned char* FunctionStart,
unsigned char* FunctionEnd);
- unsigned char* startExceptionTable(const llvm::Function* F,
- uintptr_t& ActualSize);
- void endExceptionTable(const llvm::Function* F,
- unsigned char* TableStart,
- unsigned char* TableEnd,
- unsigned char* FrameRegister);
+
void *getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure = true);
- uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
- uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
void setPoisonMemory(bool);
uint8_t* allocateGlobal(uintptr_t, unsigned int);
void setMemoryWritable();
void setMemoryExecutable();
- void deallocateExceptionTable(void *ptr);
void deallocateFunctionBody(void *ptr);
unsigned char *allocateSpace(intptr_t Size,
unsigned int Alignment);
+
+#if SHARK_LLVM_VERSION <= 32
+uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
+uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID);
+unsigned char* startExceptionTable(const llvm::Function* F,
+ uintptr_t& ActualSize);
+void deallocateExceptionTable(void *ptr);
+void endExceptionTable(const llvm::Function* F,
+ unsigned char* TableStart,
+ unsigned char* TableEnd,
+ unsigned char* FrameRegister);
+#else
+uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, llvm::StringRef SectionName);
+uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, llvm::StringRef SectionName, bool IsReadOnly);
+bool finalizeMemory(std::string *ErrMsg = 0);
+#endif
+
};
#endif // SHARE_VM_SHARK_SHARKMEMORYMANAGER_HPP
--- a/hotspot/src/share/vm/utilities/elfFile.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/utilities/elfFile.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -140,7 +140,7 @@
}
}
-#if defined(PPC64)
+#if defined(PPC64) && !defined(ABI_ELFv2)
// Now read the .opd section which contains the PPC64 function descriptor table.
// The .opd section is only available on PPC64 (see for example:
// http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html)
--- a/hotspot/src/share/vm/utilities/hashtable.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.hpp"
-#include "code/dependencies.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/resourceArea.hpp"
@@ -353,116 +352,6 @@
#endif
-template<class T, class M> GenericHashtable<T, M>::GenericHashtable(int size, bool C_heap, MEMFLAGS memflag) {
- assert(size > 0, " Invalid hashtable size");
- _size = size;
- _C_heap = C_heap;
- _memflag = memflag;
- // Perform subtype-specific resource allocation
- _items = (C_heap) ? NEW_C_HEAP_ARRAY(T*, size, memflag) : NEW_RESOURCE_ARRAY(T*, size);
- memset(_items, 0, sizeof(T*) * size);
-
- DEBUG_ONLY(_num_items = 0;)
-}
-
-template<class T, class M> GenericHashtable<T, M>::~GenericHashtable() {
- if (on_C_heap()) {
- // Check backing array
- for (int i = 0; i < size(); i++) {
- T* item = head(i);
- // Delete all items in linked list
- while (item != NULL) {
- T* next_item = item->next();
- delete item;
- DEBUG_ONLY(_num_items--);
- item = next_item;
- }
- }
- FREE_C_HEAP_ARRAY(T*, _items, _memflag);
- _items = NULL;
- assert (_num_items == 0, "Not all memory released");
- }
-}
-
-/**
- * Return a pointer to the item 'I' that is stored in the hashtable for
- * which match_item->equals(I) == true. If no such item is found, NULL
- * is returned.
- */
-template<class T, class F> T* GenericHashtable<T, F>::contains(T* match_item) {
- if (match_item != NULL) {
- int idx = index(match_item);
- return contains_impl(match_item, idx);
- }
- return NULL;
-}
-
-/**
- * Add item to the hashtable. Return 'true' if the item was added
- * and false otherwise.
- */
-template<class T, class F> bool GenericHashtable<T, F>::add(T* item) {
- if (item != NULL) {
- int idx = index(item);
- T* found_item = contains_impl(item, idx);
- if (found_item == NULL) {
- T* list_head = head(idx);
- item->set_next(list_head);
- item->set_prev(NULL);
-
- if (list_head != NULL) {
- list_head->set_prev(item);
- }
- set_head(item, idx);
- DEBUG_ONLY(_num_items++);
- return true;
- }
- }
- return false;
-}
-
-/**
- * Removes an item 'I' from the hashtable, if present. 'I' is removed, if
- * match_item->equals(I) == true. Removing an item from the hashtable does
- * not free memory.
- */
-template<class T, class F> T* GenericHashtable<T, F>::remove(T* match_item) {
- if (match_item != NULL) {
- int idx = index(match_item);
- T* found_item = contains_impl(match_item, idx);
- if (found_item != NULL) {
- // Remove item from linked list
- T* prev = found_item->prev();
- T* next = found_item->next();
- if (prev != NULL) {
- prev->set_next(next);
- } else {
- set_head(next, idx);
- }
- if (next != NULL) {
- next->set_prev(prev);
- }
-
- DEBUG_ONLY(_num_items--);
- return found_item;
- }
- }
- return NULL;
-}
-
-
-template<class T, class F> T* GenericHashtable<T, F>::contains_impl(T* item, int idx) {
- T* current_item = head(idx);
- while (current_item != NULL) {
- if (current_item->equals(item)) {
- return current_item;
- }
- current_item = current_item->next();
- }
- return NULL;
-}
-
-
// Explicitly instantiate these types
template class Hashtable<ConstantPool*, mtClass>;
template class Hashtable<Symbol*, mtSymbol>;
@@ -482,5 +371,3 @@
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
-
-template class GenericHashtable<DependencySignature, ResourceObj>;
--- a/hotspot/src/share/vm/utilities/hashtable.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -327,86 +327,4 @@
}
};
-
-/*
- * Usage of GenericHashtable:
- *
- * class X : public GenericHashtableEntry<X, ResourceObj> {
- *
- * // Implement virtual functions in class X
- * bool equals(X* sig) const;
- * uintptr_t hash() const;
- * };
- *
- * void foo() {
- * GenericHashtable<X, ResourceObj>* table = new GenericHashtable<X, ResourceObj>(11027, false);
- *
- * X* elem = new X();
- * table->add(elem);
- * table->contains(elem);
- * }
- *
- * You can choose other allocation types as well. For example, to store the hashtable to a
- * particular region (CHeapObj<type>) simply replace ResourceObj with the desired type:
- *
- * class X : public GenericHashtableEntry<X, CHeapObj<mtCode> > { ... };
- *
- * To make the destructor (and remove) of the hashtable work:
- * 1) override the delete operator of X
- * 2) provide a destructor of the X
- *
- * You may also find it convenient to override the new operator.
- *
- * If you use this templates do not forget to add an explicit initialization
- * (at the end of hashtable.cpp).
- *
- * template class GenericHashtable<X, ResourceObj>;
- */
-template <class T, class M> class GenericHashtableEntry : public M {
- private:
- T* _next;
- T* _prev;
- public:
- // Must be implemented by subclass.
- virtual uintptr_t key() const = 0;
- virtual bool equals(T* other) const = 0;
-
- T* next() const { return _next; }
- T* prev() const { return _prev; }
- void set_next(T* item) { _next = item; }
- void set_prev(T* item) { _prev = item; }
-
- // Constructor and destructor
- GenericHashtableEntry() : _next(NULL), _prev(NULL) { };
- virtual ~GenericHashtableEntry() {};
-};
-
-template <class T, class M> class GenericHashtable : public M {
- private:
- T** _items;
- int _size;
- bool _C_heap;
- MEMFLAGS _memflag;
-
- // Accessor methods
- T* head (int idx) const { return _items[idx]; }
- void set_head(T* item, int idx) { _items[idx] = item; }
- int index (T* item) { assert(item != NULL, "missing null check"); return item->key() % size(); }
-
- // Helper function
- T* contains_impl(T* item, int idx);
-
- DEBUG_ONLY(int _num_items;)
- public:
- GenericHashtable(int size, bool C_heap = false, MEMFLAGS memflag = mtNone);
- ~GenericHashtable();
- T* contains(T* match_item);
- T* remove (T* match_item);
- bool add (T* item);
-
-
- bool on_C_heap() const { return _C_heap; }
- int size() const { return _size; }
-};
-
#endif // SHARE_VM_UTILITIES_HASHTABLE_HPP
--- a/hotspot/src/share/vm/utilities/resourceHash.hpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/utilities/resourceHash.hpp Fri Mar 14 09:26:27 2014 +0100
@@ -105,14 +105,20 @@
}
}
- // Inserts or replaces a value in the table
- void put(K const& key, V const& value) {
+ /**
+ * Inserts or replaces a value in the table.
+   * @return true  if a new item was added,
+   *         false if the item already existed and its value was updated
+ */
+ bool put(K const& key, V const& value) {
unsigned hv = HASH(key);
Node** ptr = lookup_node(hv, key);
if (*ptr != NULL) {
(*ptr)->_value = value;
+ return false;
} else {
*ptr = new Node(hv, key, value);
+ return true;
}
}
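
put() now reports whether it created a new node, so callers can fold the old
probe-then-insert pattern into a single call. A minimal sketch of the assumed
usage (ResourceHashtable's hash/equals template parameters default to
primitive_hash/primitive_equals, and the table is resource-area allocated, so
a ResourceMark must be active; the key/value constants are hypothetical):

    ResourceMark rm;
    ResourceHashtable<int, int> table;
    bool added   = table.put(17, 1);  // no node for key 17 yet: returns true
    bool updated = table.put(17, 2);  // node exists, _value overwritten: returns false
    assert(added && !updated, "put() should report whether a new node was created");
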
--- a/hotspot/src/share/vm/utilities/vmError.cpp Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/src/share/vm/utilities/vmError.cpp Fri Mar 14 09:26:27 2014 +0100
@@ -592,13 +592,24 @@
st->cr();
// Compiled code may use EBP register on x86 so it looks like
// non-walkable C frame. Use frame.sender() for java frames.
- if (_thread && _thread->is_Java_thread() && fr.is_java_frame()) {
- RegisterMap map((JavaThread*)_thread, false); // No update
- fr = fr.sender(&map);
- continue;
+ if (_thread && _thread->is_Java_thread()) {
+        // Catch the very first native frame by checking the stack address:
+        // for a JavaThread, stack_base and stack_size should be set.
+ if (!_thread->on_local_stack((address)(fr.sender_sp() + 1))) {
+ break;
+ }
+ if (fr.is_java_frame()) {
+ RegisterMap map((JavaThread*)_thread, false); // No update
+ fr = fr.sender(&map);
+ } else {
+ fr = os::get_sender_for_C_frame(&fr);
+ }
+ } else {
+        // is_first_C_frame() performs only simple checks on the frame pointer;
+        // it can be fooled when compiled Java code keeps a pointer in EBP.
+ if (os::is_first_C_frame(&fr)) break;
+ fr = os::get_sender_for_C_frame(&fr);
}
- if (os::is_first_C_frame(&fr)) break;
- fr = os::get_sender_for_C_frame(&fr);
}
if (count > StackPrintLimit) {
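
The walk now terminates via Thread::on_local_stack() instead of relying only
on frame-pointer heuristics. Roughly, and assuming a downward-growing stack
with stack_base/stack_size set for the JavaThread, the bound check is of the
form sketched below (a paraphrase, not the verbatim HotSpot source):

    bool Thread::on_local_stack(address adr) const {
      // Stack grows downward: valid addresses lie in (base - size, base].
      return _stack_base >= adr && adr >= _stack_base - _stack_size;
    }

Once fr.sender_sp() falls outside this range the iteration has left the
thread's stack, so it stops rather than chasing a bogus frame chain.
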
--- a/hotspot/test/TEST.groups Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/TEST.groups Fri Mar 14 09:26:27 2014 +0100
@@ -130,7 +130,9 @@
gc/arguments/TestG1HeapRegionSize.java \
gc/metaspace/TestMetaspaceMemoryPool.java \
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
- serviceability/threads/TestFalseDeadLock.java
+ serviceability/threads/TestFalseDeadLock.java \
+ compiler/tiered/NonTieredLevelsTest.java \
+ compiler/tiered/TieredLevelsTest.java
# Compact 2 adds full VM tests
compact2 = \
--- a/hotspot/test/compiler/ciReplay/TestVM.sh Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/ciReplay/TestVM.sh Fri Mar 14 09:26:27 2014 +0100
@@ -78,8 +78,8 @@
positive_test `expr $stop_level + 50` "TIERED LEVEL $stop_level :: REPLAY" \
"-XX:TieredStopAtLevel=$stop_level"
stop_level=`expr $stop_level + 1`
+ cleanup
done
- cleanup
fi
echo TEST PASSED
--- a/hotspot/test/compiler/ciReplay/common.sh Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/ciReplay/common.sh Fri Mar 14 09:26:27 2014 +0100
@@ -99,14 +99,13 @@
# $2 - non-tiered comp_level
nontiered_tests() {
level=`grep "^compile " $replay_data | awk '{print $6}'`
- # is level available in non-tiere
+ # is level available in non-tiered
if [ "$level" -eq $2 ]
then
positive_test $1 "NON-TIERED :: AVAILABLE COMP_LEVEL" \
-XX:-TieredCompilation
else
negative_test `expr $1 + 1` "NON-TIERED :: UNAVAILABLE COMP_LEVEL" \
- negative_test `expr $1 + 1` "NON-TIERED :: UNAVAILABLE COMP_LEVEL" \
-XX:-TieredCompilation
fi
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/codegen/BMI1.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8031321
+ * @summary Support BMI1 instructions on x86/x64
+ * @run main/othervm -Xbatch -XX:-TieredCompilation -XX:CompileCommand=compileonly,BMITests.* BMI1
+ *
+ */
+
+class MemI {
+ public int x;
+ public MemI(int x) { this.x = x; }
+}
+
+class MemL {
+ public long x;
+ public MemL(long x) { this.x = x; }
+}
+
+class BMITests {
+ static int andnl(int src1, int src2) {
+ return ~src1 & src2;
+ }
+ static long andnq(long src1, long src2) {
+ return ~src1 & src2;
+ }
+ static int andnl(int src1, MemI src2) {
+ return ~src1 & src2.x;
+ }
+ static long andnq(long src1, MemL src2) {
+ return ~src1 & src2.x;
+ }
+ static int blsil(int src1) {
+ return src1 & -src1;
+ }
+ static long blsiq(long src1) {
+ return src1 & -src1;
+ }
+ static int blsil(MemI src1) {
+ return src1.x & -src1.x;
+ }
+ static long blsiq(MemL src1) {
+ return src1.x & -src1.x;
+ }
+ static int blsmskl(int src1) {
+ return (src1 - 1) ^ src1;
+ }
+ static long blsmskq(long src1) {
+ return (src1 - 1) ^ src1;
+ }
+ static int blsmskl(MemI src1) {
+ return (src1.x - 1) ^ src1.x;
+ }
+ static long blsmskq(MemL src1) {
+ return (src1.x - 1) ^ src1.x;
+ }
+ static int blsrl(int src1) {
+ return (src1 - 1) & src1;
+ }
+ static long blsrq(long src1) {
+ return (src1 - 1) & src1;
+ }
+ static int blsrl(MemI src1) {
+ return (src1.x - 1) & src1.x;
+ }
+ static long blsrq(MemL src1) {
+ return (src1.x - 1) & src1.x;
+ }
+ static int lzcntl(int src1) {
+ return Integer.numberOfLeadingZeros(src1);
+ }
+ static int lzcntq(long src1) {
+ return Long.numberOfLeadingZeros(src1);
+ }
+ static int tzcntl(int src1) {
+ return Integer.numberOfTrailingZeros(src1);
+ }
+ static int tzcntq(long src1) {
+ return Long.numberOfTrailingZeros(src1);
+ }
+}
+
+public class BMI1 {
+ private final static int ITERATIONS = 1000000;
+
+ public static void main(String[] args) {
+ int ix = 0x01234567;
+ int iy = 0x89abcdef;
+ MemI imy = new MemI(iy);
+ long lx = 0x0123456701234567L;
+ long ly = 0x89abcdef89abcdefL;
+ MemL lmy = new MemL(ly);
+
+ { // match(Set dst (AndI (XorI src1 minus_1) src2))
+ int z = BMITests.andnl(ix, iy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.andnl(ix, iy);
+ if (ii != z) {
+ throw new Error("andnl with register failed");
+ }
+ }
+ }
+ { // match(Set dst (AndL (XorL src1 minus_1) src2))
+ long z = BMITests.andnq(lx, ly);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.andnq(lx, ly);
+ if (ll != z) {
+ throw new Error("andnq with register failed");
+ }
+ }
+ }
+ { // match(Set dst (AndI (XorI src1 minus_1) (LoadI src2)))
+ int z = BMITests.andnl(ix, imy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.andnl(ix, imy);
+ if (ii != z) {
+ throw new Error("andnl with memory failed");
+ }
+ }
+ }
+ { // match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)))
+ long z = BMITests.andnq(lx, lmy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.andnq(lx, lmy);
+ if (ll != z) {
+ throw new Error("andnq with memory failed");
+ }
+ }
+ }
+ { // match(Set dst (AndI (SubI imm_zero src) src))
+ int z = BMITests.blsil(ix);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.blsil(ix);
+ if (ii != z) {
+ throw new Error("blsil with register failed");
+ }
+ }
+ }
+ { // match(Set dst (AndL (SubL imm_zero src) src))
+ long z = BMITests.blsiq(lx);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.blsiq(lx);
+ if (ll != z) {
+ throw new Error("blsiq with register failed");
+ }
+ }
+ }
+ { // match(Set dst (AndI (SubI imm_zero (LoadI src) ) (LoadI src) ))
+ int z = BMITests.blsil(imy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.blsil(imy);
+ if (ii != z) {
+ throw new Error("blsil with memory failed");
+ }
+ }
+ }
+ { // match(Set dst (AndL (SubL imm_zero (LoadL src) ) (LoadL src) ))
+ long z = BMITests.blsiq(lmy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.blsiq(lmy);
+ if (ll != z) {
+ throw new Error("blsiq with memory failed");
+ }
+ }
+ }
+
+ { // match(Set dst (XorI (AddI src minus_1) src))
+ int z = BMITests.blsmskl(ix);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.blsmskl(ix);
+ if (ii != z) {
+ throw new Error("blsmskl with register failed");
+ }
+ }
+ }
+ { // match(Set dst (XorL (AddL src minus_1) src))
+ long z = BMITests.blsmskq(lx);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.blsmskq(lx);
+ if (ll != z) {
+ throw new Error("blsmskq with register failed");
+ }
+ }
+ }
+ { // match(Set dst (XorI (AddI (LoadI src) minus_1) (LoadI src) ) )
+ int z = BMITests.blsmskl(imy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.blsmskl(imy);
+ if (ii != z) {
+ throw new Error("blsmskl with memory failed");
+ }
+ }
+ }
+ { // match(Set dst (XorL (AddL (LoadL src) minus_1) (LoadL src) ) )
+ long z = BMITests.blsmskq(lmy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.blsmskq(lmy);
+ if (ll != z) {
+ throw new Error("blsmskq with memory failed");
+ }
+ }
+ }
+
+ { // match(Set dst (AndI (AddI src minus_1) src) )
+ int z = BMITests.blsrl(ix);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.blsrl(ix);
+ if (ii != z) {
+ throw new Error("blsrl with register failed");
+ }
+ }
+ }
+ { // match(Set dst (AndL (AddL src minus_1) src) )
+ long z = BMITests.blsrq(lx);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.blsrq(lx);
+ if (ll != z) {
+ throw new Error("blsrq with register failed");
+ }
+ }
+ }
+ { // match(Set dst (AndI (AddI (LoadI src) minus_1) (LoadI src) ) )
+ int z = BMITests.blsrl(imy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.blsrl(imy);
+ if (ii != z) {
+ throw new Error("blsrl with memory failed");
+ }
+ }
+ }
+ { // match(Set dst (AndL (AddL (LoadL src) minus_1) (LoadL src)) )
+ long z = BMITests.blsrq(lmy);
+ for (int i = 0; i < ITERATIONS; i++) {
+ long ll = BMITests.blsrq(lmy);
+ if (ll != z) {
+ throw new Error("blsrq with memory failed");
+ }
+ }
+ }
+
+ {
+ int z = BMITests.lzcntl(ix);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.lzcntl(ix);
+ if (ii != z) {
+ throw new Error("lzcntl failed");
+ }
+ }
+ }
+ {
+ int z = BMITests.lzcntq(lx);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.lzcntq(lx);
+ if (ii != z) {
+ throw new Error("lzcntq failed");
+ }
+ }
+ }
+
+ {
+ int z = BMITests.tzcntl(ix);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.tzcntl(ix);
+ if (ii != z) {
+ throw new Error("tzcntl failed");
+ }
+ }
+ }
+ {
+ int z = BMITests.tzcntq(lx);
+ for (int i = 0; i < ITERATIONS; i++) {
+ int ii = BMITests.tzcntq(lx);
+ if (ii != z) {
+ throw new Error("tzcntq failed");
+ }
+ }
+ }
+ }
+}
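
For reference, the bit identities exercised by BMITests are exactly the BMI1
instruction semantics, and can be sanity-checked outside the VM. A standalone
C++ sketch with hypothetical input values:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0x58;                // 0b01011000
      uint32_t y = 0xF0;
      assert((~x & y) == 0xA0);         // ANDN  : and-not
      assert((x & (0u - x)) == 0x08);   // BLSI  : extract lowest set bit
      assert(((x - 1) ^ x) == 0x0F);    // BLSMSK: mask up to lowest set bit
      assert(((x - 1) & x) == 0x50);    // BLSR  : reset lowest set bit
      return 0;
    }

LZCNT and TZCNT need no extra identity here: the test routes them through
Integer/Long.numberOfLeadingZeros and numberOfTrailingZeros, which the JIT is
expected to intrinsify to those instructions where the CPU supports them.
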
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/inlining/InlineDefaultMethod1.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8036100
+ * @summary Default method returns true for a while, and then returns false
+ * @run main/othervm -Xcomp -XX:CompileOnly=InlineDefaultMethod1::test
+ * -XX:CompileOnly=I1::m -XX:CompileOnly=I2::m
+ * InlineDefaultMethod1
+ */
+interface I1 {
+ default public int m() { return 0; }
+}
+
+interface I2 extends I1 {
+ default public int m() { return 1; }
+}
+
+abstract class A implements I1 {
+}
+
+class B extends A implements I2 {
+}
+
+public class InlineDefaultMethod1 {
+ public static void test(A obj) {
+ int id = obj.m();
+ if (id != 1) {
+ throw new AssertionError("Called wrong method: 1 != "+id);
+ }
+ }
+
+ public static void main(String[] args) throws InterruptedException {
+ test(new B());
+ System.out.println("TEST PASSED");
+ }
+}
--- a/hotspot/test/compiler/membars/DekkerTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/membars/DekkerTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -25,9 +25,9 @@
* @test
* @bug 8007898
* @summary Incorrect optimization of Memory Barriers in Matcher::post_store_load_barrier().
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
- * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CICompilerCount=1 -XX:-TieredCompilation -XX:+StressGCM -XX:+StressLCM DekkerTest
* @author Martin Doerr martin DOT doerr AT sap DOT com
*
* Run 3 times since the failure is intermittent.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableBoolean.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,627 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableBoolean
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableBoolean.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableBoolean
+ * java/lang/invoke/TestStableBoolean$BooleanStable
+ * java/lang/invoke/TestStableBoolean$StaticBooleanStable
+ * java/lang/invoke/TestStableBoolean$VolatileBooleanStable
+ * java/lang/invoke/TestStableBoolean$BooleanArrayDim1
+ * java/lang/invoke/TestStableBoolean$BooleanArrayDim2
+ * java/lang/invoke/TestStableBoolean$BooleanArrayDim3
+ * java/lang/invoke/TestStableBoolean$BooleanArrayDim4
+ * java/lang/invoke/TestStableBoolean$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableBoolean$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableBoolean$NestedStableField
+ * java/lang/invoke/TestStableBoolean$NestedStableField$A
+ * java/lang/invoke/TestStableBoolean$NestedStableField1
+ * java/lang/invoke/TestStableBoolean$NestedStableField1$A
+ * java/lang/invoke/TestStableBoolean$NestedStableField2
+ * java/lang/invoke/TestStableBoolean$NestedStableField2$A
+ * java/lang/invoke/TestStableBoolean$NestedStableField3
+ * java/lang/invoke/TestStableBoolean$NestedStableField3$A
+ * java/lang/invoke/TestStableBoolean$DefaultValue
+ * java/lang/invoke/TestStableBoolean$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableBoolean
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableBoolean
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableBoolean
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableBoolean
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableBoolean {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(BooleanStable.class);
+ run(StaticBooleanStable.class);
+ run(VolatileBooleanStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(BooleanArrayDim1.class);
+ run(BooleanArrayDim2.class);
+ run(BooleanArrayDim3.class);
+ run(BooleanArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DefaultValue {
+ public @Stable boolean v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static boolean get() { return c.v; }
+ public static void test() throws Exception {
+ boolean val1 = get();
+ c.v = true; boolean val2 = get();
+ assertEquals(val1, false);
+ assertEquals(val2, true);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class BooleanStable {
+ public @Stable boolean v;
+
+ public static final BooleanStable c = new BooleanStable();
+ public static boolean get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = true; boolean val1 = get();
+ c.v = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticBooleanStable {
+ public static @Stable boolean v;
+
+ public static final StaticBooleanStable c = new StaticBooleanStable();
+ public static boolean get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = true; boolean val1 = get();
+ c.v = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileBooleanStable {
+ public @Stable volatile boolean v;
+
+ public static final VolatileBooleanStable c = new VolatileBooleanStable();
+ public static boolean get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = true; boolean val1 = get();
+ c.v = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ }
+ }
+
+ /* ==================================================== */
+    // For a @Stable array, both the field itself and all array components are treated as stable
+
+ static class BooleanArrayDim1 {
+ public @Stable boolean[] v;
+
+ public static final BooleanArrayDim1 c = new BooleanArrayDim1();
+ public static boolean get() { return c.v[0]; }
+ public static boolean get1() { return c.v[10]; }
+ public static boolean[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1]; c.v[0] = true; boolean val1 = get();
+ c.v[0] = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ }
+
+ {
+ c.v = new boolean[20]; c.v[10] = true; boolean val1 = get1();
+ c.v[10] = false; boolean val2 = get1();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ }
+
+ {
+ c.v = new boolean[1]; boolean[] val1 = get2();
+ c.v = new boolean[1]; boolean[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class BooleanArrayDim2 {
+ public @Stable boolean[][] v;
+
+ public static final BooleanArrayDim2 c = new BooleanArrayDim2();
+ public static boolean get() { return c.v[0][0]; }
+ public static boolean[] get1() { return c.v[0]; }
+ public static boolean[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1][1]; c.v[0][0] = true; boolean val1 = get();
+ c.v[0][0] = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+
+ c.v = new boolean[1][1]; c.v[0][0] = false; boolean val3 = get();
+ assertEquals(val3, (isStableEnabled ? true : false));
+
+ c.v[0] = new boolean[1]; c.v[0][0] = false; boolean val4 = get();
+ assertEquals(val4, (isStableEnabled ? true : false));
+ }
+
+ {
+ c.v = new boolean[1][1]; boolean[] val1 = get1();
+ c.v[0] = new boolean[1]; boolean[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1]; boolean[][] val1 = get2();
+ c.v = new boolean[1][1]; boolean[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class BooleanArrayDim3 {
+ public @Stable boolean[][][] v;
+
+ public static final BooleanArrayDim3 c = new BooleanArrayDim3();
+ public static boolean get() { return c.v[0][0][0]; }
+ public static boolean[] get1() { return c.v[0][0]; }
+ public static boolean[][] get2() { return c.v[0]; }
+ public static boolean[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1][1][1]; c.v[0][0][0] = true; boolean val1 = get();
+ c.v[0][0][0] = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+
+ c.v = new boolean[1][1][1]; c.v[0][0][0] = false; boolean val3 = get();
+ assertEquals(val3, (isStableEnabled ? true : false));
+
+ c.v[0] = new boolean[1][1]; c.v[0][0][0] = false; boolean val4 = get();
+ assertEquals(val4, (isStableEnabled ? true : false));
+
+ c.v[0][0] = new boolean[1]; c.v[0][0][0] = false; boolean val5 = get();
+ assertEquals(val5, (isStableEnabled ? true : false));
+ }
+
+ {
+ c.v = new boolean[1][1][1]; boolean[] val1 = get1();
+ c.v[0][0] = new boolean[1]; boolean[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1][1]; boolean[][] val1 = get2();
+ c.v[0] = new boolean[1][1]; boolean[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1][1]; boolean[][][] val1 = get3();
+ c.v = new boolean[1][1][1]; boolean[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class BooleanArrayDim4 {
+ public @Stable boolean[][][][] v;
+
+ public static final BooleanArrayDim4 c = new BooleanArrayDim4();
+ public static boolean get() { return c.v[0][0][0][0]; }
+ public static boolean[] get1() { return c.v[0][0][0]; }
+ public static boolean[][] get2() { return c.v[0][0]; }
+ public static boolean[][][] get3() { return c.v[0]; }
+ public static boolean[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1][1][1][1]; c.v[0][0][0][0] = true; boolean val1 = get();
+ c.v[0][0][0][0] = false; boolean val2 = get();
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+
+ c.v = new boolean[1][1][1][1]; c.v[0][0][0][0] = false; boolean val3 = get();
+ assertEquals(val3, (isStableEnabled ? true : false));
+
+ c.v[0] = new boolean[1][1][1]; c.v[0][0][0][0] = false; boolean val4 = get();
+ assertEquals(val4, (isStableEnabled ? true : false));
+
+ c.v[0][0] = new boolean[1][1]; c.v[0][0][0][0] = false; boolean val5 = get();
+ assertEquals(val5, (isStableEnabled ? true : false));
+
+ c.v[0][0][0] = new boolean[1]; c.v[0][0][0][0] = false; boolean val6 = get();
+ assertEquals(val6, (isStableEnabled ? true : false));
+ }
+
+ {
+ c.v = new boolean[1][1][1][1]; boolean[] val1 = get1();
+ c.v[0][0][0] = new boolean[1]; boolean[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1][1][1]; boolean[][] val1 = get2();
+ c.v[0][0] = new boolean[1][1]; boolean[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1][1][1]; boolean[][][] val1 = get3();
+ c.v[0] = new boolean[1][1][1]; boolean[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1][1][1]; boolean[][][][] val1 = get4();
+ c.v = new boolean[1][1][1][1]; boolean[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+    // The dynamic dimension is higher than the static type suggests
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static boolean get() { return ((boolean[])c.v)[0]; }
+ public static boolean[] get1() { return (boolean[])c.v; }
+ public static boolean[] get2() { return (boolean[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1]; ((boolean[])c.v)[0] = true; boolean val1 = get();
+ ((boolean[])c.v)[0] = false; boolean val2 = get();
+
+ assertEquals(val1, true);
+ assertEquals(val2, false);
+ }
+
+ {
+ c.v = new boolean[1]; boolean[] val1 = get1();
+ c.v = new boolean[1]; boolean[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static boolean get() { return ((boolean[][])c.v)[0][0]; }
+ public static boolean[] get1() { return (boolean[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1][1]; ((boolean[][])c.v)[0][0] = true; boolean val1 = get();
+ ((boolean[][])c.v)[0][0] = false; boolean val2 = get();
+
+ assertEquals(val1, true);
+ assertEquals(val2, false);
+ }
+
+ {
+ c.v = new boolean[1][1]; c.v[0] = new boolean[0]; boolean[] val1 = get1();
+ c.v[0] = new boolean[0]; boolean[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[0][0]; Object[] val1 = get2();
+ c.v = new boolean[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static boolean get() { return ((boolean[][][])c.v)[0][0][0]; }
+ public static boolean[] get1() { return (boolean[])(c.v[0][0]); }
+ public static boolean[][] get2() { return (boolean[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new boolean[1][1][1]; ((boolean[][][])c.v)[0][0][0] = true; boolean val1 = get();
+ ((boolean[][][])c.v)[0][0][0] = false; boolean val2 = get();
+
+ assertEquals(val1, true);
+ assertEquals(val2, false);
+ }
+
+ {
+ c.v = new boolean[1][1][1]; c.v[0][0] = new boolean[0]; boolean[] val1 = get1();
+ c.v[0][0] = new boolean[0]; boolean[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[1][1][1]; c.v[0] = new boolean[0][0]; boolean[][] val1 = get2();
+ c.v[0] = new boolean[0][0]; boolean[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new boolean[0][0][0]; Object[][] val1 = get3();
+ c.v = new boolean[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable boolean a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static boolean get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = true; A val1 = get();
+ c.v.a = false; A val2 = get();
+
+ assertEquals(val1.a, false);
+ assertEquals(val2.a, false);
+ }
+
+ {
+ c.v = new A(); c.v.a = true; boolean val1 = get1();
+ c.v.a = false; boolean val2 = get1();
+ c.v = new A(); c.v.a = false; boolean val3 = get1();
+
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ assertEquals(val3, (isStableEnabled ? true : false));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable boolean a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static boolean get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = true; c.v.next.a = true; A val1 = get();
+ c.v.a = false; c.v.next.a = false; A val2 = get();
+
+ assertEquals(val1.a, false);
+ assertEquals(val2.a, false);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = true; boolean val1 = get1();
+ c.v.a = false; boolean val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = false; boolean val3 = get1();
+
+ assertEquals(val1, true);
+ assertEquals(val2, (isStableEnabled ? true : false));
+ assertEquals(val3, (isStableEnabled ? true : false));
+ }
+ }
+ }
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable boolean a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static boolean get() { return c.v.left.left.left.a; }
+ public static boolean get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = true; boolean val1 = get(); boolean val2 = get1();
+ c.v.a = false; boolean val3 = get(); boolean val4 = get1();
+
+ assertEquals(val1, true);
+ assertEquals(val3, (isStableEnabled ? true : false));
+
+ assertEquals(val2, true);
+ assertEquals(val4, false);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable boolean a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static boolean get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static boolean get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = true; boolean val1 = get(); boolean val2 = get1();
+ elem.a = false; boolean val3 = get(); boolean val4 = get1();
+
+ assertEquals(val1, true);
+ assertEquals(val3, (isStableEnabled ? true : false));
+
+ assertEquals(val2, true);
+ assertEquals(val4, false);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(boolean i, boolean j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableByte.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableByte
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableByte.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableByte
+ * java/lang/invoke/TestStableByte$ByteStable
+ * java/lang/invoke/TestStableByte$StaticByteStable
+ * java/lang/invoke/TestStableByte$VolatileByteStable
+ * java/lang/invoke/TestStableByte$ByteArrayDim1
+ * java/lang/invoke/TestStableByte$ByteArrayDim2
+ * java/lang/invoke/TestStableByte$ByteArrayDim3
+ * java/lang/invoke/TestStableByte$ByteArrayDim4
+ * java/lang/invoke/TestStableByte$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableByte$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableByte$NestedStableField
+ * java/lang/invoke/TestStableByte$NestedStableField$A
+ * java/lang/invoke/TestStableByte$NestedStableField1
+ * java/lang/invoke/TestStableByte$NestedStableField1$A
+ * java/lang/invoke/TestStableByte$NestedStableField2
+ * java/lang/invoke/TestStableByte$NestedStableField2$A
+ * java/lang/invoke/TestStableByte$NestedStableField3
+ * java/lang/invoke/TestStableByte$NestedStableField3$A
+ * java/lang/invoke/TestStableByte$DefaultValue
+ * java/lang/invoke/TestStableByte$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableByte
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableByte
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableByte
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableByte
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableByte {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(ByteStable.class);
+ run(StaticByteStable.class);
+ run(VolatileByteStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(ByteArrayDim1.class);
+ run(ByteArrayDim2.class);
+ run(ByteArrayDim3.class);
+ run(ByteArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DefaultValue {
+ public @Stable byte v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static byte get() { return c.v; }
+ public static void test() throws Exception {
+ byte val1 = get();
+ c.v = 1; byte val2 = get();
+ assertEquals(val1, 0);
+ assertEquals(val2, 1);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ByteStable {
+ public @Stable byte v;
+
+ public static final ByteStable c = new ByteStable();
+ public static byte get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 5; byte val1 = get();
+ c.v = 127; byte val2 = get();
+ assertEquals(val1, 5);
+ assertEquals(val2, (isStableEnabled ? 5 : 127));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticByteStable {
+ public static @Stable byte v;
+
+ public static final StaticByteStable c = new StaticByteStable();
+ public static byte get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 5; byte val1 = get();
+ c.v = 127; byte val2 = get();
+ assertEquals(val1, 5);
+ assertEquals(val2, (isStableEnabled ? 5 : 127));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileByteStable {
+ public @Stable volatile byte v;
+
+ public static final VolatileByteStable c = new VolatileByteStable();
+ public static byte get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 5; byte val1 = get();
+ c.v = 127; byte val2 = get();
+ assertEquals(val1, 5);
+ assertEquals(val2, (isStableEnabled ? 5 : 127));
+ }
+ }
+
+ /* ==================================================== */
+    // For a @Stable array, both the field itself and all array components are treated as stable
+
+ static class ByteArrayDim1 {
+ public @Stable byte[] v;
+
+ public static final ByteArrayDim1 c = new ByteArrayDim1();
+ public static byte get() { return c.v[0]; }
+ public static byte get1() { return c.v[10]; }
+ public static byte[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1]; c.v[0] = 1; byte val1 = get();
+ c.v[0] = 2; byte val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new byte[1]; c.v[0] = 3; byte val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new byte[20]; c.v[10] = 1; byte val1 = get1();
+ c.v[10] = 2; byte val2 = get1();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new byte[20]; c.v[10] = 3; byte val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new byte[1]; byte[] val1 = get2();
+ c.v = new byte[1]; byte[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ByteArrayDim2 {
+ public @Stable byte[][] v;
+
+ public static final ByteArrayDim2 c = new ByteArrayDim2();
+ public static byte get() { return c.v[0][0]; }
+ public static byte[] get1() { return c.v[0]; }
+ public static byte[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1][1]; c.v[0][0] = 1; byte val1 = get();
+ c.v[0][0] = 2; byte val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new byte[1][1]; c.v[0][0] = 3; byte val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new byte[1]; c.v[0][0] = 4; byte val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+ }
+
+ {
+ c.v = new byte[1][1]; byte[] val1 = get1();
+ c.v[0] = new byte[1]; byte[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1]; byte[][] val1 = get2();
+ c.v = new byte[1][1]; byte[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ByteArrayDim3 {
+ public @Stable byte[][][] v;
+
+ public static final ByteArrayDim3 c = new ByteArrayDim3();
+ public static byte get() { return c.v[0][0][0]; }
+ public static byte[] get1() { return c.v[0][0]; }
+ public static byte[][] get2() { return c.v[0]; }
+ public static byte[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1][1][1]; c.v[0][0][0] = 1; byte val1 = get();
+ c.v[0][0][0] = 2; byte val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new byte[1][1][1]; c.v[0][0][0] = 3; byte val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new byte[1][1]; c.v[0][0][0] = 4; byte val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new byte[1]; c.v[0][0][0] = 5; byte val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+ }
+
+ {
+ c.v = new byte[1][1][1]; byte[] val1 = get1();
+ c.v[0][0] = new byte[1]; byte[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1][1]; byte[][] val1 = get2();
+ c.v[0] = new byte[1][1]; byte[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1][1]; byte[][][] val1 = get3();
+ c.v = new byte[1][1][1]; byte[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ByteArrayDim4 {
+ public @Stable byte[][][][] v;
+
+ public static final ByteArrayDim4 c = new ByteArrayDim4();
+ public static byte get() { return c.v[0][0][0][0]; }
+ public static byte[] get1() { return c.v[0][0][0]; }
+ public static byte[][] get2() { return c.v[0][0]; }
+ public static byte[][][] get3() { return c.v[0]; }
+ public static byte[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1][1][1][1]; c.v[0][0][0][0] = 1; byte val1 = get();
+ c.v[0][0][0][0] = 2; byte val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new byte[1][1][1][1]; c.v[0][0][0][0] = 3; byte val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new byte[1][1][1]; c.v[0][0][0][0] = 4; byte val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new byte[1][1]; c.v[0][0][0][0] = 5; byte val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+
+ c.v[0][0][0] = new byte[1]; c.v[0][0][0][0] = 6; byte val6 = get();
+ assertEquals(val6, (isStableEnabled ? 1 : 6));
+ }
+
+ {
+ c.v = new byte[1][1][1][1]; byte[] val1 = get1();
+ c.v[0][0][0] = new byte[1]; byte[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1][1][1]; byte[][] val1 = get2();
+ c.v[0][0] = new byte[1][1]; byte[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1][1][1]; byte[][][] val1 = get3();
+ c.v[0] = new byte[1][1][1]; byte[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1][1][1]; byte[][][][] val1 = get4();
+ c.v = new byte[1][1][1][1]; byte[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+    // The dynamic dimension is higher than the static type suggests
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static byte get() { return ((byte[])c.v)[0]; }
+ public static byte[] get1() { return (byte[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1]; ((byte[])c.v)[0] = 1; byte val1 = get();
+ ((byte[])c.v)[0] = 2; byte val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new byte[1]; byte[] val1 = get1();
+ c.v = new byte[1]; byte[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static byte get() { return ((byte[][])c.v)[0][0]; }
+ public static byte[] get1() { return (byte[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1][1]; ((byte[][])c.v)[0][0] = 1; byte val1 = get();
+ ((byte[][])c.v)[0][0] = 2; byte val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new byte[1][1]; c.v[0] = new byte[0]; byte[] val1 = get1();
+ c.v[0] = new byte[0]; byte[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[0][0]; Object[] val1 = get2();
+ c.v = new byte[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static byte get() { return ((byte[][][])c.v)[0][0][0]; }
+ public static byte[] get1() { return (byte[])(c.v[0][0]); }
+ public static byte[][] get2() { return (byte[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new byte[1][1][1]; ((byte[][][])c.v)[0][0][0] = 1; byte val1 = get();
+ ((byte[][][])c.v)[0][0][0] = 2; byte val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new byte[1][1][1]; c.v[0][0] = new byte[0]; byte[] val1 = get1();
+ c.v[0][0] = new byte[0]; byte[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[1][1][1]; c.v[0] = new byte[0][0]; byte[][] val1 = get2();
+ c.v[0] = new byte[0][0]; byte[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new byte[0][0][0]; Object[][] val1 = get3();
+ c.v = new byte[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable byte a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static byte get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 1; A val1 = get();
+ c.v.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.a = 1; byte val1 = get1();
+ c.v.a = 2; byte val2 = get1();
+ c.v = new A(); c.v.a = 3; byte val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable byte a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static byte get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 1; c.v.next.a = 1; A val1 = get();
+ c.v.a = 2; c.v.next.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 1; byte val1 = get1();
+ c.v.a = 2; byte val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 3; byte val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable byte a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static byte get() { return c.v.left.left.left.a; }
+ public static byte get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 1; byte val1 = get(); byte val2 = get1();
+ c.v.a = 2; byte val3 = get(); byte val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable byte a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static byte get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static byte get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 1; byte val1 = get(); byte val2 = get1();
+ elem.a = 2; byte val3 = get(); byte val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(int i, int j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableChar.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableChar
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableChar.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableChar
+ * java/lang/invoke/TestStableChar$CharStable
+ * java/lang/invoke/TestStableChar$StaticCharStable
+ * java/lang/invoke/TestStableChar$VolatileCharStable
+ * java/lang/invoke/TestStableChar$CharArrayDim1
+ * java/lang/invoke/TestStableChar$CharArrayDim2
+ * java/lang/invoke/TestStableChar$CharArrayDim3
+ * java/lang/invoke/TestStableChar$CharArrayDim4
+ * java/lang/invoke/TestStableChar$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableChar$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableChar$NestedStableField
+ * java/lang/invoke/TestStableChar$NestedStableField$A
+ * java/lang/invoke/TestStableChar$NestedStableField1
+ * java/lang/invoke/TestStableChar$NestedStableField1$A
+ * java/lang/invoke/TestStableChar$NestedStableField2
+ * java/lang/invoke/TestStableChar$NestedStableField2$A
+ * java/lang/invoke/TestStableChar$NestedStableField3
+ * java/lang/invoke/TestStableChar$NestedStableField3$A
+ * java/lang/invoke/TestStableChar$DefaultValue
+ * java/lang/invoke/TestStableChar$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableChar
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableChar
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableChar
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableChar
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableChar {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(CharStable.class);
+ run(StaticCharStable.class);
+ run(VolatileCharStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(CharArrayDim1.class);
+ run(CharArrayDim2.class);
+ run(CharArrayDim3.class);
+ run(CharArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
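+ // Test pattern used throughout: store a value, read it through a getter
+ // (force-compiled via -Xcomp and -XX:CompileOnly=::get*), mutate the
+ // field, then read again. With -XX:+FoldStableValues the compiled getter
+ // may have folded the first non-default value, so the second read is
+ // expected to return the stale constant; with folding off it must
+ // observe the new value.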
+ static class DefaultValue {
+ public @Stable char v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static char get() { return c.v; }
+ public static void test() throws Exception {
+ char val1 = get();
+ c.v = 'a'; char val2 = get();
+ assertEquals(val1, 0);
+ assertEquals(val2, 'a');
+ }
+ }
+
+ /* ==================================================== */
+
+ static class CharStable {
+ public @Stable char v;
+
+ public static final CharStable c = new CharStable();
+ public static char get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 'a'; char val1 = get();
+ c.v = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticCharStable {
+ public static @Stable char v;
+
+ public static final StaticCharStable c = new StaticCharStable();
+ public static char get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 'a'; char val1 = get();
+ c.v = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileCharStable {
+ public @Stable volatile char v;
+
+ public static final VolatileCharStable c = new VolatileCharStable();
+ public static char get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 'a'; char val1 = get();
+ c.v = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+ }
+ }
+
+ /* ==================================================== */
+ // For a @Stable array, both the array field itself and all of its components are treated as stable
+
+ static class CharArrayDim1 {
+ public @Stable char[] v;
+
+ public static final CharArrayDim1 c = new CharArrayDim1();
+ public static char get() { return c.v[0]; }
+ public static char get1() { return c.v[10]; }
+ public static char[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new char[1]; c.v[0] = 'a'; char val1 = get();
+ c.v[0] = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+
+ c.v = new char[1]; c.v[0] = 'c'; char val3 = get();
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+ }
+
+ {
+ c.v = new char[20]; c.v[10] = 'a'; char val1 = get1();
+ c.v[10] = 'b'; char val2 = get1();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+
+ c.v = new char[20]; c.v[10] = 'c'; char val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+ }
+
+ {
+ c.v = new char[1]; char[] val1 = get2();
+ c.v = new char[1]; char[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class CharArrayDim2 {
+ public @Stable char[][] v;
+
+ public static final CharArrayDim2 c = new CharArrayDim2();
+ public static char get() { return c.v[0][0]; }
+ public static char[] get1() { return c.v[0]; }
+ public static char[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new char[1][1]; c.v[0][0] = 'a'; char val1 = get();
+ c.v[0][0] = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+
+ c.v = new char[1][1]; c.v[0][0] = 'c'; char val3 = get();
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+
+ c.v[0] = new char[1]; c.v[0][0] = 'd'; char val4 = get();
+ assertEquals(val4, (isStableEnabled ? 'a' : 'd'));
+ }
+
+ {
+ c.v = new char[1][1]; char[] val1 = get1();
+ c.v[0] = new char[1]; char[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1]; char[][] val1 = get2();
+ c.v = new char[1][1]; char[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class CharArrayDim3 {
+ public @Stable char[][][] v;
+
+ public static final CharArrayDim3 c = new CharArrayDim3();
+ public static char get() { return c.v[0][0][0]; }
+ public static char[] get1() { return c.v[0][0]; }
+ public static char[][] get2() { return c.v[0]; }
+ public static char[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new char[1][1][1]; c.v[0][0][0] = 'a'; char val1 = get();
+ c.v[0][0][0] = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+
+ c.v = new char[1][1][1]; c.v[0][0][0] = 'c'; char val3 = get();
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+
+ c.v[0] = new char[1][1]; c.v[0][0][0] = 'd'; char val4 = get();
+ assertEquals(val4, (isStableEnabled ? 'a' : 'd'));
+
+ c.v[0][0] = new char[1]; c.v[0][0][0] = 'e'; char val5 = get();
+ assertEquals(val5, (isStableEnabled ? 'a' : 'e'));
+ }
+
+ {
+ c.v = new char[1][1][1]; char[] val1 = get1();
+ c.v[0][0] = new char[1]; char[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1][1]; char[][] val1 = get2();
+ c.v[0] = new char[1][1]; char[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1][1]; char[][][] val1 = get3();
+ c.v = new char[1][1][1]; char[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class CharArrayDim4 {
+ public @Stable char[][][][] v;
+
+ public static final CharArrayDim4 c = new CharArrayDim4();
+ public static char get() { return c.v[0][0][0][0]; }
+ public static char[] get1() { return c.v[0][0][0]; }
+ public static char[][] get2() { return c.v[0][0]; }
+ public static char[][][] get3() { return c.v[0]; }
+ public static char[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new char[1][1][1][1]; c.v[0][0][0][0] = 'a'; char val1 = get();
+ c.v[0][0][0][0] = 'b'; char val2 = get();
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+
+ c.v = new char[1][1][1][1]; c.v[0][0][0][0] = 'c'; char val3 = get();
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+
+ c.v[0] = new char[1][1][1]; c.v[0][0][0][0] = 'd'; char val4 = get();
+ assertEquals(val4, (isStableEnabled ? 'a' : 'd'));
+
+ c.v[0][0] = new char[1][1]; c.v[0][0][0][0] = 'e'; char val5 = get();
+ assertEquals(val5, (isStableEnabled ? 'a' : 'e'));
+
+ c.v[0][0][0] = new char[1]; c.v[0][0][0][0] = 'f'; char val6 = get();
+ assertEquals(val6, (isStableEnabled ? 'a' : 'f'));
+ }
+
+ {
+ c.v = new char[1][1][1][1]; char[] val1 = get1();
+ c.v[0][0][0] = new char[1]; char[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1][1][1]; char[][] val1 = get2();
+ c.v[0][0] = new char[1][1]; char[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1][1][1]; char[][][] val1 = get3();
+ c.v[0] = new char[1][1][1]; char[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1][1][1]; char[][][][] val1 = get4();
+ c.v = new char[1][1][1][1]; char[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+ // The runtime array has more dimensions than the field's static type
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static char get() { return ((char[])c.v)[0]; }
+ public static char[] get1() { return (char[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new char[1]; ((char[])c.v)[0] = 'a'; char val1 = get();
+ ((char[])c.v)[0] = 'b'; char val2 = get();
+
+ assertEquals(val1, 'a');
+ assertEquals(val2, 'b');
+ }
+
+ {
+ c.v = new char[1]; char[] val1 = get1();
+ c.v = new char[1]; char[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static char get() { return ((char[][])c.v)[0][0]; }
+ public static char[] get1() { return (char[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new char[1][1]; ((char[][])c.v)[0][0] = 'a'; char val1 = get();
+ ((char[][])c.v)[0][0] = 'b'; char val2 = get();
+
+ assertEquals(val1, 'a');
+ assertEquals(val2, 'b');
+ }
+
+ {
+ c.v = new char[1][1]; c.v[0] = new char[0]; char[] val1 = get1();
+ c.v[0] = new char[0]; char[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[0][0]; Object[] val1 = get2();
+ c.v = new char[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static char get() { return ((char[][][])c.v)[0][0][0]; }
+ public static char[] get1() { return (char[])(c.v[0][0]); }
+ public static char[][] get2() { return (char[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new char[1][1][1]; ((char[][][])c.v)[0][0][0] = 'a'; char val1 = get();
+ ((char[][][])c.v)[0][0][0] = 'b'; char val2 = get();
+
+ assertEquals(val1, 'a');
+ assertEquals(val2, 'b');
+ }
+
+ {
+ c.v = new char[1][1][1]; c.v[0][0] = new char[0]; char[] val1 = get1();
+ c.v[0][0] = new char[0]; char[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[1][1][1]; c.v[0] = new char[0][0]; char[][] val1 = get2();
+ c.v[0] = new char[0][0]; char[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new char[0][0][0]; Object[][] val1 = get3();
+ c.v = new char[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable char a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static char get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 'a'; A val1 = get();
+ c.v.a = 'b'; A val2 = get();
+
+ assertEquals(val1.a, 'b');
+ assertEquals(val2.a, 'b');
+ }
+
+ {
+ c.v = new A(); c.v.a = 'a'; char val1 = get1();
+ c.v.a = 'b'; char val2 = get1();
+ c.v = new A(); c.v.a = 'c'; char val3 = get1();
+
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
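+ // get() follows the @Stable 'next' link seven times around a cyclic
+ // list (the cycle is closed by pointing 'next' back at an earlier
+ // node), so every hop can be folded once observed non-null.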
+ static class NestedStableField1 {
+ static class A {
+ public @Stable char a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static char get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 'a'; c.v.next.a = 'a'; A val1 = get();
+ c.v.a = 'b'; c.v.next.a = 'b'; A val2 = get();
+
+ assertEquals(val1.a, 'b');
+ assertEquals(val2.a, 'b');
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 'a'; char val1 = get1();
+ c.v.a = 'b'; char val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 'c'; char val3 = get1();
+
+ assertEquals(val1, 'a');
+ assertEquals(val2, (isStableEnabled ? 'a' : 'b'));
+ assertEquals(val3, (isStableEnabled ? 'a' : 'c'));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
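+ // 'left' is @Stable but 'right' is not: get() uses only stable links
+ // and may fold, while get1() passes through 'right' once, which breaks
+ // the constant chain, so get1() always observes the latest 'a'.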
+ static class NestedStableField2 {
+ static class A {
+ public @Stable char a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static char get() { return c.v.left.left.left.a; }
+ public static char get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 'a'; char val1 = get(); char val2 = get1();
+ c.v.a = 'b'; char val3 = get(); char val4 = get1();
+
+ assertEquals(val1, 'a');
+ assertEquals(val3, (isStableEnabled ? 'a' : 'b'));
+
+ assertEquals(val2, 'a');
+ assertEquals(val4, 'b');
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable char a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static char get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static char get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 'a'; char val1 = get(); char val2 = get1();
+ elem.a = 'b'; char val3 = get(); char val4 = get1();
+
+ assertEquals(val1, 'a');
+ assertEquals(val3, (isStableEnabled ? 'a' : 'b'));
+
+ assertEquals(val2, 'a');
+ assertEquals(val4, 'b');
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(int i, int j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
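+ // Reflectively invokes the static test() method of each nested class and
+ // unwraps InvocationTargetException so the real failure is reported.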
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableDouble.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableDouble
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableDouble.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableDouble
+ * java/lang/invoke/TestStableDouble$DoubleStable
+ * java/lang/invoke/TestStableDouble$StaticDoubleStable
+ * java/lang/invoke/TestStableDouble$VolatileDoubleStable
+ * java/lang/invoke/TestStableDouble$DoubleArrayDim1
+ * java/lang/invoke/TestStableDouble$DoubleArrayDim2
+ * java/lang/invoke/TestStableDouble$DoubleArrayDim3
+ * java/lang/invoke/TestStableDouble$DoubleArrayDim4
+ * java/lang/invoke/TestStableDouble$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableDouble$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableDouble$NestedStableField
+ * java/lang/invoke/TestStableDouble$NestedStableField$A
+ * java/lang/invoke/TestStableDouble$NestedStableField1
+ * java/lang/invoke/TestStableDouble$NestedStableField1$A
+ * java/lang/invoke/TestStableDouble$NestedStableField2
+ * java/lang/invoke/TestStableDouble$NestedStableField2$A
+ * java/lang/invoke/TestStableDouble$NestedStableField3
+ * java/lang/invoke/TestStableDouble$NestedStableField3$A
+ * java/lang/invoke/TestStableDouble$DefaultValue
+ * java/lang/invoke/TestStableDouble$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableDouble
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableDouble
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableDouble
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableDouble
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableDouble {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(DoubleStable.class);
+ run(StaticDoubleStable.class);
+ run(VolatileDoubleStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(DoubleArrayDim1.class);
+ run(DoubleArrayDim2.class);
+ run(DoubleArrayDim3.class);
+ run(DoubleArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DefaultValue {
+ public @Stable double v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static double get() { return c.v; }
+ public static void test() throws Exception {
+ double val1 = get();
+ c.v = 1.0; double val2 = get();
+ assertEquals(val1, 0);
+ assertEquals(val2, 1.0);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DoubleStable {
+ public @Stable double v;
+
+ public static final DoubleStable c = new DoubleStable();
+ public static double get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1.0; double val1 = get();
+ c.v = Double.MAX_VALUE; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : Double.MAX_VALUE));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticDoubleStable {
+ public static @Stable double v;
+
+ public static final StaticDoubleStable c = new StaticDoubleStable();
+ public static double get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1.0; double val1 = get();
+ c.v = Double.MAX_VALUE; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : Double.MAX_VALUE));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileDoubleStable {
+ public @Stable volatile double v;
+
+ public static final VolatileDoubleStable c = new VolatileDoubleStable();
+ public static double get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1.0; double val1 = get();
+ c.v = Double.MAX_VALUE; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : Double.MAX_VALUE));
+ }
+ }
+
+ /* ==================================================== */
+ // For a @Stable array, both the array field itself and all of its components are treated as stable
+
+ static class DoubleArrayDim1 {
+ public @Stable double[] v;
+
+ public static final DoubleArrayDim1 c = new DoubleArrayDim1();
+ public static double get() { return c.v[0]; }
+ public static double get1() { return c.v[10]; }
+ public static double[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new double[1]; c.v[0] = 1.0; double val1 = get();
+ c.v[0] = 2.0; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+
+ c.v = new double[1]; c.v[0] = 3.0; double val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+ }
+
+ {
+ c.v = new double[20]; c.v[10] = 1.0; double val1 = get1();
+ c.v[10] = 2.0; double val2 = get1();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+
+ c.v = new double[20]; c.v[10] = 3.0; double val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+ }
+
+ {
+ c.v = new double[1]; double[] val1 = get2();
+ c.v = new double[1]; double[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DoubleArrayDim2 {
+ public @Stable double[][] v;
+
+ public static final DoubleArrayDim2 c = new DoubleArrayDim2();
+ public static double get() { return c.v[0][0]; }
+ public static double[] get1() { return c.v[0]; }
+ public static double[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new double[1][1]; c.v[0][0] = 1.0; double val1 = get();
+ c.v[0][0] = 2.0; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+
+ c.v = new double[1][1]; c.v[0][0] = 3.0; double val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+
+ c.v[0] = new double[1]; c.v[0][0] = 4.0; double val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1.0 : 4.0));
+ }
+
+ {
+ c.v = new double[1][1]; double[] val1 = get1();
+ c.v[0] = new double[1]; double[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1]; double[][] val1 = get2();
+ c.v = new double[1][1]; double[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DoubleArrayDim3 {
+ public @Stable double[][][] v;
+
+ public static final DoubleArrayDim3 c = new DoubleArrayDim3();
+ public static double get() { return c.v[0][0][0]; }
+ public static double[] get1() { return c.v[0][0]; }
+ public static double[][] get2() { return c.v[0]; }
+ public static double[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new double[1][1][1]; c.v[0][0][0] = 1.0; double val1 = get();
+ c.v[0][0][0] = 2.0; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+
+ c.v = new double[1][1][1]; c.v[0][0][0] = 3.0; double val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+
+ c.v[0] = new double[1][1]; c.v[0][0][0] = 4.0; double val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1.0 : 4.0));
+
+ c.v[0][0] = new double[1]; c.v[0][0][0] = 5.0; double val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1.0 : 5.0));
+ }
+
+ {
+ c.v = new double[1][1][1]; double[] val1 = get1();
+ c.v[0][0] = new double[1]; double[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1][1]; double[][] val1 = get2();
+ c.v[0] = new double[1][1]; double[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1][1]; double[][][] val1 = get3();
+ c.v = new double[1][1][1]; double[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DoubleArrayDim4 {
+ public @Stable double[][][][] v;
+
+ public static final DoubleArrayDim4 c = new DoubleArrayDim4();
+ public static double get() { return c.v[0][0][0][0]; }
+ public static double[] get1() { return c.v[0][0][0]; }
+ public static double[][] get2() { return c.v[0][0]; }
+ public static double[][][] get3() { return c.v[0]; }
+ public static double[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new double[1][1][1][1]; c.v[0][0][0][0] = 1.0; double val1 = get();
+ c.v[0][0][0][0] = 2.0; double val2 = get();
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+
+ c.v = new double[1][1][1][1]; c.v[0][0][0][0] = 3.0; double val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+
+ c.v[0] = new double[1][1][1]; c.v[0][0][0][0] = 4.0; double val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1.0 : 4.0));
+
+ c.v[0][0] = new double[1][1]; c.v[0][0][0][0] = 5.0; double val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1.0 : 5.0));
+
+ c.v[0][0][0] = new double[1]; c.v[0][0][0][0] = 6.0; double val6 = get();
+ assertEquals(val6, (isStableEnabled ? 1.0 : 6.0));
+ }
+
+ {
+ c.v = new double[1][1][1][1]; double[] val1 = get1();
+ c.v[0][0][0] = new double[1]; double[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1][1][1]; double[][] val1 = get2();
+ c.v[0][0] = new double[1][1]; double[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1][1][1]; double[][][] val1 = get3();
+ c.v[0] = new double[1][1][1]; double[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1][1][1]; double[][][][] val1 = get4();
+ c.v = new double[1][1][1][1]; double[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+ // The runtime array has more dimensions than the field's static type
+
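+ // Folding applies only up to the field's declared array depth, so
+ // element reads beyond it are not folded (val2 is unconditionally 2.0),
+ // while the reference read itself still behaves as stable.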
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static double get() { return ((double[])c.v)[0]; }
+ public static double[] get1() { return (double[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new double[1]; ((double[])c.v)[0] = 1.0; double val1 = get();
+ ((double[])c.v)[0] = 2.0; double val2 = get();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val2, 2.0);
+ }
+
+ {
+ c.v = new double[1]; double[] val1 = get1();
+ c.v = new double[1]; double[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static double get() { return ((double[][])c.v)[0][0]; }
+ public static double[] get1() { return (double[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new double[1][1]; ((double[][])c.v)[0][0] = 1.0; double val1 = get();
+ ((double[][])c.v)[0][0] = 2.0; double val2 = get();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val2, 2.0);
+ }
+
+ {
+ c.v = new double[1][1]; c.v[0] = new double[0]; double[] val1 = get1();
+ c.v[0] = new double[0]; double[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[0][0]; Object[] val1 = get2();
+ c.v = new double[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static double get() { return ((double[][][])c.v)[0][0][0]; }
+ public static double[] get1() { return (double[])(c.v[0][0]); }
+ public static double[][] get2() { return (double[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new double[1][1][1]; ((double[][][])c.v)[0][0][0] = 1.0; double val1 = get();
+ ((double[][][])c.v)[0][0][0] = 2.0; double val2 = get();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val2, 2.0);
+ }
+
+ {
+ c.v = new double[1][1][1]; c.v[0][0] = new double[0]; double[] val1 = get1();
+ c.v[0][0] = new double[0]; double[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[1][1][1]; c.v[0] = new double[0][0]; double[][] val1 = get2();
+ c.v[0] = new double[0][0]; double[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new double[0][0][0]; Object[][] val1 = get3();
+ c.v = new double[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable double a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static double get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 1.0; A val1 = get();
+ c.v.a = 2.0; A val2 = get();
+
+ assertEquals(val1.a, 2.0);
+ assertEquals(val2.a, 2.0);
+ }
+
+ {
+ c.v = new A(); c.v.a = 1.0; double val1 = get1();
+ c.v.a = 2.0; double val2 = get1();
+ c.v = new A(); c.v.a = 3.0; double val3 = get1();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable double a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static double get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 1.0; c.v.next.a = 1.0; A val1 = get();
+ c.v.a = 2.0; c.v.next.a = 2.0; A val2 = get();
+
+ assertEquals(val1.a, 2.0);
+ assertEquals(val2.a, 2.0);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 1.0; double val1 = get1();
+ c.v.a = 2.0; double val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 3.0; double val3 = get1();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val2, (isStableEnabled ? 1.0 : 2.0));
+ assertEquals(val3, (isStableEnabled ? 1.0 : 3.0));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable double a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static double get() { return c.v.left.left.left.a; }
+ public static double get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 1.0; double val1 = get(); double val2 = get1();
+ c.v.a = 2.0; double val3 = get(); double val4 = get1();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val3, (isStableEnabled ? 1.0 : 2.0));
+
+ assertEquals(val2, 1.0);
+ assertEquals(val4, 2.0);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable double a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static double get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static double get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 1.0; double val1 = get(); double val2 = get1();
+ elem.a = 2.0; double val3 = get(); double val4 = get1();
+
+ assertEquals(val1, 1.0);
+ assertEquals(val3, (isStableEnabled ? 1.0 : 2.0));
+
+ assertEquals(val2, 1.0);
+ assertEquals(val4, 2.0);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(double i, double j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
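+ // Queries the FoldStableValues diagnostic flag through the HotSpot MXBean.
+ // getVMOption() throws IllegalArgumentException on VMs that do not define
+ // the flag, in which case folding is conservatively treated as disabled.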
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableFloat.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableFloat
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableFloat.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableFloat
+ * java/lang/invoke/TestStableFloat$FloatStable
+ * java/lang/invoke/TestStableFloat$StaticFloatStable
+ * java/lang/invoke/TestStableFloat$VolatileFloatStable
+ * java/lang/invoke/TestStableFloat$FloatArrayDim1
+ * java/lang/invoke/TestStableFloat$FloatArrayDim2
+ * java/lang/invoke/TestStableFloat$FloatArrayDim3
+ * java/lang/invoke/TestStableFloat$FloatArrayDim4
+ * java/lang/invoke/TestStableFloat$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableFloat$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableFloat$NestedStableField
+ * java/lang/invoke/TestStableFloat$NestedStableField$A
+ * java/lang/invoke/TestStableFloat$NestedStableField1
+ * java/lang/invoke/TestStableFloat$NestedStableField1$A
+ * java/lang/invoke/TestStableFloat$NestedStableField2
+ * java/lang/invoke/TestStableFloat$NestedStableField2$A
+ * java/lang/invoke/TestStableFloat$NestedStableField3
+ * java/lang/invoke/TestStableFloat$NestedStableField3$A
+ * java/lang/invoke/TestStableFloat$DefaultValue
+ * java/lang/invoke/TestStableFloat$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableFloat
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableFloat
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableFloat
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableFloat
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
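+ // The @run lines above combine -Xcomp with -XX:CompileOnly=::get* so the
+ // getters are JIT-compiled before their first invocation, making any
+ // stable-value folding observable from the very first read.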
+public class TestStableFloat {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(FloatStable.class);
+ run(StaticFloatStable.class);
+ run(VolatileFloatStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(FloatArrayDim1.class);
+ run(FloatArrayDim2.class);
+ run(FloatArrayDim3.class);
+ run(FloatArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DefaultValue {
+ public @Stable float v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static float get() { return c.v; }
+ public static void test() throws Exception {
+ float val1 = get();
+ c.v = 1.0F; float val2 = get();
+ assertEquals(val1, 0F);
+ assertEquals(val2, 1.0F);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class FloatStable {
+ public @Stable float v;
+
+ public static final FloatStable c = new FloatStable();
+ public static float get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1.0F; float val1 = get();
+ c.v = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticFloatStable {
+ public static @Stable float v;
+
+ public static final StaticFloatStable c = new StaticFloatStable();
+ public static float get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1.0F; float val1 = get();
+ c.v = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileFloatStable {
+ public @Stable volatile float v;
+
+ public static final VolatileFloatStable c = new VolatileFloatStable();
+ public static float get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1.0F; float val1 = get();
+ c.v = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+ }
+ }
+
+ /* ==================================================== */
+ // For a @Stable array, both the array field itself and all of its components are treated as stable
+
+ static class FloatArrayDim1 {
+ public @Stable float[] v;
+
+ public static final FloatArrayDim1 c = new FloatArrayDim1();
+ public static float get() { return c.v[0]; }
+ public static float get1() { return c.v[10]; }
+ public static float[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new float[1]; c.v[0] = 1.0F; float val1 = get();
+ c.v[0] = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+
+ c.v = new float[1]; c.v[0] = 3.0F; float val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+ }
+
+ {
+ c.v = new float[20]; c.v[10] = 1.0F; float val1 = get1();
+ c.v[10] = 2.0F; float val2 = get1();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+
+ c.v = new float[20]; c.v[10] = 3.0F; float val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+ }
+
+ {
+ c.v = new float[1]; float[] val1 = get2();
+ c.v = new float[1]; float[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class FloatArrayDim2 {
+ public @Stable float[][] v;
+
+ public static final FloatArrayDim2 c = new FloatArrayDim2();
+ public static float get() { return c.v[0][0]; }
+ public static float[] get1() { return c.v[0]; }
+ public static float[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new float[1][1]; c.v[0][0] = 1.0F; float val1 = get();
+ c.v[0][0] = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+
+ c.v = new float[1][1]; c.v[0][0] = 3.0F; float val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+
+ c.v[0] = new float[1]; c.v[0][0] = 4.0F; float val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1.0F : 4.0F));
+ }
+
+ {
+ c.v = new float[1][1]; float[] val1 = get1();
+ c.v[0] = new float[1]; float[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1]; float[][] val1 = get2();
+ c.v = new float[1][1]; float[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class FloatArrayDim3 {
+ public @Stable float[][][] v;
+
+ public static final FloatArrayDim3 c = new FloatArrayDim3();
+ public static float get() { return c.v[0][0][0]; }
+ public static float[] get1() { return c.v[0][0]; }
+ public static float[][] get2() { return c.v[0]; }
+ public static float[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new float[1][1][1]; c.v[0][0][0] = 1.0F; float val1 = get();
+ c.v[0][0][0] = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+
+ c.v = new float[1][1][1]; c.v[0][0][0] = 3.0F; float val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+
+ c.v[0] = new float[1][1]; c.v[0][0][0] = 4.0F; float val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1.0F : 4.0F));
+
+ c.v[0][0] = new float[1]; c.v[0][0][0] = 5.0F; float val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1.0F : 5.0F));
+ }
+
+ {
+ c.v = new float[1][1][1]; float[] val1 = get1();
+ c.v[0][0] = new float[1]; float[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1][1]; float[][] val1 = get2();
+ c.v[0] = new float[1][1]; float[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1][1]; float[][][] val1 = get3();
+ c.v = new float[1][1][1]; float[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class FloatArrayDim4 {
+ public @Stable float[][][][] v;
+
+ public static final FloatArrayDim4 c = new FloatArrayDim4();
+ public static float get() { return c.v[0][0][0][0]; }
+ public static float[] get1() { return c.v[0][0][0]; }
+ public static float[][] get2() { return c.v[0][0]; }
+ public static float[][][] get3() { return c.v[0]; }
+ public static float[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new float[1][1][1][1]; c.v[0][0][0][0] = 1.0F; float val1 = get();
+ c.v[0][0][0][0] = 2.0F; float val2 = get();
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+
+ c.v = new float[1][1][1][1]; c.v[0][0][0][0] = 3.0F; float val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+
+ c.v[0] = new float[1][1][1]; c.v[0][0][0][0] = 4.0F; float val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1.0F : 4.0F));
+
+ c.v[0][0] = new float[1][1]; c.v[0][0][0][0] = 5.0F; float val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1.0F : 5.0F));
+
+ c.v[0][0][0] = new float[1]; c.v[0][0][0][0] = 6.0F; float val6 = get();
+ assertEquals(val6, (isStableEnabled ? 1.0F : 6.0F));
+ }
+
+ {
+ c.v = new float[1][1][1][1]; float[] val1 = get1();
+ c.v[0][0][0] = new float[1]; float[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1][1][1]; float[][] val1 = get2();
+ c.v[0][0] = new float[1][1]; float[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1][1][1]; float[][][] val1 = get3();
+ c.v[0] = new float[1][1][1]; float[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1][1][1]; float[][][][] val1 = get4();
+ c.v = new float[1][1][1][1]; float[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+ // The runtime array has more dimensions than the field's static type
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static float get() { return ((float[])c.v)[0]; }
+ public static float[] get1() { return (float[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new float[1]; ((float[])c.v)[0] = 1.0F; float val1 = get();
+ ((float[])c.v)[0] = 2.0F; float val2 = get();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, 2.0F);
+ }
+
+ {
+ c.v = new float[1]; float[] val1 = get1();
+ c.v = new float[1]; float[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static float get() { return ((float[][])c.v)[0][0]; }
+ public static float[] get1() { return (float[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new float[1][1]; ((float[][])c.v)[0][0] = 1.0F; float val1 = get();
+ ((float[][])c.v)[0][0] = 2.0F; float val2 = get();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, 2.0F);
+ }
+
+ {
+ c.v = new float[1][1]; c.v[0] = new float[0]; float[] val1 = get1();
+ c.v[0] = new float[0]; float[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[0][0]; Object[] val1 = get2();
+ c.v = new float[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static float get() { return ((float[][][])c.v)[0][0][0]; }
+ public static float[] get1() { return (float[])(c.v[0][0]); }
+ public static float[][] get2() { return (float[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new float[1][1][1]; ((float[][][])c.v)[0][0][0] = 1.0F; float val1 = get();
+ ((float[][][])c.v)[0][0][0] = 2.0F; float val2 = get();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, 2.0F);
+ }
+
+ {
+ c.v = new float[1][1][1]; c.v[0][0] = new float[0]; float[] val1 = get1();
+ c.v[0][0] = new float[0]; float[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[1][1][1]; c.v[0] = new float[0][0]; float[][] val1 = get2();
+ c.v[0] = new float[0][0]; float[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new float[0][0][0]; Object[][] val1 = get3();
+ c.v = new float[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable float a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static float get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 1.0F; A val1 = get();
+ c.v.a = 2.0F; A val2 = get();
+
+ assertEquals(val1.a, 2.0F);
+ assertEquals(val2.a, 2.0F);
+ }
+
+ {
+ c.v = new A(); c.v.a = 1.0F; float val1 = get1();
+ c.v.a = 2.0F; float val2 = get1();
+ c.v = new A(); c.v.a = 3.0F; float val3 = get1();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable float a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static float get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 1.0F; c.v.next.a = 1.0F; A val1 = get();
+ c.v.a = 2.0F; c.v.next.a = 2.0F; A val2 = get();
+
+ assertEquals(val1.a, 2.0F);
+ assertEquals(val2.a, 2.0F);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 1.0F; float val1 = get1();
+ c.v.a = 2.0F; float val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 3.0F; float val3 = get1();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val2, (isStableEnabled ? 1.0F : 2.0F));
+ assertEquals(val3, (isStableEnabled ? 1.0F : 3.0F));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable float a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static float get() { return c.v.left.left.left.a; }
+ public static float get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 1.0F; float val1 = get(); float val2 = get1();
+ c.v.a = 2.0F; float val3 = get(); float val4 = get1();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val3, (isStableEnabled ? 1.0F : 2.0F));
+
+ assertEquals(val2, 1.0F);
+ assertEquals(val4, 2.0F);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable float a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static float get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static float get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 1.0F; float val1 = get(); float val2 = get1();
+ elem.a = 2.0F; float val3 = get(); float val4 = get1();
+
+ assertEquals(val1, 1.0F);
+ assertEquals(val3, (isStableEnabled ? 1.0F : 2.0F));
+
+ assertEquals(val2, 1.0F);
+ assertEquals(val4, 2.0F);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(float i, float j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableInt.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,638 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableInt
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableInt.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableInt
+ * java/lang/invoke/TestStableInt$IntStable
+ * java/lang/invoke/TestStableInt$StaticIntStable
+ * java/lang/invoke/TestStableInt$VolatileIntStable
+ * java/lang/invoke/TestStableInt$IntArrayDim1
+ * java/lang/invoke/TestStableInt$IntArrayDim2
+ * java/lang/invoke/TestStableInt$IntArrayDim3
+ * java/lang/invoke/TestStableInt$IntArrayDim4
+ * java/lang/invoke/TestStableInt$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableInt$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableInt$NestedStableField
+ * java/lang/invoke/TestStableInt$NestedStableField$A
+ * java/lang/invoke/TestStableInt$NestedStableField1
+ * java/lang/invoke/TestStableInt$NestedStableField1$A
+ * java/lang/invoke/TestStableInt$NestedStableField2
+ * java/lang/invoke/TestStableInt$NestedStableField2$A
+ * java/lang/invoke/TestStableInt$NestedStableField3
+ * java/lang/invoke/TestStableInt$NestedStableField3$A
+ * java/lang/invoke/TestStableInt$DefaultValue
+ * java/lang/invoke/TestStableInt$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableInt
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableInt
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableInt
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableInt
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableInt {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(IntStable.class);
+ run(StaticIntStable.class);
+ run(VolatileIntStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(IntArrayDim1.class);
+ run(IntArrayDim2.class);
+ run(IntArrayDim3.class);
+ run(IntArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
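+    // Per the @Stable contract, a field's default value (0/null) is never
+    // treated as stable, so the write below must be observed even with folding on.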
+ static class DefaultValue {
+ public @Stable int v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static int get() { return c.v; }
+ public static void test() throws Exception {
+ int val1 = get();
+ c.v = 1; int val2 = get();
+ assertEquals(val1, 0);
+ assertEquals(val2, 1);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class IntStable {
+ public @Stable int v;
+
+ public static final IntStable c = new IntStable();
+ public static int get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1; int val1 = get();
+ c.v = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticIntStable {
+ public static @Stable int v;
+
+ public static final StaticIntStable c = new StaticIntStable();
+ public static int get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1; int val1 = get();
+ c.v = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileIntStable {
+ public @Stable volatile int v;
+
+ public static final VolatileIntStable c = new VolatileIntStable();
+ public static int get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1; int val1 = get();
+ c.v = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ }
+ }
+
+ /* ==================================================== */
+    // For a @Stable array, the field itself and all array components are treated as stable
+
+ static class IntArrayDim1 {
+ public @Stable int[] v;
+
+ public static final IntArrayDim1 c = new IntArrayDim1();
+ public static int get() { return c.v[0]; }
+ public static int get1() { return c.v[10]; }
+ public static int[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new int[1]; c.v[0] = 1; int val1 = get();
+ c.v[0] = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new int[1]; c.v[0] = 3; int val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new int[20]; c.v[10] = 1; int val1 = get1();
+ c.v[10] = 2; int val2 = get1();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new int[20]; c.v[10] = 3; int val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new int[1]; int[] val1 = get2();
+ c.v = new int[1]; int[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class IntArrayDim2 {
+ public @Stable int[][] v;
+
+ public static final IntArrayDim2 c = new IntArrayDim2();
+ public static int get() { return c.v[0][0]; }
+ public static int[] get1() { return c.v[0]; }
+ public static int[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new int[1][1]; c.v[0][0] = 1; int val1 = get();
+ c.v[0][0] = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new int[1][1]; c.v[0][0] = 3; int val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new int[1]; c.v[0][0] = 4; int val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+ }
+
+ {
+ c.v = new int[1][1]; int[] val1 = get1();
+ c.v[0] = new int[1]; int[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1]; int[][] val1 = get2();
+ c.v = new int[1][1]; int[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class IntArrayDim3 {
+ public @Stable int[][][] v;
+
+ public static final IntArrayDim3 c = new IntArrayDim3();
+ public static int get() { return c.v[0][0][0]; }
+ public static int[] get1() { return c.v[0][0]; }
+ public static int[][] get2() { return c.v[0]; }
+ public static int[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new int[1][1][1]; c.v[0][0][0] = 1; int val1 = get();
+ c.v[0][0][0] = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new int[1][1][1]; c.v[0][0][0] = 3; int val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new int[1][1]; c.v[0][0][0] = 4; int val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new int[1]; c.v[0][0][0] = 5; int val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+ }
+
+ {
+ c.v = new int[1][1][1]; int[] val1 = get1();
+ c.v[0][0] = new int[1]; int[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1][1]; int[][] val1 = get2();
+ c.v[0] = new int[1][1]; int[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1][1]; int[][][] val1 = get3();
+ c.v = new int[1][1][1]; int[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class IntArrayDim4 {
+ public @Stable int[][][][] v;
+
+ public static final IntArrayDim4 c = new IntArrayDim4();
+ public static int get() { return c.v[0][0][0][0]; }
+ public static int[] get1() { return c.v[0][0][0]; }
+ public static int[][] get2() { return c.v[0][0]; }
+ public static int[][][] get3() { return c.v[0]; }
+ public static int[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new int[1][1][1][1]; c.v[0][0][0][0] = 1; int val1 = get();
+ c.v[0][0][0][0] = 2; int val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new int[1][1][1][1]; c.v[0][0][0][0] = 3; int val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new int[1][1][1]; c.v[0][0][0][0] = 4; int val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new int[1][1]; c.v[0][0][0][0] = 5; int val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+
+ c.v[0][0][0] = new int[1]; c.v[0][0][0][0] = 6; int val6 = get();
+ assertEquals(val6, (isStableEnabled ? 1 : 6));
+ }
+
+ {
+ c.v = new int[1][1][1][1]; int[] val1 = get1();
+ c.v[0][0][0] = new int[1]; int[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1][1][1]; int[][] val1 = get2();
+ c.v[0][0] = new int[1][1]; int[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1][1][1]; int[][][] val1 = get3();
+ c.v[0] = new int[1][1][1]; int[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1][1][1]; int[][][][] val1 = get4();
+ c.v = new int[1][1][1][1]; int[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+    // The array's dynamic dimensionality exceeds its static type; only the declared dimensions fold
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static int get() { return ((int[])c.v)[0]; }
+ public static int[] get1() { return (int[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new int[1]; ((int[])c.v)[0] = 1; int val1 = get();
+ ((int[])c.v)[0] = 2; int val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new int[1]; int[] val1 = get1();
+ c.v = new int[1]; int[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static int get() { return ((int[][])c.v)[0][0]; }
+ public static int[] get1() { return (int[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new int[1][1]; ((int[][])c.v)[0][0] = 1; int val1 = get();
+ ((int[][])c.v)[0][0] = 2; int val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new int[1][1]; c.v[0] = new int[0]; int[] val1 = get1();
+ c.v[0] = new int[0]; int[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[0][0]; Object[] val1 = get2();
+ c.v = new int[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static int get() { return ((int[][][])c.v)[0][0][0]; }
+ public static int[] get1() { return (int[])(c.v[0][0]); }
+ public static int[][] get2() { return (int[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new int[1][1][1]; ((int[][][])c.v)[0][0][0] = 1; int val1 = get();
+ ((int[][][])c.v)[0][0][0] = 2; int val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new int[1][1][1]; c.v[0][0] = new int[0]; int[] val1 = get1();
+ c.v[0][0] = new int[0]; int[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[1][1][1]; c.v[0] = new int[0][0]; int[][] val1 = get2();
+ c.v[0] = new int[0][0]; int[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new int[0][0][0]; Object[][] val1 = get3();
+ c.v = new int[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable int a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static int get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 1; A val1 = get();
+ c.v.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.a = 1; int val1 = get1();
+ c.v.a = 2; int val2 = get1();
+ c.v = new A(); c.v.a = 3; int val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable int a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
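+        // test() wires c.v.next into a cycle, so the seven .next hops below
+        // always land on a node of the cycle instead of hitting null.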
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static int get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 1; c.v.next.a = 1; A val1 = get();
+ c.v.a = 2; c.v.next.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 1; int val1 = get1();
+ c.v.a = 2; int val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 3; int val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable int a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static int get() { return c.v.left.left.left.a; }
+ public static int get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 1; int val1 = get(); int val2 = get1();
+ c.v.a = 2; int val3 = get(); int val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable int a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static int get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static int get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 1; int val1 = get(); int val2 = get1();
+ elem.a = 2; int val3 = get(); int val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(int i, int j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
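+    // Probe the FoldStableValues diagnostic flag to learn whether @Stable
+    // folding is active; a VM that does not know the flag reports it as off.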
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableLong.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableLong
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableLong.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableLong
+ * java/lang/invoke/TestStableLong$LongStable
+ * java/lang/invoke/TestStableLong$StaticLongStable
+ * java/lang/invoke/TestStableLong$VolatileLongStable
+ * java/lang/invoke/TestStableLong$LongArrayDim1
+ * java/lang/invoke/TestStableLong$LongArrayDim2
+ * java/lang/invoke/TestStableLong$LongArrayDim3
+ * java/lang/invoke/TestStableLong$LongArrayDim4
+ * java/lang/invoke/TestStableLong$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableLong$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableLong$NestedStableField
+ * java/lang/invoke/TestStableLong$NestedStableField$A
+ * java/lang/invoke/TestStableLong$NestedStableField1
+ * java/lang/invoke/TestStableLong$NestedStableField1$A
+ * java/lang/invoke/TestStableLong$NestedStableField2
+ * java/lang/invoke/TestStableLong$NestedStableField2$A
+ * java/lang/invoke/TestStableLong$NestedStableField3
+ * java/lang/invoke/TestStableLong$NestedStableField3$A
+ * java/lang/invoke/TestStableLong$DefaultValue
+ * java/lang/invoke/TestStableLong$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableLong
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableLong
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableLong
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableLong
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableLong {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(LongStable.class);
+ run(StaticLongStable.class);
+ run(VolatileLongStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(LongArrayDim1.class);
+ run(LongArrayDim2.class);
+ run(LongArrayDim3.class);
+ run(LongArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DefaultValue {
+ public @Stable long v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static long get() { return c.v; }
+ public static void test() throws Exception {
+ long val1 = get();
+ c.v = 1L; long val2 = get();
+ assertEquals(val1, 0);
+ assertEquals(val2, 1L);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class LongStable {
+ public @Stable long v;
+
+ public static final LongStable c = new LongStable();
+ public static long get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 5; long val1 = get();
+ c.v = Long.MAX_VALUE; long val2 = get();
+ assertEquals(val1, 5);
+ assertEquals(val2, (isStableEnabled ? 5 : Long.MAX_VALUE));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticLongStable {
+ public static @Stable long v;
+
+ public static final StaticLongStable c = new StaticLongStable();
+ public static long get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 5; long val1 = get();
+ c.v = Long.MAX_VALUE; long val2 = get();
+ assertEquals(val1, 5);
+ assertEquals(val2, (isStableEnabled ? 5 : Long.MAX_VALUE));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileLongStable {
+ public @Stable volatile long v;
+
+ public static final VolatileLongStable c = new VolatileLongStable();
+ public static long get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 5; long val1 = get();
+ c.v = Long.MAX_VALUE; long val2 = get();
+ assertEquals(val1, 5);
+ assertEquals(val2, (isStableEnabled ? 5 : Long.MAX_VALUE));
+ }
+ }
+
+ /* ==================================================== */
+    // For a @Stable array, the field itself and all array components are treated as stable
+
+ static class LongArrayDim1 {
+ public @Stable long[] v;
+
+ public static final LongArrayDim1 c = new LongArrayDim1();
+ public static long get() { return c.v[0]; }
+ public static long get1() { return c.v[10]; }
+ public static long[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new long[1]; c.v[0] = 1; long val1 = get();
+ c.v[0] = 2; long val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new long[1]; c.v[0] = 3; long val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new long[20]; c.v[10] = 1; long val1 = get1();
+ c.v[10] = 2; long val2 = get1();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new long[20]; c.v[10] = 3; long val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new long[1]; long[] val1 = get2();
+ c.v = new long[1]; long[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class LongArrayDim2 {
+ public @Stable long[][] v;
+
+ public static final LongArrayDim2 c = new LongArrayDim2();
+ public static long get() { return c.v[0][0]; }
+ public static long[] get1() { return c.v[0]; }
+ public static long[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new long[1][1]; c.v[0][0] = 1; long val1 = get();
+ c.v[0][0] = 2; long val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new long[1][1]; c.v[0][0] = 3; long val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new long[1]; c.v[0][0] = 4; long val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+ }
+
+ {
+ c.v = new long[1][1]; long[] val1 = get1();
+ c.v[0] = new long[1]; long[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1]; long[][] val1 = get2();
+ c.v = new long[1][1]; long[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class LongArrayDim3 {
+ public @Stable long[][][] v;
+
+ public static final LongArrayDim3 c = new LongArrayDim3();
+ public static long get() { return c.v[0][0][0]; }
+ public static long[] get1() { return c.v[0][0]; }
+ public static long[][] get2() { return c.v[0]; }
+ public static long[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new long[1][1][1]; c.v[0][0][0] = 1; long val1 = get();
+ c.v[0][0][0] = 2; long val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new long[1][1][1]; c.v[0][0][0] = 3; long val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new long[1][1]; c.v[0][0][0] = 4; long val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new long[1]; c.v[0][0][0] = 5; long val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+ }
+
+ {
+ c.v = new long[1][1][1]; long[] val1 = get1();
+ c.v[0][0] = new long[1]; long[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1][1]; long[][] val1 = get2();
+ c.v[0] = new long[1][1]; long[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1][1]; long[][][] val1 = get3();
+ c.v = new long[1][1][1]; long[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class LongArrayDim4 {
+ public @Stable long[][][][] v;
+
+ public static final LongArrayDim4 c = new LongArrayDim4();
+ public static long get() { return c.v[0][0][0][0]; }
+ public static long[] get1() { return c.v[0][0][0]; }
+ public static long[][] get2() { return c.v[0][0]; }
+ public static long[][][] get3() { return c.v[0]; }
+ public static long[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new long[1][1][1][1]; c.v[0][0][0][0] = 1; long val1 = get();
+ c.v[0][0][0][0] = 2; long val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new long[1][1][1][1]; c.v[0][0][0][0] = 3; long val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new long[1][1][1]; c.v[0][0][0][0] = 4; long val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new long[1][1]; c.v[0][0][0][0] = 5; long val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+
+ c.v[0][0][0] = new long[1]; c.v[0][0][0][0] = 6; long val6 = get();
+ assertEquals(val6, (isStableEnabled ? 1 : 6));
+ }
+
+ {
+ c.v = new long[1][1][1][1]; long[] val1 = get1();
+ c.v[0][0][0] = new long[1]; long[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1][1][1]; long[][] val1 = get2();
+ c.v[0][0] = new long[1][1]; long[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1][1][1]; long[][][] val1 = get3();
+ c.v[0] = new long[1][1][1]; long[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1][1][1]; long[][][][] val1 = get4();
+ c.v = new long[1][1][1][1]; long[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+    // The array's dynamic dimensionality exceeds its static type; only the declared dimensions fold
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static long get() { return ((long[])c.v)[0]; }
+ public static long[] get1() { return (long[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new long[1]; ((long[])c.v)[0] = 1; long val1 = get();
+ ((long[])c.v)[0] = 2; long val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new long[1]; long[] val1 = get1();
+ c.v = new long[1]; long[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static long get() { return ((long[][])c.v)[0][0]; }
+ public static long[] get1() { return (long[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new long[1][1]; ((long[][])c.v)[0][0] = 1; long val1 = get();
+ ((long[][])c.v)[0][0] = 2; long val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new long[1][1]; c.v[0] = new long[0]; long[] val1 = get1();
+ c.v[0] = new long[0]; long[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[0][0]; Object[] val1 = get2();
+ c.v = new long[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static long get() { return ((long[][][])c.v)[0][0][0]; }
+ public static long[] get1() { return (long[])(c.v[0][0]); }
+ public static long[][] get2() { return (long[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new long[1][1][1]; ((long[][][])c.v)[0][0][0] = 1L; long val1 = get();
+ ((long[][][])c.v)[0][0][0] = 2L; long val2 = get();
+
+ assertEquals(val1, 1L);
+ assertEquals(val2, 2L);
+ }
+
+ {
+ c.v = new long[1][1][1]; c.v[0][0] = new long[0]; long[] val1 = get1();
+ c.v[0][0] = new long[0]; long[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[1][1][1]; c.v[0] = new long[0][0]; long[][] val1 = get2();
+ c.v[0] = new long[0][0]; long[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new long[0][0][0]; Object[][] val1 = get3();
+ c.v = new long[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable long a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static long get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 1; A val1 = get();
+ c.v.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.a = 1; long val1 = get1();
+ c.v.a = 2; long val2 = get1();
+ c.v = new A(); c.v.a = 3; long val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable long a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static long get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 1; c.v.next.a = 1; A val1 = get();
+ c.v.a = 2; c.v.next.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 1; long val1 = get1();
+ c.v.a = 2; long val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 3; long val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable long a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static long get() { return c.v.left.left.left.a; }
+ public static long get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 1; long val1 = get(); long val2 = get1();
+ c.v.a = 2; long val3 = get(); long val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable long a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static long get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static long get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 1; long val1 = get(); long val2 = get1();
+ elem.a = 2; long val3 = get(); long val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(long i, long j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableObject.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableObject
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableObject.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableObject
+ * java/lang/invoke/TestStableObject$ObjectStable
+ * java/lang/invoke/TestStableObject$StaticObjectStable
+ * java/lang/invoke/TestStableObject$VolatileObjectStable
+ * java/lang/invoke/TestStableObject$ObjectArrayDim1
+ * java/lang/invoke/TestStableObject$ObjectArrayDim2
+ * java/lang/invoke/TestStableObject$ObjectArrayDim3
+ * java/lang/invoke/TestStableObject$ObjectArrayDim4
+ * java/lang/invoke/TestStableObject$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableObject$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableObject$NestedStableField
+ * java/lang/invoke/TestStableObject$NestedStableField$A
+ * java/lang/invoke/TestStableObject$NestedStableField1
+ * java/lang/invoke/TestStableObject$NestedStableField1$A
+ * java/lang/invoke/TestStableObject$NestedStableField2
+ * java/lang/invoke/TestStableObject$NestedStableField2$A
+ * java/lang/invoke/TestStableObject$NestedStableField3
+ * java/lang/invoke/TestStableObject$NestedStableField3$A
+ * java/lang/invoke/TestStableObject$Values
+ * java/lang/invoke/TestStableObject$DefaultValue
+ * java/lang/invoke/TestStableObject$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableObject
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableObject
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableObject
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableObject
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableObject {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(ObjectStable.class);
+ run(StaticObjectStable.class);
+ run(VolatileObjectStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(ObjectArrayDim1.class);
+ run(ObjectArrayDim2.class);
+ run(ObjectArrayDim3.class);
+ run(ObjectArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
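+    // Distinct enum constants provide easily distinguishable reference values.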
+ enum Values {A, B, C, D, E, F}
+
+ static class DefaultValue {
+ public @Stable Object v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static Object get() { return c.v; }
+ public static void test() throws Exception {
+ Object val1 = get();
+ c.v = Values.A; Object val2 = get();
+ assertEquals(val1, null);
+ assertEquals(val2, Values.A);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectStable {
+ public @Stable Values v;
+
+        public static final ObjectStable c = new ObjectStable();
+ public static Values get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = Values.A; Values val1 = get();
+ c.v = Values.B; Values val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticObjectStable {
+ public static @Stable Values v;
+
+        public static final StaticObjectStable c = new StaticObjectStable();
+ public static Values get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = Values.A; Values val1 = get();
+ c.v = Values.B; Values val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileObjectStable {
+ public @Stable volatile Values v;
+
+        public static final VolatileObjectStable c = new VolatileObjectStable();
+ public static Values get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = Values.A; Values val1 = get();
+ c.v = Values.B; Values val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+ }
+ }
+
+ /* ==================================================== */
+    // For a @Stable array, the field itself and all array components are treated as stable
+
+ static class ObjectArrayDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayDim1 c = new ObjectArrayDim1();
+ public static Object get() { return c.v[0]; }
+ public static Object get1() { return c.v[10]; }
+ public static Object[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1]; c.v[0] = Values.A; Object val1 = get();
+ c.v[0] = Values.B; Object val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+
+ c.v = new Object[1]; c.v[0] = Values.C; Object val3 = get();
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+ }
+
+ {
+ c.v = new Object[20]; c.v[10] = Values.A; Object val1 = get1();
+ c.v[10] = Values.B; Object val2 = get1();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+
+ c.v = new Object[20]; c.v[10] = Values.C; Object val3 = get1();
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+ }
+
+ {
+ c.v = new Object[1]; Object[] val1 = get2();
+ c.v = new Object[1]; Object[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayDim2 c = new ObjectArrayDim2();
+ public static Object get() { return c.v[0][0]; }
+ public static Object[] get1() { return c.v[0]; }
+ public static Object[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1][1]; c.v[0][0] = Values.A; Object val1 = get();
+ c.v[0][0] = Values.B; Object val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+
+ c.v = new Object[1][1]; c.v[0][0] = Values.C; Object val3 = get();
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+
+ c.v[0] = new Object[1]; c.v[0][0] = Values.D; Object val4 = get();
+ assertEquals(val4, (isStableEnabled ? Values.A : Values.D));
+ }
+
+ {
+ c.v = new Object[1][1]; Object[] val1 = get1();
+ c.v[0] = new Object[1]; Object[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1]; Object[][] val1 = get2();
+ c.v = new Object[1][1]; Object[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayDim3 {
+ public @Stable Object[][][] v;
+
+ public static final ObjectArrayDim3 c = new ObjectArrayDim3();
+ public static Object get() { return c.v[0][0][0]; }
+ public static Object[] get1() { return c.v[0][0]; }
+ public static Object[][] get2() { return c.v[0]; }
+ public static Object[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1][1][1]; c.v[0][0][0] = Values.A; Object val1 = get();
+ c.v[0][0][0] = Values.B; Object val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+
+ c.v = new Object[1][1][1]; c.v[0][0][0] = Values.C; Object val3 = get();
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+
+ c.v[0] = new Object[1][1]; c.v[0][0][0] = Values.D; Object val4 = get();
+ assertEquals(val4, (isStableEnabled ? Values.A : Values.D));
+
+ c.v[0][0] = new Object[1]; c.v[0][0][0] = Values.E; Object val5 = get();
+ assertEquals(val5, (isStableEnabled ? Values.A : Values.E));
+ }
+
+ {
+ c.v = new Object[1][1][1]; Object[] val1 = get1();
+ c.v[0][0] = new Object[1]; Object[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1][1]; Object[][] val1 = get2();
+ c.v[0] = new Object[1][1]; Object[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1][1]; Object[][][] val1 = get3();
+ c.v = new Object[1][1][1]; Object[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayDim4 {
+ public @Stable Object[][][][] v;
+
+ public static final ObjectArrayDim4 c = new ObjectArrayDim4();
+ public static Object get() { return c.v[0][0][0][0]; }
+ public static Object[] get1() { return c.v[0][0][0]; }
+ public static Object[][] get2() { return c.v[0][0]; }
+ public static Object[][][] get3() { return c.v[0]; }
+ public static Object[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1][1][1][1]; c.v[0][0][0][0] = Values.A; Object val1 = get();
+ c.v[0][0][0][0] = Values.B; Object val2 = get();
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+
+ c.v = new Object[1][1][1][1]; c.v[0][0][0][0] = Values.C; Object val3 = get();
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+
+ c.v[0] = new Object[1][1][1]; c.v[0][0][0][0] = Values.D; Object val4 = get();
+ assertEquals(val4, (isStableEnabled ? Values.A : Values.D));
+
+ c.v[0][0] = new Object[1][1]; c.v[0][0][0][0] = Values.E; Object val5 = get();
+ assertEquals(val5, (isStableEnabled ? Values.A : Values.E));
+
+ c.v[0][0][0] = new Object[1]; c.v[0][0][0][0] = Values.F; Object val6 = get();
+ assertEquals(val6, (isStableEnabled ? Values.A : Values.F));
+ }
+
+ {
+ c.v = new Object[1][1][1][1]; Object[] val1 = get1();
+ c.v[0][0][0] = new Object[1]; Object[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1][1][1]; Object[][] val1 = get2();
+ c.v[0][0] = new Object[1][1]; Object[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1][1][1]; Object[][][] val1 = get3();
+ c.v[0] = new Object[1][1][1]; Object[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1][1][1]; Object[][][][] val1 = get4();
+ c.v = new Object[1][1][1][1]; Object[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+    // The array's dynamic dimensionality exceeds its static type; only the declared dimensions fold
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static Object get() { return ((Object[])c.v)[0]; }
+ public static Object[] get1() { return (Object[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1]; ((Object[])c.v)[0] = Values.A; Object val1 = get();
+ ((Object[])c.v)[0] = Values.B; Object val2 = get();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val2, Values.B);
+ }
+
+ {
+ c.v = new Object[1]; Object[] val1 = get1();
+ c.v = new Object[1]; Object[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static Object get() { return ((Object[][])c.v)[0][0]; }
+ public static Object[] get1() { return (Object[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1][1]; ((Object[][])c.v)[0][0] = Values.A; Object val1 = get();
+ ((Object[][])c.v)[0][0] = Values.B; Object val2 = get();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val2, Values.B);
+ }
+
+ {
+ c.v = new Object[1][1]; c.v[0] = new Object[0]; Object[] val1 = get1();
+ c.v[0] = new Object[0]; Object[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[0][0]; Object[] val1 = get2();
+ c.v = new Object[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static Object get() { return ((Object[][][])c.v)[0][0][0]; }
+ public static Object[] get1() { return (Object[])(c.v[0][0]); }
+ public static Object[][] get2() { return (Object[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new Object[1][1][1]; ((Object[][][])c.v)[0][0][0] = Values.A; Object val1 = get();
+ ((Object[][][])c.v)[0][0][0] = Values.B; Object val2 = get();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val2, Values.B);
+ }
+
+ {
+ c.v = new Object[1][1][1]; c.v[0][0] = new Object[0]; Object[] val1 = get1();
+ c.v[0][0] = new Object[0]; Object[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[1][1][1]; c.v[0] = new Object[0][0]; Object[][] val1 = get2();
+ c.v[0] = new Object[0][0]; Object[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new Object[0][0][0]; Object[][] val1 = get3();
+ c.v = new Object[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
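+ // Note: get() returns the A instance itself, so val1 and val2 below alias
+ // the same object and both observe the latest store to a; only values read
+ // through the (possibly folded) chain of @Stable fields can appear stale.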
+ static class NestedStableField {
+ static class A {
+ public @Stable Object a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static Object get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = Values.A; A val1 = get();
+ c.v.a = Values.B; A val2 = get();
+
+ assertEquals(val1.a, Values.B);
+ assertEquals(val2.a, Values.B);
+ }
+
+ {
+ c.v = new A(); c.v.a = Values.A; Object val1 = get1();
+ c.v.a = Values.B; Object val2 = get1();
+ c.v = new A(); c.v.a = Values.C; Object val3 = get1();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable Object a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static Object get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = Values.A; c.v.next.a = Values.A; A val1 = get();
+ c.v.a = Values.B; c.v.next.a = Values.B; A val2 = get();
+
+ assertEquals(val1.a, Values.B);
+ assertEquals(val2.a, Values.B);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = Values.A; Object val1 = get1();
+ c.v.a = Values.B; Object val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = Values.C; Object val3 = get1();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val2, (isStableEnabled ? Values.A : Values.B));
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.C));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable Object a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static Object get() { return c.v.left.left.left.a; }
+ public static Object get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = Values.A; Object val1 = get(); Object val2 = get1();
+ c.v.a = Values.B; Object val3 = get(); Object val4 = get1();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.B));
+
+ assertEquals(val2, Values.A);
+ assertEquals(val4, Values.B);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable Object a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static Object get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static Object get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = Values.A; Object val1 = get(); Object val2 = get1();
+ elem.a = Values.B; Object val3 = get(); Object val4 = get1();
+
+ assertEquals(val1, Values.A);
+ assertEquals(val3, (isStableEnabled ? Values.A : Values.B));
+
+ assertEquals(val2, Values.A);
+ assertEquals(val4, Values.B);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(Object i, Object j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
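+ // FoldStableValues is a diagnostic VM option; on a VM that does not define
+ // it, getVMOption() throws IllegalArgumentException and folding is
+ // conservatively assumed to be disabled.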
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/stable/TestStableShort.java Fri Mar 14 09:26:27 2014 +0100
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStableShort
+ * @summary tests on stable fields and arrays
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file TestStableShort.java
+ * @run main ClassFileInstaller
+ * java/lang/invoke/TestStableShort
+ * java/lang/invoke/TestStableShort$ShortStable
+ * java/lang/invoke/TestStableShort$StaticShortStable
+ * java/lang/invoke/TestStableShort$VolatileShortStable
+ * java/lang/invoke/TestStableShort$ShortArrayDim1
+ * java/lang/invoke/TestStableShort$ShortArrayDim2
+ * java/lang/invoke/TestStableShort$ShortArrayDim3
+ * java/lang/invoke/TestStableShort$ShortArrayDim4
+ * java/lang/invoke/TestStableShort$ObjectArrayLowerDim0
+ * java/lang/invoke/TestStableShort$ObjectArrayLowerDim1
+ * java/lang/invoke/TestStableShort$NestedStableField
+ * java/lang/invoke/TestStableShort$NestedStableField$A
+ * java/lang/invoke/TestStableShort$NestedStableField1
+ * java/lang/invoke/TestStableShort$NestedStableField1$A
+ * java/lang/invoke/TestStableShort$NestedStableField2
+ * java/lang/invoke/TestStableShort$NestedStableField2$A
+ * java/lang/invoke/TestStableShort$NestedStableField3
+ * java/lang/invoke/TestStableShort$NestedStableField3$A
+ * java/lang/invoke/TestStableShort$DefaultValue
+ * java/lang/invoke/TestStableShort$ObjectArrayLowerDim2
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableShort
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:+FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableShort
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:+UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableShort
+ *
+ * @run main/othervm -Xbootclasspath/a:. -XX:+IgnoreUnrecognizedVMOptions
+ * -XX:+UnlockDiagnosticVMOptions -XX:-FoldStableValues -XX:-UseCompressedOops
+ * -server -XX:-TieredCompilation -Xcomp
+ * -XX:CompileOnly=::get,::get1,::get2,::get3,::get4
+ * java.lang.invoke.TestStableShort
+ */
+package java.lang.invoke;
+
+import com.sun.management.HotSpotDiagnosticMXBean;
+import com.sun.management.VMOption;
+import sun.management.ManagementFactoryHelper;
+import java.lang.reflect.InvocationTargetException;
+
+public class TestStableShort {
+ public static void main(String[] args) throws Exception {
+ System.out.println("@Stable enabled: "+isStableEnabled);
+ System.out.println();
+
+ run(DefaultValue.class);
+ run(ShortStable.class);
+ run(StaticShortStable.class);
+ run(VolatileShortStable.class);
+
+ // @Stable arrays: Dim 1-4
+ run(ShortArrayDim1.class);
+ run(ShortArrayDim2.class);
+ run(ShortArrayDim3.class);
+ run(ShortArrayDim4.class);
+
+ // @Stable Object field: dynamic arrays
+ run(ObjectArrayLowerDim0.class);
+ run(ObjectArrayLowerDim1.class);
+ run(ObjectArrayLowerDim2.class);
+
+ // Nested @Stable fields
+ run(NestedStableField.class);
+ run(NestedStableField1.class);
+ run(NestedStableField2.class);
+ run(NestedStableField3.class);
+
+ if (failed) {
+ throw new Error("TEST FAILED");
+ }
+ }
+
+ /* ==================================================== */
+
+ static class DefaultValue {
+ public @Stable short v;
+
+ public static final DefaultValue c = new DefaultValue();
+ public static short get() { return c.v; }
+ public static void test() throws Exception {
+ short val1 = get();
+ c.v = 1; short val2 = get();
+ assertEquals(val1, 0);
+ assertEquals(val2, 1);
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ShortStable {
+ public @Stable short v;
+
+ public static final ShortStable c = new ShortStable();
+ public static short get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1; short val1 = get();
+ c.v = 32767; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 32767));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class StaticShortStable {
+ public static @Stable short v;
+
+ public static final StaticShortStable c = new StaticShortStable();
+ public static short get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1; short val1 = get();
+ c.v = 32767; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 32767));
+ }
+ }
+
+ /* ==================================================== */
+
+ static class VolatileShortStable {
+ public @Stable volatile short v;
+
+ public static final VolatileShortStable c = new VolatileShortStable();
+ public static short get() { return c.v; }
+ public static void test() throws Exception {
+ c.v = 1; short val1 = get();
+ c.v = 32767; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 32767));
+ }
+ }
+
+ /* ==================================================== */
+ // @Stable array: both the field itself and all array components are stable
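+ // (with -XX:+FoldStableValues the JIT may constant-fold both the array
+ // reference and element reads once a non-default value has been observed,
+ // hence the isStableEnabled branches in the assertions below)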
+
+ static class ShortArrayDim1 {
+ public @Stable short[] v;
+
+ public static final ShortArrayDim1 c = new ShortArrayDim1();
+ public static short get() { return c.v[0]; }
+ public static short get1() { return c.v[10]; }
+ public static short[] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new short[1]; c.v[0] = 1; short val1 = get();
+ c.v[0] = 2; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new short[1]; c.v[0] = 3; short val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new short[20]; c.v[10] = 1; short val1 = get1();
+ c.v[10] = 2; short val2 = get1();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new short[20]; c.v[10] = 3; short val3 = get1();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+
+ {
+ c.v = new short[1]; short[] val1 = get2();
+ c.v = new short[1]; short[] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ShortArrayDim2 {
+ public @Stable short[][] v;
+
+ public static final ShortArrayDim2 c = new ShortArrayDim2();
+ public static short get() { return c.v[0][0]; }
+ public static short[] get1() { return c.v[0]; }
+ public static short[][] get2() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new short[1][1]; c.v[0][0] = 1; short val1 = get();
+ c.v[0][0] = 2; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new short[1][1]; c.v[0][0] = 3; short val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new short[1]; c.v[0][0] = 4; short val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+ }
+
+ {
+ c.v = new short[1][1]; short[] val1 = get1();
+ c.v[0] = new short[1]; short[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1]; short[][] val1 = get2();
+ c.v = new short[1][1]; short[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ShortArrayDim3 {
+ public @Stable short[][][] v;
+
+ public static final ShortArrayDim3 c = new ShortArrayDim3();
+ public static short get() { return c.v[0][0][0]; }
+ public static short[] get1() { return c.v[0][0]; }
+ public static short[][] get2() { return c.v[0]; }
+ public static short[][][] get3() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new short[1][1][1]; c.v[0][0][0] = 1; short val1 = get();
+ c.v[0][0][0] = 2; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new short[1][1][1]; c.v[0][0][0] = 3; short val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new short[1][1]; c.v[0][0][0] = 4; short val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new short[1]; c.v[0][0][0] = 5; short val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+ }
+
+ {
+ c.v = new short[1][1][1]; short[] val1 = get1();
+ c.v[0][0] = new short[1]; short[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1][1]; short[][] val1 = get2();
+ c.v[0] = new short[1][1]; short[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1][1]; short[][][] val1 = get3();
+ c.v = new short[1][1][1]; short[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ShortArrayDim4 {
+ public @Stable short[][][][] v;
+
+ public static final ShortArrayDim4 c = new ShortArrayDim4();
+ public static short get() { return c.v[0][0][0][0]; }
+ public static short[] get1() { return c.v[0][0][0]; }
+ public static short[][] get2() { return c.v[0][0]; }
+ public static short[][][] get3() { return c.v[0]; }
+ public static short[][][][] get4() { return c.v; }
+ public static void test() throws Exception {
+ {
+ c.v = new short[1][1][1][1]; c.v[0][0][0][0] = 1; short val1 = get();
+ c.v[0][0][0][0] = 2; short val2 = get();
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+
+ c.v = new short[1][1][1][1]; c.v[0][0][0][0] = 3; short val3 = get();
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+
+ c.v[0] = new short[1][1][1]; c.v[0][0][0][0] = 4; short val4 = get();
+ assertEquals(val4, (isStableEnabled ? 1 : 4));
+
+ c.v[0][0] = new short[1][1]; c.v[0][0][0][0] = 5; short val5 = get();
+ assertEquals(val5, (isStableEnabled ? 1 : 5));
+
+ c.v[0][0][0] = new short[1]; c.v[0][0][0][0] = 6; short val6 = get();
+ assertEquals(val6, (isStableEnabled ? 1 : 6));
+ }
+
+ {
+ c.v = new short[1][1][1][1]; short[] val1 = get1();
+ c.v[0][0][0] = new short[1]; short[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1][1][1]; short[][] val1 = get2();
+ c.v[0][0] = new short[1][1]; short[][] val2 = get2();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1][1][1]; short[][][] val1 = get3();
+ c.v[0] = new short[1][1][1]; short[][][] val2 = get3();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1][1][1]; short[][][][] val1 = get4();
+ c.v = new short[1][1][1][1]; short[][][][] val2 = get4();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ }
+ }
+
+ /* ==================================================== */
+ // Dynamic dimensionality is higher than the static type declares
+
+ static class ObjectArrayLowerDim0 {
+ public @Stable Object v;
+
+ public static final ObjectArrayLowerDim0 c = new ObjectArrayLowerDim0();
+ public static short get() { return ((short[])c.v)[0]; }
+ public static short[] get1() { return (short[])c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new short[1]; ((short[])c.v)[0] = 1; short val1 = get();
+ ((short[])c.v)[0] = 2; short val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new short[1]; short[] val1 = get1();
+ c.v = new short[1]; short[] val2 = get1();
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim1 {
+ public @Stable Object[] v;
+
+ public static final ObjectArrayLowerDim1 c = new ObjectArrayLowerDim1();
+ public static short get() { return ((short[][])c.v)[0][0]; }
+ public static short[] get1() { return (short[])(c.v[0]); }
+ public static Object[] get2() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new short[1][1]; ((short[][])c.v)[0][0] = 1; short val1 = get();
+ ((short[][])c.v)[0][0] = 2; short val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new short[1][1]; c.v[0] = new short[0]; short[] val1 = get1();
+ c.v[0] = new short[0]; short[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[0][0]; Object[] val1 = get2();
+ c.v = new short[0][0]; Object[] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class ObjectArrayLowerDim2 {
+ public @Stable Object[][] v;
+
+ public static final ObjectArrayLowerDim2 c = new ObjectArrayLowerDim2();
+ public static short get() { return ((short[][][])c.v)[0][0][0]; }
+ public static short[] get1() { return (short[])(c.v[0][0]); }
+ public static short[][] get2() { return (short[][])(c.v[0]); }
+ public static Object[][] get3() { return c.v; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new short[1][1][1]; ((short[][][])c.v)[0][0][0] = 1; short val1 = get();
+ ((short[][][])c.v)[0][0][0] = 2; short val2 = get();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, 2);
+ }
+
+ {
+ c.v = new short[1][1][1]; c.v[0][0] = new short[0]; short[] val1 = get1();
+ c.v[0][0] = new short[0]; short[] val2 = get1();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[1][1][1]; c.v[0] = new short[0][0]; short[][] val1 = get2();
+ c.v[0] = new short[0][0]; short[][] val2 = get2();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+
+ {
+ c.v = new short[0][0][0]; Object[][] val1 = get3();
+ c.v = new short[0][0][0]; Object[][] val2 = get3();
+
+ assertTrue((isStableEnabled ? (val1 == val2) : (val1 != val2)));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField {
+ static class A {
+ public @Stable short a;
+
+ }
+ public @Stable A v;
+
+ public static final NestedStableField c = new NestedStableField();
+ public static A get() { return c.v; }
+ public static short get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.a = 1; A val1 = get();
+ c.v.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.a = 1; short val1 = get1();
+ c.v.a = 2; short val2 = get1();
+ c.v = new A(); c.v.a = 3; short val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField1 {
+ static class A {
+ public @Stable short a;
+ public @Stable A next;
+ }
+ public @Stable A v;
+
+ public static final NestedStableField1 c = new NestedStableField1();
+ public static A get() { return c.v.next.next.next.next.next.next.next; }
+ public static short get1() { return get().a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.next = new A(); c.v.next.next = c.v;
+ c.v.a = 1; c.v.next.a = 1; A val1 = get();
+ c.v.a = 2; c.v.next.a = 2; A val2 = get();
+
+ assertEquals(val1.a, 2);
+ assertEquals(val2.a, 2);
+ }
+
+ {
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 1; short val1 = get1();
+ c.v.a = 2; short val2 = get1();
+ c.v = new A(); c.v.next = c.v;
+ c.v.a = 3; short val3 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val2, (isStableEnabled ? 1 : 2));
+ assertEquals(val3, (isStableEnabled ? 1 : 3));
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField2 {
+ static class A {
+ public @Stable short a;
+ public @Stable A left;
+ public A right;
+ }
+
+ public @Stable A v;
+
+ public static final NestedStableField2 c = new NestedStableField2();
+ public static short get() { return c.v.left.left.left.a; }
+ public static short get1() { return c.v.left.left.right.left.a; }
+
+ public static void test() throws Exception {
+ {
+ c.v = new A(); c.v.left = c.v.right = c.v;
+ c.v.a = 1; short val1 = get(); short val2 = get1();
+ c.v.a = 2; short val3 = get(); short val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+
+ static class NestedStableField3 {
+ static class A {
+ public @Stable short a;
+ public @Stable A[] left;
+ public A[] right;
+ }
+
+ public @Stable A[] v;
+
+ public static final NestedStableField3 c = new NestedStableField3();
+ public static short get() { return c.v[0].left[1].left[0].left[1].a; }
+ public static short get1() { return c.v[1].left[0].left[1].right[0].left[1].a; }
+
+ public static void test() throws Exception {
+ {
+ A elem = new A();
+ c.v = new A[] { elem, elem }; c.v[0].left = c.v[0].right = c.v;
+ elem.a = 1; short val1 = get(); short val2 = get1();
+ elem.a = 2; short val3 = get(); short val4 = get1();
+
+ assertEquals(val1, 1);
+ assertEquals(val3, (isStableEnabled ? 1 : 2));
+
+ assertEquals(val2, 1);
+ assertEquals(val4, 2);
+ }
+ }
+ }
+
+ /* ==================================================== */
+ // Auxiliary methods
+ static void assertEquals(int i, int j) { if (i != j) throw new AssertionError(i + " != " + j); }
+ static void assertTrue(boolean b) { if (!b) throw new AssertionError(); }
+
+ static boolean failed = false;
+
+ public static void run(Class<?> test) {
+ Throwable ex = null;
+ System.out.print(test.getName()+": ");
+ try {
+ test.getMethod("test").invoke(null);
+ } catch (InvocationTargetException e) {
+ ex = e.getCause();
+ } catch (Throwable e) {
+ ex = e;
+ } finally {
+ if (ex == null) {
+ System.out.println("PASSED");
+ } else {
+ failed = true;
+ System.out.println("FAILED");
+ ex.printStackTrace(System.out);
+ }
+ }
+ }
+
+ static final boolean isStableEnabled;
+ static {
+ HotSpotDiagnosticMXBean diagnostic
+ = ManagementFactoryHelper.getDiagnosticMXBean();
+ VMOption tmp;
+ try {
+ tmp = diagnostic.getVMOption("FoldStableValues");
+ } catch (IllegalArgumentException e) {
+ tmp = null;
+ }
+ isStableEnabled = (tmp == null ? false : Boolean.parseBoolean(tmp.getValue()));
+ }
+}
--- a/hotspot/test/compiler/tiered/NonTieredLevelsTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/tiered/NonTieredLevelsTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,9 @@
@Override
protected void test() throws Exception {
+ if (skipXcompOSR()) {
+ return;
+ }
checkNotCompiled();
compile();
checkCompiled();
--- a/hotspot/test/compiler/tiered/TieredLevelsTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/tiered/TieredLevelsTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
@Override
protected void test() throws Exception {
+ if (skipXcompOSR()) {
+ return;
+ }
checkNotCompiled();
compile();
checkCompiled();
--- a/hotspot/test/compiler/types/TestMeetTopArrayExactConstantArray.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/types/TestMeetTopArrayExactConstantArray.java Fri Mar 14 09:26:27 2014 +0100
@@ -25,7 +25,7 @@
* @test
* @bug 8027571
* @summary meet of TopPTR exact array with constant array is not symmetric
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestMeetTopArrayExactConstantArray
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestMeetTopArrayExactConstantArray
*
*/
--- a/hotspot/test/compiler/types/TestSpeculationFailedHigherEqual.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/types/TestSpeculationFailedHigherEqual.java Fri Mar 14 09:26:27 2014 +0100
@@ -25,7 +25,7 @@
* @test
* @bug 8027422
* @summary type methods shouldn't always operate on speculative part
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestSpeculationFailedHigherEqual
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestSpeculationFailedHigherEqual
*
*/
--- a/hotspot/test/compiler/types/TypeSpeculation.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/types/TypeSpeculation.java Fri Mar 14 09:26:27 2014 +0100
@@ -25,7 +25,7 @@
* @test
* @bug 8024070
* @summary Test that type speculation doesn't cause incorrect execution
- * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation TypeSpeculation
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation TypeSpeculation
*
*/
--- a/hotspot/test/compiler/uncommontrap/TestSpecTrapClassUnloading.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/uncommontrap/TestSpecTrapClassUnloading.java Fri Mar 14 09:26:27 2014 +0100
@@ -25,7 +25,7 @@
* @test
* @bug 8031752
* @summary speculative traps need to be cleaned up at GC
- * @run main/othervm -XX:-TieredCompilation -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:TypeProfileLevel=222 -XX:CompileCommand=exclude,java.lang.reflect.Method::invoke -XX:CompileCommand=exclude,sun.reflect.DelegatingMethodAccessorImpl::invoke -Xmx1M TestSpecTrapClassUnloading
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:+UseTypeSpeculation -XX:TypeProfileLevel=222 -XX:CompileCommand=exclude,java.lang.reflect.Method::invoke -XX:CompileCommand=exclude,sun.reflect.DelegatingMethodAccessorImpl::invoke -Xmx1M TestSpecTrapClassUnloading
*
*/
--- a/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -196,6 +196,29 @@
}
/**
+ * Checks that {@linkplain #method} is not compiled at the given compilation
+ * level or above.
+ *
+ * @param compLevel the compilation level to check against
+ *
+ * @throws RuntimeException if {@linkplain #method} is in the compiler queue
+ * or is compiled at {@code compLevel} or above.
+ */
+
+ protected final void checkNotCompiled(int compLevel) {
+ if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
+ throw new RuntimeException(method + " must not be in queue");
+ }
+ if (WHITE_BOX.getMethodCompilationLevel(method, false) >= compLevel) {
+ throw new RuntimeException(method + " comp_level must be >= maxCompLevel");
+ }
+ if (WHITE_BOX.getMethodCompilationLevel(method, true) >= compLevel) {
+ throw new RuntimeException(method + " osr_comp_level must be >= maxCompLevel");
+ }
+ }
+
+ /**
* Checks, that {@linkplain #method} is not compiled.
*
* @throws RuntimeException if {@linkplain #method} is in compiler queue or
@@ -380,6 +403,20 @@
/** flag for OSR test case */
boolean isOsr();
}
+
+ /**
+ * @return {@code true} if the current test case is OSR and the mode is
+ * Xcomp, otherwise {@code false}
+ */
+ protected boolean skipXcompOSR() {
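+ // MODE is assumed to hold the "java.vm.info" system property, which
+ // starts with "compiled " when the VM runs with -Xcomp.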
+ boolean result = testCase.isOsr()
+ && CompilerWhiteBoxTest.MODE.startsWith("compiled ");
+ if (result && IS_VERBOSE) {
+ System.err.printf("Warning: %s is not applicable in %s%n",
+ testCase.name(), CompilerWhiteBoxTest.MODE);
+ }
+ return result;
+ }
}
enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
--- a/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,11 +51,8 @@
*/
@Override
protected void test() throws Exception {
- if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
- "compiled ")) {
- System.err.printf("Warning: %s is not applicable in %s%n",
- testCase.name(), CompilerWhiteBoxTest.MODE);
- return;
+ if (skipXcompOSR()) {
+ return;
}
compile();
checkCompiled();
--- a/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,11 +51,8 @@
*/
@Override
protected void test() throws Exception {
- if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
- "compiled ")) {
- System.err.printf("Warning: %s is not applicable in %s%n",
- testCase.name(), CompilerWhiteBoxTest.MODE);
- return;
+ if (skipXcompOSR()) {
+ return;
}
compile();
checkCompiled();
--- a/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,13 +24,17 @@
/*
* @test IsMethodCompilableTest
* @bug 8007270 8006683 8007288 8022832
- * @library /testlibrary /testlibrary/whitebox
+ * @library /testlibrary /testlibrary/whitebox /testlibrary/com/oracle/java/testlibrary
* @build IsMethodCompilableTest
* @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
+ * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
* @summary testing of WB::isMethodCompilable()
* @author igor.ignatyev@oracle.com
*/
+
+import com.oracle.java.testlibrary.Platform;
+
public class IsMethodCompilableTest extends CompilerWhiteBoxTest {
/**
* Value of {@code -XX:PerMethodRecompilationCutoff}
@@ -43,7 +47,7 @@
if (tmp == -1) {
PER_METHOD_RECOMPILATION_CUTOFF = -1 /* Inf */;
} else {
- PER_METHOD_RECOMPILATION_CUTOFF = 1 + (0xFFFFFFFFL & tmp);
+ PER_METHOD_RECOMPILATION_CUTOFF = (0xFFFFFFFFL & tmp);
}
}
@@ -60,19 +64,23 @@
/**
* Tests {@code WB::isMethodCompilable()} by recompilation of tested method
* 'PerMethodRecompilationCutoff' times and checks compilation status. Also
- * checks that WB::clearMethodState() clears no-compilable flags.
+ * checks that WB::clearMethodState() clears no-compilable flags. Only
+ * applicable to c2 compiled methods.
*
* @throws Exception if one of the checks fails.
*/
@Override
protected void test() throws Exception {
- if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
- "compiled ")) {
- System.err.printf("Warning: %s is not applicable in %s%n",
- testCase.name(), CompilerWhiteBoxTest.MODE);
- return;
+
+ // Only c2 compilations can be disabled through PerMethodRecompilationCutoff
+ if (!Platform.isServer()) {
+ return;
}
- if (!isCompilable()) {
+
+ if (skipXcompOSR()) {
+ return;
+ }
+ if (!isCompilable(COMP_LEVEL_FULL_OPTIMIZATION)) {
throw new RuntimeException(method + " must be compilable");
}
System.out.println("PerMethodRecompilationCutoff = "
@@ -83,39 +91,37 @@
return;
}
- // deoptimize 'PerMethodRecompilationCutoff' times and clear state
- for (long i = 0L, n = PER_METHOD_RECOMPILATION_CUTOFF - 1; i < n; ++i) {
- compileAndDeoptimize();
+ // deoptimize 'PerMethodRecompilationCutoff' times
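+ // (count only deoptimizations of C2-compiled code as successes, and cap
+ // attempts at twice the cutoff so the loop still terminates if some
+ // compilations come in below the C2 level)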
+ for (long attempts = 0, successes = 0;
+ (successes < PER_METHOD_RECOMPILATION_CUTOFF) &&
+ (attempts < PER_METHOD_RECOMPILATION_CUTOFF*2) &&
+ isCompilable(COMP_LEVEL_FULL_OPTIMIZATION); attempts++) {
+ if (compileAndDeoptimize() == COMP_LEVEL_FULL_OPTIMIZATION) {
+ successes++;
+ }
}
- if (!testCase.isOsr() && !isCompilable()) {
+
+ if (!testCase.isOsr() && !isCompilable(COMP_LEVEL_FULL_OPTIMIZATION)) {
// in osr test case count of deopt maybe more than iterations
throw new RuntimeException(method + " is not compilable after "
- + (PER_METHOD_RECOMPILATION_CUTOFF - 1) + " iterations");
+ + PER_METHOD_RECOMPILATION_CUTOFF + " iterations");
}
- WHITE_BOX.clearMethodState(method);
- // deoptimize 'PerMethodRecompilationCutoff' + 1 times
- long i;
- for (i = 0L; i < PER_METHOD_RECOMPILATION_CUTOFF
- && isCompilable(); ++i) {
- compileAndDeoptimize();
- }
- if (!testCase.isOsr() && i != PER_METHOD_RECOMPILATION_CUTOFF) {
- // in osr test case count of deopt maybe more than iterations
- throw new RuntimeException(method + " is not compilable after "
- + i + " iterations, but must only after "
- + PER_METHOD_RECOMPILATION_CUTOFF);
- }
- if (isCompilable()) {
+ // Now compile once more
+ compileAndDeoptimize();
+
+ if (isCompilable(COMP_LEVEL_FULL_OPTIMIZATION)) {
throw new RuntimeException(method + " is still compilable after "
+ PER_METHOD_RECOMPILATION_CUTOFF + " iterations");
}
+ checkNotCompiled();
compile();
- checkNotCompiled();
+ waitBackgroundCompilation();
+ checkNotCompiled(COMP_LEVEL_FULL_OPTIMIZATION);
// WB.clearMethodState() must reset no-compilable flags
WHITE_BOX.clearMethodState(method);
- if (!isCompilable()) {
+ if (!isCompilable(COMP_LEVEL_FULL_OPTIMIZATION)) {
throw new RuntimeException(method
+ " is not compilable after clearMethodState()");
}
@@ -123,9 +129,11 @@
checkCompiled();
}
- private void compileAndDeoptimize() throws Exception {
+ private int compileAndDeoptimize() throws Exception {
compile();
waitBackgroundCompilation();
+ int compLevel = getCompLevel();
deoptimize();
+ return compLevel;
}
}
--- a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Fri Mar 14 09:26:27 2014 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,11 +53,8 @@
*/
@Override
protected void test() throws Exception {
- if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
- "compiled ")) {
- System.err.printf("Warning: %s is not applicable in %s%n",
- testCase.name(), CompilerWhiteBoxTest.MODE);
- return;
+ if (skipXcompOSR()) {
+ return;
}
checkNotCompiled();
if (!isCompilable()) {
--- a/hotspot/test/testlibrary/com/oracle/java/testlibrary/Platform.java Wed Mar 05 12:31:09 2014 -0500
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/Platform.java Fri Mar 14 09:26:27 2014 +0100
@@ -28,6 +28,15 @@
private static final String dataModel = System.getProperty("sun.arch.data.model");
private static final String vmVersion = System.getProperty("java.vm.version");
private static final String osArch = System.getProperty("os.arch");
+ private static final String vmName = System.getProperty("java.vm.name");
+
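+ // "java.vm.name" typically ends with " Client VM" or " Server VM",
+ // e.g. "OpenJDK 64-Bit Server VM".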
+ public static boolean isClient() {
+ return vmName.endsWith(" Client VM");
+ }
+
+ public static boolean isServer() {
+ return vmName.endsWith(" Server VM");
+ }
public static boolean is32bit() {
return dataModel.equals("32");