--- a/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp Thu Oct 27 12:18:36 2016 +0200
@@ -1894,15 +1894,18 @@
__ beq(combined_check, slow);
}
+ // If the compiler was not able to prove that the exact type of the source or the destination
+ // of the arraycopy is an array type, check at runtime whether the source or the destination
+ // is an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
- if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
+ if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
__ bge(CCR0, slow);
}
- if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
+ if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
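
The runtime check added above relies on the sign convention of Klass::layout_helper(): array klasses encode a tag in the high bits that makes the value negative, while non-array klasses stay at or above Klass::_lh_neutral_value (0), which is why the generated code branches to the slow path on bge. A minimal sketch of that convention; the helper name below is purely illustrative, not a HotSpot function:

  // Assumed convention (Klass::layout_helper() in HotSpot): a negative value
  // means array klass; values >= _lh_neutral_value (0) mean "not an array",
  // so the generated "bge CCR0, slow" above sends non-array operands to the
  // slow path.
  inline bool lh_is_array(int layout_helper) {
    return layout_helper < 0;   // Klass::_lh_neutral_value == 0
  }
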
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.cpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.cpp Thu Oct 27 12:18:36 2016 +0200
@@ -221,6 +221,7 @@
values.describe(frame_no, (intptr_t*)&(get_ijava_state()->name), #name);
DESCRIBE_ADDRESS(method);
+ DESCRIBE_ADDRESS(mirror);
DESCRIBE_ADDRESS(locals);
DESCRIBE_ADDRESS(monitors);
DESCRIBE_ADDRESS(cpoolCache);
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.hpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.hpp Thu Oct 27 12:18:36 2016 +0200
@@ -257,8 +257,7 @@
struct ijava_state {
#ifdef ASSERT
- uint64_t ijava_reserved; // Used for assertion.
- uint64_t ijava_reserved2; // Inserted for alignment.
+ uint64_t ijava_reserved; // Used for assertion.
#endif
uint64_t method;
uint64_t mirror;
@@ -274,7 +273,6 @@
uint64_t oop_tmp;
uint64_t lresult;
uint64_t fresult;
- // Aligned to frame::alignment_in_bytes (16).
};
enum {
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp Thu Oct 27 12:18:36 2016 +0200
@@ -1922,7 +1922,7 @@
// Check the supertype display:
if (must_load_sco) {
// The super check offset is always positive...
- lwz(check_cache_offset, sco_offset, super_klass);
+ lwz(check_cache_offset, sco_offset, super_klass);
super_check_offset = RegisterOrConstant(check_cache_offset);
// super_check_offset is register.
assert_different_registers(sub_klass, super_klass, cached_super, super_check_offset.as_register());
@@ -3325,12 +3325,10 @@
}
}
-void MacroAssembler::load_mirror(Register mirror, Register method) {
- const int mirror_offset = in_bytes(Klass::java_mirror_offset());
- ld(mirror, in_bytes(Method::const_offset()), method);
- ld(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
+void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
+ ld(mirror, in_bytes(ConstMethod::constants_offset()), const_method);
ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
- ld(mirror, mirror_offset, mirror);
+ ld(mirror, in_bytes(Klass::java_mirror_offset()), mirror);
}
// Clear Array
@@ -4345,8 +4343,8 @@
* @param t3 volatile register
*/
void MacroAssembler::kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
- Register constants, Register barretConstants,
- Register t0, Register t1, Register t2, Register t3, Register t4) {
+ Register constants, Register barretConstants,
+ Register t0, Register t1, Register t2, Register t3, Register t4) {
assert_different_registers(crc, buf, len, table);
Label L_alignedHead, L_tail, L_alignTail, L_start, L_end;
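
The new load_mirror_from_const_method above is just three dependent loads that walk ConstMethod -> ConstantPool -> pool-holder Klass -> java mirror, using exactly the offsets named in the code. A self-contained C++ sketch of that pointer chain (the structs below are illustrative stand-ins, not the real HotSpot metadata classes):

  // Illustrative stand-ins: the real ConstMethod/ConstantPool/Klass have many
  // more fields; these model only the three fields the loads dereference.
  struct Klass        { void*         java_mirror; };  // oop of the java.lang.Class instance
  struct ConstantPool { Klass*        pool_holder; };  // klass that owns this constant pool
  struct ConstMethod  { ConstantPool* constants;   };  // constant pool of the method

  // C++ equivalent of the three ld instructions in the new helper:
  // mirror = const_method->constants->pool_holder->java_mirror
  void* load_mirror_from_const_method(const ConstMethod* const_method) {
    return const_method->constants->pool_holder->java_mirror;
  }

Compared with the removed load_mirror(mirror, method), the helper starts one step later in the chain, so callers that already hold the ConstMethod* (such as the frame-setup code below, which has Rconst_method live) save the extra Method* -> ConstMethod* load.
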
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp Thu Oct 27 12:18:36 2016 +0200
@@ -723,7 +723,7 @@
void store_klass(Register dst_oop, Register klass, Register tmp = R0);
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
- void load_mirror(Register mirror, Register method);
+ void load_mirror_from_const_method(Register mirror, Register const_method);
static int instr_size_for_decode_klass_not_null();
void decode_klass_not_null(Register dst, Register src = noreg);
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp Thu Oct 27 12:18:36 2016 +0200
@@ -915,7 +915,9 @@
__ b(Ldone);
__ bind(Lstatic); // Static case: Lock the java mirror
- __ load_mirror(Robj_to_lock, R19_method);
+ // Load mirror from interpreter frame.
+ __ ld(Robj_to_lock, _abi(callers_sp), R1_SP);
+ __ ld(Robj_to_lock, _ijava_state_neg(mirror), Robj_to_lock);
__ bind(Ldone);
__ verify_oop(Robj_to_lock);
@@ -1077,12 +1079,12 @@
__ resize_frame(parent_frame_resize, R11_scratch1);
__ std(R12_scratch2, _abi(lr), R1_SP);
+ // Get mirror and store it in the frame as GC root for this Method*.
+ __ load_mirror_from_const_method(R12_scratch2, Rconst_method);
+
__ addi(R26_monitor, R1_SP, - frame::ijava_state_size);
__ addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
- // Get mirror and store it in the frame as GC root for this Method*.
- __ load_mirror(R12_scratch2, R19_method);
-
// Store values.
// R15_esp, R14_bcp, R26_monitor, R28_mdx are saved at java calls
// in InterpreterMacroAssembler::call_from_interpreter.
@@ -1380,13 +1382,12 @@
__ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
__ bfalse(CCR0, method_is_not_static);
- __ load_mirror(R12_scratch2, R19_method);
- // state->_native_mirror = mirror;
-
- __ ld(R11_scratch1, 0, R1_SP);
- __ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
+ __ ld(R11_scratch1, _abi(callers_sp), R1_SP);
+ // Load mirror from interpreter frame.
+ __ ld(R12_scratch2, _ijava_state_neg(mirror), R11_scratch1);
// R4_ARG2 = &state->_oop_temp;
__ addi(R4_ARG2, R11_scratch1, _ijava_state_neg(oop_tmp));
+ __ std(R12_scratch2/*mirror*/, _ijava_state_neg(oop_tmp), R11_scratch1);
BIND(method_is_not_static);
}
@@ -2157,12 +2158,12 @@
// Restoration of lr done by remove_activation.
switch (state) {
// Narrow result if state is itos but result type is smaller.
- case itos: __ narrow(R17_tos); /* fall through */
- case ltos:
case btos:
case ztos:
case ctos:
case stos:
+ case itos: __ narrow(R17_tos); /* fall through */
+ case ltos:
case atos: __ mr(R3_RET, R17_tos); break;
case ftos:
case dtos: __ fmr(F1_RET, F15_ftos); break;
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Thu Oct 27 11:20:37 2016 +0200
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Thu Oct 27 12:18:36 2016 +0200
@@ -2133,10 +2133,6 @@
// since compiled code callers expect the result to already be narrowed.
case itos: __ narrow(R17_tos); /* fall through */
case ltos:
- case btos:
- case ztos:
- case ctos:
- case stos:
case atos: __ mr(R3_RET, R17_tos); break;
case ftos:
case dtos: __ fmr(F1_RET, F15_ftos); break;
@@ -2548,7 +2544,6 @@
assert(branch_table[ztos] == 0, "can't compute twice");
branch_table[ztos] = __ pc(); // non-volatile_entry point
__ lbzx(R17_tos, Rclass_or_obj, Roffset);
- __ extsb(R17_tos, R17_tos);
__ push(ztos);
if (!is_static && rc == may_rewrite) {
// use btos rewriting, no truncating to t/f bit is needed for getfield.
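
Taken together, the last two files shift where subword results get their canonical form: in the interpreter return path, btos/ztos/ctos/stos now fall through into the itos case and therefore through narrow(R17_tos) before the value is moved to R3_RET, and getfield's ztos entry drops the extsb after the zero-extending lbzx (per the existing comment, truncating a boolean to its t/f bit is not getfield's job; the parallel switch in templateTable_ppc_64.cpp drops its subword cases, presumably because subword returns reach it as itos). As a rough sketch of the contract such narrowing has to provide for an int-sized tos value (the exact per-platform HotSpot implementation may differ):

  #include <cstdint>

  // Sketch only: canonical subword values as the return path must deliver them.
  int32_t narrow_boolean(int32_t v) { return v & 1; }         // keep only the t/f bit
  int32_t narrow_byte   (int32_t v) { return (int8_t)v;  }    // sign-extend low 8 bits
  int32_t narrow_char   (int32_t v) { return (uint16_t)v; }   // zero-extend low 16 bits
  int32_t narrow_short  (int32_t v) { return (int16_t)v; }    // sign-extend low 16 bits
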