--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu May 11 16:33:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Mon May 15 09:40:23 2017 -0400
@@ -1390,7 +1390,6 @@
if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
__ reserved_stack_check();
}
- // the poll may need a register so just pick one that isn't the return register
__ set((intptr_t)os::get_polling_page(), L0);
__ relocate(relocInfo::poll_return_type);
__ ld_ptr(L0, 0, G0);
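The set/ld_ptr pair above is the return-side safepoint poll: a load from the page returned by os::get_polling_page(), which the VM makes unreadable when it wants threads to stop, so the load faults and the fault handler parks the thread. A minimal userspace sketch of that idea, assuming a 4 KB page and Linux-style signal handling (illustrative only, not HotSpot code):

// sketch_poll.cpp -- polling-page safepoint in miniature; names and page size are assumptions.
#include <csignal>
#include <cstdio>
#include <sys/mman.h>

static const long kPage = 4096;            // assumed page size
static void* polling_page;                 // stands in for os::get_polling_page()
static volatile sig_atomic_t safepoint_seen;

static void on_fault(int, siginfo_t* info, void*) {
  if (info->si_addr == polling_page) {
    safepoint_seen = 1;                              // a real VM would park the thread here
    mprotect(polling_page, kPage, PROT_READ);        // let the retried load succeed
  }
}

int main() {
  polling_page = mmap(NULL, kPage, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct sigaction sa = {};
  sa.sa_sigaction = on_fault;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, NULL);

  mprotect(polling_page, kPage, PROT_NONE);          // "arm" the safepoint
  volatile int poll = *(volatile int*)polling_page;  // the poll: faults once, handler runs
  (void)poll;
  printf("safepoint observed: %d\n", (int)safepoint_seen);
  return 0;
}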
@@ -1556,8 +1555,9 @@
Label skip;
if (type == T_INT) {
__ br(acond, false, Assembler::pt, skip);
- } else
+ } else {
__ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
+ }
if (opr1->is_constant() && opr1->type() == T_INT) {
Register dest = result->as_register();
if (Assembler::is_simm13(opr1->as_jint())) {
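Assembler::is_simm13 in the line above decides whether the constant fits the instruction's signed 13-bit immediate field; anything outside -4096..4095 has to be materialized into a register first. A stand-alone sketch of that range check, with a helper name of my own:

// sketch_simm.cpp -- generic signed-immediate range check, illustrative only.
#include <cassert>

static inline bool fits_simm(long x, int nbits) {
  long lo = -(1L << (nbits - 1));   // -4096 for nbits == 13
  long hi =  (1L << (nbits - 1));   //  4096, exclusive upper bound
  return lo <= x && x < hi;
}

int main() {
  assert( fits_simm( 4095, 13));    // largest value that encodes directly
  assert(!fits_simm( 4096, 13));    // too big: must be loaded into a register
  assert( fits_simm(-4096, 13));    // smallest value that encodes directly
  assert(!fits_simm(-4097, 13));
  return 0;
}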
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Thu May 11 16:33:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Mon May 15 09:40:23 2017 -0400
@@ -56,7 +56,6 @@
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
-// Stack slots are 2X larger in LP64 than in the 32 bit VM.
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize, 1024);
define_pd_global(intx, VMThreadStackSize, 1024);
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu May 11 16:33:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Mon May 15 09:40:23 2017 -0400
@@ -327,7 +327,6 @@
slot += 2;
}
break;
- break;
case T_FLOAT:
if (flt_reg < flt_reg_max) {
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Thu May 11 16:33:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Mon May 15 09:40:23 2017 -0400
@@ -352,7 +352,6 @@
reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
// 64-bit, longs in 1 register: use all 64-bit integer registers
-// 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's.
,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
);
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu May 11 16:33:18 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon May 15 09:40:23 2017 -0400
@@ -1236,8 +1236,7 @@
__ delayed()->inc(to);
__ BIND(L_skip_alignment);
}
- if (!aligned)
- {
+ if (!aligned) {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next
// code for aligned copy.
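The "same alignment mod 8" condition in the comment above can be tested by comparing the low three address bits, e.g. via XOR. A hedged C++ sketch of the predicate (the condition only, not the stub's SPARC shift-copy code):

// sketch_align.cpp -- illustrative predicate only.
#include <cassert>
#include <cstdint>

// Two addresses have the same alignment mod 8 exactly when their low three
// bits agree, i.e. when (from ^ to) & 7 is zero.
static inline bool same_alignment_mod8(const void* from, const void* to) {
  return (((uintptr_t)from ^ (uintptr_t)to) & 7) == 0;
}

int main() {
  char buf[32];
  assert( same_alignment_mod8(buf + 8, buf + 16));   // offsets differ by a multiple of 8
  assert(!same_alignment_mod8(buf + 8, buf + 13));   // differ by 5: shift copy is needed
  return 0;
}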
@@ -1338,8 +1337,7 @@
// The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in the unaligned case.
__ dec(count, 16);
- } else
- {
+ } else {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next
// code for aligned copy (and subtracting 16 from 'count' before jump).
@@ -1449,8 +1447,7 @@
__ sth(O4, to, -2);
__ BIND(L_skip_alignment2);
}
- if (!aligned)
- {
+ if (!aligned) {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise fall through to the next
// code for aligned copy.
@@ -1567,14 +1564,14 @@
__ BIND(L_skip_align2);
}
if (!aligned) {
- // align to 8 bytes, we know we are 4 byte aligned to start
- __ andcc(to, 7, G0);
- __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
- __ delayed()->nop();
- __ stw(value, to, 0);
- __ inc(to, 4);
- __ dec(count, 1 << shift);
- __ BIND(L_fill_32_bytes);
+ // align to 8 bytes, we know we are 4 byte aligned to start
+ __ andcc(to, 7, G0);
+ __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
+ __ delayed()->nop();
+ __ stw(value, to, 0);
+ __ inc(to, 4);
+ __ dec(count, 1 << shift);
+ __ BIND(L_fill_32_bytes);
}
if (t == T_INT) {
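The block just re-indented above does one conditional 4-byte store to take 'to' from 4-byte to 8-byte alignment before the wide fill loop. The same step in plain C++, using a byte count rather than the stub's shifted element count (a simplification of mine):

// sketch_fill_align.cpp -- simplified model of the alignment step, not the stub itself.
#include <cassert>
#include <cstdint>
#include <cstring>

// Precondition: 'to' is 4-byte aligned. At most one 4-byte store of the fill
// pattern is needed to reach 8-byte alignment.
static char* align_to_8(char* to, uint32_t value, long& count_bytes) {
  if (((uintptr_t)to & 7) != 0) {
    memcpy(to, &value, 4);       // one 4-byte store of the fill value
    to += 4;
    count_bytes -= 4;
  }
  return to;
}

int main() {
  alignas(8) char buf[16];
  long remaining = 16;
  char* p = align_to_8(buf + 4, 0x5a5a5a5au, remaining);
  assert(((uintptr_t)p & 7) == 0 && remaining == 12);
  return 0;
}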
@@ -1781,8 +1778,7 @@
// The 'count' is decremented in copy_16_bytes_backward_with_shift()
// in the unaligned case.
__ dec(count, 8);
- } else
- {
+ } else {
// Copy with shift 16 bytes per iteration if arrays do not have
// the same alignment mod 8, otherwise jump to the next
// code for aligned copy (and subtracting 8 from 'count' before jump).
@@ -1891,8 +1887,7 @@
// Aligned arrays have 4-byte alignment in the 32-bit VM
// and 8-byte alignment in the 64-bit VM.
//
- if (!aligned)
- {
+ if (!aligned) {
// The next check could be put under 'ifndef' since the code in
// generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
@@ -3089,8 +3084,7 @@
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL,
"oop_arraycopy_uninit",
/*dest_uninitialized*/true);
- } else
- {
+ } else {
// oop arraycopy is always aligned on 32bit and 64bit without compressed oops
StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy;
StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy;
@@ -5107,8 +5101,8 @@
void stub_prolog(StubCodeDesc* cdesc) {
# ifdef ASSERT
// put extra information in the stub code, to make it more readable
-// Write the high part of the address
-// [RGV] Check if there is a dependency on the size of this prolog
+ // Write the high part of the address
+ // [RGV] Check if there is a dependency on the size of this prolog
__ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
__ emit_data((intptr_t)cdesc, relocInfo::none);
__ emit_data(++_stub_count, relocInfo::none);
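The two emit_data calls above embed the 64-bit StubCodeDesc address as two 32-bit data words, high half first, so a stub can be identified in a hex dump of the code buffer. A small sketch of that split, using a made-up address:

// sketch_split.cpp -- shows the high/low split only; the address is hypothetical.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t cdesc = 0x0000123456789abcULL;           // hypothetical StubCodeDesc* value
  uint32_t hi = (uint32_t)(cdesc >> 32);            // first emitted word
  uint32_t lo = (uint32_t)(cdesc & 0xffffffffULL);  // second emitted word
  printf("%08x %08x\n", hi, lo);                    // prints: 00001234 56789abc
  return 0;
}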