8150388: Remove SPARC 32-bit support
author gtriantafill
date Wed, 12 Apr 2017 17:52:04 -0400
changeset 46381 020219e46c86
parent 46380 4a51438196cf
child 46382 5520c435279b
8150388: Remove SPARC 32-bit support Reviewed-by: hseigel, coleenp, dholmes, kvn
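The change is almost entirely mechanical: throughout the files listed below, two-way #ifdef _LP64 splits collapse to their 64-bit arm, which then becomes unconditional, and 32-bit-only blocks are deleted outright. A schematic of the shape of the change, in self-contained C++ (illustrative only, not HotSpot code):

  #include <cstddef>
  #include <cstdio>

  // Before the patch, SPARC-port code was typically split like this:
  static size_t stack_slot_size_before() {
  #ifdef _LP64
    return 8;   // 64-bit VM: the arm this patch keeps
  #else
    return 4;   // 32-bit VM: the arm this patch removes
  #endif
  }

  // After the patch, only the 64-bit arm remains, unguarded:
  static size_t stack_slot_size_after() {
    return 8;
  }

  int main() {
    std::printf("%zu %zu\n", stack_slot_size_before(), stack_slot_size_after());
    return 0;
  }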
hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp
hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp
hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp
hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp
hotspot/src/cpu/sparc/vm/copy_sparc.hpp
hotspot/src/cpu/sparc/vm/frame_sparc.cpp
hotspot/src/cpu/sparc/vm/frame_sparc.hpp
hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp
hotspot/src/cpu/sparc/vm/globals_sparc.hpp
hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp
hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp
hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp
hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp
hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp
hotspot/src/cpu/sparc/vm/jni_sparc.h
hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp
hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp
hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp
hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp
hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp
hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp
hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
hotspot/src/cpu/sparc/vm/sparc.ad
hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp
hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp
hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp
hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp
hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp
hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp
hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp
hotspot/src/os_cpu/linux_sparc/vm/prefetch_linux_sparc.inline.hpp
hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp
hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp
hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
hotspot/src/os_cpu/solaris_sparc/vm/prefetch_solaris_sparc.inline.hpp
hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp
--- a/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/abstractInterpreter_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -270,9 +270,7 @@
     assert(locals < interpreter_frame->sp() || locals > (interpreter_frame->sp() + 16), "locals in save area");
     assert(locals < interpreter_frame->fp() || locals >= (interpreter_frame->fp() + 16), "locals in save area");
   }
-#ifdef _LP64
   assert(*interpreter_frame->register_addr(I5_savedSP) & 1, "must be odd");
-#endif
 
   *interpreter_frame->register_addr(Lmethod)     = (intptr_t) method;
   *interpreter_frame->register_addr(Llocals)     = (intptr_t) locals;
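The assert retained above only makes sense on 64-bit SPARC: the V9 ABI biases the in-register stack pointer by 2047 (HotSpot's STACK_BIAS), so a properly biased, 16-byte-aligned saved SP is always odd. A standalone sketch of that invariant (illustrative values, not from the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    // A 16-byte-aligned real SP minus an odd bias is always odd,
    // which is exactly what the I5_savedSP assert relies on.
    const int64_t STACK_BIAS = 2047;            // SPARC V9 ABI bias
    int64_t real_sp   = 0x7ffffffff000LL;       // example aligned address
    int64_t biased_sp = real_sp - STACK_BIAS;   // what the register holds
    assert(biased_sp & 1);
    return 0;
  }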
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,21 +159,12 @@
 
  public:
 
-#ifdef _LP64
   static LIR_Opr as_long_opr(Register r) {
     return as_long_single_opr(r);
   }
   static LIR_Opr as_pointer_opr(Register r) {
     return as_long_single_opr(r);
   }
-#else
-  static LIR_Opr as_long_opr(Register r) {
-    return as_long_pair_opr(r);
-  }
-  static LIR_Opr as_pointer_opr(Register r) {
-    return as_opr(r);
-  }
-#endif
   static LIR_Opr as_float_opr(FloatRegister r) {
     return LIR_OprFact::single_fpu(r->encoding());
   }
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -556,11 +556,9 @@
     // guarantee that 32-bit loads always sign extended but that isn't
     // true and since sign extension isn't free, it would impose a
     // slight cost.
-#ifdef _LP64
     if  (op->type() == T_INT) {
       __ br(acond, false, Assembler::pn, *(op->label()));
     } else
-#endif
       __ brx(acond, false, Assembler::pn, *(op->label()));
   }
   // The peephole pass fills the delay slot
@@ -576,12 +574,7 @@
       Register rlo  = dst->as_register_lo();
       Register rhi  = dst->as_register_hi();
       Register rval = op->in_opr()->as_register();
-#ifdef _LP64
       __ sra(rval, 0, rlo);
-#else
-      __ mov(rval, rlo);
-      __ sra(rval, BitsPerInt-1, rhi);
-#endif
       break;
     }
     case Bytecodes::_i2d:
@@ -614,11 +607,7 @@
       Register rlo  = op->in_opr()->as_register_lo();
       Register rhi  = op->in_opr()->as_register_hi();
       Register rdst = dst->as_register();
-#ifdef _LP64
       __ sra(rlo, 0, rdst);
-#else
-      __ mov(rlo, rdst);
-#endif
       break;
     }
     case Bytecodes::_d2f:
@@ -711,7 +700,6 @@
       case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
       case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
       case T_LONG  :
-#ifdef _LP64
         if (unaligned || PatchALot) {
           // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
           assert(G3_scratch != base, "can't handle this");
@@ -722,11 +710,6 @@
         } else {
           __ stx(from_reg->as_register_lo(), base, offset);
         }
-#else
-        assert(Assembler::is_simm13(offset + 4), "must be");
-        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
-        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
-#endif
         break;
       case T_ADDRESS:
       case T_METADATA:
@@ -778,12 +761,7 @@
     case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
     case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
     case T_LONG  :
-#ifdef _LP64
       __ stx(from_reg->as_register_lo(), base, disp);
-#else
-      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
-      __ std(from_reg->as_register_hi(), base, disp);
-#endif
       break;
     case T_ADDRESS:
       __ st_ptr(from_reg->as_register(), base, disp);
@@ -826,40 +804,22 @@
       case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
       case T_LONG  :
         if (!unaligned && !PatchALot) {
-#ifdef _LP64
           __ ldx(base, offset, to_reg->as_register_lo());
-#else
-          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
-                 "must be sequential");
-          __ ldd(base, offset, to_reg->as_register_hi());
-#endif
         } else {
-#ifdef _LP64
           assert(base != to_reg->as_register_lo(), "can't handle this");
           assert(O7 != to_reg->as_register_lo(), "can't handle this");
           __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
           __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
           __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
           __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
-#else
-          if (base == to_reg->as_register_lo()) {
-            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
-            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
-          } else {
-            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
-            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
-          }
-#endif
         }
         break;
       case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
       case T_ADDRESS:
-#ifdef _LP64
         if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
           __ lduw(base, offset, to_reg->as_register());
           __ decode_klass_not_null(to_reg->as_register());
         } else
-#endif
         {
           __ ld_ptr(base, offset, to_reg->as_register());
         }
@@ -921,13 +881,7 @@
     case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
     case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
     case T_LONG  :
-#ifdef _LP64
       __ ldx(base, disp, to_reg->as_register_lo());
-#else
-      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
-             "must be sequential");
-      __ ldd(base, disp, to_reg->as_register_hi());
-#endif
       break;
     default      : ShouldNotReachHere();
   }
@@ -1107,16 +1061,9 @@
         jlong con = c->as_jlong();
 
         if (to_reg->is_double_cpu()) {
-#ifdef _LP64
           __ set(con,  to_reg->as_register_lo());
-#else
-          __ set(low(con),  to_reg->as_register_lo());
-          __ set(high(con), to_reg->as_register_hi());
-#endif
-#ifdef _LP64
         } else if (to_reg->is_single_cpu()) {
           __ set(con, to_reg->as_register());
-#endif
         } else {
           ShouldNotReachHere();
           assert(to_reg->is_double_fpu(), "wrong register kind");
@@ -1190,12 +1137,7 @@
           __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
         } else {
           assert(to_reg->is_double_cpu(), "Must be a long register.");
-#ifdef _LP64
           __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
-#else
-          __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
-          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
-#endif
         }
 
       }
@@ -1366,22 +1308,10 @@
     }
   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
     if (from_reg->is_double_cpu()) {
-#ifdef _LP64
       __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
-#else
-      assert(to_reg->is_double_cpu() &&
-             from_reg->as_register_hi() != to_reg->as_register_lo() &&
-             from_reg->as_register_lo() != to_reg->as_register_hi(),
-             "should both be long and not overlap");
-      // long to long moves
-      __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
-      __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
-#endif
-#ifdef _LP64
     } else if (to_reg->is_double_cpu()) {
       // int to int moves
       __ mov(from_reg->as_register(), to_reg->as_register_lo());
-#endif
     } else {
       // int to int moves
       __ mov(from_reg->as_register(), to_reg->as_register());
@@ -1461,20 +1391,6 @@
     __ reserved_stack_check();
   }
   // the poll may need a register so just pick one that isn't the return register
-#if defined(TIERED) && !defined(_LP64)
-  if (result->type_field() == LIR_OprDesc::long_type) {
-    // Must move the result to G1
-    // Must leave proper result in O0,O1 and G1 (TIERED only)
-    __ sllx(I0, 32, G1);          // Shift bits into high G1
-    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
-    __ or3 (I1, G1, G1);          // OR 64 bits into G1
-#ifdef ASSERT
-    // mangle it so any problems will show up
-    __ set(0xdeadbeef, I0);
-    __ set(0xdeadbeef, I1);
-#endif
-  }
-#endif // TIERED
   __ set((intptr_t)os::get_polling_page(), L0);
   __ relocate(relocInfo::poll_return_type);
   __ ld_ptr(L0, 0, G0);
@@ -1568,23 +1484,11 @@
     Register xhi = opr1->as_register_hi();
     if (opr2->is_constant() && opr2->as_jlong() == 0) {
       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
-#ifdef _LP64
       __ orcc(xhi, G0, G0);
-#else
-      __ orcc(xhi, xlo, G0);
-#endif
     } else if (opr2->is_register()) {
       Register ylo = opr2->as_register_lo();
       Register yhi = opr2->as_register_hi();
-#ifdef _LP64
       __ cmp(xlo, ylo);
-#else
-      __ subcc(xlo, ylo, xlo);
-      __ subccc(xhi, yhi, xhi);
-      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
-        __ orcc(xhi, xlo, G0);
-      }
-#endif
     } else {
       ShouldNotReachHere();
     }
@@ -1612,13 +1516,7 @@
       ShouldNotReachHere();
     }
   } else if (code == lir_cmp_l2i) {
-#ifdef _LP64
     __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
-#else
-    __ lcmp(left->as_register_hi(),  left->as_register_lo(),
-            right->as_register_hi(), right->as_register_lo(),
-            dst->as_register());
-#endif
   } else {
     ShouldNotReachHere();
   }
@@ -1656,11 +1554,9 @@
     ShouldNotReachHere();
   }
   Label skip;
-#ifdef _LP64
     if  (type == T_INT) {
       __ br(acond, false, Assembler::pt, skip);
     } else
-#endif
       __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit
   if (opr1->is_constant() && opr1->type() == T_INT) {
     Register dest = result->as_register();
@@ -1720,7 +1616,6 @@
       }
 
     } else if (dest->is_double_cpu()) {
-#ifdef _LP64
       Register dst_lo = dest->as_register_lo();
       Register op1_lo = left->as_pointer_register();
       Register op2_lo = right->as_pointer_register();
@@ -1736,28 +1631,6 @@
 
         default: ShouldNotReachHere();
       }
-#else
-      Register op1_lo = left->as_register_lo();
-      Register op1_hi = left->as_register_hi();
-      Register op2_lo = right->as_register_lo();
-      Register op2_hi = right->as_register_hi();
-      Register dst_lo = dest->as_register_lo();
-      Register dst_hi = dest->as_register_hi();
-
-      switch (code) {
-        case lir_add:
-          __ addcc(op1_lo, op2_lo, dst_lo);
-          __ addc (op1_hi, op2_hi, dst_hi);
-          break;
-
-        case lir_sub:
-          __ subcc(op1_lo, op2_lo, dst_lo);
-          __ subc (op1_hi, op2_hi, dst_hi);
-          break;
-
-        default: ShouldNotReachHere();
-      }
-#endif
     } else {
       assert (right->is_single_cpu(), "Just Checking");
 
@@ -1852,23 +1725,14 @@
       int simm13 = (int)c;
       switch (code) {
         case lir_logic_and:
-#ifndef _LP64
-          __ and3 (left->as_register_hi(), 0,      dest->as_register_hi());
-#endif
           __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
           break;
 
         case lir_logic_or:
-#ifndef _LP64
-          __ or3 (left->as_register_hi(), 0,      dest->as_register_hi());
-#endif
           __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
           break;
 
         case lir_logic_xor:
-#ifndef _LP64
-          __ xor3 (left->as_register_hi(), 0,      dest->as_register_hi());
-#endif
           __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
           break;
 
@@ -1886,7 +1750,6 @@
         default: ShouldNotReachHere();
       }
     } else {
-#ifdef _LP64
       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                         left->as_register_lo();
       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
@@ -1898,26 +1761,6 @@
         case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
         default: ShouldNotReachHere();
       }
-#else
-      switch (code) {
-        case lir_logic_and:
-          __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
-          __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
-          break;
-
-        case lir_logic_or:
-          __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
-          __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
-          break;
-
-        case lir_logic_xor:
-          __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
-          __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
-          break;
-
-        default: ShouldNotReachHere();
-      }
-#endif
     }
   }
 }
@@ -1975,12 +1818,10 @@
   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
 
-#ifdef _LP64
   // higher 32bits must be null
   __ sra(dst_pos, 0, dst_pos);
   __ sra(src_pos, 0, src_pos);
   __ sra(length, 0, length);
-#endif
 
   // set up the arraycopy stub information
   ArrayCopyStub* stub = op->stub();
@@ -2316,7 +2157,6 @@
 
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
   if (dest->is_single_cpu()) {
-#ifdef _LP64
     if (left->type() == T_OBJECT) {
       switch (code) {
         case lir_shl:  __ sllx  (left->as_register(), count->as_register(), dest->as_register()); break;
@@ -2325,7 +2165,6 @@
         default: ShouldNotReachHere();
       }
     } else
-#endif
       switch (code) {
         case lir_shl:  __ sll   (left->as_register(), count->as_register(), dest->as_register()); break;
         case lir_shr:  __ sra   (left->as_register(), count->as_register(), dest->as_register()); break;
@@ -2333,27 +2172,17 @@
         default: ShouldNotReachHere();
       }
   } else {
-#ifdef _LP64
     switch (code) {
       case lir_shl:  __ sllx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
       case lir_shr:  __ srax  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
       case lir_ushr: __ srlx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
       default: ShouldNotReachHere();
     }
-#else
-    switch (code) {
-      case lir_shl:  __ lshl  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
-      case lir_shr:  __ lshr  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
-      case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
-      default: ShouldNotReachHere();
-    }
-#endif
   }
 }
 
 
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
-#ifdef _LP64
   if (left->type() == T_OBJECT) {
     count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
     Register l = left->as_register();
@@ -2366,7 +2195,6 @@
     }
     return;
   }
-#endif
 
   if (dest->is_single_cpu()) {
     count = count & 0x1F; // Java spec
@@ -2425,7 +2253,7 @@
          op->tmp4()->as_register()  == O1 &&
          op->klass()->as_register() == G5, "must be");
 
-  LP64_ONLY( __ signx(op->len()->as_register()); )
+  __ signx(op->len()->as_register());
   if (UseSlowPath ||
       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
@@ -2748,7 +2576,6 @@
     Register new_value_hi = op->new_value()->as_register_hi();
     Register t1 = op->tmp1()->as_register();
     Register t2 = op->tmp2()->as_register();
-#ifdef _LP64
     __ mov(cmp_value_lo, t1);
     __ mov(new_value_lo, t2);
     // perform the compare and swap operation
@@ -2756,23 +2583,6 @@
     // generate condition code - if the swap succeeded, t2 ("new value" reg) was
     // overwritten with the original value in "addr" and will be equal to t1.
     __ cmp(t1, t2);
-#else
-    // move high and low halves of long values into single registers
-    __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
-    __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
-    __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
-    __ sllx(new_value_hi, 32, t2);
-    __ srl(new_value_lo, 0, new_value_lo);
-    __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
-    // perform the compare and swap operation
-    __ casx(addr, t1, t2);
-    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
-    // overwritten with the original value in "addr" and will be equal to t1.
-    // Produce icc flag for 32bit.
-    __ sub(t1, t2, t2);
-    __ srlx(t2, 32, t1);
-    __ orcc(t2, t1, G0);
-#endif
   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
     Register addr = op->addr()->as_pointer_register();
     Register cmp_value = op->cmp_value()->as_register();
@@ -2914,13 +2724,8 @@
   assert(data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
   Register mdo  = op->mdo()->as_register();
-#ifdef _LP64
   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
   Register tmp1 = op->tmp1()->as_register_lo();
-#else
-  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
-  Register tmp1 = op->tmp1()->as_register();
-#endif
   metadata2reg(md->constant_encoding(), mdo);
   int mdo_offset_bias = 0;
   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
@@ -3200,12 +3005,7 @@
     assert (left->is_double_cpu(), "Must be a long");
     Register Rlow = left->as_register_lo();
     Register Rhi = left->as_register_hi();
-#ifdef _LP64
     __ sub(G0, Rlow, dest->as_register_lo());
-#else
-    __ subcc(G0, Rlow, dest->as_register_lo());
-    __ subc (G0, Rhi,  dest->as_register_hi());
-#endif
   }
 }
 
@@ -3245,9 +3045,7 @@
 
 
 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
-#ifdef _LP64
   ShouldNotReachHere();
-#endif
 
   NEEDS_CLEANUP;
   if (type == T_LONG) {
@@ -3491,31 +3289,6 @@
           inst->insert_before(i + 1, delay_op);
           i++;
         }
-
-#if defined(TIERED) && !defined(_LP64)
-        // fixup the return value from G1 to O0/O1 for long returns.
-        // It's done here instead of in LIRGenerator because there's
-        // such a mismatch between the single reg and double reg
-        // calling convention.
-        LIR_OpJavaCall* callop = op->as_OpJavaCall();
-        if (callop->result_opr() == FrameMap::out_long_opr) {
-          LIR_OpJavaCall* call;
-          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
-          for (int a = 0; a < arguments->length(); a++) {
-            arguments[a] = callop->arguments()[a];
-          }
-          if (op->code() == lir_virtual_call) {
-            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
-                                      callop->vtable_offset(), arguments, callop->info());
-          } else {
-            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
-                                      callop->addr(), arguments, callop->info());
-          }
-          inst->at_put(i - 1, call);
-          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
-                                                 T_LONG, lir_patch_none, NULL));
-        }
-#endif
         break;
       }
     }
@@ -3533,14 +3306,10 @@
   } else if (data->is_oop()) {
     Register obj = data->as_register();
     Register narrow = tmp->as_register();
-#ifdef _LP64
     assert(UseCompressedOops, "swap is 32bit only");
     __ encode_heap_oop(obj, narrow);
     __ swap(as_Address(addr), narrow);
     __ decode_heap_oop(narrow, obj);
-#else
-    __ swap(as_Address(addr), obj);
-#endif
   } else {
     ShouldNotReachHere();
   }
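Worth calling out in the cas_long hunk above: the deleted 32-bit path had to assemble each 64-bit operand from two 32-bit register halves (sllx/srl/or3) before it could issue casx, then split the result again to produce a 32-bit condition code; the retained 64-bit path just moves whole registers. The packing step, expressed as plain C++ (illustrative, not part of the patch):

  #include <cassert>
  #include <cstdint>

  // Mirrors the removed sequence: sllx hi,32,t ; srl lo,0,lo ; or3 t,lo,t
  uint64_t pack_halves(uint32_t hi, uint32_t lo) {
    return (static_cast<uint64_t>(hi) << 32) | static_cast<uint64_t>(lo);
  }

  int main() {
    assert(pack_halves(0x00000001u, 0x00000002u) == 0x0000000100000002ull);
    return 0;
  }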
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,7 @@
                        ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
 
   enum {
-#ifdef _LP64
     _call_stub_size = 68,
-#else
-    _call_stub_size = 20,
-#endif // _LP64
     _call_aot_stub_size = 0,
     _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
     _deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@
 LIR_Opr LIRGenerator::exceptionPcOpr()               { return FrameMap::Oissuing_pc_opr; }
 LIR_Opr LIRGenerator::syncLockOpr()                  { return new_register(T_INT); }
 LIR_Opr LIRGenerator::syncTempOpr()                  { return new_register(T_OBJECT); }
-LIR_Opr LIRGenerator::getThreadTemp()                { return rlock_callee_saved(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); }
+LIR_Opr LIRGenerator::getThreadTemp()                { return rlock_callee_saved(T_LONG); }
 
 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
   LIR_Opr opr;
@@ -215,13 +215,11 @@
       }
     }
   } else {
-#ifdef _LP64
     if (index_opr->type() == T_INT) {
       LIR_Opr tmp = new_register(T_LONG);
       __ convert(Bytecodes::_i2l, index_opr, tmp);
       index_opr = tmp;
     }
-#endif
 
     base_opr = new_pointer_register();
     assert (index_opr->is_register(), "Must be register");
@@ -1310,20 +1308,12 @@
 
 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                         CodeEmitInfo* info) {
-#ifdef _LP64
   __ store(value, address, info);
-#else
-  __ volatile_store_mem_reg(value, address, info);
-#endif
 }
 
 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                        CodeEmitInfo* info) {
-#ifdef _LP64
   __ load(address, result, info);
-#else
-  __ volatile_load_mem_reg(address, result, info);
-#endif
 }
 
 
@@ -1333,11 +1323,6 @@
   LIR_Opr index_op = offset;
 
   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-#ifndef _LP64
-  if (is_volatile && type == T_LONG) {
-    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
-  } else
-#endif
     {
       if (type == T_BOOLEAN) {
         type = T_BYTE;
@@ -1367,11 +1352,6 @@
 
 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                      BasicType type, bool is_volatile) {
-#ifndef _LP64
-  if (is_volatile && type == T_LONG) {
-    __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
-  } else
-#endif
     {
     LIR_Address* addr = new LIR_Address(src, offset, type);
     __ load(addr, dst);
@@ -1396,17 +1376,13 @@
   // Because we want a 2-arg form of xchg
   __ move(data, dst);
 
-  assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
+  assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
   LIR_Address* addr;
   if (offset->is_constant()) {
 
-#ifdef _LP64
     jlong l = offset->as_jlong();
     assert((jlong)((jint)l) == l, "offset too large for constant");
     jint c = (jint)l;
-#else
-    jint c = offset->as_jint();
-#endif
     addr = new LIR_Address(src.result(), c, type);
   } else {
     addr = new LIR_Address(src.result(), offset, type);
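The surviving constant-offset path above guards the narrowing of a 64-bit offset with a round-trip cast: the value is usable as a 32-bit displacement only if casting down and back preserves it. The same check in isolation (a sketch, hypothetical helper name):

  #include <cassert>
  #include <cstdint>

  // True iff the 64-bit value survives a round trip through 32 bits,
  // i.e. the equivalent of assert((jlong)((jint)l) == l).
  bool fits_in_jint(int64_t l) {
    return static_cast<int64_t>(static_cast<int32_t>(l)) == l;
  }

  int main() {
    assert(fits_in_jint(42));
    assert(fits_in_jint(-1));
    assert(!fits_in_jint(int64_t(1) << 40));   // too large for a jint
    return 0;
  }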
--- a/hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_LIR_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -48,16 +48,9 @@
 void LIR_Address::verify() const {
   assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
   assert(disp() == 0 || index()->is_illegal(), "can't have both");
-#ifdef _LP64
   assert(base()->is_cpu_register(), "wrong base operand");
   assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
   assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
          "wrong type for addresses");
-#else
-  assert(base()->is_single_cpu(), "wrong base operand");
-  assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
-  assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
-         "wrong type for addresses");
-#endif
 }
 #endif // PRODUCT
--- a/hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_LinearScan_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,7 @@
 inline int LinearScan::num_physical_regs(BasicType type) {
   // Sparc requires two cpu registers for long
   // and two cpu registers for double
-#ifdef _LP64
   if (type == T_DOUBLE) {
-#else
-  if (type == T_DOUBLE || type == T_LONG) {
-#endif
     return 2;
   }
   return 1;
@@ -44,11 +40,7 @@
 
 
 inline bool LinearScan::requires_adjacent_regs(BasicType type) {
-#ifdef _LP64
   return type == T_DOUBLE;
-#else
-  return type == T_DOUBLE || type == T_LONG;
-#endif
 }
 
 inline bool LinearScan::is_caller_save(int assigned_reg) {
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -273,13 +273,6 @@
       add(obj, hdr_size_in_bytes, t1);               // compute address of first element
       sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
       initialize_body(t1, t2);
-#ifndef _LP64
-    } else if (con_size_in_bytes < threshold * 2) {
-      // on v9 we can do double word stores to fill twice as much space.
-      assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
-      assert(con_size_in_bytes % 8 == 0, "double word aligned");
-      for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += 2 * HeapWordSize) stx(G0, obj, i);
-#endif
     } else if (con_size_in_bytes <= threshold) {
       // use explicit NULL stores
       for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += HeapWordSize)     st_ptr(G0, obj, i);
--- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -930,11 +930,7 @@
 
         Label not_already_dirty, restart, refill, young_card;
 
-#ifdef _LP64
         __ srlx(addr, CardTableModRefBS::card_shift, addr);
-#else
-        __ srl(addr, CardTableModRefBS::card_shift, addr);
-#endif
 
         AddressLiteral rs(byte_map_base);
         __ set(rs, cardtable);         // cardtable := <card table base>
--- a/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,6 @@
 define_pd_global(bool, SuperWordLoopUnrollAnalysis,  false);
 define_pd_global(bool, IdealizeClearArrayNode,       true);
 
-#ifdef _LP64
 // We need to make sure that all generated code is within
 // 2 gigs of the libjvm.so runtime routines so we can use
 // the faster "call" instruction rather than the expensive
@@ -82,17 +81,6 @@
 
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM,                    128ULL*G);
-#else
-// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize,         1536*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize,        32*M);
-define_pd_global(intx, NonProfiledCodeHeapSize,      13*M);
-define_pd_global(intx, ProfiledCodeHeapSize,         14*M);
-define_pd_global(intx, NonNMethodCodeHeapSize,       5*M );
-define_pd_global(intx, CodeCacheExpansionSize,       32*K);
-// Ergonomics related flags
-define_pd_global(uint64_t, MaxRAM,                   4ULL*G);
-#endif
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
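The "within 2 gigs" comment kept above refers to the SPARC call instruction, which encodes a signed 30-bit word displacement; a quick check of the arithmetic behind that limit (a sketch, not from the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    // Largest positive disp30, in words; instructions are 4 bytes,
    // so the reach is just under +/- 2 GB.
    const int64_t max_words = (int64_t(1) << 29) - 1;
    const int64_t reach     = max_words * 4;
    assert(reach == (int64_t(1) << 31) - 4);
    return 0;
  }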
 
--- a/hotspot/src/cpu/sparc/vm/copy_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/copy_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,14 +114,8 @@
 }
 
 static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
-#ifdef _LP64
   assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
   pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
-#else
-  // Guarantee use of ldd/std via some asm code, because compiler won't.
-  // See solaris_sparc.il.
-  _Copy_conjoint_jlongs_atomic(from, to, count);
-#endif
 }
 
 static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
@@ -162,7 +156,6 @@
 }
 
 static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
-#ifdef _LP64
   guarantee(mask_bits((uintptr_t)tohw, right_n_bits(LogBytesPerLong)) == 0,
          "unaligned fill words");
   julong* to = (julong*)tohw;
@@ -170,12 +163,6 @@
   while (count-- > 0) {
     *to++ = v;
   }
-#else // _LP64
-  juint* to = (juint*)tohw;
-  while (count-- > 0) {
-    *to++ = value;
-  }
-#endif // _LP64
 }
 
 typedef void (*_zero_Fn)(HeapWord* to, size_t count);
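In pd_fill_to_words above, the retained 64-bit path stores doublewords, with the fill value v holding the 32-bit pattern duplicated into both halves. A self-contained sketch of that replication (illustrative, assuming that is how v is built):

  #include <cassert>
  #include <cstdint>

  // Duplicate a 32-bit fill pattern into both halves of a 64-bit word
  // so each store fills two heap words at a time.
  uint64_t replicate(uint32_t value) {
    return (static_cast<uint64_t>(value) << 32) | value;
  }

  int main() {
    assert(replicate(0xdeadbeefu) == 0xdeadbeefdeadbeefull);
    return 0;
  }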
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,11 +114,7 @@
     // register locations. When that is fixed we will return NULL
     // (or assert here).
     reg = regname->prev()->as_Register();
-#ifdef _LP64
     second_word = sizeof(jint);
-#else
-    return NULL;
-#endif // _LP64
   } else {
     reg = regname->as_Register();
   }
@@ -332,9 +328,7 @@
 
 // Construct an unpatchable, deficient frame
 void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
-#ifdef _LP64
   assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
-#endif
   _sp = sp;
   _younger_sp = NULL;
   _pc = pc;
@@ -693,11 +687,9 @@
     intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
 
     address l_addr = (address)l_scratch;
-#ifdef _LP64
     // On 64-bit the result for 1/8/16/32-bit result types is in the other
     // word half
     l_addr += wordSize/2;
-#endif
 
     switch (type) {
       case T_OBJECT:
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,11 +100,7 @@
 
     // size of each block, in order of increasing address:
     register_save_words                          = 16,
-#ifdef _LP64
     callee_aggregate_return_pointer_words        =  0,
-#else
-    callee_aggregate_return_pointer_words        =  1,
-#endif
     callee_register_argument_save_area_words     =  6,
     // memory_parameter_words                    = <arbitrary>,
 
--- a/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,24 +38,14 @@
 
 // The expected size in bytes of a cache line, used to pad data structures.
 #if defined(TIERED)
-  #ifdef _LP64
-    // tiered, 64-bit, large machine
-    #define DEFAULT_CACHE_LINE_SIZE 128
-  #else
-    // tiered, 32-bit, medium machine
-    #define DEFAULT_CACHE_LINE_SIZE 64
-  #endif
+  // tiered, 64-bit, large machine
+  #define DEFAULT_CACHE_LINE_SIZE 128
 #elif defined(COMPILER1)
   // pure C1, 32-bit, small machine
   #define DEFAULT_CACHE_LINE_SIZE 16
 #elif defined(COMPILER2) || defined(SHARK)
-  #ifdef _LP64
-    // pure C2, 64-bit, large machine
-    #define DEFAULT_CACHE_LINE_SIZE 128
-  #else
-    // pure C2, 32-bit, medium machine
-    #define DEFAULT_CACHE_LINE_SIZE 64
-  #endif
+  // pure C2, 64-bit, large machine
+  #define DEFAULT_CACHE_LINE_SIZE 128
 #endif
 
 #if defined(SOLARIS)
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,18 +56,11 @@
 #define DEFAULT_STACK_RED_PAGES (1)
 #define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0))
 
-#ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, CompilerThreadStackSize, 1024);
 define_pd_global(intx, ThreadStackSize,       1024);
 define_pd_global(intx, VMThreadStackSize,     1024);
 #define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2))
-#else
-define_pd_global(intx, CompilerThreadStackSize, 512);
-define_pd_global(intx, ThreadStackSize,       512);
-define_pd_global(intx, VMThreadStackSize,     512);
-#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
-#endif // _LP64
 
 #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
 #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
--- a/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/icBuffer_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,9 @@
 #include "oops/oop.inline.hpp"
 
 int InlineCacheBuffer::ic_stub_code_size() {
-#ifdef _LP64
   return (NativeMovConstReg::instruction_size +  // sethi;add
           NativeJump::instruction_size +          // sethi; jmp; delay slot
           (1*BytesPerInstWord) + 1);            // flush + 1 extra byte
-#else
-  return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
-#endif
 }
 
 void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -318,52 +318,32 @@
 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
   assert_not_delayed();
 
-#ifdef _LP64
   ldf(FloatRegisterImpl::D, r1, offset, d);
-#else
-  ldf(FloatRegisterImpl::S, r1, offset, d);
-  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
-#endif
 }
 
 // Known good alignment in _LP64 but unknown otherwise
 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
   assert_not_delayed();
 
-#ifdef _LP64
   stf(FloatRegisterImpl::D, d, r1, offset);
   // store something more useful here
   debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
-#else
-  stf(FloatRegisterImpl::S, d, r1, offset);
-  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
-#endif
 }
 
 
 // Known good alignment in _LP64 but unknown otherwise
 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
   assert_not_delayed();
-#ifdef _LP64
   ldx(r1, offset, rd);
-#else
-  ld(r1, offset, rd);
-  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
-#endif
 }
 
 // Known good alignment in _LP64 but unknown otherwise
 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
   assert_not_delayed();
 
-#ifdef _LP64
   stx(l, r1, offset);
   // store something more useful here
   stx(G0, r1, offset+Interpreter::stackElementSize);
-#else
-  st(l, r1, offset);
-  st(l->successor(), r1, offset + Interpreter::stackElementSize);
-#endif
 }
 
 void InterpreterMacroAssembler::pop_i(Register r) {
@@ -527,9 +507,7 @@
   sub( Lesp, Gframe_size, Gframe_size );
   and3( Gframe_size, -(2 * wordSize), Gframe_size );          // align SP (downwards) to an 8/16-byte boundary
   debug_only(verify_sp(Gframe_size, G4_scratch));
-#ifdef _LP64
   sub(Gframe_size, STACK_BIAS, Gframe_size );
-#endif
   mov(Gframe_size, SP);
 
   bind(done);
@@ -541,28 +519,20 @@
   Label Bad, OK;
 
   // Saved SP must be aligned.
-#ifdef _LP64
   btst(2*BytesPerWord-1, Rsp);
-#else
-  btst(LongAlignmentMask, Rsp);
-#endif
   br(Assembler::notZero, false, Assembler::pn, Bad);
   delayed()->nop();
 
   // Saved SP, plus register window size, must not be above FP.
   add(Rsp, frame::register_save_words * wordSize, Rtemp);
-#ifdef _LP64
   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
-#endif
   cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
 
   // Saved SP must not be ridiculously below current SP.
   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
   set(maxstack, Rtemp);
   sub(SP, Rtemp, Rtemp);
-#ifdef _LP64
   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
-#endif
   cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
 
   ba_short(OK);
@@ -584,9 +554,7 @@
   delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
   stop("too many pops:  Lesp points into monitor area");
   bind(OK1);
-#ifdef _LP64
   sub(Resp, STACK_BIAS, Resp);
-#endif
   cmp(Resp, SP);
   brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
   delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
@@ -696,21 +664,12 @@
   }
 
   br(Assembler::zero, true, Assembler::pn, aligned);
-#ifdef _LP64
   delayed()->ldsw(Rtmp, 0, Rdst);
-#else
-  delayed()->ld(Rtmp, 0, Rdst);
-#endif
 
   ldub(Lbcp, bcp_offset + 3, Rdst);
   ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
   ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
-#ifdef _LP64
   ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
-#else
-  // Unsigned load is faster than signed on some implementations
-  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
-#endif
   or3(Rtmp, Rdst, Rdst );
 
   bind(aligned);
@@ -910,10 +869,8 @@
   assert_not_delayed();
 
   verify_oop(array);
-#ifdef _LP64
   // sign extend since tos (index) can be a 32bit value
   sra(index, G0, index);
-#endif // _LP64
 
   // check array
   Label ptr_ok;
@@ -1191,11 +1148,7 @@
   // return tos
   assert(Otos_l1 == Otos_i, "adjust code below");
   switch (state) {
-#ifdef _LP64
   case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
-#else
-  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
-#endif
   case btos:                                      // fall through
   case ztos:                                      // fall through
   case ctos:
@@ -1207,20 +1160,6 @@
   case vtos: /* nothing to do */                     break;
   default  : ShouldNotReachHere();
   }
-
-#if defined(COMPILER2) && !defined(_LP64)
-  if (state == ltos) {
-    // C2 expects long results in G1 we can't tell if we're returning to interpreted
-    // or compiled so just be safe use G1 and O0/O1
-
-    // Shift bits into high (msb) of G1
-    sllx(Otos_l1->after_save(), 32, G1);
-    // Zero extend low bits
-    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
-    or3 (Otos_l2->after_save(), G1, G1);
-  }
-#endif /* COMPILER2 */
-
 }
 
 // Lock object
@@ -1270,9 +1209,7 @@
     // Check if owner is self by comparing the value in the markOop of object
     // with the stack pointer
     sub(temp_reg, SP, temp_reg);
-#ifdef _LP64
     sub(temp_reg, STACK_BIAS, temp_reg);
-#endif
     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
 
     // Composite "andcc" test:
@@ -2711,11 +2648,7 @@
 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
   if (is_native_call) {
     stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
     stx(O0, l_tmp);
-#else
-    std(O0, l_tmp);
-#endif
   } else {
     push(state);
   }
@@ -2724,11 +2657,7 @@
 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
   if (is_native_call) {
     ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
     ldx(l_tmp, O0);
-#else
-    ldd(l_tmp, O0);
-#endif
   } else {
     pop(state);
   }
--- a/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/interpreterRT_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,47 +53,24 @@
   Argument  jni_arg(jni_offset(), false);
   Register  Rtmp = O0;
 
-#ifdef _LP64
   __ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
   __ store_long_argument(Rtmp, jni_arg);
-#else
-  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
-  __ store_argument(Rtmp, jni_arg);
-  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
-  Argument successor(jni_arg.successor());
-  __ store_argument(Rtmp, successor);
-#endif
 }
 
 
 void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
   Argument  jni_arg(jni_offset(), false);
-#ifdef _LP64
   FloatRegister  Rtmp = F0;
   __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
   __ store_float_argument(Rtmp, jni_arg);
-#else
-  Register     Rtmp = O0;
-  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
-  __ store_argument(Rtmp, jni_arg);
-#endif
 }
 
 
 void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
   Argument  jni_arg(jni_offset(), false);
-#ifdef _LP64
   FloatRegister  Rtmp = F0;
   __ ldf(FloatRegisterImpl::D, Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
   __ store_double_argument(Rtmp, jni_arg);
-#else
-  Register  Rtmp = O0;
-  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
-  __ store_argument(Rtmp, jni_arg);
-  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset()), Rtmp);
-  Argument successor(jni_arg.successor());
-  __ store_argument(Rtmp, successor);
-#endif
 }
 
 void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
@@ -171,7 +148,6 @@
     add_signature( non_float );
    }
 
-#ifdef _LP64
   virtual void pass_float()  {
     *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
     _from -= Interpreter::stackElementSize;
@@ -190,23 +166,6 @@
     _from -= 2*Interpreter::stackElementSize;
     add_signature( long_sig );
   }
-#else
-   // pass_double() is pass_long() and pass_float() only _LP64
-  virtual void pass_long() {
-    _to[0] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(1));
-    _to[1] = *(intptr_t*)(_from+Interpreter::local_offset_in_bytes(0));
-    _to += 2;
-    _from -= 2*Interpreter::stackElementSize;
-    add_signature( non_float );
-  }
-
-  virtual void pass_float() {
-    *_to++ = *(jint *)(_from+Interpreter::local_offset_in_bytes(0));
-    _from -= Interpreter::stackElementSize;
-    add_signature( non_float );
-  }
-
-#endif // _LP64
 
   virtual void add_signature( intptr_t sig_type ) {
     if ( _argcount < (sizeof (intptr_t))*4 ) {
--- a/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/javaFrameAnchor_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,7 @@
     // _last_Java_sp will always be an unbiased stack pointer
     // if it is biased then some setter screwed up. This is
     // deadly.
-#ifdef _LP64
     assert(((intptr_t)_last_Java_sp & 0xF) == 0, "Biased last_Java_sp");
-#endif
     return _last_Java_sp;
   }
 
--- a/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/jniFastGetField_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -152,39 +152,19 @@
   __ ld_ptr (O1, 0, O5);
   __ add (O5, O4, O5);
 
-#ifndef _LP64
-  assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
-  speculative_load_pclist[count++] = __ pc();
-  __ ld (O5, 0, G2);
-
-  speculative_load_pclist[count] = __ pc();
-  __ ld (O5, 4, O3);
-#else
   assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
   speculative_load_pclist[count] = __ pc();
   __ ldx (O5, 0, O3);
-#endif
 
   __ ld (cnt_addr, G1);
   __ cmp (G1, G4);
   __ br (Assembler::notEqual, false, Assembler::pn, label2);
   __ delayed()->mov (O7, G1);
 
-#ifndef _LP64
-  __ mov (G2, O0);
-  __ retl ();
-  __ delayed()->mov (O3, O1);
-#else
   __ retl ();
   __ delayed()->mov (O3, O0);
-#endif
 
-#ifndef _LP64
-  slowcase_entry_pclist[count-1] = __ pc();
-  slowcase_entry_pclist[count++] = __ pc() ;
-#else
   slowcase_entry_pclist[count++] = __ pc();
-#endif
 
   __ bind (label1);
   __ mov (O7, G1);
--- a/hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/jniTypes_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,18 +55,10 @@
   static inline void    put_int(jint  from, intptr_t *to, int& pos)     { *(jint *)(to + pos++) =  from; }
   static inline void    put_int(jint *from, intptr_t *to, int& pos)     { *(jint *)(to + pos++) = *from; }
 
-#ifdef _LP64
   // Longs are stored in native format in one JavaCallArgument slot at *(to+1).
   static inline void    put_long(jlong  from, intptr_t *to)             { *(jlong *)(to + 1 +   0) =  from; }
   static inline void    put_long(jlong  from, intptr_t *to, int& pos)   { *(jlong *)(to + 1 + pos) =  from; pos += 2; }
   static inline void    put_long(jlong *from, intptr_t *to, int& pos)   { *(jlong *)(to + 1 + pos) = *from; pos += 2; }
-#else
-  // Longs are stored in reversed native word format in two JavaCallArgument slots at *to.
-  // The high half is in *(to+1) and the low half in *to.
-  static inline void    put_long(jlong  from, intptr_t *to)            { put_int2r((jint *)&from, (jint *)to); }
-  static inline void    put_long(jlong  from, intptr_t *to, int& pos)  { put_int2r((jint *)&from, (jint *)to, pos); }
-  static inline void    put_long(jlong *from, intptr_t *to, int& pos)  { put_int2r((jint *) from, (jint *)to, pos); }
-#endif
 
   // Oops are stored in native format in one JavaCallArgument slot at *to.
   static inline void    put_obj(oop  from, intptr_t *to)                { *(oop *)(to +   0  ) =  from; }
@@ -78,39 +70,21 @@
   static inline void    put_float(jfloat  from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) =  from; }
   static inline void    put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
 
-#ifdef _LP64
   // Doubles are stored in native word format in one JavaCallArgument slot at *(to+1).
   static inline void    put_double(jdouble  from, intptr_t *to)           { *(jdouble *)(to + 1 +   0) =  from; }
   static inline void    put_double(jdouble  from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) =  from; pos += 2; }
   static inline void    put_double(jdouble *from, intptr_t *to, int& pos) { *(jdouble *)(to + 1 + pos) = *from; pos += 2; }
-#else
-  // Doubles are stored in reversed native word format in two JavaCallArgument slots at *to.
-  static inline void    put_double(jdouble  from, intptr_t *to)           { put_int2r((jint *)&from, (jint *)to); }
-  static inline void    put_double(jdouble  from, intptr_t *to, int& pos) { put_int2r((jint *)&from, (jint *)to, pos); }
-  static inline void    put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, (jint *)to, pos); }
-#endif
 
   // The get_xxx routines, on the other hand, actually _do_ fetch
   // java primitive types from the interpreter stack.
   static inline jint    get_int(intptr_t *from)         { return *(jint *)from; }
 
-#ifdef _LP64
   static inline jlong   get_long(intptr_t *from)        { return *(jlong *)from; }
-#else
-  static inline jlong   get_long(intptr_t *from)        { return ((jlong)(*(  signed int *)((jint *)from    )) << 32) |
-                                                                 ((jlong)(*(unsigned int *)((jint *)from + 1)) <<  0); }
-#endif
 
   static inline oop     get_obj(intptr_t *from)         { return *(oop *)from; }
   static inline jfloat  get_float(intptr_t *from)       { return *(jfloat *)from; }
 
-#ifdef _LP64
   static inline jdouble get_double(intptr_t *from)      { return *(jdouble *)from; }
-#else
-  static inline jdouble get_double(intptr_t *from)      { jlong jl = ((jlong)(*(  signed int *)((jint *)from    )) << 32) |
-                                                                     ((jlong)(*(unsigned int *)((jint *)from + 1)) <<  0);
-                                                          return *(jdouble *)&jl; }
-#endif
 
 };
 
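With the V8 half gone, put_long/get_long reduce to the V9 convention: a long
still consumes two JavaCallArgument slots, but the value lives entirely in the
second slot (to + 1). A minimal stand-alone sketch of that convention
(illustrative types and names, not the HotSpot declarations):

    #include <cassert>
    #include <cstdint>

    typedef int64_t jlong;

    // A long occupies two argument slots; the value is written to the
    // second slot (to + 1), which is where the interpreter reads it back.
    static inline void put_long(jlong from, intptr_t* to, int& pos) {
      *(jlong*)(to + 1 + pos) = from;
      pos += 2;
    }

    static inline jlong get_long(intptr_t* from) {
      return *(jlong*)from;   // caller points 'from' at the value slot
    }

    int main() {
      intptr_t slots[2] = {0, 0};
      int pos = 0;
      put_long(0x1122334455667788LL, slots, pos);
      assert(pos == 2);
      assert(get_long(&slots[1]) == 0x1122334455667788LL);
      return 0;
    }
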
--- a/hotspot/src/cpu/sparc/vm/jni_sparc.h	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/jni_sparc.h	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,10 +39,6 @@
 
 typedef int jint;
 
-#ifdef _LP64
-  typedef long jlong;
-#else
-  typedef long long jlong;
-#endif
+typedef long jlong;
 
 typedef signed char jbyte;
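
The lone typedef leans on the LP64 data model, where long is 64 bits wide. A
build-time check along these lines (not in the header) would pin the
assumption down:

    static_assert(sizeof(long) == 8, "LP64 assumed: jlong must be 64 bits");
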
--- a/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/jvmciCodeInstaller_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -47,13 +47,9 @@
   Handle obj(THREAD, HotSpotObjectConstantImpl::object(constant));
   jobject value = JNIHandles::make_local(obj());
   if (HotSpotObjectConstantImpl::compressed(constant)) {
-#ifdef _LP64
     int oop_index = _oop_recorder->find_index(value);
     RelocationHolder rspec = oop_Relocation::spec(oop_index);
     _instructions->relocate(pc, rspec, 1);
-#else
-    JVMCI_ERROR("compressed oop on 32bit");
-#endif
   } else {
     NativeMovConstReg* move = nativeMovConstReg_at(pc);
     move->set_data((intptr_t) value);
@@ -69,14 +65,10 @@
 void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS) {
   address pc = _instructions->start() + pc_offset;
   if (HotSpotMetaspaceConstantImpl::compressed(constant)) {
-#ifdef _LP64
     NativeMovConstReg32* move = nativeMovConstReg32_at(pc);
     narrowKlass narrowOop = record_narrow_metadata_reference(_instructions, pc, constant, CHECK);
     move->set_data((intptr_t)narrowOop);
     TRACE_jvmci_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/0x%x", p2i(pc), narrowOop);
-#else
-    JVMCI_ERROR("compressed Klass* on 32bit");
-#endif
   } else {
     NativeMovConstReg* move = nativeMovConstReg_at(pc);
     void* reference = record_metadata_reference(_instructions, pc, constant, CHECK);
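
The deleted JVMCI_ERROR branches were defensive 32-bit paths: compressed oops
and compressed Klass pointers exist only in the 64-bit VM. The value being
relocated is a 32-bit scaled offset from a heap base; roughly, under assumed
base and shift values:

    #include <cstdint>

    const uintptr_t heap_base = 0x800000000ULL;  // assumed narrow-oop base
    const int       oop_shift = 3;               // assumed 8-byte object alignment

    uint32_t  encode(uintptr_t oop) { return (uint32_t)((oop - heap_base) >> oop_shift); }
    uintptr_t decode(uint32_t n)    { return heap_base + ((uintptr_t)n << oop_shift); }
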
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -296,11 +296,6 @@
     mov(G3, L3);                // avoid clobbering G3
     mov(G4, L4);                // avoid clobbering G4
     mov(G5_method, L5);         // avoid clobbering G5_method
-#if defined(COMPILER2) && !defined(_LP64)
-    // Save & restore possible 64-bit Long arguments in G-regs
-    srlx(G1,32,L0);
-    srlx(G4,32,L6);
-#endif
     call(CAST_FROM_FN_PTR(address,verify_thread_subroutine), relocInfo::runtime_call_type);
     delayed()->mov(G2_thread, O0);
 
@@ -309,15 +304,6 @@
     mov(L3, G3);                // restore G3
     mov(L4, G4);                // restore G4
     mov(L5, G5_method);         // restore G5_method
-#if defined(COMPILER2) && !defined(_LP64)
-    // Save & restore possible 64-bit Long arguments in G-regs
-    sllx(L0,32,G2);             // Move old high G1 bits high in G2
-    srl(G1, 0,G1);              // Clear current high G1 bits
-    or3 (G1,G2,G1);             // Recover 64-bit G1
-    sllx(L6,32,G2);             // Move old high G4 bits high in G2
-    srl(G4, 0,G4);              // Clear current high G4 bits
-    or3 (G4,G2,G4);             // Recover 64-bit G4
-#endif
     restore(O0, 0, G2_thread);
   }
 }
@@ -387,7 +373,6 @@
     st_ptr(last_Java_pc, pc_addr);
   }
 
-#ifdef _LP64
 #ifdef ASSERT
   // Make sure that we have an odd stack
   Label StackOk;
@@ -400,9 +385,6 @@
   assert( last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
   add( last_java_sp, STACK_BIAS, G4_scratch );
   st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
-#else
-  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
-#endif // _LP64
 }
 
 void MacroAssembler::reset_last_Java_frame(void) {
@@ -658,11 +640,7 @@
 
 void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                       Register tmp, Register obj) {
-#ifdef _LP64
   srlx(obj, CardTableModRefBS::card_shift, obj);
-#else
-  srl(obj, CardTableModRefBS::card_shift, obj);
-#endif
   assert(tmp != obj, "need separate temp reg");
   set((address) byte_map_base, tmp);
   stb(G0, tmp, obj);
@@ -672,7 +650,6 @@
 void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
   address save_pc;
   int shiftcnt;
-#ifdef _LP64
 # ifdef CHECK_DELAY
   assert_not_delayed((char*) "cannot put two instructions in delay slot");
 # endif
@@ -719,9 +696,6 @@
     while (pc() < (save_pc + (7 * BytesPerInstWord)))
       nop();
   }
-#else
-  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
-#endif
 }
 
 
@@ -736,7 +710,6 @@
 
 
 int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
-#ifdef _LP64
   if (worst_case)  return 7;
   intptr_t iaddr = (intptr_t) a;
   int msb32 = (int) (iaddr >> 32);
@@ -756,9 +729,6 @@
     }
   }
   return count;
-#else
-  return 1;
-#endif
 }
 
 int MacroAssembler::worst_case_insts_for_set() {
@@ -1488,11 +1458,7 @@
 
 
 void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
-#ifdef _LP64
   add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
-#else
-  add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
-#endif
   bclr(1, Rresult);
   sll(Rresult, LogBytesPerWord, Rresult);  // Rresult has total frame bytes
 }
@@ -1531,22 +1497,12 @@
-// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
+// Does a register-branch (bpr) on the register value.
 void MacroAssembler::br_null( Register s1, bool a, Predict p, Label& L ) {
   assert_not_delayed();
-#ifdef _LP64
   bpr( rc_z, a, p, s1, L );
-#else
-  tst(s1);
-  br ( zero, a, p, L );
-#endif
 }
 
 void MacroAssembler::br_notnull( Register s1, bool a, Predict p, Label& L ) {
   assert_not_delayed();
-#ifdef _LP64
   bpr( rc_nz, a, p, s1, L );
-#else
-  tst(s1);
-  br ( notZero, a, p, L );
-#endif
 }
 
 // Compare registers and branch with nop in delay slot or cbcond without delay slot.
@@ -1862,14 +1818,12 @@
   bind( done );
 }
 
-#ifdef _LP64
 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
   cmp(Ra, Rb);
   mov(-1, Rresult);
   movcc(equal,   false, xcc,  0, Rresult);
   movcc(greater, false, xcc,  1, Rresult);
 }
-#endif
 
 
 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
@@ -2668,9 +2622,7 @@
      // if compare/exchange succeeded we found an unlocked object and we now have locked it
      // hence we are done
      cmp(Rmark, Rscratch);
-#ifdef _LP64
      sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
      brx(Assembler::equal, false, Assembler::pt, done);
      delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot
 
@@ -2716,9 +2668,7 @@
 
       // Stack-lock attempt failed - check for recursive stack-lock.
       // See the comments below about how we might remove this case.
-#ifdef _LP64
       sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
       andcc(Rscratch, 0xfffff003, Rscratch);
       br(Assembler::always, false, Assembler::pt, done);
@@ -2800,9 +2750,7 @@
       // control to the "slow" operators in synchronizer.cpp.
 
-      // RScratch contains the fetched obj->mark value from the failed CAS.
+      // Rscratch contains the fetched obj->mark value from the failed CAS.
-#ifdef _LP64
       sub(Rscratch, STACK_BIAS, Rscratch);
-#endif
       sub(Rscratch, SP, Rscratch);
       assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
       andcc(Rscratch, 0xfffff003, Rscratch);
@@ -3720,11 +3668,7 @@
 
   Label not_already_dirty, restart, refill, young_card;
 
-#ifdef _LP64
   __ srlx(O0, CardTableModRefBS::card_shift, O0);
-#else
-  __ srl(O0, CardTableModRefBS::card_shift, O0);
-#endif
   AddressLiteral addrlit(byte_map_base);
   __ set(addrlit, O1); // O1 := <card table base>
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
@@ -3826,11 +3770,7 @@
 
   if (G1RSBarrierRegionFilter) {
     xor3(store_addr, new_val, tmp);
-#ifdef _LP64
     srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
-#else
-    srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
-#endif
 
     // XXX Should I predict this taken or not?  Does it matter?
     cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
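
card_table_write and the G1 post-barrier above now always shift with the
64-bit srlx. The card-marking arithmetic itself is simple; a C-style sketch,
assuming the usual 512-byte card size (card_shift of 9):

    #include <stdint.h>

    enum { card_shift = 9 };   // log2 of the assumed 512-byte card size

    static inline void card_table_write(uint8_t* byte_map_base, uintptr_t obj) {
      byte_map_base[obj >> card_shift] = 0;   // stb G0, [base + (obj >> shift)]
    }
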
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -333,14 +333,12 @@
       return external_word_Relocation::spec(addr);
     case relocInfo::internal_word_type:
       return internal_word_Relocation::spec(addr);
-#ifdef _LP64
     case relocInfo::opt_virtual_call_type:
       return opt_virtual_call_Relocation::spec();
     case relocInfo::static_call_type:
       return static_call_Relocation::spec();
     case relocInfo::runtime_call_type:
       return runtime_call_Relocation::spec();
-#endif
     case relocInfo::none:
       return RelocationHolder();
     default:
@@ -396,12 +394,10 @@
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 
-#ifdef _LP64
-  // 32-bit complains about a multiple declaration for int*.
+  // Overload for intptr_t* (on 32-bit this used to collide with the int* overload).
   AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-#endif
 
   AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
     : _address((address) addr),
@@ -464,16 +460,10 @@
   bool _is_in;
 
  public:
-#ifdef _LP64
   enum {
     n_register_parameters = 6,          // only 6 registers may contain integer parameters
     n_float_register_parameters = 16    // Can have up to 16 floating registers
   };
-#else
-  enum {
-    n_register_parameters = 6           // only 6 registers may contain integer parameters
-  };
-#endif
 
   // creation
   Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
@@ -489,7 +479,6 @@
   // locating register-based arguments:
   bool is_register() const { return _number < n_register_parameters; }
 
-#ifdef _LP64
   // locating Floating Point register-based arguments:
   bool is_float_register() const { return _number < n_float_register_parameters; }
 
@@ -501,7 +490,6 @@
     assert(is_float_register(), "must be a register argument");
     return as_FloatRegister(( number() *2 ));
   }
-#endif
 
   Register as_register() const {
     assert(is_register(), "must be a register argument");
@@ -1217,9 +1205,7 @@
   void lushr( Register Rin_high,  Register Rin_low,  Register Rcount,
               Register Rout_high, Register Rout_low, Register Rtemp );
 
-#ifdef _LP64
   void lcmp( Register Ra, Register Rb, Register Rresult);
-#endif
 
   // Load and store values by size and signed-ness
   void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
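
What remains of Argument is the pure V9 model: six integer register
parameters and sixteen float register parameters, singles riding in the odd
float registers and doubles in the even-aligned ones. A sketch of that
numbering (helper names are illustrative):

    enum { n_register_parameters = 6, n_float_register_parameters = 16 };

    int int_reg_for(int n)   { return n;         }  // outgoing args 0..5 in O0..O5
    int single_fp_for(int n) { return 2 * n + 1; }  // F1, F3, F5, ... (odd halves)
    int double_fp_for(int n) { return 2 * n;     }  // F0, F2, F4, ... (even pairs)
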
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,19 +45,11 @@
 
 // Use the right loads/stores for the platform
 inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, s2, d);
-#else
-             ld( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, simm13a, d);
-#else
-             ld( s1, simm13a, d);
-#endif
 }
 
 #ifdef ASSERT
@@ -68,35 +60,19 @@
 #endif
 
 inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
   ldx(s1, s2, d);
-#else
-  ld( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
-#ifdef _LP64
   ldx(a, d, offset);
-#else
-  ld( a, d, offset);
-#endif
 }
 
 inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
   Assembler::stx(d, s1, s2);
-#else
-             st( d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
   Assembler::stx(d, s1, simm13a);
-#else
-             st( d, s1, simm13a);
-#endif
 }
 
 #ifdef ASSERT
@@ -107,84 +83,44 @@
 #endif
 
 inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
   stx(d, s1, s2);
-#else
-  st( d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
-#ifdef _LP64
   stx(d, a, offset);
-#else
-  st( d, a, offset);
-#endif
 }
 
 // Use the right loads/stores for the platform
 inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, s2, d);
-#else
-  Assembler::ldd(s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
-#ifdef _LP64
   Assembler::ldx(s1, simm13a, d);
-#else
-  Assembler::ldd(s1, simm13a, d);
-#endif
 }
 
 inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
-#ifdef _LP64
   ldx(s1, s2, d);
-#else
-  ldd(s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
-#ifdef _LP64
   ldx(a, d, offset);
-#else
-  ldd(a, d, offset);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
-#ifdef _LP64
   Assembler::stx(d, s1, s2);
-#else
-  Assembler::std(d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
-#ifdef _LP64
   Assembler::stx(d, s1, simm13a);
-#else
-  Assembler::std(d, s1, simm13a);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
-#ifdef _LP64
   stx(d, s1, s2);
-#else
-  std(d, s1, s2);
-#endif
 }
 
 inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
-#ifdef _LP64
   stx(d, a, offset);
-#else
-  std(d, a, offset);
-#endif
 }
 
 inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
@@ -207,45 +143,25 @@
-// Functions for isolating 64 bit atomic swaps for LP64
-// cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
+// Pointer-sized atomic swap:
+// cas_ptr performs casx, the 64-bit compare-and-swap
 inline void MacroAssembler::cas_ptr(  Register s1, Register s2, Register d) {
-#ifdef _LP64
   casx( s1, s2, d );
-#else
-  cas( s1, s2, d );
-#endif
 }
 
-// Functions for isolating 64 bit shifts for LP64
+// Pointer-sized (64-bit) shifts
 
 inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::sllx(s1, s2, d);
-#else
-  Assembler::sll( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::sll_ptr( Register s1, int imm6a,   Register d ) {
-#ifdef _LP64
   Assembler::sllx(s1, imm6a, d);
-#else
-  Assembler::sll( s1, imm6a, d);
-#endif
 }
 
 inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
-#ifdef _LP64
   Assembler::srlx(s1, s2, d);
-#else
-  Assembler::srl( s1, s2, d);
-#endif
 }
 
 inline void MacroAssembler::srl_ptr( Register s1, int imm6a,   Register d ) {
-#ifdef _LP64
   Assembler::srlx(s1, imm6a, d);
-#else
-  Assembler::srl( s1, imm6a, d);
-#endif
 }
 
 inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
@@ -277,11 +193,7 @@
-// Branch that tests either xcc or icc depending on the
-// architecture compiled (LP64 or not)
+// Branch that tests the 64-bit condition codes (xcc)
 inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-#ifdef _LP64
     Assembler::bp(c, a, xcc, p, d, rt);
-#else
-    MacroAssembler::br(c, a, p, d, rt);
-#endif
 }
 
 inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
@@ -338,7 +250,6 @@
 }
 
 inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
-#ifdef _LP64
   intptr_t disp;
   // NULL is ok because it will be relocated later.
   // Must change NULL to a reachable address in order to
@@ -355,9 +266,6 @@
   } else {
     Assembler::call(d, rspec);
   }
-#else
-  Assembler::call( d, rspec );
-#endif
 }
 
 inline void MacroAssembler::call( Label& L,   relocInfo::relocType rt ) {
@@ -414,12 +322,7 @@
 // 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
 inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
   intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
-#ifdef _LP64
   Unimplemented();
-#else
-  Assembler::sethi(   thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
-             add(reg, thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
-#endif
   return thepc;
 }
 
@@ -554,7 +457,6 @@
 }
 
 
-#ifdef _LP64
 inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
   if (a.is_float_register())
-// V9 ABI has F1, F3, F5 are used to pass instead of O0, O1, O2
+// The V9 ABI passes float arguments in F1, F3, F5 instead of O0, O1, O2
@@ -579,7 +481,6 @@
   else
     stx(s, a.as_address());
 }
-#endif
 
 inline void MacroAssembler::round_to( Register r, int modulus ) {
   assert_not_delayed();
@@ -640,22 +541,13 @@
 inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
 inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }
 
-#ifdef _LP64
 // Make all 32 bit loads signed so 64 bit registers maintain proper sign
 inline void MacroAssembler::ld(  Register s1, Register s2, Register d)      { ldsw( s1, s2, d); }
 inline void MacroAssembler::ld(  Register s1, int simm13a, Register d)      { ldsw( s1, simm13a, d); }
-#else
-inline void MacroAssembler::ld(  Register s1, Register s2, Register d)      { lduw( s1, s2, d); }
-inline void MacroAssembler::ld(  Register s1, int simm13a, Register d)      { lduw( s1, simm13a, d); }
-#endif
 
 #ifdef ASSERT
   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
-# ifdef _LP64
 inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
-# else
-inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
-# endif
 #endif
 
 inline void MacroAssembler::ld(  const Address& a, Register d, int offset) {
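
Every pointer-sized helper above now lowers to a 64-bit instruction
(ldx/stx/casx/sllx/srlx), and plain ld becomes the signed ldsw. The
sign-extension is what keeps 64-bit compares honest; a small demonstration:

    #include <cassert>
    #include <cstdint>

    int64_t ldsw(const int32_t* p) { return (int64_t)*p; }   // sign-extend, like ldsw
    int64_t lduw(const int32_t* p) { return (uint32_t)*p; }  // zero-extend, like lduw

    int main() {
      int32_t v = -1;
      assert(ldsw(&v) == -1);             // 64-bit compares see the true value
      assert(lduw(&v) == 0xFFFFFFFFLL);   // would corrupt signed 64-bit arithmetic
      return 0;
    }
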
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -236,8 +236,6 @@
 
 //-------------------------------------------------------------------
 
-#ifdef _LP64
-
 void NativeFarCall::set_destination(address dest) {
   // Address materialized in the instruction stream, so nothing to do.
   return;
@@ -290,8 +288,6 @@
 }
 // End code for unit testing implementation of NativeFarCall class
 
-#endif // _LP64
-
 //-------------------------------------------------------------------
 
 
@@ -304,18 +300,9 @@
 
   // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
   Register rd = inv_rd(i0);
-#ifndef _LP64
-  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
-        is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
-        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
-        rd == inv_rs1(i1) && rd == inv_rd(i1))) {
-    fatal("not a set_metadata");
-  }
-#else
   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
     fatal("not a set_metadata");
   }
-#endif
 }
 
 
@@ -324,23 +311,13 @@
 }
 
 
-#ifdef _LP64
 intptr_t NativeMovConstReg::data() const {
   return data64(addr_at(sethi_offset), long_at(add_offset));
 }
-#else
-intptr_t NativeMovConstReg::data() const {
-  return data32(long_at(sethi_offset), long_at(add_offset));
-}
-#endif
 
 
 void NativeMovConstReg::set_data(intptr_t x) {
-#ifdef _LP64
   set_data64_sethi(addr_at(sethi_offset), x);
-#else
-  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
-#endif
   set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));
 
   // also store the value into an oop_Relocation cell, if any
@@ -508,20 +485,12 @@
 
 
 int NativeMovConstRegPatching::data() const {
-#ifdef _LP64
   return data64(addr_at(sethi_offset), long_at(add_offset));
-#else
-  return data32(long_at(sethi_offset), long_at(add_offset));
-#endif
 }
 
 
 void NativeMovConstRegPatching::set_data(int x) {
-#ifdef _LP64
   set_data64_sethi(addr_at(sethi_offset), x);
-#else
-  set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
-#endif
   set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
 
   // also store the value into an oop_Relocation cell, if any
@@ -758,21 +727,12 @@
   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
   // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
   Register rd = inv_rd(i0);
-#ifndef _LP64
-  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
-        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
-        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
-        rd == inv_rs1(i1))) {
-    fatal("not a jump_to instruction");
-  }
-#else
-  // In LP64, the jump instruction location varies for non relocatable
-  // jumps, for example is could be sethi, xor, jmp instead of the
-  // 7 instructions for sethi.  So let's check sethi only.
+  // The jump instruction sequence varies for non-relocatable jumps;
+  // for example, it could be sethi, xor, jmp instead of the
+  // 7-instruction sethi sequence.  So check sethi only.
   if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
     fatal("not a jump_to instruction");
   }
-#endif
 }
 
 
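The verify routines above check the classic two-instruction immediate: sethi
materializes bits 31..10 and the following add/jmpl supplies the low 10 bits
via its simm13 field (hence the deleted "< (1 << 10)" test). The split is
lossless for any 32-bit value:

    #include <cassert>
    #include <cstdint>

    uint32_t hi22(uint32_t imm) { return imm >> 10; }     // sethi's 22-bit field
    uint32_t lo10(uint32_t imm) { return imm & 0x3ff; }   // fits in simm13

    int main() {
      uint32_t imm = 0xDEADBEEF;
      assert(((hi22(imm) << 10) | lo10(imm)) == imm);     // recombines exactly
      return 0;
    }
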
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,11 +121,7 @@
 
   bool is_safepoint_poll() {
     int x = long_at(0);
-#ifdef _LP64
     return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
-#else
-    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
-#endif
       (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
   }
 
@@ -432,22 +428,6 @@
 // instructions in the sparcv9 vm.  Used to call native methods which may be loaded
 // anywhere in the address space, possibly out of reach of a call instruction.
 
-#ifndef _LP64
-
-// On 32-bit systems, a far call is the same as a near one.
-class NativeFarCall;
-inline NativeFarCall* nativeFarCall_at(address instr);
-class NativeFarCall : public NativeCall {
-public:
-  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
-  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
-                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
-  friend NativeFarCall* nativeFarCall_before(address return_address)
-                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
-};
-
-#else
-
 // The format of this extended-range call is:
 //      jumpl_to addr, lreg
 //      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
@@ -515,7 +495,6 @@
   static void replace_mt_safe(address instr_addr, address code_buffer);
 };
 
-#endif // _LP64
 
 // An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions
 // (used to manipulate inlined data references, etc.)
@@ -567,13 +546,8 @@
  public:
   enum Sparc_specific_constants {
     sethi_offset           = 0,
-#ifdef _LP64
     add_offset             = 7 * BytesPerInstWord,
     instruction_size       = 8 * BytesPerInstWord
-#else
-    add_offset             = 4,
-    instruction_size       = 8
-#endif
   };
 
   address instruction_address() const       { return addr_at(0); }
@@ -626,11 +600,7 @@
  public:
   enum Sparc_specific_constants {
     sethi_offset           = 0,
-#ifdef _LP64
     nop_offset             = 7 * BytesPerInstWord,
-#else
-    nop_offset             = sethi_offset + BytesPerInstWord,
-#endif
     add_offset             = nop_offset   + BytesPerInstWord,
     instruction_size       = add_offset   + BytesPerInstWord
   };
@@ -705,11 +675,7 @@
 
     offset_width    = 13,
     sethi_offset    = 0,
-#ifdef _LP64
     add_offset      = 7 * BytesPerInstWord,
-#else
-    add_offset      = 4,
-#endif
     ldst_offset     = add_offset + BytesPerInstWord
   };
   bool is_immediate() const {
@@ -720,11 +686,7 @@
 
   address instruction_address() const           { return addr_at(0); }
   address next_instruction_address() const      {
-#ifdef _LP64
     return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
-#else
-    return addr_at(is_immediate() ? 4 : 12);
-#endif
   }
   intptr_t   offset() const                             {
      return is_immediate()? inv_simm(long_at(0), offset_width) :
@@ -777,19 +739,13 @@
  public:
   enum Sparc_specific_constants {
     sethi_offset           = 0,
-#ifdef _LP64
     jmpl_offset            = 7 * BytesPerInstWord,
     instruction_size       = 9 * BytesPerInstWord  // includes delay slot
-#else
-    jmpl_offset            = 1 * BytesPerInstWord,
-    instruction_size       = 3 * BytesPerInstWord  // includes delay slot
-#endif
   };
 
   address instruction_address() const       { return addr_at(0); }
   address next_instruction_address() const  { return addr_at(instruction_size); }
 
-#ifdef _LP64
   address jump_destination() const {
     return (address) data64(instruction_address(), long_at(jmpl_offset));
   }
@@ -797,15 +753,6 @@
     set_data64_sethi( instruction_address(), (intptr_t)dest);
     set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
   }
-#else
-  address jump_destination() const {
-    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
-  }
-  void set_jump_destination(address dest) {
-    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
-    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
-  }
-#endif
 
   // Creation
   friend inline NativeJump* nativeJump_at(address address) {
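
All the surviving 64-bit constants share one shape: the address-materializing
sethi sequence is padded to a fixed window, so the final patchable
add/jmpl/ldst sits 7 instruction words in. Spelled out (BytesPerInstWord is 4
on SPARC):

    enum { BytesPerInstWord = 4 };
    enum {
      sethi_offset     = 0,                      // start of the sethi sequence
      add_offset       = 7 * BytesPerInstWord,   // byte 28: the patchable low-bits op
      instruction_size = 8 * BytesPerInstWord    // byte 32: end of the fixed window
    };
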
--- a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,6 @@
 
   case Assembler::branch_op:
     {
-#ifdef _LP64
     jint inst2;
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     if (format() != 0) {
@@ -121,17 +120,6 @@
     } else {
       ip->set_data64_sethi( ip->addr_at(0), (intptr_t)x );
     }
-#else
-    guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
-    inst &= ~Assembler::hi22(     -1);
-    inst |=  Assembler::hi22((intptr_t)x);
-    // (ignore offset; it doesn't play into the sethi)
-    if (verify_only) {
-      guarantee(ip->long_at(0) == inst, "instructions must match");
-    } else {
-      ip->set_long_at(0, inst);
-    }
-#endif
     }
     break;
 
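Binding this relocation means rewriting immediate fields in place. For a
single instruction the operation is the one the deleted 32-bit branch spelled
out; as a one-word sketch (the 64-bit path patches the whole multi-instruction
sequence through set_data64_sethi):

    #include <cstdint>

    // Replace the low 22 bits (the imm22 field) of a sethi instruction word.
    uint32_t patch_sethi(uint32_t inst, uint32_t value) {
      return (inst & ~0x3FFFFFu) | (value >> 10);
    }
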
--- a/hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/relocInfo_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,8 @@
 
     // There is no need for format bits; the instructions are
     // sufficiently self-identifying.
-#ifndef _LP64
-    format_width       =  0
-#else
-    // Except narrow oops in 64-bits VM.
+    // Except for narrow oops, which need one format bit.
     format_width       =  1
-#endif
   };
 
 
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -127,56 +127,10 @@
   // OopMap* map = new OopMap(*total_frame_words, 0);
   OopMap* map = new OopMap(frame_size_in_slots, 0);
 
-#if !defined(_LP64)
-
-  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
-  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
-  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
-  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
-  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
-  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
-  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
-#endif /* _LP64 */
-
   __ save(SP, -frame_size, SP);
 
-#ifndef _LP64
-  // Reload the 64 bit Oregs. Although they are now Iregs we load them
-  // to Oregs here to avoid interrupts cutting off their heads
-
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
-
-  __ stx(O0, SP, o0_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
-
-  __ stx(O1, SP, o1_offset+STACK_BIAS);
-
-  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
-
-  __ stx(O2, SP, o2_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
-
-  __ stx(O3, SP, o3_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
-
-  __ stx(O4, SP, o4_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
-
-  __ stx(O5, SP, o5_offset+STACK_BIAS);
-  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
-#endif /* _LP64 */
-
-
-#ifdef _LP64
+
   int debug_offset = 0;
-#else
-  int debug_offset = 4;
-#endif
   // Save the G's
   __ stx(G1, SP, g1_offset+STACK_BIAS);
   map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
@@ -192,18 +146,6 @@
 
-  // This is really a waste but we'll keep things as they were for now
-  if (true) {
-#ifndef _LP64
-    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
-    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
-#endif /* _LP64 */
-  }
 
 
@@ -250,70 +192,22 @@
   __ ldx(SP, g4_offset+STACK_BIAS, G4);
   __ ldx(SP, g5_offset+STACK_BIAS, G5);
 
-
-#if !defined(_LP64)
-  // Restore the 64-bit O's.
-  __ ldx(SP, o0_offset+STACK_BIAS, O0);
-  __ ldx(SP, o1_offset+STACK_BIAS, O1);
-  __ ldx(SP, o2_offset+STACK_BIAS, O2);
-  __ ldx(SP, o3_offset+STACK_BIAS, O3);
-  __ ldx(SP, o4_offset+STACK_BIAS, O4);
-  __ ldx(SP, o5_offset+STACK_BIAS, O5);
-
-  // And temporarily place them in TLS
-
-  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
-  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
-  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
-  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
-  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
-  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
-#endif /* _LP64 */
-
   // Restore flags
 
   __ ldxfsr(SP, fsr_offset+STACK_BIAS);
 
   __ restore();
 
-#if !defined(_LP64)
-  // Now reload the 64bit Oregs after we've restore the window.
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
-#endif /* _LP64 */
-
 }
 
 // Pop the current frame and restore the registers that might be holding
 // a result.
 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
 
-#if !defined(_LP64)
-  // 32bit build returns longs in G1
-  __ ldx(SP, g1_offset+STACK_BIAS, G1);
-
-  // Retrieve the 64-bit O's.
-  __ ldx(SP, o0_offset+STACK_BIAS, O0);
-  __ ldx(SP, o1_offset+STACK_BIAS, O1);
-  // and save to TLS
-  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
-  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
-#endif /* _LP64 */
-
   __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
 
   __ restore();
 
-#if !defined(_LP64)
-  // Now reload the 64bit Oregs after we've restore the window.
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
-  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
-#endif /* _LP64 */
-
 }
 
 // Is vector's size (in bytes) bigger than a size saved by default?
@@ -410,11 +304,6 @@
     case T_CHAR:
     case T_BYTE:
     case T_BOOLEAN:
-#ifndef _LP64
-    case T_OBJECT:
-    case T_ARRAY:
-    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
-#endif // _LP64
       if (int_reg < int_reg_max) {
         Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
         regs[i].set1(r->as_VMReg());
@@ -423,7 +312,6 @@
       }
       break;
 
-#ifdef _LP64
     case T_LONG:
       assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
       // fall-through
@@ -439,15 +327,6 @@
         slot += 2;
       }
       break;
-#else
-    case T_LONG:
-      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
-      // On 32-bit SPARC put longs always on the stack to keep the pressure off
-      // integer argument registers.  They should be used for oops.
-      slot = round_to(slot, 2);  // align
-      regs[i].set2(VMRegImpl::stack2reg(slot));
-      slot += 2;
-#endif
-      break;
 
     case T_FLOAT:
@@ -554,7 +433,6 @@
 
-  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
+  // Preserve G1/G4/G5_method across the fixup call by parking them in locals.
 
-#ifdef _LP64
   // mov(s,d)
   __ mov(G1, L1);
   __ mov(G4, L4);
@@ -571,20 +449,6 @@
   __ mov(L1, G1);
   __ mov(L4, G4);
   __ mov(L5, G5_method);
-#else
-  __ stx(G1, FP, -8 + STACK_BIAS);
-  __ stx(G4, FP, -16 + STACK_BIAS);
-  __ mov(G5_method, L5);
-  __ mov(G5_method, O0);         // VM needs target method
-  __ mov(I7, O1);                // VM needs caller's callsite
-  // Must be a leaf call...
-  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
-  __ delayed()->mov(G2_thread, L7_thread_cache);
-  __ mov(L7_thread_cache, G2_thread);
-  __ ldx(FP, -8 + STACK_BIAS, G1);
-  __ ldx(FP, -16 + STACK_BIAS, G4);
-  __ mov(L5, G5_method);
-#endif /* _LP64 */
 
   __ restore();      // Restore args
   __ bind(L);
@@ -605,28 +469,9 @@
 // Stores long into offset pointed to by base
 void AdapterGenerator::store_c2i_long(Register r, Register base,
                                       const int st_off, bool is_stack) {
-#ifdef _LP64
   // In V9, longs are given 2 64-bit slots in the interpreter, but the
   // data is passed in only 1 slot.
   __ stx(r, base, next_arg_slot(st_off));
-#else
-#ifdef COMPILER2
-  // Misaligned store of 64-bit data
-  __ stw(r, base, arg_slot(st_off));    // lo bits
-  __ srlx(r, 32, r);
-  __ stw(r, base, next_arg_slot(st_off));  // hi bits
-#else
-  if (is_stack) {
-    // Misaligned store of 64-bit data
-    __ stw(r, base, arg_slot(st_off));    // lo bits
-    __ srlx(r, 32, r);
-    __ stw(r, base, next_arg_slot(st_off));  // hi bits
-  } else {
-    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
-    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
-  }
-#endif // COMPILER2
-#endif // _LP64
 }
 
 void AdapterGenerator::store_c2i_object(Register r, Register base,
@@ -642,15 +487,9 @@
 // Stores into offset pointed to by base
 void AdapterGenerator::store_c2i_double(VMReg r_2,
                       VMReg r_1, Register base, const int st_off) {
-#ifdef _LP64
   // In V9, doubles are given 2 64-bit slots in the interpreter, but the
   // data is passed in only 1 slot.
   __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
-#else
-  // Need to marshal 64-bit value from misaligned Lesp loads
-  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
-  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
-#endif
 }
 
 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
@@ -957,22 +796,17 @@
       if (!r_2->is_valid()) {
         __ ld(Gargs, arg_slot(ld_off), r);
       } else {
-#ifdef _LP64
         // In V9, longs are given 2 64-bit slots in the interpreter, but the
         // data is passed in only 1 slot.
         RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
               next_arg_slot(ld_off) : arg_slot(ld_off);
         __ ldx(Gargs, slot, r);
-#else
-        fatal("longs should be on stack");
-#endif
       }
     } else {
       assert(r_1->is_FloatRegister(), "");
       if (!r_2->is_valid()) {
         __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_1->as_FloatRegister());
       } else {
-#ifdef _LP64
         // In V9, doubles are given 2 64-bit slots in the interpreter, but the
         // data is passed in only 1 slot.  This code also handles longs that
         // are passed on the stack, but need a stack-to-stack move through a
@@ -980,11 +814,6 @@
         RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
               next_arg_slot(ld_off) : arg_slot(ld_off);
         __ ldf(FloatRegisterImpl::D, Gargs,                  slot, r_1->as_FloatRegister());
-#else
-        // Need to marshal 64-bit value from misaligned Lesp loads
-        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
-        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_2->as_FloatRegister());
-#endif
       }
     }
     // Was the argument really intended to be on the stack, but was loaded
@@ -1157,7 +986,6 @@
     // See int_stk_helper for a further discussion.
     int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
 
-#ifdef _LP64
     // V9 convention: All things "as-if" on double-wide stack slots.
     // Hoist any int/ptr/long's in the first 6 to int regs.
     // Hoist any flt/dbl's in the first 16 dbl regs.
@@ -1241,44 +1069,6 @@
         if (off > max_stack_slots) max_stack_slots = off;
       }
     }
-
-#else // _LP64
-    // V8 convention: first 6 things in O-regs, rest on stack.
-    // Alignment is willy-nilly.
-    for (int i = 0; i < total_args_passed; i++) {
-      switch (sig_bt[i]) {
-      case T_ADDRESS: // raw pointers, like current thread, for VM calls
-      case T_ARRAY:
-      case T_BOOLEAN:
-      case T_BYTE:
-      case T_CHAR:
-      case T_FLOAT:
-      case T_INT:
-      case T_OBJECT:
-      case T_METADATA:
-      case T_SHORT:
-        regs[i].set1(int_stk_helper(i));
-        break;
-      case T_DOUBLE:
-      case T_LONG:
-        assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
-        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
-        break;
-      case T_VOID: regs[i].set_bad(); break;
-      default:
-        ShouldNotReachHere();
-      }
-      if (regs[i].first()->is_stack()) {
-        int off = regs[i].first()->reg2stack();
-        if (off > max_stack_slots) max_stack_slots = off;
-      }
-      if (regs[i].second()->is_stack()) {
-        int off = regs[i].second()->reg2stack();
-        if (off > max_stack_slots) max_stack_slots = off;
-      }
-    }
-#endif // _LP64
-
   return round_to(max_stack_slots + 1, 2);
 
 }
@@ -1406,12 +1196,7 @@
     Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
     __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
     __ ld_ptr(rHandle, 0, L4);
-#ifdef _LP64
     __ movr( Assembler::rc_z, L4, G0, rHandle );
-#else
-    __ tst( L4 );
-    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
-#endif
     if (dst.first()->is_stack()) {
       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
     }
@@ -1432,12 +1217,7 @@
     }
     map->set_oop(VMRegImpl::stack2reg(oop_slot));
     __ add(SP, offset + STACK_BIAS, rHandle);
-#ifdef _LP64
     __ movr( Assembler::rc_z, rOop, G0, rHandle );
-#else
-    __ tst( rOop );
-    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
-#endif
 
     if (dst.first()->is_stack()) {
       __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
@@ -2068,11 +1848,7 @@
     __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
 
     // Check for a valid (non-zero) hash code and get its value.
-#ifdef _LP64
     __ srlx(header, markOopDesc::hash_shift, hash);
-#else
-    __ srl(header, markOopDesc::hash_shift, hash);
-#endif
     __ andcc(hash, mask, hash);
     __ br(Assembler::equal, false, Assembler::pn, slowCase);
     __ delayed()->nop();
@@ -2408,7 +2184,6 @@
-  // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
+  // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
   // except O6/O7. So if we must call out we must push a new frame. We immediately
   // push a new frame and flush the windows.
-#ifdef _LP64
   intptr_t thepc = (intptr_t) __ pc();
   {
     address here = __ pc();
@@ -2416,9 +2191,6 @@
     __ call(here + 8, relocInfo::none);
     __ delayed()->nop();
   }
-#else
-  intptr_t thepc = __ load_pc_address(O7, 0);
-#endif /* _LP64 */
 
   // We use the same pc/oopMap repeatedly when we call out
   oop_maps->add_gc_map(thepc - start, map);
@@ -2553,13 +2325,9 @@
   // Transition from _thread_in_Java to _thread_in_native.
   __ set(_thread_in_native, G3_scratch);
 
-#ifdef _LP64
   AddressLiteral dest(native_func);
   __ relocate(relocInfo::runtime_call_type);
   __ jumpl_to(dest, O7, O7);
-#else
-  __ call(native_func, relocInfo::runtime_call_type);
-#endif
   __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
 
   __ restore_thread(L7_thread_cache); // restore G2_thread
@@ -2574,9 +2342,6 @@
   case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
-  // In 64 bits build result is in O0, in O0, O1 in 32bit build
+  // The long result is in O0
   case T_LONG:
-#ifndef _LP64
-                  __ mov(O1, I1);
-#endif
                   // Fall thru
   case T_OBJECT:                // Really a handle
   case T_ARRAY:
@@ -2797,16 +2562,6 @@
 
   // Return
 
-#ifndef _LP64
-  if (ret_type == T_LONG) {
-
-    // Must leave proper result in O0,O1 and G1 (c2/tiered only)
-    __ sllx(I0, 32, G1);          // Shift bits into high G1
-    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
-    __ or3 (I1, G1, G1);          // OR 64 bits into G1
-  }
-#endif
-
   __ ret();
   __ delayed()->restore();
 
@@ -2868,10 +2623,6 @@
 
-  #ifdef ASSERT
-  // make sure that the frames are aligned properly
-#ifndef _LP64
-  __ btst(wordSize*2-1, SP);
-  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
-#endif
-  #endif
 
   // Deopt needs to pass some extra live values from frame to frame
@@ -2989,13 +2740,7 @@
     pad += 1000; // Increase the buffer size when compiling for JVMCI
   }
 #endif
-#ifdef _LP64
   CodeBuffer buffer("deopt_blob", 2100+pad, 512);
-#else
-  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
-  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
-  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
-#endif /* _LP64 */
   MacroAssembler* masm               = new MacroAssembler(&buffer);
   FloatRegister   Freturn0           = F0;
   Register        Greturn1           = G1;
@@ -3006,9 +2751,6 @@
   Register        G4deopt_mode       = G4_scratch;
   int             frame_size_words;
   Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
-#if !defined(_LP64) && defined(COMPILER2)
-  Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
-#endif
   Label           cont;
 
   OopMapSet *oop_maps = new OopMapSet();
@@ -3220,30 +2962,13 @@
   // to the interpreter entry point
   __ save(SP, -frame_size_words*wordSize, SP);
   __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
-#if !defined(_LP64)
-#if defined(COMPILER2)
-  // 32-bit 1-register longs return longs in G1
-  __ stx(Greturn1, saved_Greturn1_addr);
-#endif
-  __ set_last_Java_frame(SP, noreg);
-  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
-#else
-  // LP64 uses g4 in set_last_Java_frame
+  // set_last_Java_frame uses G4 as scratch, so pass the mode in O1
   __ mov(G4deopt_mode, O1);
   __ set_last_Java_frame(SP, G0);
   __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
-#endif
   __ reset_last_Java_frame();
   __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
 
-#if !defined(_LP64) && defined(COMPILER2)
-  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
-  // I0/I1 if the return value is long.
-  Label not_long;
-  __ cmp_and_br_short(O0,T_LONG, Assembler::notEqual, Assembler::pt, not_long);
-  __ ldd(saved_Greturn1_addr,I0);
-  __ bind(not_long);
-#endif
   __ ret();
   __ delayed()->restore();
 
@@ -3273,13 +2998,7 @@
     pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
   }
 #endif
-#ifdef _LP64
   CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
-#else
-  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
-  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
-  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
-#endif
   MacroAssembler* masm               = new MacroAssembler(&buffer);
   Register        O2UnrollBlock      = O2;
   Register        O2klass_index      = O2;
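
One idiom above deserves a gloss: movr(rc_z, L4, G0, rHandle) is the V9
register-conditional move that replaced the old tst/movcc pair. It nulls out a
handle argument when the wrapped oop is null, since JNI expects a NULL jobject
rather than a handle to null. In C++ terms (hypothetical helper name):

    #include <cstdint>

    static inline void* handleize(uintptr_t* handle_slot) {
      return (*handle_slot == 0) ? nullptr : (void*)handle_slot;  // null oop => null handle
    }
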
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Wed Apr 12 17:52:04 2017 -0400
@@ -311,7 +311,6 @@
 // ----------------------------
 // Pointer Register Classes
 // ----------------------------
-#ifdef _LP64
 // 64-bit build means 64-bit pointers means hi/lo pairs
 reg_class ptr_reg(            R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                   R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
@@ -344,40 +343,6 @@
 reg_class o2_regP(R_O2H,R_O2);
 reg_class o7_regP(R_O7H,R_O7);
 
-#else // _LP64
-// 32-bit build means 32-bit pointers means 1 register.
-reg_class ptr_reg(     R_G1,     R_G3,R_G4,R_G5,
-                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
-                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
-                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
-// Lock encodings use G3 and G4 internally
-reg_class lock_ptr_reg(R_G1,               R_G5,
-                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
-                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
-                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
-// Special class for storeP instructions, which can store SP or RPC to TLS.
-// It is also used for memory addressing, allowing direct TLS addressing.
-reg_class sp_ptr_reg(  R_G1,R_G2,R_G3,R_G4,R_G5,
-                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
-                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
-                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
-// R_L7 is the lowest-priority callee-save (i.e., NS) register
-// We use it to save R_G2 across calls out of Java.
-reg_class l7_regP(R_L7);
-
-// Other special pointer regs
-reg_class g1_regP(R_G1);
-reg_class g2_regP(R_G2);
-reg_class g3_regP(R_G3);
-reg_class g4_regP(R_G4);
-reg_class g5_regP(R_G5);
-reg_class i0_regP(R_I0);
-reg_class o0_regP(R_O0);
-reg_class o1_regP(R_O1);
-reg_class o2_regP(R_O2);
-reg_class o7_regP(R_O7);
-#endif // _LP64
-
 
 // ----------------------------
 // Long Register Classes
@@ -386,12 +351,10 @@
 // Note:  O7 is never in this class; it is sometimes used as an encoding temp.
 reg_class long_reg(             R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
                    ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
-#ifdef _LP64
 // 64-bit, longs in 1 register: use all 64-bit integer registers
-// 32-bit, longs in 1 register: cannot use I's and L's.  Restrict to O's and G's.
                    ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
                    ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
-#endif // _LP64
                   );
 
 reg_class g1_regL(R_G1H,R_G1);
@@ -533,10 +496,8 @@
 // instructions which either zero-fill or sign-fill).
 bool can_branch_register( Node *bol, Node *cmp ) {
   if( !BranchOnRegister ) return false;
-#ifdef _LP64
   if( cmp->Opcode() == Op_CmpP )
     return true;  // No problems with pointer compares
-#endif
   if( cmp->Opcode() == Op_CmpL )
     return true;  // No problems with long compares
 
@@ -617,15 +578,11 @@
 }
 
 int MachCallRuntimeNode::ret_addr_offset() {
-#ifdef _LP64
   if (MacroAssembler::is_far_target(entry_point())) {
     return NativeFarCall::instruction_size;
   } else {
     return NativeCall::instruction_size;
   }
-#else
-  return NativeCall::instruction_size;  // call; delay slot
-#endif
 }
 
 // Indicate if the safepoint node needs the polling page as an input.
@@ -1024,7 +981,6 @@
 
 #ifdef ASSERT
   if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
-#ifdef _LP64
     // Trash argument dump slots.
     __ set(0xb0b8ac0db0b8ac0d, G1);
     __ mov(G1, G5);
@@ -1034,22 +990,6 @@
     __ stx(G1, SP, STACK_BIAS + 0x98);
     __ stx(G1, SP, STACK_BIAS + 0xA0);
     __ stx(G1, SP, STACK_BIAS + 0xA8);
-#else // _LP64
-    // this is also a native call, so smash the first 7 stack locations,
-    // and the various registers
-
-    // Note:  [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
-    // while [SP+0x44..0x58] are the argument dump slots.
-    __ set((intptr_t)0xbaadf00d, G1);
-    __ mov(G1, G5);
-    __ sllx(G1, 32, G1);
-    __ or3(G1, G5, G1);
-    __ mov(G1, G5);
-    __ stx(G1, SP, 0x40);
-    __ stx(G1, SP, 0x48);
-    __ stx(G1, SP, 0x50);
-    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
-#endif // _LP64
   }
 #endif /*ASSERT*/
 }
@@ -1262,11 +1202,7 @@
 
   if(do_polling() && ra_->C->is_method_compilation()) {
     st->print("SETHI  #PollAddr,L0\t! Load Polling address\n\t");
-#ifdef _LP64
     st->print("LDX    [L0],G0\t!Poll for Safepointing\n\t");
-#else
-    st->print("LDUW   [L0],G0\t!Poll for Safepointing\n\t");
-#endif
   }
 
   if(do_polling()) {
@@ -1472,75 +1408,10 @@
   // hardware does the flop for me.  Doubles are always aligned, so no problem
-  // there.  Misaligned sources only come from native-long-returns (handled
-  // special below).
+  // there.
-#ifndef _LP64
-  if (src_first_rc == rc_int &&     // source is already big-endian
-      src_second_rc != rc_bad &&    // 64-bit move
-      ((dst_first & 1) != 0 || dst_second != dst_first + 1)) { // misaligned dst
-    assert((src_first & 1) == 0 && src_second == src_first + 1, "source must be aligned");
-    // Do the big-endian flop.
-    OptoReg::Name tmp    = dst_first   ; dst_first    = dst_second   ; dst_second    = tmp   ;
-    enum RC       tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
-  }
-#endif
 
   // --------------------------------------
   // Check for integer reg-reg copy
   if (src_first_rc == rc_int && dst_first_rc == rc_int) {
-#ifndef _LP64
-    if (src_first == R_O0_num && src_second == R_O1_num) {  // Check for the evil O0/O1 native long-return case
-      // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
-      //       as stored in memory.  On a big-endian machine like SPARC, this means that the _second
-      //       operand contains the least significant word of the 64-bit value and vice versa.
-      OptoReg::Name tmp = OptoReg::Name(R_O7_num);
-      assert((dst_first & 1) == 0 && dst_second == dst_first + 1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
-      // Shift O0 left in-place, zero-extend O1, then OR them into the dst
-      if ( cbuf ) {
-        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020);
-        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000);
-        emit3       (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second]);
-#ifndef PRODUCT
-      } else {
-        print_helper(st, "SLLX   R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
-        print_helper(st, "SRL    R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
-        print_helper(st, "OR     R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
-#endif
-      }
-      return;
-    } else if (dst_first == R_I0_num && dst_second == R_I1_num) {
-      // returning a long value in I0/I1
-      // a SpillCopy must be able to target a return instruction's reg_class
-      // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
-      //       as stored in memory.  On a big-endian machine like SPARC, this means that the _second
-      //       operand contains the least significant word of the 64-bit value and vice versa.
-      OptoReg::Name tdest = dst_first;
-
-      if (src_first == dst_first) {
-        tdest = OptoReg::Name(R_O7_num);
-      }
-
-      if (cbuf) {
-        assert((src_first & 1) == 0 && (src_first + 1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
-        // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
-        // ShrL_reg_imm6
-        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000);
-        // ShrR_reg_imm6  src, 0, dst
-        emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000);
-        if (tdest != dst_first) {
-          emit3     (*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest]);
-        }
-      }
-#ifndef PRODUCT
-      else {
-        print_helper(st, "SRLX   R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
-        print_helper(st, "SRL    R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
-        if (tdest != dst_first) {
-          print_helper(st, "MOV    R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
-        }
-      }
-#endif // PRODUCT
-      return size+8;
-    }
-#endif // !_LP64
     // Else normal reg-reg copy
     assert(src_second != dst_first, "smashed second before evacuating it");
     impl_mov_helper(cbuf, src_first, dst_first, Assembler::or_op3, 0, "MOV  ", st);
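The 32-bit arm deleted above merged a native long returned in the O0/O1 register pair into one 64-bit register with an SLLX/SRL/OR sequence. A minimal C++ sketch of that arithmetic (hypothetical helper name, not HotSpot code):

    #include <cstdint>

    // Sketch only: merge the two 32-bit halves of a big-endian native long
    // return (O0 holds the most significant word, O1 the least significant).
    uint64_t merge_native_long(uint32_t o0_msw, uint32_t o1_lsw) {
      uint64_t hi = static_cast<uint64_t>(o0_msw) << 32; // SLLX O0,32
      uint64_t lo = static_cast<uint64_t>(o1_lsw);       // SRL  O1,0 (zero-extend)
      return hi | lo;                                    // OR into one register
    }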
@@ -1614,58 +1485,6 @@
   }
   assert(src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad");
 
-#ifndef _LP64
-  // In the LP64 build, all registers can be moved as aligned/adjacent
-  // pairs, so there's never any need to move the high bits separately.
-  // The 32-bit builds have to deal with the 32-bit ABI which can force
-  // all sorts of silly alignment problems.
-
-  // Check for integer reg-reg copy.  Hi bits are stuck up in the top
-  // 32-bits of a 64-bit register, but are needed in low bits of another
-  // register (else it's a hi-bits-to-hi-bits copy which should have
-  // happened already as part of a 64-bit move)
-  if (src_second_rc == rc_int && dst_second_rc == rc_int) {
-    assert((src_second & 1) == 1, "its the evil O0/O1 native return case");
-    assert((dst_second & 1) == 0, "should have moved with 1 64-bit move");
-    // Shift src_second down to dst_second's low bits.
-    if (cbuf) {
-      emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
-#ifndef PRODUCT
-    } else  {
-      print_helper(st, "SRLX   R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second - 1), OptoReg::regname(dst_second));
-#endif
-    }
-    return;
-  }
-
-  // Check for high word integer store.  Must down-shift the hi bits
-  // into a temp register, then fall into the case of storing int bits.
-  if (src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second & 1) == 1) {
-    // Shift src_second down to dst_second's low bits.
-    if (cbuf) {
-      emit3_simm13(*cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020);
-#ifndef PRODUCT
-    } else {
-      print_helper(st, "SRLX   R_%s,32,R_%s\t! spill: Move high bits down low", OptoReg::regname(src_second-1), OptoReg::regname(R_O7_num));
-#endif
-    }
-    src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
-  }
-
-  // Check for high word integer load
-  if (dst_second_rc == rc_int && src_second_rc == rc_stack)
-    return impl_helper(this, cbuf, ra_, true, ra_->reg2offset(src_second), dst_second, Assembler::lduw_op3, "LDUW", size, st);
-
-  // Check for high word integer store
-  if (src_second_rc == rc_int && dst_second_rc == rc_stack)
-    return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stw_op3, "STW ", size, st);
-
-  // Check for high word float store
-  if (src_second_rc == rc_float && dst_second_rc == rc_stack)
-    return impl_helper(this, cbuf, ra_, false, ra_->reg2offset(dst_second), src_second, Assembler::stf_op3, "STF ", size, st);
-
-#endif // !_LP64
-
   Unimplemented();
 }
 
@@ -1743,7 +1562,6 @@
 #ifndef PRODUCT
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
-#ifdef    _LP64
   if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
@@ -1762,11 +1580,6 @@
   }
   st->print_cr("\tCMP    R_G5,R_G3" );
   st->print   ("\tTne    xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
-#else  // _LP64
-  st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
-  st->print_cr("\tCMP    R_G5,R_G3" );
-  st->print   ("\tTne    icc,R_G0+ST_RESERVED_FOR_USER_0+2");
-#endif // _LP64
 }
 #endif
 
@@ -1874,9 +1687,7 @@
     if (!UsePopCountInstruction)
       return false;
   case Op_CompareAndSwapL:
-#ifdef _LP64
   case Op_CompareAndSwapP:
-#endif
     if (!VM_Version::supports_cx8())
       return false;
     break;
@@ -1992,13 +1803,11 @@
 const bool Matcher::need_masked_shift_count = false;
 
 bool Matcher::narrow_oop_use_complex_address() {
-  NOT_LP64(ShouldNotCallThis());
   assert(UseCompressedOops, "only for compressed oops code");
   return false;
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
-  NOT_LP64(ShouldNotCallThis());
   assert(UseCompressedClassPointers, "only for compressed klass code");
   return false;
 }
@@ -2027,11 +1836,7 @@
 // needed.  Else we split the double into 2 integer pieces and move it
 // piece-by-piece.  Only happens when passing doubles into C code as the
 // Java calling convention forces doubles to be aligned.
-#ifdef _LP64
 const bool Matcher::misaligned_doubles_ok = true;
-#else
-const bool Matcher::misaligned_doubles_ok = false;
-#endif
 
 // No-op on SPARC.
 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
@@ -2050,11 +1855,7 @@
 // The relevant question is how the int is callee-saved.  In _LP64
 // the whole long is written but de-opt'ing will have to extract
 // the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
-#ifdef _LP64
 const bool Matcher::int_in_long = true;
-#else
-const bool Matcher::int_in_long = false;
-#endif
 
 // Return whether or not this register is ever used as an argument.  This
 // function is used on startup to build the trampoline stubs in generateOptoStub.
@@ -2068,7 +1869,6 @@
       reg == R_I3_num ||
       reg == R_I4_num ||
       reg == R_I5_num ) return true;
-#ifdef _LP64
   // 64-bit builds can pass 64-bit pointers and longs in
   // the high I registers
   if( reg == R_I0H_num ||
@@ -2082,14 +1882,6 @@
     return true;
   }
 
-#else
-  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
-  // Longs cannot be passed in O regs, because O regs become I regs
-  // after a 'save' and I regs get their high bits chopped off on
-  // interrupt.
-  if( reg == R_G1H_num || reg == R_G1_num ) return true;
-  if( reg == R_G4H_num || reg == R_G4_num ) return true;
-#endif
   // A few float args in registers
   if( reg >= R_F0_num && reg <= R_F7_num ) return true;
 
@@ -2152,19 +1944,11 @@
 
 // The intptr_t operand types, defined by textual substitution.
 // (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
-#ifdef _LP64
 #define immX      immL
 #define immX13    immL13
 #define immX13m7  immL13m7
 #define iRegX     iRegL
 #define g1RegX    g1RegL
-#else
-#define immX      immI
-#define immX13    immI13
-#define immX13m7  immI13m7
-#define iRegX     iRegI
-#define g1RegX    g1RegI
-#endif
 
 //----------ENCODING BLOCK-----------------------------------------------------
 // This block specifies the encoding classes used by the compiler to output
@@ -2326,7 +2110,6 @@
     emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
   %}
 
-#ifdef _LP64
   /* %%% merge with enc_to_bool */
   enc_class enc_convP2B( iRegI dst, iRegP src ) %{
     MacroAssembler _masm(&cbuf);
@@ -2335,7 +2118,6 @@
     Register   dst_reg = reg_to_register_object($dst$$reg);
     __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
   %}
-#endif
 
   enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
     // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
@@ -2626,16 +2408,6 @@
   // to G1 so the register allocator will not have to deal with the misaligned register
   // pair.
   enc_class adjust_long_from_native_call %{
-#ifndef _LP64
-    if (returns_long()) {
-      //    sllx  O0,32,O0
-      emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
-      //    srl   O1,0,O1
-      emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
-      //    or    O0,O1,G1
-      emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
-    }
-#endif
   %}
 
   enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
@@ -3102,11 +2874,7 @@
   cisc_spilling_operand_name(indOffset);
 
   // Number of stack slots consumed by a Monitor enter
-#ifdef _LP64
   sync_stack_slots(2);
-#else
-  sync_stack_slots(1);
-#endif
 
   // Compiled code's Frame Pointer
   frame_pointer(R_SP);
@@ -3124,13 +2892,8 @@
   // Number of outgoing stack slots killed above the out_preserve_stack_slots
   // for calls to C.  Supports the var-args backing area for register parms.
   // ADLC doesn't support parsing expressions, so I folded the math by hand.
-#ifdef _LP64
   // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
   varargs_C_out_slots_killed(12);
-#else
-  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
-  varargs_C_out_slots_killed( 7);
-#endif
 
   // The after-PROLOG location of the return address.  Location of
   // return address specifies a type (REG or STACK) and a number
@@ -3161,17 +2924,10 @@
   // opcodes.  This simplifies the register allocator.
   c_return_value %{
     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
-#ifdef     _LP64
     static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
     static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
     static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
     static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
-#else  // !_LP64
-    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
-    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
-#endif
     return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                         (is_outgoing?lo_out:lo_in)[ideal_reg] );
   %}
@@ -3179,17 +2935,10 @@
   // Location of compiled Java return values.  Same as C
   return_value %{
     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
-#ifdef     _LP64
     static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
     static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
     static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
     static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
-#else  // !_LP64
-    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
-    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
-    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
-#endif
     return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                         (is_outgoing?lo_out:lo_in)[ideal_reg] );
   %}
@@ -3444,7 +3193,6 @@
   interface(CONST_INTER);
 %}
 
-#ifdef _LP64
 // Pointer Immediate: 64-bit
 operand immP_set() %{
   predicate(!VM_Version::is_niagara_plus());
@@ -3478,7 +3226,6 @@
   format %{ %}
   interface(CONST_INTER);
 %}
-#endif
 
 operand immP13() %{
   predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
@@ -3919,11 +3666,7 @@
   constraint(ALLOC_IN_RC(int_flags));
   match(RegFlags);
 
-#ifdef _LP64
   format %{ "xcc_P" %}
-#else
-  format %{ "icc_P" %}
-#endif
   interface(REG_INTER);
 %}
 
@@ -4500,7 +4243,6 @@
     MS      : R(2);
 %}
 
-#ifdef _LP64
 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
     instruction_count(1); multiple_bundles;
     dst     : C(write)+1;
@@ -4509,7 +4251,6 @@
     BR      : E(2);
     MS      : E(2);
 %}
-#endif
 
 // Integer ALU reg operation
 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
@@ -4614,13 +4355,8 @@
 
 // Polling Address
 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
-#ifdef _LP64
     instruction_count(0); multiple_bundles;
     fixed_latency(6);
-#else
-    dst   : E(write);
-    IALU  : R;
-#endif
 %}
 
 // Long Constant small
@@ -5361,7 +5097,6 @@
   ins_pipe(istore_mem_reg);
 %}
 
-#ifdef _LP64
 // Load pointer from stack slot, 64-bit encoding
 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
   match(Set dst src);
@@ -5381,27 +5116,6 @@
   ins_encode(simple_form3_mem_reg( dst, src ) );
   ins_pipe(istore_mem_reg);
 %}
-#else // _LP64
-// Load pointer from stack slot, 32-bit encoding
-instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
-  match(Set dst src);
-  ins_cost(MEMORY_REF_COST);
-  format %{ "LDUW   $src,$dst\t!ptr" %}
-  opcode(Assembler::lduw_op3, Assembler::ldst_op);
-  ins_encode(simple_form3_mem_reg( src, dst ) );
-  ins_pipe(iload_mem);
-%}
-
-// Store pointer to stack slot
-instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
-  match(Set dst src);
-  ins_cost(MEMORY_REF_COST);
-  format %{ "STW    $src,$dst\t!ptr" %}
-  opcode(Assembler::stw_op3, Assembler::ldst_op);
-  ins_encode(simple_form3_mem_reg( dst, src ) );
-  ins_pipe(istore_mem_reg);
-%}
-#endif // _LP64
 
 //------------Special Nop instructions for bundling - no match rules-----------
 // Nop using the A0 functional unit
@@ -5858,17 +5572,10 @@
   ins_cost(MEMORY_REF_COST);
   size(4);
 
-#ifndef _LP64
-  format %{ "LDUW   $mem,$dst\t! ptr" %}
-  ins_encode %{
-    __ lduw($mem$$Address, $dst$$Register);
-  %}
-#else
   format %{ "LDX    $mem,$dst\t! ptr" %}
   ins_encode %{
     __ ldx($mem$$Address, $dst$$Register);
   %}
-#endif
   ins_pipe(iload_mem);
 %}
 
@@ -5891,17 +5598,10 @@
   ins_cost(MEMORY_REF_COST);
   size(4);
 
-#ifndef _LP64
-  format %{ "LDUW   $mem,$dst\t! klass ptr" %}
-  ins_encode %{
-    __ lduw($mem$$Address, $dst$$Register);
-  %}
-#else
   format %{ "LDX    $mem,$dst\t! klass ptr" %}
   ins_encode %{
     __ ldx($mem$$Address, $dst$$Register);
   %}
-#endif
   ins_pipe(iload_mem);
 %}
 
@@ -5969,26 +5669,6 @@
   ins_pipe(ialu_imm);
 %}
 
-#ifndef _LP64
-instruct loadConP(iRegP dst, immP con) %{
-  match(Set dst con);
-  ins_cost(DEFAULT_COST * 3/2);
-  format %{ "SET    $con,$dst\t!ptr" %}
-  ins_encode %{
-    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
-      intptr_t val = $con$$constant;
-    if (constant_reloc == relocInfo::oop_type) {
-      __ set_oop_constant((jobject) val, $dst$$Register);
-    } else if (constant_reloc == relocInfo::metadata_type) {
-      __ set_metadata_constant((Metadata*)val, $dst$$Register);
-    } else {          // non-oop pointers, e.g. card mark base, heap top
-      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
-      __ set(val, $dst$$Register);
-    }
-  %}
-  ins_pipe(loadConP);
-%}
-#else
 instruct loadConP_set(iRegP dst, immP_set con) %{
   match(Set dst con);
   ins_cost(DEFAULT_COST * 3/2);
@@ -6032,7 +5712,6 @@
   %}
   ins_pipe(loadConP);
 %}
-#endif // _LP64
 
 instruct loadConP0(iRegP dst, immP0 src) %{
   match(Set dst src);
@@ -6186,19 +5865,6 @@
 %}
 
 // Next code is used for finding next cache line address to prefetch.
-#ifndef _LP64
-instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
-  match(Set dst (CastX2P (AndI (CastP2X src) mask)));
-  ins_cost(DEFAULT_COST);
-  size(4);
-
-  format %{ "AND    $src,$mask,$dst\t! next cache line address" %}
-  ins_encode %{
-    __ and3($src$$Register, $mask$$constant, $dst$$Register);
-  %}
-  ins_pipe(ialu_reg_imm);
-%}
-#else
 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
   match(Set dst (CastX2P (AndL (CastP2X src) mask)));
   ins_cost(DEFAULT_COST);
@@ -6210,7 +5876,6 @@
   %}
   ins_pipe(ialu_reg_imm);
 %}
-#endif
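The surviving 64-bit cacheLineAdr rule computes the prefetch target by AND-ing a pointer with an immediate mask. A sketch of the underlying computation (hypothetical helper, assuming a power-of-two cache line size such as 64 bytes):

    #include <cstdint>

    // Sketch only: AND-ing an address with a mask like ~63 rounds it down
    // to the start of its cache line, which is what AND $src,$mask,$dst emits.
    inline void* cache_line_base(void* p, intptr_t mask /* e.g. ~63 */) {
      return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(p) & mask);
    }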
 
 //----------Store Instructions-------------------------------------------------
 // Store Byte
@@ -6322,13 +5987,8 @@
   match(Set dst (StoreP dst src));
   ins_cost(MEMORY_REF_COST);
 
-#ifndef _LP64
-  format %{ "STW    $src,$dst\t! ptr" %}
-  opcode(Assembler::stw_op3, 0, REGP_OP);
-#else
   format %{ "STX    $src,$dst\t! ptr" %}
   opcode(Assembler::stx_op3, 0, REGP_OP);
-#endif
   ins_encode( form3_mem_reg( dst, src ) );
   ins_pipe(istore_mem_spORreg);
 %}
@@ -6337,13 +5997,8 @@
   match(Set dst (StoreP dst src));
   ins_cost(MEMORY_REF_COST);
 
-#ifndef _LP64
-  format %{ "STW    $src,$dst\t! ptr" %}
-  opcode(Assembler::stw_op3, 0, REGP_OP);
-#else
   format %{ "STX    $src,$dst\t! ptr" %}
   opcode(Assembler::stx_op3, 0, REGP_OP);
-#endif
   ins_encode( form3_mem_reg( dst, R_G0 ) );
   ins_pipe(istore_mem_zero);
 %}
@@ -7094,13 +6749,8 @@
   match(Set dst (LoadPLocked mem));
   ins_cost(MEMORY_REF_COST);
 
-#ifndef _LP64
-  format %{ "LDUW   $mem,$dst\t! ptr" %}
-  opcode(Assembler::lduw_op3, 0, REGP_OP);
-#else
   format %{ "LDX    $mem,$dst\t! ptr" %}
   opcode(Assembler::ldx_op3, 0, REGP_OP);
-#endif
   ins_encode( form3_mem_reg( mem, dst ) );
   ins_pipe(iload_mem);
 %}
@@ -7171,9 +6821,7 @@
 %}
 
 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
-#ifdef _LP64
   predicate(VM_Version::supports_cx8());
-#endif
   match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem_ptr (Binary oldval newval)));
   effect( USE mem_ptr, KILL ccr, KILL tmp1);
@@ -7184,13 +6832,8 @@
             "MOV    1,$res\n\t"
             "MOVne  xcc,R_G0,$res"
   %}
-#ifdef _LP64
   ins_encode( enc_casx(mem_ptr, oldval, newval),
               enc_lflags_ne_to_boolean(res) );
-#else
-  ins_encode( enc_casi(mem_ptr, oldval, newval),
-              enc_iflags_ne_to_boolean(res) );
-#endif
   ins_pipe( long_memory_op );
 %}
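After this change the pointer CAS always uses the 64-bit CASX encoding. A portable sketch of the boolean contract the rule implements (GCC/Clang builtin, shown only to illustrate the semantics, not the emitted code):

    #include <cstdint>

    // Sketch only: returns 1 if [mem] still held oldval and was replaced by
    // newval, 0 otherwise - the result enc_lflags_ne_to_boolean materializes.
    int cas_ptr_bool(volatile intptr_t* mem, intptr_t oldval, intptr_t newval) {
      return __sync_bool_compare_and_swap(mem, oldval, newval) ? 1 : 0;
    }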
 
@@ -7268,17 +6911,6 @@
   ins_pipe( long_memory_op );
 %}
 
-#ifndef _LP64
-instruct xchgP( memory mem, iRegP newval) %{
-  match(Set newval (GetAndSetP mem newval));
-  format %{ "SWAP  [$mem],$newval" %}
-  size(4);
-  ins_encode %{
-    __ swap($mem$$Address, $newval$$Register);
-  %}
-  ins_pipe( long_memory_op );
-%}
-#endif
 
 instruct xchgN( memory mem, iRegN newval) %{
   match(Set newval (GetAndSetN mem newval));
@@ -7740,7 +7372,6 @@
 %}
 
 // Register Shift Right Immediate with a CastP2X
-#ifdef _LP64
 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
   match(Set dst (URShiftL (CastP2X src1) src2));
   size(4);
@@ -7749,16 +7380,6 @@
   ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
   ins_pipe(ialu_reg_imm);
 %}
-#else
-instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
-  match(Set dst (URShiftI (CastP2X src1) src2));
-  size(4);
-  format %{ "SRL    $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
-  opcode(Assembler::srl_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
-  ins_pipe(ialu_reg_imm);
-%}
-#endif
 
 
 //----------Floating Point Arithmetic Instructions-----------------------------
@@ -8001,21 +7622,6 @@
   ins_pipe(ialu_reg_imm);
 %}
 
-#ifndef _LP64
-
-// Use sp_ptr_RegP to match G2 (TLS register) without spilling.
-instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
-  match(Set dst (OrI src1 (CastP2X src2)));
-
-  size(4);
-  format %{ "OR     $src1,$src2,$dst" %}
-  opcode(Assembler::or_op3, Assembler::arith_op);
-  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
-  ins_pipe(ialu_reg_reg);
-%}
-
-#else
-
 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
   match(Set dst (OrL src1 (CastP2X src2)));
 
@@ -8027,8 +7633,6 @@
   ins_pipe(ialu_reg_reg);
 %}
 
-#endif
-
 // Xor Instructions
 // Register Xor
 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
@@ -8088,17 +7692,6 @@
   ins_pipe(ialu_reg_ialu);
 %}
 
-#ifndef _LP64
-instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
-  match(Set dst (Conv2B src));
-  effect( KILL ccr );
-  ins_cost(DEFAULT_COST*2);
-  format %{ "CMP    R_G0,$src\n\t"
-            "ADDX   R_G0,0,$dst" %}
-  ins_encode( enc_to_bool( src, dst ) );
-  ins_pipe(ialu_reg_ialu);
-%}
-#else
 instruct convP2B( iRegI dst, iRegP src ) %{
   match(Set dst (Conv2B src));
   ins_cost(DEFAULT_COST*2);
@@ -8107,7 +7700,6 @@
   ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
   ins_pipe(ialu_clr_and_mover);
 %}
-#endif
 
 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
   match(Set dst (CmpLTMask src zero));
@@ -8750,16 +8342,10 @@
 
 instruct convL2I_reg(iRegI dst, iRegL src) %{
   match(Set dst (ConvL2I src));
-#ifndef _LP64
-  format %{ "MOV    $src.lo,$dst\t! long->int" %}
-  ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
-  ins_pipe(ialu_move_reg_I_to_L);
-#else
   size(4);
   format %{ "SRA    $src,R_G0,$dst\t! long->int" %}
   ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
   ins_pipe(ialu_reg);
-#endif
 %}
 
 // Register Shift Right Immediate
@@ -9528,11 +9114,7 @@
 
   size(4);
   ins_cost(BRANCH_COST);
-#ifdef _LP64
   format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
-#else
-  format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
-#endif
   ins_encode %{
     Label* L = $labl$$label;
     assert(__ use_cbcond(*L), "back to back cbcond");
@@ -9550,11 +9132,7 @@
 
   size(4);
   ins_cost(BRANCH_COST);
-#ifdef _LP64
   format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
-#else
-  format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
-#endif
   ins_encode %{
     Label* L = $labl$$label;
     assert(__ use_cbcond(*L), "back to back cbcond");
@@ -9822,11 +9400,7 @@
   effect(USE poll);
 
   size(4);
-#ifdef _LP64
   format %{ "LDX    [$poll],R_G0\t! Safepoint: poll for GC" %}
-#else
-  format %{ "LDUW   [$poll],R_G0\t! Safepoint: poll for GC" %}
-#endif
   ins_encode %{
     __ relocate(relocInfo::poll_type);
     __ ld_ptr($poll$$Register, 0, G0);
@@ -10259,15 +9833,15 @@
 
 instruct has_negatives(o0RegP pAryR, g3RegI iSizeR, notemp_iRegI resultR,
                        iRegL tmp1L, iRegL tmp2L, iRegL tmp3L, iRegL tmp4L,
-                       flagsReg ccr) 
+                       flagsReg ccr)
 %{
   match(Set resultR (HasNegatives pAryR iSizeR));
   effect(TEMP resultR, TEMP tmp1L, TEMP tmp2L, TEMP tmp3L, TEMP tmp4L, USE pAryR, USE iSizeR, KILL ccr);
   format %{ "has negatives byte[] $pAryR,$iSizeR -> $resultR // KILL $tmp1L,$tmp2L,$tmp3L,$tmp4L" %}
   ins_encode %{
-    __ has_negatives($pAryR$$Register, $iSizeR$$Register, 
+    __ has_negatives($pAryR$$Register, $iSizeR$$Register,
                      $resultR$$Register,
-                     $tmp1L$$Register, $tmp2L$$Register, 
+                     $tmp1L$$Register, $tmp2L$$Register,
                      $tmp3L$$Register, $tmp4L$$Register);
   %}
   ins_pipe(long_memory_op);
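The has_negatives rule wraps the MacroAssembler intrinsic; its observable contract is simply whether any byte in the array has its sign bit set. A plain C++ rendering of that contract (a sketch of the semantics, not the optimized stub):

    // Sketch only: what the intrinsic reports for a byte[] of length size.
    bool has_negatives(const signed char* ary, int size) {
      for (int i = 0; i < size; i++) {
        if (ary[i] < 0) return true;  // sign bit set, i.e. a non-ASCII byte
      }
      return false;
    }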
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -216,9 +216,7 @@
     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
     __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
     __ sub(FP, t, Gargs);                              // setup parameter pointer
-#ifdef _LP64
     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
-#endif
     __ mov(SP, O5_savedSP);
 
 
@@ -271,27 +269,8 @@
       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
 
       __ BIND(is_long);
-#ifdef _LP64
       __ ba(exit);
       __ delayed()->st_long(O0, addr, G0);      // store entire long
-#else
-#if defined(COMPILER2)
-  // All return values are where we want them, except for Longs.  C2 returns
-  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
-  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
-  // build we simply always use G1.
-  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
-  // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
-  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-
-      __ ba(exit);
-      __ delayed()->stx(G1, addr, G0);  // store entire long
-#else
-      __ st(O1, addr, BytesPerInt);
-      __ ba(exit);
-      __ delayed()->st(O0, addr, G0);
-#endif /* COMPILER2 */
-#endif /* _LP64 */
      }
      return start;
   }
@@ -746,22 +725,10 @@
     address start = __ pc();
     Label miss;
 
-#if defined(COMPILER2) && !defined(_LP64)
-    // Do not use a 'save' because it blows the 64-bit O registers.
-    __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
-    __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
-    __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
-    __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
-    __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
-    Register Rret   = O0;
-    Register Rsub   = O1;
-    Register Rsuper = O2;
-#else
     __ save_frame(0);
     Register Rret   = I0;
     Register Rsub   = I1;
     Register Rsuper = I2;
-#endif
 
     Register L0_ary_len = L0;
     Register L1_ary_ptr = L1;
@@ -775,32 +742,14 @@
     // Match falls through here.
     __ addcc(G0,0,Rret);        // set Z flags, Z result
 
-#if defined(COMPILER2) && !defined(_LP64)
-    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
-    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
-    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
-    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
-    __ retl();                  // Result in Rret is zero; flags set to Z
-    __ delayed()->add(SP,4*wordSize,SP);
-#else
     __ ret();                   // Result in Rret is zero; flags set to Z
     __ delayed()->restore();
-#endif
 
     __ BIND(miss);
     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
 
-#if defined(COMPILER2) && !defined(_LP64)
-    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
-    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
-    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
-    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
-    __ retl();                  // Result in Rret is != 0; flags set to NZ
-    __ delayed()->add(SP,4*wordSize,SP);
-#else
     __ ret();                   // Result in Rret is != 0; flags set to NZ
     __ delayed()->restore();
-#endif
 
     return start;
   }
@@ -828,11 +777,11 @@
   //    Rtmp  -  scratch
   //
   void assert_clean_int(Register Rint, Register Rtmp) {
-#if defined(ASSERT) && defined(_LP64)
+  #if defined(ASSERT)
     __ signx(Rint, Rtmp);
     __ cmp(Rint, Rtmp);
     __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
-#endif
+  #endif
   }
 
   //
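assert_clean_int now runs in every ASSERT build: SIGNX sign-extends the low 32 bits into a temp and the breakpoint trap fires if the register image differs. The invariant as a C++ sketch (hypothetical standalone helper):

    #include <cassert>
    #include <cstdint>

    // Sketch only: a "clean" int register image equals the sign extension of
    // its own low 32 bits, which is what signx + cmp + breakpoint_trap check.
    void assert_clean_int(int64_t reg_image) {
      int64_t extended = static_cast<int64_t>(static_cast<int32_t>(reg_image));
      assert(reg_image == extended && "high 32 bits are not a sign extension");
    }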
@@ -1269,17 +1218,6 @@
       // Aligned arrays have 4 bytes alignment in 32-bits VM
       // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
       //
-#ifndef _LP64
-      // copy a 4-bytes word if necessary to align 'to' to 8 bytes
-      __ andcc(to, 7, G0);
-      __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
-      __ delayed()->ld(from, 0, O3);
-      __ inc(from, 4);
-      __ inc(to, 4);
-      __ dec(count, 4);
-      __ st(O3, to, -4);
-    __ BIND(L_skip_alignment);
-#endif
     } else {
       // copy bytes to align 'to' on 8 byte boundary
       __ andcc(to, 7, G1); // misaligned bytes
@@ -1296,9 +1234,7 @@
       __ delayed()->inc(to);
     __ BIND(L_skip_alignment);
     }
-#ifdef _LP64
     if (!aligned)
-#endif
     {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise fall through to the next
@@ -1395,14 +1331,12 @@
       __ delayed()->stb(O3, end_to, 0);
     __ BIND(L_skip_alignment);
     }
-#ifdef _LP64
     if (aligned) {
       // Both arrays are aligned to 8-bytes in 64-bits VM.
       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
       // in unaligned case.
       __ dec(count, 16);
     } else
-#endif
     {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise jump to the next
@@ -1490,17 +1424,6 @@
       // Aligned arrays have 4 bytes alignment in 32-bits VM
       // and 8 bytes - in 64-bits VM.
       //
-#ifndef _LP64
-      // copy a 2-elements word if necessary to align 'to' to 8 bytes
-      __ andcc(to, 7, G0);
-      __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
-      __ delayed()->ld(from, 0, O3);
-      __ inc(from, 4);
-      __ inc(to, 4);
-      __ dec(count, 2);
-      __ st(O3, to, -4);
-    __ BIND(L_skip_alignment);
-#endif
     } else {
       // copy 1 element if necessary to align 'to' on an 4 bytes
       __ andcc(to, 3, G0);
@@ -1524,9 +1447,7 @@
       __ sth(O4, to, -2);
     __ BIND(L_skip_alignment2);
     }
-#ifdef _LP64
     if (!aligned)
-#endif
     {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise fall through to the next
@@ -1643,9 +1564,7 @@
       __ dec(count, 1 << (shift - 1));
       __ BIND(L_skip_align2);
     }
-#ifdef _LP64
     if (!aligned) {
-#endif
     // align to 8 bytes, we know we are 4 byte aligned to start
     __ andcc(to, 7, G0);
     __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
@@ -1654,9 +1573,7 @@
     __ inc(to, 4);
     __ dec(count, 1 << shift);
     __ BIND(L_fill_32_bytes);
-#ifdef _LP64
     }
-#endif
 
     if (t == T_INT) {
       // Zero extend value
@@ -1857,14 +1774,12 @@
       __ sth(O4, end_to, 0);
     __ BIND(L_skip_alignment2);
     }
-#ifdef _LP64
     if (aligned) {
       // Both arrays are aligned to 8-bytes in 64-bits VM.
       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
       // in unaligned case.
       __ dec(count, 8);
     } else
-#endif
     {
       // Copy with shift 16 bytes per iteration if arrays do not have
       // the same alignment mod 8, otherwise jump to the next
@@ -1974,9 +1889,7 @@
     // Aligned arrays have 4 bytes alignment in 32-bits VM
     // and 8 bytes - in 64-bits VM.
     //
-#ifdef _LP64
     if (!aligned)
-#endif
     {
       // The next check could be put under 'ifndef' since the code in
       // generate_disjoint_long_copy_core() has own checks and set 'offset'.
@@ -2463,16 +2376,12 @@
     __ mov(to, G1);
     __ mov(count, G5);
     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
-  #ifdef _LP64
     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
     if (UseCompressedOops) {
       generate_disjoint_int_copy_core(aligned);
     } else {
       generate_disjoint_long_copy_core(aligned);
     }
-  #else
-    generate_disjoint_int_copy_core(aligned);
-  #endif
     // O0 is used as temp register
     gen_write_ref_array_post_barrier(G1, G5, O0);
 
@@ -2518,15 +2427,11 @@
     __ mov(count, G5);
     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
 
-  #ifdef _LP64
     if (UseCompressedOops) {
       generate_conjoint_int_copy_core(aligned);
     } else {
       generate_conjoint_long_copy_core(aligned);
     }
-  #else
-    generate_conjoint_int_copy_core(aligned);
-  #endif
 
     // O0 is used as temp register
     gen_write_ref_array_post_barrier(G1, G5, O0);
@@ -3138,7 +3043,6 @@
                                                                                 "arrayof_jint_disjoint_arraycopy");
     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
                                                                                 "arrayof_jint_arraycopy");
-#ifdef _LP64
     // In 64 bit we need both aligned and unaligned versions of jint arraycopy.
     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
@@ -3146,14 +3050,6 @@
     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
                                                                                 &entry_jint_arraycopy,
                                                                                 "jint_arraycopy");
-#else
-    // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version
-    // (in fact in 32bit we always have a pre-loop part even in the aligned version,
-    //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
-    StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
-    StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
-#endif
-
 
     //*** jlong
     // It is always aligned
@@ -3178,7 +3074,6 @@
     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
                                                                                       "arrayof_oop_arraycopy_uninit",
                                                                                       /*dest_uninitialized*/true);
-#ifdef _LP64
     if (UseCompressedOops) {
       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
@@ -3193,7 +3088,6 @@
                                                                                     "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
     } else
-#endif
     {
       // oop arraycopy is always aligned on 32bit and 64bit without compressed oops
       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
@@ -5104,17 +4998,6 @@
     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
 
-#if !defined(COMPILER2) && !defined(_LP64)
-    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
-    StubRoutines::_atomic_add_entry          = generate_atomic_add();
-    StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
-    StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
-    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
-    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
-    StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
-#endif  // COMPILER2 !=> _LP64
-
     // Build this early so it's available for the interpreter.
     StubRoutines::_throw_StackOverflowError_entry =
             generate_throw_exception("StackOverflowError throw_exception",
@@ -5222,11 +5105,9 @@
   void stub_prolog(StubCodeDesc* cdesc) {
     # ifdef ASSERT
       // put extra information in the stub code, to make it more readable
-#ifdef _LP64
 // Write the high part of the address
 // [RGV] Check if there is a dependency on the size of this prolog
       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
-#endif
       __ emit_data((intptr_t)cdesc,    relocInfo::none);
       __ emit_data(++_stub_count, relocInfo::none);
     # endif
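With only the 64-bit build left, stub_prolog unconditionally splits the StubCodeDesc pointer across two 32-bit data words, high half first. A sketch of that split (hypothetical helper mirroring the pair of emit_data calls above):

    #include <cstdint>

    // Sketch only: emit a 64-bit pointer as two 32-bit words, high part
    // first, as the two emit_data calls do.
    void emit_pointer(uint64_t cdesc, void (*emit_data)(uint32_t)) {
      emit_data(static_cast<uint32_t>(cdesc >> 32)); // high half
      emit_data(static_cast<uint32_t>(cdesc));       // low half
    }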
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -57,13 +57,9 @@
 // if too small.
 // Run with +PrintInterpreter to get the VM to print out the size.
 // Max size with JVMTI
-#ifdef _LP64
-  // The sethi() instruction generates lots more instructions when shell
-  // stack limit is unlimited, so that's why this is much bigger.
+// The sethi() instruction generates lots more instructions when shell
+// stack limit is unlimited, so that's why this is much bigger.
 int TemplateInterpreter::InterpreterCodeSize = 260 * K;
-#else
-int TemplateInterpreter::InterpreterCodeSize = 230 * K;
-#endif
 
 // Generation of Interpreter
 //
@@ -75,41 +71,6 @@
 
 //----------------------------------------------------------------------------------------------------
 
-#ifndef _LP64
-address TemplateInterpreterGenerator::generate_slow_signature_handler() {
-  address entry = __ pc();
-  Argument argv(0, true);
-
-  // We are in the jni transition frame. Save the last_java_frame corresponding to the
-  // outer interpreter frame
-  //
-  __ set_last_Java_frame(FP, noreg);
-  // make sure the interpreter frame we've pushed has a valid return pc
-  __ mov(O7, I7);
-  __ mov(Lmethod, G3_scratch);
-  __ mov(Llocals, G4_scratch);
-  __ save_frame(0);
-  __ mov(G2_thread, L7_thread_cache);
-  __ add(argv.address_in_frame(), O3);
-  __ mov(G2_thread, O0);
-  __ mov(G3_scratch, O1);
-  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
-  __ delayed()->mov(G4_scratch, O2);
-  __ mov(L7_thread_cache, G2_thread);
-  __ reset_last_Java_frame();
-
-  // load the register arguments (the C code packed them as varargs)
-  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
-      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
-  }
-  __ ret();
-  __ delayed()->
-     restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
-  return entry;
-}
-
-
-#else
 // LP64 passes floating point arguments in F1, F3, F5, etc. instead of
 // O0, O1, O2 etc..
 // Doubles are passed in D0, D2, D4
@@ -206,7 +167,6 @@
      restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
   return entry;
 }
-#endif
 
 void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {
 
@@ -253,11 +213,7 @@
 
   // save and restore any potential method result value around the unlocking operation
   __ stf(FloatRegisterImpl::D, F0, d_tmp);
-#ifdef _LP64
   __ stx(O0, l_tmp);
-#else
-  __ std(O0, l_tmp);
-#endif
 }
 
 void TemplateInterpreterGenerator::restore_native_result(void) {
@@ -266,11 +222,7 @@
 
   // Restore any method result value
   __ ldf(FloatRegisterImpl::D, d_tmp, F0);
-#ifdef _LP64
   __ ldx(l_tmp, O0);
-#else
-  __ ldd(l_tmp, O0);
-#endif
 }
 
 address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
@@ -340,22 +292,6 @@
     __ profile_return_type(O0, G3_scratch, G1_scratch);
   }
 
-#if !defined(_LP64) && defined(COMPILER2)
-  // All return values are where we want them, except for Longs.  C2 returns
-  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
-  // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
-  // build even if we are returning from interpreted we just do a little
-  // stupid shuffing.
-  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
-  // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
-  // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
-
-  if (state == ltos) {
-    __ srl (G1,  0, O1);
-    __ srlx(G1, 32, O0);
-  }
-#endif // !_LP64 && COMPILER2
-
   // The callee returns with the stack possibly adjusted by adapter transition
   // We remove that possible adjustment here.
   // All interpreter local registers are untouched. Any result is passed back
@@ -442,9 +378,6 @@
     case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
     case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
     case T_LONG   :
-#ifndef _LP64
-                    __ mov(O1, Itos_l2);  // move other half of long
-#endif              // ifdef or no ifdef, fall through to the T_INT case
     case T_INT    : __ mov(O0, Itos_i);                         break;
     case T_VOID   : /* nothing to do */                         break;
     case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
@@ -884,9 +817,7 @@
   __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
   __ get_constant_pool_cache( LcpoolCache );   // set LcpoolCache
   __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
-#ifdef _LP64
   __ add( Lmonitors, STACK_BIAS, Lmonitors );   // Account for 64 bit stack bias
-#endif
   __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp
 
   // setup interpreter activation registers
@@ -1481,12 +1412,7 @@
   // Move the result handler address
   __ mov(Lscratch, G3_scratch);
   // return possible result to the outer frame
-#ifndef __LP64
-  __ mov(O0, I0);
-  __ restore(O1, G0, O1);
-#else
   __ restore(O0, G0, O0);
-#endif /* __LP64 */
 
   // Move result handler to expected register
   __ mov(G3_scratch, Lscratch);
@@ -1566,17 +1492,6 @@
     restore_native_result();
   }
 
-#if defined(COMPILER2) && !defined(_LP64)
-
-  // C2 expects long results in G1 we can't tell if we're returning to interpreted
-  // or compiled so just be safe.
-
-  __ sllx(O0, 32, G1);          // Shift bits into high G1
-  __ srl (O1, 0, O1);           // Zero extend O1
-  __ or3 (O1, G1, G1);          // OR 64 bits into G1
-
-#endif /* COMPILER2 && !_LP64 */
-
   // dispose of return address and remove activation
 #ifdef ASSERT
   {
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -248,12 +248,7 @@
 void TemplateTable::lconst(int value) {
   transition(vtos, ltos);
   assert(value >= 0, "check this code");
-#ifdef _LP64
   __ set(value, Otos_l);
-#else
-  __ set(value, Otos_l2);
-  __ clr( Otos_l1);
-#endif
 }
 
 
@@ -406,24 +401,12 @@
   // Check out Conversions.java for an example.
   // Also ConstantPool::header_size() is 20, which makes it very difficult
   // to double-align double on the constant pool.  SG, 11/7/97
-#ifdef _LP64
   __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
-#else
-  FloatRegister f = Ftos_d;
-  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
-  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
-         f->successor());
-#endif
   __ push(dtos);
   __ ba_short(exit);
 
   __ bind(Long);
-#ifdef _LP64
   __ ldx(G3_scratch, base_offset, Otos_l);
-#else
-  __ ld(G3_scratch, base_offset, Otos_l);
-  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
-#endif
   __ push(ltos);
 
   __ bind(exit);
@@ -1128,19 +1111,11 @@
   transition(ltos, ltos);
   __ pop_l(O2);
   switch (op) {
-#ifdef _LP64
    case  add:  __  add(O2, Otos_l, Otos_l);  break;
    case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
    case _and:  __ and3(O2, Otos_l, Otos_l);  break;
    case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
    case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
-#else
-   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
-   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
-   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
-   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
-   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
-#endif
    default: ShouldNotReachHere();
   }
 }
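The deleted 32-bit arms show how long arithmetic was synthesized from word pairs: ADDCC adds the low words and latches the carry, then ADDC folds that carry into the high words. The carry chain as a C++ sketch (illustration only):

    #include <cstdint>

    // Sketch only: 64-bit add built from 32-bit halves, as the removed
    // addcc/addc pair did for the Otos_l1/Otos_l2 register pair.
    void add64(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo,
               uint32_t& r_hi, uint32_t& r_lo) {
      r_lo = a_lo + b_lo;                     // addcc: sets the carry flag
      uint32_t carry = (r_lo < a_lo) ? 1 : 0; // unsigned wraparound => carry out
      r_hi = a_hi + b_hi + carry;             // addc: add with carry in
    }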
@@ -1171,14 +1146,10 @@
   Label regular;
   __ cmp(Otos_i, -1);
   __ br(Assembler::notEqual, false, Assembler::pt, regular);
-#ifdef _LP64
   // Don't put set in delay slot
   // Set will turn into multiple instructions in 64 bit mode
   __ delayed()->nop();
   __ set(min_int, G4_scratch);
-#else
-  __ delayed()->set(min_int, G4_scratch);
-#endif
   Label done;
   __ cmp(O1, G4_scratch);
   __ br(Assembler::equal, true, Assembler::pt, done);
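The surrounding idiv template guards the one case two's-complement division cannot express: min_int divided by -1. Java semantics require the result to wrap back to min_int, which is why the generated code tests for a -1 divisor and a min_int dividend before issuing the divide. In sketch form:

    #include <climits>

    // Sketch only: Java idiv semantics for the overflow case the template
    // special-cases (INT_MIN / -1 would trap or overflow in hardware).
    int java_idiv(int dividend, int divisor) {
      if (divisor == -1 && dividend == INT_MIN) return INT_MIN;
      return dividend / divisor;
    }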
@@ -1202,11 +1173,7 @@
 void TemplateTable::lmul() {
   transition(ltos, ltos);
   __ pop_l(O2);
-#ifdef _LP64
   __ mulx(Otos_l, O2, Otos_l);
-#else
-  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
-#endif
 
 }
 
@@ -1216,15 +1183,9 @@
 
   // check for zero
   __ pop_l(O2);
-#ifdef _LP64
   __ tst(Otos_l);
   __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
   __ sdivx(O2, Otos_l, Otos_l);
-#else
-  __ orcc(Otos_l1, Otos_l2, G0);
-  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
-  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
-#endif
 }
 
 
@@ -1233,17 +1194,11 @@
 
   // check for zero
   __ pop_l(O2);
-#ifdef _LP64
   __ tst(Otos_l);
   __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
   __ sdivx(O2, Otos_l, Otos_l2);
   __ mulx (Otos_l2, Otos_l, Otos_l2);
   __ sub  (O2, Otos_l2, Otos_l);
-#else
-  __ orcc(Otos_l1, Otos_l2, G0);
-  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
-  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
-#endif
 }
 
 
@@ -1251,11 +1206,7 @@
   transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
 
   __ pop_l(O2);                          // shift value in O2, O3
-#ifdef _LP64
   __ sllx(O2, Otos_i, Otos_l);
-#else
-  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
-#endif
 }
 
 
@@ -1263,11 +1214,7 @@
   transition(itos, ltos); // %%%% see lshl comment
 
   __ pop_l(O2);                          // shift value in O2, O3
-#ifdef _LP64
   __ srax(O2, Otos_i, Otos_l);
-#else
-  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
-#endif
 }
 
 
@@ -1276,11 +1223,7 @@
   transition(itos, ltos); // %%%% see lshl comment
 
   __ pop_l(O2);                          // shift value in O2, O3
-#ifdef _LP64
   __ srlx(O2, Otos_i, Otos_l);
-#else
-  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
-#endif
 }
 
 
@@ -1293,15 +1236,9 @@
    case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
    case  rem:
      assert(Ftos_f == F0, "just checking");
-#ifdef _LP64
      // LP64 calling conventions use F1, F3 for passing 2 floats
      __ pop_f(F1);
      __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
-#else
-     __ pop_i(O0);
-     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
-     __ ld( __ d_tmp, O1 );
-#endif
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      assert( Ftos_f == F0, "fix this code" );
      break;
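frem now always marshals its two float arguments into F1 and F3 per the LP64 calling convention before calling SharedRuntime::frem. The runtime computes the Java float remainder, which matches C's fmod rather than the IEEE remainder operation; a sketch under that assumption:

    #include <cmath>

    // Sketch only: the remainder the runtime call computes; Java's % on
    // floats is specified to behave like C fmod (assumption stated above).
    float frem(float dividend, float divisor) {
      return std::fmod(dividend, divisor);
    }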
@@ -1319,18 +1256,9 @@
    case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
    case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
    case  rem:
-#ifdef _LP64
      // Pass arguments in D0, D2
      __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
      __ pop_d( F0 );
-#else
-     // Pass arguments in O0O1, O2O3
-     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
-     __ ldd( __ d_tmp, O2 );
-     __ pop_d(Ftos_f);
-     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
-     __ ldd( __ d_tmp, O0 );
-#endif
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      assert( Ftos_d == F0, "fix this code" );
      break;
@@ -1348,11 +1276,7 @@
 
 void TemplateTable::lneg() {
   transition(ltos, ltos);
-#ifdef _LP64
   __ sub(G0, Otos_l, Otos_l);
-#else
-  __ lneg(Otos_l1, Otos_l2);
-#endif
 }
 
 
@@ -1437,15 +1361,8 @@
   Label done;
   switch (bytecode()) {
    case Bytecodes::_i2l:
-#ifdef _LP64
     // Sign extend the 32 bits
     __ sra ( Otos_i, 0, Otos_l );
-#else
-    __ addcc(Otos_i, 0, Otos_l2);
-    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
-    __ delayed()->clr(Otos_l1);
-    __ set(~0, Otos_l1);
-#endif
     break;
 
    case Bytecodes::_i2f:
@@ -1476,12 +1393,8 @@
     break;
 
    case Bytecodes::_l2i:
-#ifndef _LP64
-    __ mov(Otos_l2, Otos_i);
-#else
     // Sign-extend into the high 32 bits
     __ sra(Otos_l, 0, Otos_i);
-#endif
     break;
 
    case Bytecodes::_l2f:
@@ -1512,11 +1425,7 @@
    case Bytecodes::_f2l:
     // must uncache tos
     __ push_f();
-#ifdef _LP64
     __ pop_f(F1);
-#else
-    __ pop_i(O0);
-#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
     break;
 
@@ -1528,13 +1437,8 @@
    case Bytecodes::_d2l:
     // must uncache tos
     __ push_d();
-#ifdef _LP64
     // LP64 calling conventions pass first double arg in D0
     __ pop_d( Ftos_d );
-#else
-    __ pop_i( O0 );
-    __ pop_i( O1 );
-#endif
     __ call_VM_leaf(Lscratch,
         bytecode() == Bytecodes::_d2i
           ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
@@ -1554,13 +1458,8 @@
 void TemplateTable::lcmp() {
   transition(ltos, itos);
 
-#ifdef _LP64
   __ pop_l(O1); // pop off value 1, value 2 is in O0
   __ lcmp( O1, Otos_l, Otos_i );
-#else
-  __ pop_l(O2); // cmp O2,3 to O0,1
-  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
-#endif
 }
 
 
@@ -1756,7 +1655,6 @@
   __ access_local_returnAddress(G3_scratch, Otos_i);
   // Otos_i contains the bci, compute the bcp from that
 
-#ifdef _LP64
 #ifdef ASSERT
   // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
   // the result.  The return address (really a BCI) was stored with an
@@ -1772,7 +1670,6 @@
      __ bind (zzz) ;
   }
 #endif
-#endif
 
   __ profile_ret(vtos, Otos_i, G4_scratch);
 
@@ -1808,10 +1705,8 @@
   // load lo, hi
   __ ld(O1, 1 * BytesPerInt, O2);       // Low Byte
   __ ld(O1, 2 * BytesPerInt, O3);       // High Byte
-#ifdef _LP64
   // Sign extend the 32 bits
   __ sra ( Otos_i, 0, Otos_i );
-#endif /* _LP64 */
 
   // check against lo & hi
   __ cmp( Otos_i, O2);
@@ -3400,11 +3295,7 @@
       // Check if tlab should be discarded (refill_waste_limit >= free)
       __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
       __ sub(RendValue, RoldTopValue, RfreeValue);
-#ifdef _LP64
       __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
-#else
-      __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
-#endif
       __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
 
       // increment waste limit to prevent getting stuck on this slow path
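The TLAB check above compares the refill waste limit against the remaining free space, now always scaled to heap words with a 64-bit shift. The test in plain C++ (a sketch; the names and the slow-path framing are illustrative):

    #include <cstdint>

    // Sketch only: take the slow path (discard and refill the TLAB) when the
    // allowed waste covers the remaining free words; otherwise keep the TLAB.
    bool should_refill_tlab(uintptr_t end, uintptr_t top,
                            uintptr_t refill_waste_limit_words,
                            unsigned log_heap_word_size) {
      uintptr_t free_words = (end - top) >> log_heap_word_size; // srlx
      return refill_waste_limit_words >= free_words;            // tlab waste is small
    }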
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,12 +70,10 @@
     if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
       FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
     }
-#ifdef _LP64
     // 32-bit oops don't make sense for the 64-bit VM on sparc
     // since the 32-bit VM has the same registers and smaller objects.
     Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
     Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
-#endif // _LP64
 #ifdef COMPILER2
     // Indirect branch is the same cost as direct
     if (FLAG_IS_DEFAULT(UseJumpTables)) {
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -232,7 +232,7 @@
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
-      const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
+      const int basic = 34 * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
                         (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
@@ -257,7 +257,6 @@
   //   ld  [ %g3 + 0xe8 ], %l2
   //   sll  %l2, 2, %l2
   //   add  %l2, 0x134, %l2
-  //   and  %l2, -8, %l2        ! NOT_LP64 only
   //   add  %g3, %l2, %l2
   //   add  %g3, 4, %g3
   //   ld  [ %l2 ], %l5
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -70,7 +70,6 @@
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   intptr_t rv;
-#ifdef _LP64
   __asm__ volatile(
     "1: \n\t"
     " ldx    [%2], %%o2\n\t"
@@ -83,20 +82,6 @@
     : "=r" (rv)
     : "r" (add_value), "r" (dest)
     : "memory", "o2", "o3");
-#else
-  __asm__ volatile(
-    "1: \n\t"
-    " ld     [%2], %%o2\n\t"
-    " add    %1, %%o2, %%o3\n\t"
-    " cas    [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    1b\n\t"
-    "  nop\n\t"
-    " add    %1, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-#endif // _LP64
   return rv;
 }
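With the 32-bit assembly gone, add_ptr is a single LDX/ADD/CASX retry loop. The same loop in portable C++ (a sketch using the GCC builtin; the real implementation is the inline assembly above):

    #include <cstdint>

    // Sketch only: the ldx/add/casx retry loop from the asm above.
    intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
      intptr_t old_val, new_val;
      do {
        old_val = *dest;               // ldx  [dest]
        new_val = old_val + add_value; // add
      } while (!__sync_bool_compare_and_swap(dest, old_val, new_val)); // casx + bne
      return new_val;                  // the asm likewise returns the updated value
    }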
 
@@ -117,7 +102,6 @@
 
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
   intptr_t rv = exchange_value;
-#ifdef _LP64
   __asm__ volatile(
     "1:\n\t"
     " mov    %1, %%o3\n\t"
@@ -130,13 +114,6 @@
     : "=r" (rv)
     : "r" (exchange_value), "r" (dest)
     : "memory", "o2", "o3");
-#else
-  __asm__ volatile(
-    "swap    [%2],%1\n\t"
-    : "=r" (rv)
-    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
-    : "memory");
-#endif // _LP64
   return rv;
 }
 
@@ -156,7 +133,6 @@
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
   jlong rv;
   __asm__ volatile(
     " casx   [%2], %3, %0"
@@ -164,44 +140,15 @@
     : "0" (exchange_value), "r" (dest), "r" (compare_value)
     : "memory");
   return rv;
-#else
-  volatile jlong_accessor evl, cvl, rv;
-  evl.long_value = exchange_value;
-  cvl.long_value = compare_value;
-
-  __asm__ volatile(
-    " sllx   %2, 32, %2\n\t"
-    " srl    %3, 0,  %3\n\t"
-    " or     %2, %3, %2\n\t"
-    " sllx   %5, 32, %5\n\t"
-    " srl    %6, 0,  %6\n\t"
-    " or     %5, %6, %5\n\t"
-    " casx   [%4], %5, %2\n\t"
-    " srl    %2, 0, %1\n\t"
-    " srlx   %2, 32, %0\n\t"
-    : "=r" (rv.words[0]), "=r" (rv.words[1])
-    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
-    : "memory");
-
-  return rv.long_value;
-#endif
 }
 
 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
   intptr_t rv;
-#ifdef _LP64
   __asm__ volatile(
     " casx    [%2], %3, %0"
     : "=r" (rv)
     : "0" (exchange_value), "r" (dest), "r" (compare_value)
     : "memory");
-#else
-  __asm__ volatile(
-    " cas     [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-#endif // _LP64
   return rv;
 }
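
Every body retained in this file is either a single casx (64-bit compare-and-swap) or a load/compute/casx retry loop. A hedged C++ rendering of the add_ptr loop, substituting GCC's __sync builtin for the hand-written ldx/add/casx assembly; sketch only, the kept code is the inline assembly above:

    #include <cstdint>

    inline intptr_t atomic_add_sketch(intptr_t add_value,
                                      volatile intptr_t* dest) {
      intptr_t old;
      do {
        old = *dest;                                    // ldx  [dest], %o2
      } while (!__sync_bool_compare_and_swap(dest, old,
                                             old + add_value)); // casx; bne 1b
      return old + add_value;                           // add  %1, %%o2, %0
    }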
 
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -55,15 +55,9 @@
 
 // Linux/Sparc has rather obscure naming of registers in sigcontext
 // different between 32 and 64 bits
-#ifdef _LP64
 #define SIG_PC(x) ((x)->sigc_regs.tpc)
 #define SIG_NPC(x) ((x)->sigc_regs.tnpc)
 #define SIG_REGS(x) ((x)->sigc_regs)
-#else
-#define SIG_PC(x) ((x)->si_regs.pc)
-#define SIG_NPC(x) ((x)->si_regs.npc)
-#define SIG_REGS(x) ((x)->si_regs)
-#endif
 
 // These are used to reference registers in sigcontext
 enum {
@@ -661,21 +655,7 @@
 }
 
 bool os::is_allocatable(size_t bytes) {
-#ifdef _LP64
   return true;
-#else
-  if (bytes < 2 * G) {
-    return true;
-  }
-
-  char* addr = reserve_memory(bytes, NULL);
-
-  if (addr != NULL) {
-    release_memory(addr, bytes);
-  }
-
-  return addr != NULL;
-#endif // _LP64
 }
 
 ///////////////////////////////////////////////////////////////////////////////
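
The deleted 32-bit branch of is_allocatable answered the question empirically: reserve the requested range, release it, and report whether the reservation succeeded. A sketch of that probe pattern, assuming POSIX mmap/munmap in place of os::reserve_memory/release_memory:

    #include <sys/mman.h>
    #include <cstddef>

    // Can the process map `bytes` of address space right now? (sketch)
    inline bool is_allocatable_probe(size_t bytes) {
      void* addr = mmap(NULL, bytes, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (addr == MAP_FAILED) return false;
      munmap(addr, bytes);
      return true;
    }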
--- a/hotspot/src/os_cpu/linux_sparc/vm/prefetch_linux_sparc.inline.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/linux_sparc/vm/prefetch_linux_sparc.inline.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -27,8 +27,6 @@
 
 #include "runtime/prefetch.hpp"
 
-#if defined(COMPILER2) || defined(_LP64)
-
 inline void Prefetch::read(void *loc, intx interval) {
   __asm__ volatile("prefetch [%0+%1], 0" : : "r" (loc), "r" (interval) : "memory" );
 }
@@ -37,11 +35,4 @@
   __asm__ volatile("prefetch [%0+%1], 2" : : "r" (loc), "r" (interval) : "memory" );
 }
 
-#else
-
-inline void Prefetch::read (void *loc, intx interval) {}
-inline void Prefetch::write(void *loc, intx interval) {}
-
-#endif
-
 #endif // OS_CPU_LINUX_SPARC_VM_PREFETCH_LINUX_SPARC_INLINE_HPP
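
Both kept bodies emit one SPARC prefetch instruction; the trailing immediate selects the variant (0 = several reads expected, 2 = several writes expected). A rough portable analogue, assuming GCC's __builtin_prefetch (rw flag 0 for read, 1 for write):

    // Sketch; HotSpot keeps the raw instruction so the variant is explicit.
    inline void prefetch_read_sketch(void* loc, long interval) {
      __builtin_prefetch((char*)loc + interval, /*rw=*/0, /*locality=*/3);
    }
    inline void prefetch_write_sketch(void* loc, long interval) {
      __builtin_prefetch((char*)loc + interval, /*rw=*/1, /*locality=*/3);
    }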
--- a/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,36 +50,10 @@
 inline void Atomic::dec_ptr(volatile void*     dest) { (void)add_ptr(-1, dest); }
 
 
-#ifdef _LP64
-
 inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
 
-#else
-
-extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
-
-inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-  _Atomic_move_long_v9(src, dst);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif
-
 #ifdef _GNU_SOURCE
 
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
@@ -101,7 +75,6 @@
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
   intptr_t rv;
-#ifdef _LP64
   __asm__ volatile(
     "1: \n\t"
     " ldx    [%2], %%o2\n\t"
@@ -114,20 +87,6 @@
     : "=r" (rv)
     : "r" (add_value), "r" (dest)
     : "memory", "o2", "o3");
-#else //_LP64
-  __asm__ volatile(
-    "1: \n\t"
-    " ld     [%2], %%o2\n\t"
-    " add    %1, %%o2, %%o3\n\t"
-    " cas    [%2], %%o2, %%o3\n\t"
-    " cmp    %%o2, %%o3\n\t"
-    " bne    1b\n\t"
-    "  nop\n\t"
-    " add    %1, %%o2, %0\n\t"
-    : "=r" (rv)
-    : "r" (add_value), "r" (dest)
-    : "memory", "o2", "o3");
-#endif // _LP64
   return rv;
 }
 
@@ -148,7 +107,6 @@
 
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
   intptr_t rv = exchange_value;
-#ifdef _LP64
   __asm__ volatile(
     "1:\n\t"
     " mov    %1, %%o3\n\t"
@@ -161,13 +119,6 @@
     : "=r" (rv)
     : "r" (exchange_value), "r" (dest)
     : "memory", "o2", "o3");
-#else  //_LP64
-  __asm__ volatile(
-    "swap    [%2],%1\n\t"
-    : "=r" (rv)
-    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
-    : "memory");
-#endif // _LP64
   return rv;
 }
 
@@ -187,7 +138,6 @@
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
   jlong rv;
   __asm__ volatile(
     " casx   [%2], %3, %0"
@@ -195,44 +145,15 @@
     : "0" (exchange_value), "r" (dest), "r" (compare_value)
     : "memory");
   return rv;
-#else  //_LP64
-  volatile jlong_accessor evl, cvl, rv;
-  evl.long_value = exchange_value;
-  cvl.long_value = compare_value;
-
-  __asm__ volatile(
-    " sllx   %2, 32, %2\n\t"
-    " srl    %3, 0,  %3\n\t"
-    " or     %2, %3, %2\n\t"
-    " sllx   %5, 32, %5\n\t"
-    " srl    %6, 0,  %6\n\t"
-    " or     %5, %6, %5\n\t"
-    " casx   [%4], %5, %2\n\t"
-    " srl    %2, 0, %1\n\t"
-    " srlx   %2, 32, %0\n\t"
-    : "=r" (rv.words[0]), "=r" (rv.words[1])
-    : "r"  (evl.words[0]), "r" (evl.words[1]), "r" (dest), "r" (cvl.words[0]), "r" (cvl.words[1])
-    : "memory");
-
-  return rv.long_value;
-#endif  //_LP64
 }
 
 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
   intptr_t rv;
-#ifdef _LP64
   __asm__ volatile(
     " casx    [%2], %3, %0"
     : "=r" (rv)
     : "0" (exchange_value), "r" (dest), "r" (compare_value)
     : "memory");
-#else  //_LP64
-  __asm__ volatile(
-    " cas     [%2], %3, %0"
-    : "=r" (rv)
-    : "0" (exchange_value), "r" (dest), "r" (compare_value)
-    : "memory");
-#endif // _LP64
   return rv;
 }
 
@@ -242,8 +163,6 @@
 
 #else // _GNU_SOURCE
 
-#if defined(COMPILER2) || defined(_LP64)
-
 // This is the interface to the atomic instructions in solaris_sparc.il.
 // It's very messy because we need to support v8 and these instructions
 // are illegal there.  When sparc v8 is dropped, we can drop out lots of
@@ -266,11 +185,7 @@
 }
 
 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef _LP64
   return _Atomic_add64(add_value, dest);
-#else  //_LP64
-  return _Atomic_add32(add_value, dest);
-#endif // _LP64
 }
 
 inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
@@ -283,11 +198,7 @@
 }
 
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-#ifdef _LP64
   return _Atomic_swap64(exchange_value, dest);
-#else  // _LP64
-  return _Atomic_swap32(exchange_value, dest);
-#endif // _LP64
 }
 
 inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
@@ -300,77 +211,18 @@
 }
 
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
   // Return 64 bit value in %o0
   return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
-#else  // _LP64
-  // Return 64 bit value in %o0,%o1 by hand
-  return _Atomic_casl(exchange_value, dest, compare_value);
-#endif // _LP64
 }
 
 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-#ifdef _LP64
   return _Atomic_cas64(exchange_value, dest, compare_value);
-#else  // _LP64
-  return _Atomic_cas32(exchange_value, dest, compare_value);
-#endif // _LP64
 }
 
 inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
   return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
 }
 
-
-#else // _LP64 || COMPILER2
-
-
-// 32-bit compiler1 only
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (*os::atomic_add_func)(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  return (*os::atomic_xchg_func)(exchange_value, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-#endif // _LP64 || COMPILER2
-
 #endif // _GNU_SOURCE
 
 #endif // OS_CPU_SOLARIS_SPARC_VM_ATOMIC_SOLARIS_SPARC_HPP
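
The most involved deletion in this file is the 32-bit jlong cmpxchg: with only 32-bit registers visible, each 64-bit operand had to be packed into one register (sllx/srl/or) before a single casx could swap it, and the old value split back into two words afterwards (srl/srlx). The packing and splitting, restated as a C++ sketch:

    #include <cstdint>

    // sllx %hi,32,%hi ; srl %lo,0,%lo ; or %hi,%lo,%hi
    inline uint64_t pack_words(uint32_t hi, uint32_t lo) {
      return ((uint64_t)hi << 32) | (uint64_t)lo;
    }

    // srlx %v,32 -> hi ; srl %v,0 -> lo
    inline void split_words(uint64_t v, uint32_t& hi, uint32_t& lo) {
      hi = (uint32_t)(v >> 32);
      lo = (uint32_t)v;
    }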
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,13 +33,9 @@
 define_pd_global(size_t, JVMInvokeMethodSlack,   12288);
 
 // Used on 64 bit platforms for UseCompressedOops base address
-#ifdef _LP64
 // use 6G as default base address because by default the OS maps the application
 // to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
 define_pd_global(size_t, HeapBaseMinAddress,     CONST64(6)*G);
-#else
-define_pd_global(size_t, HeapBaseMinAddress,     2*G);
-#endif
 
 
 
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Wed Apr 12 17:52:04 2017 -0400
@@ -85,11 +85,7 @@
 // HotSpot guard pages is added later.
 size_t os::Posix::_compiler_thread_min_stack_allowed = 104 * K;
 size_t os::Posix::_java_thread_min_stack_allowed = 86 * K;
-#ifdef _LP64
 size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K;
-#else
-size_t os::Posix::_vm_internal_thread_min_stack_allowed = 96 * K;
-#endif
 
 int os::Solaris::max_register_window_saves_before_flushing() {
   // We should detect this at run time. For now, filling
@@ -320,11 +316,7 @@
 }
 
 bool os::is_allocatable(size_t bytes) {
-#ifdef _LP64
    return true;
-#else
-   return (bytes <= (size_t)3835*M);
-#endif
 }
 
 extern "C" JNIEXPORT int
@@ -733,85 +725,6 @@
     // Nothing needed on Sparc.
 }
 
-#if !defined(COMPILER2) && !defined(_LP64)
-
-// These routines are the initial value of atomic_xchg_entry(),
-// atomic_cmpxchg_entry(), atomic_add_entry() and fence_entry()
-// until initialization is complete.
-// TODO - remove when the VM drops support for V8.
-
-typedef jint  xchg_func_t        (jint,  volatile jint*);
-typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
-typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
-typedef jint  add_func_t         (jint,  volatile jint*);
-
-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
-  // try to use the stub:
-  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
-
-  if (func != NULL) {
-    os::atomic_xchg_func = func;
-    return (*func)(exchange_value, dest);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  jint old_value = *dest;
-  *dest = exchange_value;
-  return old_value;
-}
-
-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
-  // try to use the stub:
-  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
-
-  if (func != NULL) {
-    os::atomic_cmpxchg_func = func;
-    return (*func)(exchange_value, dest, compare_value);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  jint old_value = *dest;
-  if (old_value == compare_value)
-    *dest = exchange_value;
-  return old_value;
-}
-
-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
-  // try to use the stub:
-  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
-
-  if (func != NULL) {
-    os::atomic_cmpxchg_long_func = func;
-    return (*func)(exchange_value, dest, compare_value);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  jlong old_value = *dest;
-  if (old_value == compare_value)
-    *dest = exchange_value;
-  return old_value;
-}
-
-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
-  // try to use the stub:
-  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
-
-  if (func != NULL) {
-    os::atomic_add_func = func;
-    return (*func)(add_value, dest);
-  }
-  assert(Threads::number_of_threads() == 0, "for bootstrap only");
-
-  return (*dest) += add_value;
-}
-
-xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
-cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
-cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
-add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-
-#endif // !_LP64 && !COMPILER2
-
 #if defined(__sparc) && defined(COMPILER2) && defined(_GNU_SOURCE)
  // See file build/solaris/makefiles/$compiler.make
 // For compiler1 the architecture is v8 and fprs isn't present in v8
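
The block deleted above implemented a bootstrap pattern: each atomic entry point is a function pointer that starts out at a plain C++ fallback, and the first call made after the assembler stubs exist rebinds the pointer to the generated stub; the fallback only ever runs while the VM is still single-threaded. The pattern in miniature, with a hypothetical lookup_generated_stub standing in for the StubRoutines query:

    typedef int atomic_add_fn(int, volatile int*);

    // Hypothetical stand-in for StubRoutines::atomic_add_entry().
    static atomic_add_fn* lookup_generated_stub() { return nullptr; }

    static int atomic_add_bootstrap(int add_value, volatile int* dest);
    static atomic_add_fn* atomic_add_func = atomic_add_bootstrap;

    static int atomic_add_bootstrap(int add_value, volatile int* dest) {
      if (atomic_add_fn* func = lookup_generated_stub()) {
        atomic_add_func = func;        // later calls go straight to the stub
        return (*func)(add_value, dest);
      }
      // Only one thread exists during bootstrap, so a plain RMW is safe.
      return (*dest) += add_value;
    }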
--- a/hotspot/src/os_cpu/solaris_sparc/vm/prefetch_solaris_sparc.inline.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/prefetch_solaris_sparc.inline.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,8 +27,6 @@
 
 #include "runtime/prefetch.hpp"
 
-#if defined(COMPILER2) || defined(_LP64)
-
 // For Sun Studio the implementation is in solaris_sparc.il
 // For gcc the implementation is just below
 extern "C" void _Prefetch_read (void *loc, intx interval);
@@ -55,11 +53,4 @@
 }
 #endif // _GNU_SOURCE
 
-#else  // defined(COMPILER2) || defined(_LP64)
-
-inline void Prefetch::read (void *loc, intx interval) {}
-inline void Prefetch::write(void *loc, intx interval) {}
-
-#endif // defined(COMPILER2) || defined(_LP64)
-
 #endif // OS_CPU_SOLARIS_SPARC_VM_PREFETCH_SOLARIS_SPARC_INLINE_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp	Wed Apr 12 08:02:29 2017 -0400
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp	Wed Apr 12 17:52:04 2017 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,10 +63,6 @@
 
   static int o_reg_temps_offset_in_bytes() { return offset_of(JavaThread, _o_reg_temps); }
 
-#ifndef _LP64
-  address o_reg_temps(int i) { return (address)&_o_reg_temps[i]; }
-#endif
-
   static int saved_exception_npc_offset_in_bytes() { return offset_of(JavaThread,_saved_exception_npc); }
 
   address  saved_exception_npc()             { return _saved_exception_npc; }
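
The surviving accessors in this header exist so generated code can address thread-local fields as a constant byte offset from the thread register; HotSpot's offset_of is essentially offsetof with extra guarding. A tiny standalone illustration using a hypothetical struct:

    #include <cstddef>

    struct ExampleThread {        // hypothetical stand-in for JavaThread
      long  _unrelated;
      void* _saved_exception_npc;
    };

    // Generated code then loads [thread_reg + npc_offset].
    const size_t npc_offset = offsetof(ExampleThread, _saved_exception_npc);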