Merge
author kvn
Fri, 14 Jun 2013 16:33:34 -0700
changeset 18103 a17a8a4b7e5c
parent 18068 45a0e81d43b7 (current diff)
parent 18102 5c2c46c821ba (diff)
child 18104 590c10f73634
Merge
hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp
hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp
hotspot/src/share/vm/opto/matcher.cpp
hotspot/src/share/vm/prims/jvm.cpp
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -57,7 +57,6 @@
     fbp_op2   = 5,
     br_op2    = 2,
     bp_op2    = 1,
-    cb_op2    = 7, // V8
     sethi_op2 = 4
   };
 
@@ -145,7 +144,6 @@
     ldsh_op3     = 0x0a,
     ldx_op3      = 0x0b,
 
-    ldstub_op3   = 0x0d,
     stx_op3      = 0x0e,
     swap_op3     = 0x0f,
 
@@ -163,15 +161,6 @@
 
     prefetch_op3 = 0x2d,
 
-
-    ldc_op3      = 0x30,
-    ldcsr_op3    = 0x31,
-    lddc_op3     = 0x33,
-    stc_op3      = 0x34,
-    stcsr_op3    = 0x35,
-    stdcq_op3    = 0x36,
-    stdc_op3     = 0x37,
-
     casa_op3     = 0x3c,
     casxa_op3    = 0x3e,
 
@@ -574,17 +563,11 @@
   static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
 
   // instruction only in v9
-  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
-
-  // instruction only in v8
-  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
+  static void v9_only() { } // do nothing
 
   // instruction deprecated in v9
   static void v9_dep()  { } // do nothing for now
 
-  // some float instructions only exist for single prec. on v8
-  static void v8_s_only(FloatRegisterImpl::Width w)  { if (w != FloatRegisterImpl::S)  v9_only(); }
-
   // v8 has no CC field
   static void v8_no_cc(CC cc)  { if (cc)  v9_only(); }
 
@@ -730,11 +713,6 @@
   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
 
-  // pp 121 (V8)
-
-  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
-  inline void cb( Condition c, bool a, Label& L );
-
   // pp 149
 
   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
@@ -775,8 +753,8 @@
 
   // pp 157
 
-  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
-  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
+  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
+  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
 
   // pp 159
 
@@ -794,21 +772,11 @@
 
   // pp 162
 
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
-
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
-
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
-  // on v8 to do negation of single, double and quad precision floats.
+  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
 
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) |  opf(0x05) | fs2(sd, w)); }
-
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
+  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
 
-  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
-  // on v8 to do abs operation on single/double/quad precision floats.
-
-  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
+  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
 
   // pp 163
 
@@ -839,11 +807,6 @@
   void impdep1( int id1, int const19a ) { v9_only();  emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
   void impdep2( int id1, int const19a ) { v9_only();  emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
 
-  // pp 149 (v8)
-
-  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
-
   // pp 170
 
   void jmpl( Register s1, Register s2, Register d );
@@ -860,16 +823,6 @@
   inline void ldxfsr( Register s1, Register s2 );
   inline void ldxfsr( Register s1, int simm13a);
 
-  // pp 94 (v8)
-
-  inline void ldc(   Register s1, Register s2, int crd );
-  inline void ldc(   Register s1, int simm13a, int crd);
-  inline void lddc(  Register s1, Register s2, int crd );
-  inline void lddc(  Register s1, int simm13a, int crd);
-  inline void ldcsr( Register s1, Register s2, int crd );
-  inline void ldcsr( Register s1, int simm13a, int crd);
-
-
   // 173
 
   void ldfa(  FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only();  emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
@@ -910,18 +863,6 @@
   void lduwa(  Register s1, int simm13a,         Register d ) {             emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   void ldxa(   Register s1, Register s2, int ia, Register d ) { v9_only();  emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void ldxa(   Register s1, int simm13a,         Register d ) { v9_only();  emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void ldda(   Register s1, Register s2, int ia, Register d ) { v9_dep();   emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldda(   Register s1, int simm13a,         Register d ) { v9_dep();   emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
-  // pp 179
-
-  inline void ldstub(  Register s1, Register s2, Register d );
-  inline void ldstub(  Register s1, int simm13a, Register d);
-
-  // pp 180
-
-  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
-  void ldstuba( Register s1, int simm13a,         Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 181
 
@@ -992,11 +933,6 @@
   void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
-  // pp 199
-
-  void mulscc(   Register s1, Register s2, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
-  void mulscc(   Register s1, int simm13a, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-
   // pp 201
 
   void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
@@ -1116,17 +1052,6 @@
   void stda(  Register d, Register s1, Register s2, int ia ) {             emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   void stda(  Register d, Register s1, int simm13a         ) {             emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
-  // pp 97 (v8)
-
-  inline void stc(   int crd, Register s1, Register s2 );
-  inline void stc(   int crd, Register s1, int simm13a);
-  inline void stdc(  int crd, Register s1, Register s2 );
-  inline void stdc(  int crd, Register s1, int simm13a);
-  inline void stcsr( int crd, Register s1, Register s2 );
-  inline void stcsr( int crd, Register s1, int simm13a);
-  inline void stdcq( int crd, Register s1, Register s2 );
-  inline void stdcq( int crd, Register s1, int simm13a);
-
   // pp 230
 
   void sub(    Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | rs2(s2) ); }
@@ -1153,20 +1078,16 @@
 
   void taddcc(    Register s1, Register s2, Register d ) {            emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | rs2(s2) ); }
   void taddcc(    Register s1, int simm13a, Register d ) {            emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void taddcctv(  Register s1, Register s2, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void taddcctv(  Register s1, int simm13a, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 235
 
   void tsubcc(    Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | rs2(s2) ); }
   void tsubcc(    Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
-  void tsubcctv(  Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
-  void tsubcctv(  Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
   // pp 237
 
-  void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc);  emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
-  void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc);  emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
+  void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
+  void trap( Condition c, CC cc, Register s1, int trapa   ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
   // simple uncond. trap
   void trap( int trapa ) { trap( always, icc, G0, trapa ); }
 
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -63,9 +63,6 @@
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
 
-inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
-inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
-
 inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  cti();   emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
 
@@ -88,18 +85,9 @@
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
 
-inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();   emit_int32( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only();  emit_int32( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::ldsb(  Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldsb(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -119,9 +107,6 @@
 inline void Assembler::ldd(   Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldd(   Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 inline void Assembler::rett( Register s1, Register s2                         ) { cti();  emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti();  emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);  has_delay_slot(); }
 
@@ -132,8 +117,6 @@
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();   emit_int32( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only();  emit_int32( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -152,17 +135,6 @@
 inline void Assembler::std(  Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::std(  Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
-// v8 p 99
-
-inline void Assembler::stc(    int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stc(    int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdc(   int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdc(   int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stcsr(  int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stcsr(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-inline void Assembler::stdcq(  int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
-inline void Assembler::stdcq(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
-
 // pp 231
 
 inline void Assembler::swap(    Register s1, Register s2, Register d) { v9_dep();  emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
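
The ldstub that disappears here was SPARC's atomic test-and-set: it loads a byte and simultaneously stores all-ones into it, and it was the primitive V8 spin locks were built from (the V9 paths use cas/casx instead; see the cas_ptr changes below). A rough C++ analogy of its semantics (a sketch, not HotSpot code):

    #include <atomic>
    #include <cstdint>

    // ldstub-like test-and-set: atomically fetch the old byte and set 0xFF.
    // A zero return means the caller took the lock; nonzero means it was held.
    uint8_t ldstub_like(std::atomic<uint8_t>& cell) {
      return cell.exchange(0xFF, std::memory_order_acquire);
    }
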
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -597,13 +597,6 @@
 
   __ sra(Rdividend, 31, Rscratch);
   __ wry(Rscratch);
-  if (!VM_Version::v9_instructions_work()) {
-    // v9 doesn't require these nops
-    __ nop();
-    __ nop();
-    __ nop();
-    __ nop();
-  }
 
   add_debug_info_for_div0_here(op->info());
 
@@ -652,10 +645,6 @@
       case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
       case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
       default :                         ShouldNotReachHere();
-    };
-
-    if (!VM_Version::v9_instructions_work()) {
-      __ nop();
     }
     __ fb( acond, false, Assembler::pn, *(op->label()));
   } else {
@@ -725,9 +714,6 @@
       Label L;
       // result must be 0 if value is NaN; test by comparing value to itself
       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
-      if (!VM_Version::v9_instructions_work()) {
-        __ nop();
-      }
       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
       __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
@@ -1909,7 +1895,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, rreg, res); break;
         case lir_sub:  __ sub  (lreg, rreg, res); break;
-        case lir_mul:  __ mult (lreg, rreg, res); break;
+        case lir_mul:  __ mulx (lreg, rreg, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -1924,7 +1910,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, simm13, res); break;
         case lir_sub:  __ sub  (lreg, simm13, res); break;
-        case lir_mul:  __ mult (lreg, simm13, res); break;
+        case lir_mul:  __ mulx (lreg, simm13, res); break;
         default: ShouldNotReachHere();
       }
     } else {
@@ -1936,7 +1922,7 @@
       switch (code) {
         case lir_add:  __ add  (lreg, (int)con, res); break;
         case lir_sub:  __ sub  (lreg, (int)con, res); break;
-        case lir_mul:  __ mult (lreg, (int)con, res); break;
+        case lir_mul:  __ mulx (lreg, (int)con, res); break;
         default: ShouldNotReachHere();
       }
     }
@@ -3234,48 +3220,26 @@
     Register base = mem_addr->base()->as_register();
     if (src->is_register() && dest->is_address()) {
       // G4 is high half, G5 is low half
-      if (VM_Version::v9_instructions_work()) {
-        // clear the top bits of G5, and scale up G4
-        __ srl (src->as_register_lo(),  0, G5);
-        __ sllx(src->as_register_hi(), 32, G4);
-        // combine the two halves into the 64 bits of G4
-        __ or3(G4, G5, G4);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ stx(G4, base, disp);
-        } else {
-          __ stx(G4, base, idx);
-        }
+      // clear the top bits of G5, and scale up G4
+      __ srl (src->as_register_lo(),  0, G5);
+      __ sllx(src->as_register_hi(), 32, G4);
+      // combine the two halves into the 64 bits of G4
+      __ or3(G4, G5, G4);
+      null_check_offset = __ offset();
+      if (idx == noreg) {
+        __ stx(G4, base, disp);
       } else {
-        __ mov (src->as_register_hi(), G4);
-        __ mov (src->as_register_lo(), G5);
-        null_check_offset = __ offset();
-        if (idx == noreg) {
-          __ std(G4, base, disp);
-        } else {
-          __ std(G4, base, idx);
-        }
+        __ stx(G4, base, idx);
       }
     } else if (src->is_address() && dest->is_register()) {
       null_check_offset = __ offset();
-      if (VM_Version::v9_instructions_work()) {
-        if (idx == noreg) {
-          __ ldx(base, disp, G5);
-        } else {
-          __ ldx(base, idx, G5);
-        }
-        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
-        __ mov (G5, dest->as_register_lo());     // copy low half into lo
+      if (idx == noreg) {
+        __ ldx(base, disp, G5);
       } else {
-        if (idx == noreg) {
-          __ ldd(base, disp, G4);
-        } else {
-          __ ldd(base, idx, G4);
-        }
-        // G4 is high half, G5 is low half
-        __ mov (G4, dest->as_register_hi());
-        __ mov (G5, dest->as_register_lo());
+        __ ldx(base, idx, G5);
       }
+      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
+      __ mov (G5, dest->as_register_lo());     // copy low half into lo
     } else {
       Unimplemented();
     }
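
With V8 gone, the 64-bit volatile long store/load above always goes through a single stx/ldx instead of the paired std/ldd. The srl/sllx/or3 triple packs two 32-bit register halves into one 64-bit value; in plain C++ the computation is (a sketch, not HotSpot code):

    #include <cstdint>

    // What srl(lo, 0, G5); sllx(hi, 32, G4); or3(G4, G5, G4) computes:
    // srl by 0 zero-extends the low half, sllx scales the high half up.
    uint64_t combine_halves(uint32_t hi, uint32_t lo) {
      return (static_cast<uint64_t>(hi) << 32) | static_cast<uint64_t>(lo);
    }
    // The load path inverts this: srax by 32 recovers the high half, and the
    // low 32 bits of the loaded value are the low half.
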
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -108,7 +108,7 @@
 
   // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
   assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-  casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rmark, Rscratch);
   // if compare/exchange succeeded we found an unlocked object and we now have locked it
   // hence we are done
   cmp(Rmark, Rscratch);
@@ -149,7 +149,7 @@
 
   // Check if it is still a light weight lock, this is is true if we see
   // the stack address of the basicLock in the markOop of the object
-  casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+  cas_ptr(mark_addr.base(), Rbox, Rmark);
   cmp(Rbox, Rmark);
 
   brx(Assembler::notEqual, false, Assembler::pn, slow_case);
@@ -276,7 +276,7 @@
     sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
     initialize_body(t1, t2);
 #ifndef _LP64
-  } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
+  } else if (con_size_in_bytes < threshold * 2) {
     // on v9 we can do double word stores to fill twice as much space.
     assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
     assert(con_size_in_bytes % 8 == 0, "double word aligned");
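
casx_under_lock existed only because V8 had no compare-and-swap and had to emulate one under a global spin lock; cas_ptr now maps straight to cas/casx at the pointer size. The lock fast path above has the shape of a compare-exchange on the object's mark word; as a C++ analogy (a sketch with invented names, not HotSpot code):

    #include <atomic>
    #include <cstdint>

    // Install lock_addr in the mark word only if it still holds the expected
    // unlocked mark; on failure 'expected' holds the current value, which the
    // caller inspects to choose between done and slow_case.
    bool try_lock_like(std::atomic<intptr_t>& mark_word,
                       intptr_t unlocked_mark, intptr_t lock_addr) {
      intptr_t expected = unlocked_mark;
      return mark_word.compare_exchange_strong(expected, lock_addr,
                                               std::memory_order_acq_rel);
    }
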
--- a/hotspot/src/cpu/sparc/vm/c2_init_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/c2_init_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -30,5 +30,4 @@
 
 void Compile::pd_compiler2_init() {
   guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
-  guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
 }
--- a/hotspot/src/cpu/sparc/vm/disassembler_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/disassembler_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -30,8 +30,7 @@
   }
 
   static const char* pd_cpu_opts() {
-    return (VM_Version::v9_instructions_work()?
-            (VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
+    return "v9only";
   }
 
 #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -110,8 +110,5 @@
                                                                             \
   product(uintx,  ArraycopyDstPrefetchDistance, 0,                          \
           "Distance to prefetch destination array in arracopy")             \
-                                                                            \
-  develop(intx, V8AtomicOperationUnderLockSpinCount,    50,                 \
-          "Number of times to spin wait on a v8 atomic operation lock")     \
 
 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1210,8 +1210,7 @@
     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
     // compare and exchange object_addr, markOop | 1, stack address of basicLock
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
 
     // if the compare and exchange succeeded we are done (we saw an unlocked object)
     cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
@@ -1291,8 +1290,7 @@
     // we expect to see the stack address of the basicLock in case the
     // lock is still a light weight lock (lock_reg)
     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
     cmp(lock_reg, displaced_header_reg);
     brx(Assembler::equal, true, Assembler::pn, done);
     delayed()->st_ptr(G0, lockobj_addr);  // free entry
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -118,7 +118,6 @@
       case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
       case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
       case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
-      case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
       case bpr_op2: {
         if (is_cbcond(inst)) {
           m = wdisp10(word_aligned_ones, 0);
@@ -149,7 +148,6 @@
       case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
       case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
       case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
-      case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
       case bpr_op2: {
         if (is_cbcond(inst)) {
           r = inv_wdisp10(inst, pos);
@@ -325,12 +323,6 @@
   trap(ST_RESERVED_FOR_USER_0);
 }
 
-// flush windows (except current) using flushw instruction if avail.
-void MacroAssembler::flush_windows() {
-  if (VM_Version::v9_instructions_work())  flushw();
-  else                                     flush_windows_trap();
-}
-
 // Write serialization page so VM thread can do a pseudo remote membar
 // We use the current thread pointer to calculate a thread specific
 // offset to write to within the page. This minimizes bus traffic
@@ -358,88 +350,6 @@
   Unimplemented();
 }
 
-void MacroAssembler::mult(Register s1, Register s2, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, s2, d);
-  } else {
-    smul (s1, s2, d);
-  }
-}
-
-void MacroAssembler::mult(Register s1, int simm13a, Register d) {
-  if(VM_Version::v9_instructions_work()) {
-    mulx (s1, simm13a, d);
-  } else {
-    smul (s1, simm13a, d);
-  }
-}
-
-
-#ifdef ASSERT
-void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label get_psr_test;
-  // Get the condition codes the V8 way.
-  read_ccr_trap(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  // Compare condition codes from the V8 and V9 ways.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
-  delayed()->breakpoint_trap();
-  bind(get_psr_test);
-}
-
-void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
-  const Register s1 = G3_scratch;
-  const Register s2 = G4_scratch;
-  Label set_psr_test;
-  // Write out the saved condition codes the V8 way
-  write_ccr_trap(ccr_save, s1, s2);
-  // Read back the condition codes using the V9 instruction
-  rdccr(s1);
-  mov(ccr_save, s2);
-  // This is a test of V8 which has icc but not xcc
-  // so mask off the xcc bits
-  and3(s2, 0xf, s2);
-  and3(s1, 0xf, s1);
-  // Compare the V8 way with the V9 way.
-  subcc(s2, s1, G0);
-  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
-  delayed()->breakpoint_trap();
-  bind(set_psr_test);
-}
-#else
-#define read_ccr_v8_assert(x)
-#define write_ccr_v8_assert(x)
-#endif // ASSERT
-
-void MacroAssembler::read_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    rdccr(ccr_save);
-    // Test code sequence used on V8.  Do not move above rdccr.
-    read_ccr_v8_assert(ccr_save);
-  } else {
-    read_ccr_trap(ccr_save);
-  }
-}
-
-void MacroAssembler::write_ccr(Register ccr_save) {
-  if (VM_Version::v9_instructions_work()) {
-    // Test code sequence used on V8.  Do not move below wrccr.
-    write_ccr_v8_assert(ccr_save);
-    wrccr(ccr_save);
-  } else {
-    const Register temp_reg1 = G3_scratch;
-    const Register temp_reg2 = G4_scratch;
-    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
-  }
-}
-
-
 // Calls to C land
 
 #ifdef ASSERT
@@ -465,8 +375,9 @@
 #ifdef ASSERT
   AddressLiteral last_get_thread_addrlit(&last_get_thread);
   set(last_get_thread_addrlit, L3);
-  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
-  st_ptr(L4, L3, 0);
+  rdpc(L4);
+  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
+  st_ptr(L4, L3, 0);
 #endif
   call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
   delayed()->nop();
@@ -1327,7 +1237,7 @@
 
 void RegistersForDebugging::save_registers(MacroAssembler* a) {
   a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
-  a->flush_windows();
+  a->flushw();
   int i;
   for (i = 0; i < 8; ++i) {
     a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
@@ -1338,7 +1248,7 @@
   for (i = 0;  i < 32; ++i) {
     a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
   }
-  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+  for (i = 0; i < 64; i += 2) {
     a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
   }
 }
@@ -1350,7 +1260,7 @@
   for (int j = 0; j < 32; ++j) {
     a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
   }
-  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
+  for (int k = 0; k < 64; k += 2) {
     a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
   }
 }
@@ -1465,8 +1375,6 @@
 // the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
 // call.
 void MacroAssembler::verify_oop_subroutine() {
-  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
-
   // Leaf call; no frame.
   Label succeed, fail, null_or_fail;
 
@@ -1870,26 +1778,17 @@
   // And the equals case for the high part does not need testing,
   // since that triplet is reached only after finding the high halves differ.
 
-  if (VM_Version::v9_instructions_work()) {
-    mov(-1, Rresult);
-    ba(done);  delayed()-> movcc(greater, false, icc,  1, Rresult);
-  } else {
-    br(less,    true, pt, done); delayed()-> set(-1, Rresult);
-    br(greater, true, pt, done); delayed()-> set( 1, Rresult);
-  }
-
-  bind( check_low_parts );
-
-  if (VM_Version::v9_instructions_work()) {
-    mov(                               -1, Rresult);
-    movcc(equal,           false, icc,  0, Rresult);
-    movcc(greaterUnsigned, false, icc,  1, Rresult);
-  } else {
-    set(-1, Rresult);
-    br(equal,           true, pt, done); delayed()->set( 0, Rresult);
-    br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
-  }
-  bind( done );
+  mov(-1, Rresult);
+  ba(done);
+  delayed()->movcc(greater, false, icc,  1, Rresult);
+
+  bind(check_low_parts);
+
+  mov(                               -1, Rresult);
+  movcc(equal,           false, icc,  0, Rresult);
+  movcc(greaterUnsigned, false, icc,  1, Rresult);
+
+  bind(done);
 }
 
 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
@@ -2117,118 +2016,23 @@
 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
                                 FloatRegister Fa, FloatRegister Fb,
                                 Register Rresult) {
-
-  fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
-
-  Condition lt = unordered_result == -1 ? f_unorderedOrLess    : f_less;
-  Condition eq =                          f_equal;
-  Condition gt = unordered_result ==  1 ? f_unorderedOrGreater : f_greater;
-
-  if (VM_Version::v9_instructions_work()) {
-
-    mov(-1, Rresult);
-    movcc(eq, true, fcc0, 0, Rresult);
-    movcc(gt, true, fcc0, 1, Rresult);
-
+  if (is_float) {
+    fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
   } else {
-    Label done;
-
-    set( -1, Rresult );
-    //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
-    fb( eq, true, pn, done);  delayed()->set(  0, Rresult );
-    fb( gt, true, pn, done);  delayed()->set(  1, Rresult );
-
-    bind (done);
+    fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
   }
-}
-
-
-void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fneg(w, s, d);
+
+  if (unordered_result == 1) {
+    mov(                                    -1, Rresult);
+    movcc(f_equal,              true, fcc0,  0, Rresult);
+    movcc(f_unorderedOrGreater, true, fcc0,  1, Rresult);
   } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fneg(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fneg(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
+    mov(                                    -1, Rresult);
+    movcc(f_equal,              true, fcc0,  0, Rresult);
+    movcc(f_greater,            true, fcc0,  1, Rresult);
   }
 }
 
-void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fmov(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fmov(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fmov(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
-
-void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
-{
-  if (VM_Version::v9_instructions_work()) {
-    Assembler::fabs(w, s, d);
-  } else {
-    if (w == FloatRegisterImpl::S) {
-      Assembler::fabs(w, s, d);
-    } else if (w == FloatRegisterImpl::D) {
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
-        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-    } else {
-      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
-
-      // number() does a sanity check on the alignment.
-      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
-       ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
-
-      Assembler::fabs(FloatRegisterImpl::S, s, d);
-      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
-      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
-    }
-  }
-}
 
 void MacroAssembler::save_all_globals_into_locals() {
   mov(G1,L1);
@@ -2250,135 +2054,6 @@
   mov(L7,G7);
 }
 
-// Use for 64 bit operation.
-void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-#ifdef _LP64
-  casx(top_ptr_reg, top_reg, ptr_reg);
-#else
-  cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
-#endif // _LP64
-}
-
-// [RGV] This routine does not handle 64 bit operations.
-//       use casx_under_lock() or casx directly!!!
-void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
-{
-  // store ptr_reg as the new top value
-  if (VM_Version::v9_instructions_work()) {
-    cas(top_ptr_reg, top_reg, ptr_reg);
-  } else {
-
-    // If the register is not an out nor global, it is not visible
-    // after the save.  Allocate a register for it, save its
-    // value in the register save area (the save may not flush
-    // registers to the save area).
-
-    Register top_ptr_reg_after_save;
-    Register top_reg_after_save;
-    Register ptr_reg_after_save;
-
-    if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
-      top_ptr_reg_after_save = top_ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = top_ptr_reg->address_in_saved_window();
-      top_ptr_reg_after_save = L0;
-      st(top_ptr_reg, reg_save_addr);
-    }
-
-    if (top_reg->is_out() || top_reg->is_global()) {
-      top_reg_after_save = top_reg->after_save();
-    } else {
-      Address reg_save_addr = top_reg->address_in_saved_window();
-      top_reg_after_save = L1;
-      st(top_reg, reg_save_addr);
-    }
-
-    if (ptr_reg->is_out() || ptr_reg->is_global()) {
-      ptr_reg_after_save = ptr_reg->after_save();
-    } else {
-      Address reg_save_addr = ptr_reg->address_in_saved_window();
-      ptr_reg_after_save = L2;
-      st(ptr_reg, reg_save_addr);
-    }
-
-    const Register& lock_reg = L3;
-    const Register& lock_ptr_reg = L4;
-    const Register& value_reg = L5;
-    const Register& yield_reg = L6;
-    const Register& yieldall_reg = L7;
-
-    save_frame();
-
-    if (top_ptr_reg_after_save == L0) {
-      ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
-    }
-
-    if (top_reg_after_save == L1) {
-      ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
-    }
-
-    if (ptr_reg_after_save == L2) {
-      ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
-    }
-
-    Label(retry_get_lock);
-    Label(not_same);
-    Label(dont_yield);
-
-    assert(lock_addr, "lock_address should be non null for v8");
-    set((intptr_t)lock_addr, lock_ptr_reg);
-    // Initialize yield counter
-    mov(G0,yield_reg);
-    mov(G0, yieldall_reg);
-    set(StubRoutines::Sparc::locked, lock_reg);
-
-    bind(retry_get_lock);
-    cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
-
-    if(use_call_vm) {
-      Untested("Need to verify global reg consistancy");
-      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
-    } else {
-      // Save the regs and make space for a C call
-      save(SP, -96, SP);
-      save_all_globals_into_locals();
-      call(CAST_FROM_FN_PTR(address,os::yield_all));
-      delayed()->mov(yieldall_reg, O0);
-      restore_globals_from_locals();
-      restore();
-    }
-
-    // reset the counter
-    mov(G0,yield_reg);
-    add(yieldall_reg, 1, yieldall_reg);
-
-    bind(dont_yield);
-    // try to get lock
-    Assembler::swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
-    delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock.  do we have the same top?
-    ld(top_ptr_reg_after_save, 0, value_reg);
-    cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
-
-    // yes, same top.
-    st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
-    membar(Assembler::StoreStore);
-
-    bind(not_same);
-    mov(value_reg, ptr_reg_after_save);
-    st(lock_reg, lock_ptr_reg, 0); // unlock
-
-    restore();
-  }
-}
-
 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                       Register tmp,
                                                       int offset) {
@@ -2970,7 +2645,7 @@
                   markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
                   mark_reg);
   or3(G2_thread, mark_reg, temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
@@ -2998,7 +2673,7 @@
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
   or3(G2_thread, temp_reg, temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
@@ -3027,7 +2702,7 @@
   // bits in this situation. Should attempt to preserve them.
   load_klass(obj_reg, temp_reg);
   ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
-  casn(mark_addr.base(), mark_reg, temp_reg);
+  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
@@ -3058,15 +2733,6 @@
 }
 
 
-// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
-// Solaris/SPARC's "as".  Another apt name would be cas_ptr()
-
-void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
-  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
-}
-
-
-
 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
 // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
 // The code could be tightened up considerably.
@@ -3129,8 +2795,7 @@
 
      // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-     casx_under_lock(mark_addr.base(), Rmark, Rscratch,
-        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+     cas_ptr(mark_addr.base(), Rmark, Rscratch);
 
      // if compare/exchange succeeded we found an unlocked object and we now have locked it
      // hence we are done
@@ -3176,7 +2841,7 @@
       mov(Rbox,  Rscratch);
       or3(Rmark, markOopDesc::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn(mark_addr.base(), Rmark, Rscratch);
+      cas_ptr(mark_addr.base(), Rmark, Rscratch);
       cmp(Rmark, Rscratch);
       brx(Assembler::equal, false, Assembler::pt, done);
       delayed()->sub(Rscratch, SP, Rscratch);
@@ -3207,7 +2872,7 @@
       // Invariant: if we acquire the lock then _recursions should be 0.
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
       // Intentional fall-through into done
    } else {
@@ -3240,7 +2905,7 @@
       mov(0, Rscratch);
       or3(Rmark, markOopDesc::unlocked_value, Rmark);
       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-      casn(mark_addr.base(), Rmark, Rscratch);
+      cas_ptr(mark_addr.base(), Rmark, Rscratch);
 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
       cmp(Rscratch, Rmark);
       brx(Assembler::notZero, false, Assembler::pn, Recursive);
@@ -3266,7 +2931,7 @@
       // the fast-path stack-lock code from the interpreter and always passed
       // control to the "slow" operators in synchronizer.cpp.
 
-      // RScratch contains the fetched obj->mark value from the failed CASN.
+      // RScratch contains the fetched obj->mark value from the failed CAS.
 #ifdef _LP64
       sub(Rscratch, STACK_BIAS, Rscratch);
 #endif
@@ -3300,7 +2965,7 @@
       // Invariant: if we acquire the lock then _recursions should be 0.
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
       // ST box->displaced_header = NonZero.
       // Any non-zero value suffices:
@@ -3336,8 +3001,7 @@
      // Check if it is still a light weight lock, this is is true if we see
      // the stack address of the basicLock in the markOop of the object
      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
-     casx_under_lock(mark_addr.base(), Rbox, Rmark,
-       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+     cas_ptr(mark_addr.base(), Rbox, Rmark);
      ba(done);
      delayed()->cmp(Rbox, Rmark);
      bind(done);
@@ -3398,7 +3062,7 @@
       delayed()->andcc(G0, G0, G0);
       add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
       mov(G2_thread, Rscratch);
-      casn(Rmark, G0, Rscratch);
+      cas_ptr(Rmark, G0, Rscratch);
       // invert icc.zf and goto done
       br_notnull(Rscratch, false, Assembler::pt, done);
       delayed()->cmp(G0, G0);
@@ -3440,7 +3104,7 @@
    // A prototype implementation showed excellent results, although
    // the scavenger and timeout code was rather involved.
 
-   casn(mark_addr.base(), Rbox, Rscratch);
+   cas_ptr(mark_addr.base(), Rbox, Rscratch);
    cmp(Rbox, Rscratch);
    // Intentional fall through into done ...
 
@@ -3540,7 +3204,8 @@
 
   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    ba_short(slow_case);
+    ba(slow_case);
+    delayed()->nop();
   } else {
     // get eden boundaries
     // note: we need both top & top_addr!
@@ -3583,7 +3248,7 @@
     // Compare obj with the value at top_addr; if still equal, swap the value of
     // end with the value at top_addr. If not equal, read the value at top_addr
     // into end.
-    casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    cas_ptr(top_addr, obj, end);
     // if someone beat us on the allocation, try again, otherwise continue
     cmp(obj, end);
     brx(Assembler::notEqual, false, Assembler::pn, retry);
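
The movcc rewrites of the long-compare and float_cmp sequences in this file are branchless: the result register starts at -1 and is conditionally overwritten with 0 or 1 from the condition codes, replacing the V8 branch ladders. The result convention is the usual three-way compare; in plain C++ (a sketch, not HotSpot code):

    #include <cstdint>

    // Branchless -1/0/1 three-way compare, the same convention the
    // movcc sequences produce in Rresult.
    int cmp3(int64_t a, int64_t b) {
      return (a > b) - (a < b);
    }
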
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1056,13 +1056,6 @@
 
   void breakpoint_trap();
   void breakpoint_trap(Condition c, CC cc);
-  void flush_windows_trap();
-  void clean_windows_trap();
-  void get_psr_trap();
-  void set_psr_trap();
-
-  // V8/V9 flush_windows
-  void flush_windows();
 
   // Support for serializing memory accesses between threads
   void serialize_memory(Register thread, Register tmp1, Register tmp2);
@@ -1071,14 +1064,6 @@
   void enter();
   void leave();
 
-  // V8/V9 integer multiply
-  void mult(Register s1, Register s2, Register d);
-  void mult(Register s1, int simm13a, Register d);
-
-  // V8/V9 read and write of condition codes.
-  void read_ccr(Register d);
-  void write_ccr(Register s);
-
   // Manipulation of C++ bools
   // These are idioms to flag the need for care with accessing bools but on
   // this platform we assume byte size
@@ -1162,21 +1147,6 @@
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);
 
- private:
-  // For V8
-  void read_ccr_trap(Register ccr_save);
-  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
-
-#ifdef ASSERT
-  // For V8 debugging.  Uses V8 instruction sequence and checks
-  // result with V9 insturctions rdccr and wrccr.
-  // Uses Gscatch and Gscatch2
-  void read_ccr_v8_assert(Register ccr_save);
-  void write_ccr_v8_assert(Register ccr_save);
-#endif // ASSERT
-
- public:
-
   // Write to card table for - register is destroyed afterwards.
   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
 
@@ -1314,20 +1284,9 @@
                   FloatRegister Fa, FloatRegister Fb,
                   Register Rresult);
 
-  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
-  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
-
   void save_all_globals_into_locals();
   void restore_globals_from_locals();
 
-  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
-    address lock_addr=0, bool use_call_vm=false);
-  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
-
   // These set the icc condition code to equal if the lock succeeded
   // and notEqual if it failed and requires a slow case
   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -229,10 +229,7 @@
 // Use the right branch for the platform
 
 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp(c, a, icc, p, d, rt);
-  else
-    Assembler::br(c, a, d, rt);
+  Assembler::bp(c, a, icc, p, d, rt);
 }
 
 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
@@ -268,10 +265,7 @@
 }
 
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    fbp(c, a, fcc0, p, d, rt);
-  else
-    Assembler::fb(c, a, d, rt);
+  fbp(c, a, fcc0, p, d, rt);
 }
 
 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
@@ -334,7 +328,6 @@
 
 // prefetch instruction
 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  if (VM_Version::v9_instructions_work())
-    Assembler::bp( never, true, xcc, pt, d, rt );
+  Assembler::bp( never, true, xcc, pt, d, rt );
 }
 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
@@ -344,15 +338,7 @@
 // returns delta from gotten pc to addr after
 inline int MacroAssembler::get_pc( Register d ) {
   int x = offset();
-  if (VM_Version::v9_instructions_work())
-    rdpc(d);
-  else {
-    Label lbl;
-    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
-    if (d == O7)  delayed()->nop();
-    else          delayed()->mov(O7, d);
-    bind(lbl);
-  }
+  rdpc(d);
   return offset() - x;
 }
 
@@ -646,41 +632,26 @@
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if( !os::is_MP() ) return false;  // Not needed on single CPU
-  if( VM_Version::v9_instructions_work() ) {
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    return (effective_mask != 0);
-  } else {
-    return true;
-  }
+  if (!os::is_MP())
+    return false;  // Not needed on single CPU
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  return (effective_mask != 0);
 }
 
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   // Uniprocessors do not need memory barriers
-  if (!os::is_MP()) return;
+  if (!os::is_MP())
+    return;
   // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
-  if( VM_Version::v9_instructions_work() ) {
-    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
-    // of the mmask subfield of const7a that does anything that isn't done
-    // implicitly is StoreLoad.
-    const Membar_mask_bits effective_mask =
-        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
-    if ( effective_mask != 0 ) {
-      Assembler::membar( effective_mask );
-    }
-  } else {
-    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
-    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
-    // which guarantees that all stores behave as if an stbar were issued just after
-    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
-    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
-    // it can't be specified by stbar, nor have I come up with a way to simulate it.
-    //
-    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
-    // space.  Put one here to be on the safe side.
-    Assembler::ldstub(SP, 0, G0);
+  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
+  // of the mmask subfield of const7a that does anything that isn't done
+  // implicitly is StoreLoad.
+  const Membar_mask_bits effective_mask =
+      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
+  if (effective_mask != 0) {
+    Assembler::membar(effective_mask);
   }
 }
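
These two hunks hard-code the reasoning from the deleted V8 comment: under TSO the LoadLoad, LoadStore and StoreStore orderings are implicit, so a membar is emitted only when the mask still contains StoreLoad. A minimal standalone sketch of that reduction (the bit values here are illustrative, not HotSpot's Membar_mask_bits encoding):

    #include <cstdio>

    // Illustrative bit assignments; HotSpot's actual encoding may differ.
    enum MembarBits {
      LoadLoad   = 1 << 0,
      StoreLoad  = 1 << 1,
      LoadStore  = 1 << 2,
      StoreStore = 1 << 3
    };

    // Under TSO only StoreLoad can have an observable effect.
    static bool membar_has_effect(int mask, bool is_mp) {
      if (!is_mp) return false;  // uniprocessor: never needed
      return (mask & ~(LoadLoad | LoadStore | StoreStore)) != 0;
    }

    int main() {
      printf("%d\n", membar_has_effect(StoreStore, true));            // 0
      printf("%d\n", membar_has_effect(StoreLoad | LoadLoad, true));  // 1
    }
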
 
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -162,7 +162,7 @@
    int i1 = ((int*)code_buffer)[1];
    int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
    // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
    n_call->set_long_at(1*BytesPerInstWord, i1);
@@ -181,7 +181,7 @@
    // Make sure the first-patched instruction, which may co-exist
    // briefly with the call, will do something harmless.
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
 }
 
@@ -933,11 +933,7 @@
   int code_size = 1 * BytesPerInstWord;
   CodeBuffer cb(verified_entry, code_size + 1);
   MacroAssembler* a = new MacroAssembler(&cb);
-  if (VM_Version::v9_instructions_work()) {
-    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  } else {
-    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
-  }
+  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
   ICache::invalidate_range(verified_entry, code_size);
 }
 
@@ -1024,7 +1020,7 @@
    int i1 = ((int*)code_buffer)[1];
    int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
    // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
    h_jump->set_long_at(1*BytesPerInstWord, i1);
@@ -1043,6 +1039,6 @@
    // Make sure the first-patched instruction, which may co-exist
    // briefly with the call, will do something harmless.
    assert(inv_op(*contention_addr) == Assembler::arith_op ||
-          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
+          *contention_addr == nop_instruction(),
           "must not interfere with original call");
 }
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -70,8 +70,7 @@
   bool is_zombie() {
     int x = long_at(0);
     return is_op3(x,
-                  VM_Version::v9_instructions_work() ?
-                    Assembler::ldsw_op3 : Assembler::lduw_op3,
+                  Assembler::ldsw_op3,
                   Assembler::ldst_op)
         && Assembler::inv_rs1(x) == G0
         && Assembler::inv_rd(x) == O7;
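
With the V8 lduw form gone, a zombie nmethod is always marked by ldsw [%g0], %o7, so is_zombie() decodes one fixed op/op3/rs1/rd pattern. A standalone sketch of that decode; the field positions and the 0x08 value for ldsw_op3 come from the SPARC V9 manual, not from this patch:

    #include <cstdint>
    #include <cstdio>

    // SPARC format-3 field decode: op[31:30] rd[29:25] op3[24:19] rs1[18:14].
    static unsigned inv_op (uint32_t x) { return (x >> 30) & 0x3;  }
    static unsigned inv_rd (uint32_t x) { return (x >> 25) & 0x1f; }
    static unsigned inv_op3(uint32_t x) { return (x >> 19) & 0x3f; }
    static unsigned inv_rs1(uint32_t x) { return (x >> 14) & 0x1f; }

    int main() {
      const uint32_t ldst_op = 3, ldsw_op3 = 0x08, G0 = 0, O7 = 15;
      // Encode ldsw [%g0 + 0], %o7 -- the only zombie marker once V8 is gone.
      uint32_t insn = (ldst_op << 30) | (O7 << 25) | (ldsw_op3 << 19)
                    | (G0 << 14) | (1u << 13);  // i=1, simm13 = 0
      bool zombie = inv_op(insn) == ldst_op && inv_op3(insn) == ldsw_op3 &&
                    inv_rs1(insn) == G0 && inv_rd(insn) == O7;
      printf("is_zombie=%d\n", (int)zombie);
    }
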
--- a/hotspot/src/cpu/sparc/vm/register_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/register_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -249,12 +249,10 @@
 
       case D:
         assert(c < 64  &&  (c & 1) == 0, "bad double float register");
-        assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
         return (c & 0x1e) | ((c & 0x20) >> 5);
 
       case Q:
         assert(c < 64  &&  (c & 3) == 0, "bad quad float register");
-        assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
         return (c & 0x1c) | ((c & 0x20) >> 5);
     }
     ShouldNotReachHere();
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -2459,7 +2459,7 @@
 
   // Finally just about ready to make the JNI call
 
-  __ flush_windows();
+  __ flushw();
   if (inner_frame_created) {
     __ restore();
   } else {
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Jun 14 16:33:34 2013 -0700
@@ -2778,10 +2778,7 @@
     Register Rold = reg_to_register_object($old$$reg);
     Register Rnew = reg_to_register_object($new$$reg);
 
-    // casx_under_lock picks 1 of 3 encodings:
-    // For 32-bit pointers you get a 32-bit CAS
-    // For 64-bit pointers you get a 64-bit CASX
-    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
+    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
     __ cmp( Rold, Rnew );
   %}
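
casn and the lock-assisted casx_under_lock are gone; cas_ptr is now the one entry point, emitting a 32-bit CAS or a 64-bit CASX to match pointer width. A hedged portable analogue of its contract, with std::atomic standing in for the instruction:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Swap(*mem, desired) if *mem == expected; intptr_t tracks pointer
    // width, so LP64 gets the CASX-width path, 32-bit the CAS-width path.
    static intptr_t cas_ptr(std::atomic<intptr_t>* mem,
                            intptr_t expected, intptr_t desired) {
      mem->compare_exchange_strong(expected, desired);
      return expected;  // previous value (like Rold after the instruction)
    }

    int main() {
      std::atomic<intptr_t> top{100};
      intptr_t prev = cas_ptr(&top, 100, 200);
      printf("prev=%ld now=%ld\n", (long)prev, (long)top.load());
    }
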
 
@@ -3067,7 +3064,7 @@
     AddressLiteral last_rethrow_addrlit(&last_rethrow);
     __ sethi(last_rethrow_addrlit, L1);
     Address addr(L1, last_rethrow_addrlit.low10());
-    __ get_pc(L2);
+    __ rdpc(L2);
     __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
     __ st_ptr(L2, addr);
     __ restore();
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -566,7 +566,7 @@
     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
     address start = __ pc();
 
-    __ flush_windows();
+    __ flushw();
     __ retl(false);
     __ delayed()->add( FP, STACK_BIAS, O0 );
     // The returned value must be a stack pointer whose register save area
@@ -575,67 +575,9 @@
     return start;
   }
 
-  // Helper functions for v8 atomic operations.
-  //
-  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
-    if (mark_oop_reg == noreg) {
-      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-    } else {
-      assert(scratch_reg != noreg, "just checking");
-      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
-      __ set((intptr_t)lock_ptr, lock_ptr_reg);
-      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
-      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
-    }
-  }
-
-  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-
-    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
-    __ set(StubRoutines::Sparc::locked, lock_reg);
-    // Initialize yield counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(retry);
-    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
-
-    // This code can only be called from inside the VM, this
-    // stub is only invoked from Atomic::add().  We do not
-    // want to use call_VM, because _last_java_sp and such
-    // must already be set.
-    //
-    // Save the regs and make space for a C call
-    __ save(SP, -96, SP);
-    __ save_all_globals_into_locals();
-    BLOCK_COMMENT("call os::naked_sleep");
-    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
-    __ delayed()->nop();
-    __ restore_globals_from_locals();
-    __ restore();
-    // reset the counter
-    __ mov(G0,yield_reg);
-
-    __ BIND(dontyield);
-
-    // try to get lock
-    __ swap(lock_ptr_reg, 0, lock_reg);
-
-    // did we get the lock?
-    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
-    __ br(Assembler::notEqual, true, Assembler::pn, retry);
-    __ delayed()->add(yield_reg,1,yield_reg);
-
-    // yes, got lock. do the operation here.
-  }
-
-  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
-    __ st(lock_reg, lock_ptr_reg, 0); // unlock
-  }
-
   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -656,33 +598,14 @@
       __ mov(O0, O3);       // scratch copy of exchange value
       __ ld(O1, 0, O2);     // observe the previous value
       // try to replace O2 with O3
-      __ cas_under_lock(O1, O2, O3,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+      __ cas(O1, O2, O3);
       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 
       __ retl(false);
       __ delayed()->mov(O2, O0);  // report previous value to caller
-
     } else {
-      if (VM_Version::v9_instructions_work()) {
-        __ retl(false);
-        __ delayed()->swap(O1, 0, O0);
-      } else {
-        const Register& lock_reg = O2;
-        const Register& lock_ptr_reg = O3;
-        const Register& yield_reg = O4;
-
-        Label retry;
-        Label dontyield;
-
-        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        // got the lock, do the swap
-        __ swap(O1, 0, O0);
-
-        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-        __ retl(false);
-        __ delayed()->nop();
-      }
+      __ retl(false);
+      __ delayed()->swap(O1, 0, O0);
     }
 
     return start;
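
The V8 lock prologue/epilogue path is deleted: LP64 keeps the ld/cas retry loop (swap is a 32-bit-only instruction), and 32-bit VMs now use a bare swap. A standalone sketch of the retry-loop contract in std::atomic terms:

    #include <atomic>
    #include <cstdio>

    // Exchange via a CAS retry loop -- the contract the LP64 path implements
    // with ld/cas/cmp_and_br_short, expressed with std::atomic.
    static int atomic_xchg(std::atomic<int>& dest, int exchange_value) {
      int observed = dest.load();
      while (!dest.compare_exchange_weak(observed, exchange_value)) {
        // compare_exchange_weak refreshed 'observed'; retry until it sticks.
      }
      return observed;  // previous value, reported to the caller in O0
    }

    int main() {
      std::atomic<int> v{41};
      printf("prev=%d now=%d\n", atomic_xchg(v, 42), v.load());
    }
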
@@ -691,7 +614,7 @@
 
   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O0
   //      dest:           O1
@@ -701,15 +624,12 @@
   //
   //     O0: the value previously stored in dest
   //
-  // Overwrites (v8): O3,O4,O5
-  //
   address generate_atomic_cmpxchg() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
     address start = __ pc();
 
     // cmpxchg(dest, compare_value, exchange_value)
-    __ cas_under_lock(O1, O2, O0,
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
+    __ cas(O1, O2, O0);
     __ retl(false);
     __ delayed()->nop();
 
@@ -718,7 +638,7 @@
 
   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
   //
-  // Arguments :
+  // Arguments:
   //
   //      exchange_value: O1:O0
   //      dest:           O2
@@ -728,17 +648,12 @@
   //
   //     O1:O0: the value previously stored in dest
   //
-  // This only works on V9, on V8 we don't generate any
-  // code and just return NULL.
-  //
   // Overwrites: G1,G2,G3
   //
   address generate_atomic_cmpxchg_long() {
     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
     address start = __ pc();
 
-    if (!VM_Version::supports_cx8())
-        return NULL;;
     __ sllx(O0, 32, O0);
     __ srl(O1, 0, O1);
     __ or3(O0,O1,O0);      // O0 holds 64-bit value from compare_value
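
With the supports_cx8 early-out removed, the stub always runs, and the three kept instructions splice a jlong passed as a 32-bit register pair into one 64-bit register. The same packing in plain C++:

    #include <cstdint>
    #include <cstdio>

    // sllx O0,32 shifts the high word up; srl O1,0 zero-extends the low
    // word; or3 merges them into one 64-bit register.
    static uint64_t pack_jlong(uint32_t hi, uint32_t lo) {
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    int main() {
      printf("%016llx\n",
             (unsigned long long)pack_jlong(0xDEADBEEFu, 0x0BADF00Du));
    }
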
@@ -756,7 +671,7 @@
 
   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
   //
-  // Arguments :
+  // Arguments:
   //
   //      add_value: O0   (e.g., +1 or -1)
   //      dest:      O1
@@ -765,47 +680,22 @@
   //
   //     O0: the new value stored in dest
   //
-  // Overwrites (v9): O3
-  // Overwrites (v8): O3,O4,O5
+  // Overwrites: O3
   //
   address generate_atomic_add() {
     StubCodeMark mark(this, "StubRoutines", "atomic_add");
     address start = __ pc();
     __ BIND(_atomic_add_stub);
 
-    if (VM_Version::v9_instructions_work()) {
-      Label(retry);
-      __ BIND(retry);
-
-      __ lduw(O1, 0, O2);
-      __ add(O0, O2, O3);
-      __ cas(O1, O2, O3);
-      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
-      __ retl(false);
-      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
-    } else {
-      const Register& lock_reg = O2;
-      const Register& lock_ptr_reg = O3;
-      const Register& value_reg = O4;
-      const Register& yield_reg = O5;
-
-      Label(retry);
-      Label(dontyield);
-
-      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-      // got lock, do the increment
-      __ ld(O1, 0, value_reg);
-      __ add(O0, value_reg, value_reg);
-      __ st(value_reg, O1, 0);
-
-      // %%% only for RMO and PSO
-      __ membar(Assembler::StoreStore);
-
-      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
-
-      __ retl(false);
-      __ delayed()->mov(value_reg, O0);
-    }
+    Label retry;
+    __ BIND(retry);
+
+    __ lduw(O1, 0, O2);
+    __ add(O0, O2, O3);
+    __ cas(O1, O2, O3);
+    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
+    __ retl(false);
+    __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 
     return start;
   }
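
generate_atomic_add() now has only the V9 body: an lduw/add/cas retry loop instead of the spin-lock fallback. A standalone sketch of that loop's contract with std::atomic:

    #include <atomic>
    #include <cstdio>

    // lduw/add/cas retry: reload, compute old+add_value, CAS it in; on
    // success the stub's delay slot recomputes the new value into O0.
    static int atomic_add(std::atomic<int>& dest, int add_value) {
      int observed = dest.load();
      while (!dest.compare_exchange_weak(observed, observed + add_value)) {
        // retry with the refreshed 'observed', like cmp_and_br_short(retry)
      }
      return observed + add_value;  // new value, as returned in O0
    }

    int main() {
      std::atomic<int> counter{0};
      printf("%d\n", atomic_add(counter, 5));  // 5
    }
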
@@ -841,7 +731,7 @@
     __ mov(G3, L3);
     __ mov(G4, L4);
     __ mov(G5, L5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
     }
 
@@ -855,7 +745,7 @@
     __ mov(L3, G3);
     __ mov(L4, G4);
     __ mov(L5, G5);
-    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
+    for (i = 0; i < 64; i += 2) {
       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
     }
 
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -52,7 +52,3 @@
 address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);
 
 address StubRoutines::Sparc::_partial_subtype_check = NULL;
-
-int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;
-
-int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -47,46 +47,14 @@
 class Sparc {
  friend class StubGenerator;
 
- public:
-  enum { nof_instance_allocators = 10 };
-
-  // allocator lock values
-  enum {
-    unlocked = 0,
-    locked   = 1
-  };
-
-  enum {
-    v8_oop_lock_ignore_bits = 2,
-    v8_oop_lock_bits = 4,
-    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
-    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
-    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
-  };
-
-  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];
-
  private:
   static address _test_stop_entry;
   static address _stop_subroutine_entry;
   static address _flush_callers_register_windows_entry;
 
-  static int _atomic_memory_operation_lock;
-
   static address _partial_subtype_check;
 
  public:
-  // %%% global lock for everyone who needs to use atomic_compare_and_exchange
-  // %%% or atomic_increment -- should probably use more locks for more
-  // %%% scalability-- for instance one for each eden space or group of
-
-  // address of the lock for atomic_compare_and_exchange
-  static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
-
-  // accessor and mutator for _atomic_memory_operation_lock
-  static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
-  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
-
   // test assembler stop routine by setting registers
   static void (*test_stop_entry()) ()                     { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }
 
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1054,7 +1054,7 @@
   // flush the windows now. We don't care about the current (protection) frame
   // only the outer frames
 
-  __ flush_windows();
+  __ flushw();
 
   // mark windows as flushed
   Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1338,14 +1338,13 @@
 
 void TemplateTable::fneg() {
   transition(ftos, ftos);
-  __ fneg(FloatRegisterImpl::S, Ftos_f);
+  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
 }
 
 
 void TemplateTable::dneg() {
   transition(dtos, dtos);
-  // v8 has fnegd if source and dest are the same
-  __ fneg(FloatRegisterImpl::D, Ftos_f);
+  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
 }
 
 
@@ -1470,19 +1469,10 @@
     __ st_long(Otos_l, __ d_tmp);
     __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
 
-    if (VM_Version::v9_instructions_work()) {
-      if (bytecode() == Bytecodes::_l2f) {
-        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
-      } else {
-        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
-      }
+    if (bytecode() == Bytecodes::_l2f) {
+      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
     } else {
-      __ call_VM_leaf(
-        Lscratch,
-        bytecode() == Bytecodes::_l2f
-          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
-          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
-      );
+      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
     }
     break;
 
@@ -1490,11 +1480,6 @@
       Label isNaN;
       // result must be 0 if value is NaN; test by comparing value to itself
       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
-      // According to the v8 manual, you have to have a non-fp instruction
-      // between fcmp and fb.
-      if (!VM_Version::v9_instructions_work()) {
-        __ nop();
-      }
       __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
       __ delayed()->clr(Otos_i);                                     // NaN
       __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
@@ -1537,16 +1522,7 @@
     break;
 
     case Bytecodes::_d2f:
-    if (VM_Version::v9_instructions_work()) {
       __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
-    }
-    else {
-      // must uncache tos
-      __ push_d();
-      __ pop_i(O0);
-      __ pop_i(O1);
-      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
-    }
     break;
 
     default: ShouldNotReachHere();
@@ -1956,17 +1932,8 @@
     __ ld( Rarray, Rscratch, Rscratch );
     // (Rscratch is already in the native byte-ordering.)
     __ cmp( Rkey, Rscratch );
-    if ( VM_Version::v9_instructions_work() ) {
-      __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
-      __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
-    }
-    else {
-      Label end_of_if;
-      __ br( Assembler::less, true, Assembler::pt, end_of_if );
-      __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
-      __ mov( Rh, Ri );            // else i = h
-      __ bind(end_of_if);          // }
-    }
+    __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
+    __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
 
     // while (i+1 < j)
     __ bind( entry );
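
The lookupswitch binary search now always uses the branchless movcc pair that the removed else-branch emulated with a branch and delay slot. A standalone sketch of one search step:

    #include <cstdio>

    // One branchless binary-search step:
    // j = h if (key <  array[h]); i = h if (key >= array[h]).
    static void search_step(int key, int probe, int h, int& i, int& j) {
      j = (key <  probe) ? h : j;  // movcc(less,         ..., Rh, Rj)
      i = (key >= probe) ? h : i;  // movcc(greaterEqual, ..., Rh, Ri)
    }

    int main() {
      int i = 0, j = 10, h = 5;
      search_step(3, /*array[h]=*/7, h, i, j);
      printf("i=%d j=%d\n", i, j);  // i=0 j=5: key < probe narrows from above
    }
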
@@ -3418,9 +3385,7 @@
     // has been allocated.
     __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
 
-    __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
-      VM_Version::v9_instructions_work() ? NULL :
-      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
+    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
 
     // if someone beat us on the allocation, try again, otherwise continue
     __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
@@ -3701,14 +3666,7 @@
 
     __ verify_oop(O4);          // verify each monitor's oop
     __ tst(O4); // is this entry unused?
-    if (VM_Version::v9_instructions_work())
-      __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
-    else {
-      Label L;
-      __ br( Assembler::zero, true, Assembler::pn, L );
-      __ delayed()->mov(O3, O1); // rememeber this one if match
-      __ bind(L);
-    }
+    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
 
     __ cmp(O4, O0); // check if current entry is for same object
     __ brx( Assembler::equal, false, Assembler::pn, exit );
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -75,23 +75,14 @@
     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
   }
 
-  if (has_v9()) {
-    assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
-    if (ArraycopySrcPrefetchDistance >= 4096)
-      ArraycopySrcPrefetchDistance = 4064;
-    assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
-    if (ArraycopyDstPrefetchDistance >= 4096)
-      ArraycopyDstPrefetchDistance = 4064;
-  } else {
-    if (ArraycopySrcPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
-    }
-    if (ArraycopyDstPrefetchDistance > 0) {
-      warning("prefetch instructions are not available on this CPU");
-      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
-    }
-  }
+  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
+
+  assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
+  if (ArraycopySrcPrefetchDistance >= 4096)
+    ArraycopySrcPrefetchDistance = 4064;
+  assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
+  if (ArraycopyDstPrefetchDistance >= 4096)
+    ArraycopyDstPrefetchDistance = 4064;
 
   UseSSE = 0; // Only on x86 and x64
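
V9 is now a hard guarantee, so the prefetch-distance clamp runs unconditionally. A trivial sketch of the clamp; reading 4096 as the reach of the prefetch offset immediate is my inference, not stated in the patch:

    #include <cstdio>

    // Clamp as above: distances must stay below 4096; oversized requests
    // fall back to 4064 (4096 minus a 32-byte margin).
    static int clamp_prefetch_distance(int d) {
      return (d >= 4096) ? 4064 : d;
    }

    int main() {
      printf("%d %d\n", clamp_prefetch_distance(128),    // kept
                        clamp_prefetch_distance(8192));  // clamped to 4064
    }
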
 
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -177,10 +177,6 @@
     return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
   }
 
-  // Legacy
-  static bool v8_instructions_work() { return has_v8() && !has_v9(); }
-  static bool v9_instructions_work() { return has_v9(); }
-
   // Assembler testing
   static void allow_all();
   static void revert();
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1429,6 +1429,8 @@
   assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
          "possible collision");
 
+  __ block_comment("unpack_array_argument {");
+
   // Pass the length, ptr pair
   Label is_null, done;
   VMRegPair tmp;
@@ -1453,6 +1455,8 @@
   move_ptr(masm, tmp, body_arg);
   move32_64(masm, tmp, length_arg);
   __ bind(done);
+
+  __ block_comment("} unpack_array_argument");
 }
 
 
@@ -2170,27 +2174,34 @@
     }
   }
 
-  // point c_arg at the first arg that is already loaded in case we
-  // need to spill before we call out
-  int c_arg = total_c_args - total_in_args;
+  int c_arg;
 
   // Pre-load a static method's oop into r14.  Used both by locking code and
   // the normal JNI call code.
-  if (method->is_static() && !is_critical_native) {
-
-    //  load oop into a register
-    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
-
-    // Now handlize the static class mirror it's known not-null.
-    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
-    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
-
-    // Now get the handle
-    __ lea(oop_handle_reg, Address(rsp, klass_offset));
-    // store the klass handle as second argument
-    __ movptr(c_rarg1, oop_handle_reg);
-    // and protect the arg if we must spill
-    c_arg--;
+  if (!is_critical_native) {
+    // point c_arg at the first arg that is already loaded in case we
+    // need to spill before we call out
+    c_arg = total_c_args - total_in_args;
+
+    if (method->is_static()) {
+
+      //  load oop into a register
+      __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
+
+      // Now handlize the static class mirror; it's known not-null.
+      __ movptr(Address(rsp, klass_offset), oop_handle_reg);
+      map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
+
+      // Now get the handle
+      __ lea(oop_handle_reg, Address(rsp, klass_offset));
+      // store the klass handle as second argument
+      __ movptr(c_rarg1, oop_handle_reg);
+      // and protect the arg if we must spill
+      c_arg--;
+    }
+  } else {
+    // For JNI critical methods we need to save all registers in save_args.
+    c_arg = 0;
   }
 
   // Change state to native (we save the return address in the thread, since it might not
--- a/hotspot/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-#include <asm-sparc/traps.h>
-
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // No implementation
-  breakpoint_trap();
-}
-
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // No implementation
-  breakpoint_trap();
-}
-
-void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
-void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }
-
-// Use software breakpoint trap until we figure out how to do this on Linux
-void MacroAssembler::get_psr_trap()       { trap(SP_TRAP_SBPT); }
-void MacroAssembler::set_psr_trap()       { trap(SP_TRAP_SBPT); }
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -169,7 +169,6 @@
     : "memory");
   return rv;
 #else
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;
--- a/hotspot/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "runtime/os.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-#include <sys/trap.h>          // For trap numbers
-#include <v9/sys/psr_compat.h> // For V8 compatibility
-
-void MacroAssembler::read_ccr_trap(Register ccr_save) {
-  // Execute a trap to get the PSR, mask and shift
-  // to get the condition codes.
-  get_psr_trap();
-  nop();
-  set(PSR_ICC, ccr_save);
-  and3(O0, ccr_save, ccr_save);
-  srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
-}
-
-void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
-  // Execute a trap to get the PSR, shift back
-  // the condition codes, mask the condition codes
-  // back into and PSR and trap to write back the
-  // PSR.
-  sll(ccr_save, PSR_ICC_SHIFT, scratch2);
-  get_psr_trap();
-  nop();
-  set(~PSR_ICC, scratch1);
-  and3(O0, scratch1, O0);
-  or3(O0, scratch2, O0);
-  set_psr_trap();
-  nop();
-}
-
-void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
-void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
-void MacroAssembler::get_psr_trap()       { trap(ST_GETPSR); }
-void MacroAssembler::set_psr_trap()       { trap(ST_SETPSR); }
--- a/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -60,21 +60,10 @@
 
 #else
 
-extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
 extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
 
 inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
-#ifdef COMPILER2
-  // Compiler2 does not support v8, it is used only for v9.
   _Atomic_move_long_v9(src, dst);
-#else
-  // The branch is cheaper then emulated LDD.
-  if (VM_Version::v9_instructions_work()) {
-    _Atomic_move_long_v9(src, dst);
-  } else {
-    _Atomic_move_long_v8(src, dst);
-  }
-#endif
 }
 
 inline jlong Atomic::load(volatile jlong* src) {
@@ -209,7 +198,6 @@
     : "memory");
   return rv;
 #else  //_LP64
-  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   volatile jlong_accessor evl, cvl, rv;
   evl.long_value = exchange_value;
   cvl.long_value = compare_value;
@@ -318,7 +306,6 @@
   // Return 64 bit value in %o0
   return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
 #else  // _LP64
-  assert (VM_Version::v9_instructions_work(), "only supported on v9");
   // Return 64 bit value in %o0,%o1 by hand
   return _Atomic_casl(exchange_value, dest, compare_value);
 #endif // _LP64
--- a/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Fri Jun 14 16:33:34 2013 -0700
@@ -152,23 +152,6 @@
         .nonvolatile
         .end
 
-  // Support for jlong Atomic::load and Atomic::store on v8.
-  //
-  // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
-  //
-  // Arguments:
-  //      src:  O0
-  //      dest: O1
-  //
-  // Overwrites O2 and O3
-
-        .inline _Atomic_move_long_v8,2
-        .volatile
-        ldd     [%o0], %o2
-        std     %o2, [%o1]
-        .nonvolatile
-        .end
-
   // Support for jlong Atomic::load and Atomic::store on v9.
   //
   // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -235,6 +235,9 @@
   return false;
 }
 
+bool InstructForm::is_ideal_negD() const {
+  return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0);
+}
 
 // Return 'true' if this instruction matches an ideal 'Copy*' node
 int InstructForm::is_ideal_copy() const {
@@ -533,6 +536,12 @@
   if( data_type != Form::none )
     rematerialize = true;
 
+  // Ugly: until a better fix is implemented, disable rematerialization for
+  // negD nodes because they have proved problematic.
+  if (is_ideal_negD()) {
+    return false;
+  }
+
   // Constants
   if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
     rematerialize = true;
--- a/hotspot/src/share/vm/adlc/formssel.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -147,6 +147,7 @@
   virtual int         is_empty_encoding() const; // _size=0 and/or _insencode empty
   virtual int         is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
   virtual int         is_ideal_copy() const;    // node matches ideal 'Copy*'
+  virtual bool        is_ideal_negD() const;    // node matches ideal 'NegD'
   virtual bool        is_ideal_if()   const;    // node matches ideal 'If'
   virtual bool        is_ideal_fastlock() const; // node matches 'FastLock'
   virtual bool        is_ideal_membar() const;  // node matches ideal 'MemBarXXX'
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -406,10 +406,10 @@
   develop(intx, WarmCallMaxSize, 999999,                                    \
           "size of the largest inlinable method")                           \
                                                                             \
-  product(intx, MaxNodeLimit, 65000,                                        \
+  product(intx, MaxNodeLimit, 80000,                                        \
           "Maximum number of nodes")                                        \
                                                                             \
-  product(intx, NodeLimitFudgeFactor, 1000,                                 \
+  product(intx, NodeLimitFudgeFactor, 2000,                                 \
           "Fudge Factor for certain optimizations")                         \
                                                                             \
   product(bool, UseJumpTables, true,                                        \
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -435,6 +435,9 @@
     // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
     // not match the Phi itself, insert a copy.
     coalesce.insert_copies(_matcher);
+    if (C->failing()) {
+      return;
+    }
   }
 
   // After aggressive coalesce, attempt a first cut at coloring.
--- a/hotspot/src/share/vm/opto/coalesce.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/opto/coalesce.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -240,6 +240,8 @@
   _unique = C->unique();
 
   for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
+    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
+    if (C->failing()) return;
     Block *b = _phc._cfg._blocks[i];
     uint cnt = b->num_preds();  // Number of inputs to the Phi
 
--- a/hotspot/src/share/vm/opto/matcher.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -985,6 +985,8 @@
   mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
 
   while (mstack.is_nonempty()) {
+    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
+    if (C->failing()) return NULL;
     n = mstack.node();          // Leave node on stack
     Node_State nstate = mstack.state();
     if (nstate == Visit) {
--- a/hotspot/src/share/vm/prims/jvm.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -3241,24 +3241,10 @@
 JVM_END
 
 
-// Utility object for collecting method holders walking down the stack
-class KlassLink: public ResourceObj {
- public:
-  KlassHandle klass;
-  KlassLink*  next;
-
-  KlassLink(KlassHandle k) { klass = k; next = NULL; }
-};
-
-
 JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
   JVMWrapper("JVM_GetClassContext");
   ResourceMark rm(THREAD);
   JvmtiVMObjectAllocEventCollector oam;
-  // Collect linked list of (handles to) method holders
-  KlassLink* first = NULL;
-  KlassLink* last  = NULL;
-  int depth = 0;
   vframeStream vfst(thread);
 
   if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {
@@ -3272,32 +3258,23 @@
   }
 
   // Collect method holders
+  GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>();
   for (; !vfst.at_end(); vfst.security_next()) {
     Method* m = vfst.method();
     // Native frames are not returned
     if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
       Klass* holder = m->method_holder();
       assert(holder->is_klass(), "just checking");
-      depth++;
-      KlassLink* l = new KlassLink(KlassHandle(thread, holder));
-      if (first == NULL) {
-        first = last = l;
-      } else {
-        last->next = l;
-        last = l;
-      }
+      klass_array->append(holder);
     }
   }
 
   // Create result array of type [Ljava/lang/Class;
-  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
+  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL);
   // Fill in mirrors corresponding to method holders
-  int index = 0;
-  while (first != NULL) {
-    result->obj_at_put(index++, first->klass()->java_mirror());
-    first = first->next;
+  for (int i = 0; i < klass_array->length(); i++) {
+    result->obj_at_put(i, klass_array->at(i)->java_mirror());
   }
-  assert(index == depth, "just checking");
 
   return (jobjectArray) JNIHandles::make_local(env, result);
 JVM_END
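
The rewrite replaces the hand-rolled KlassLink list and its separate depth counter with a growable array, so the result length is just length() and the fill loop indexes directly. A standalone C++ analogue of the shape of the change, with std::vector standing in for GrowableArray:

    #include <cstdio>
    #include <vector>

    int main() {
      // Before: singly linked KlassLink list plus a depth counter.
      // After: append into a dynamic array; depth is simply its length.
      std::vector<const char*> holders;
      for (const char* m : {"HolderA", "HolderB", "HolderC"}) {
        holders.push_back(m);                  // klass_array->append(holder)
      }
      for (size_t i = 0; i < holders.size(); i++) {
        printf("%zu: %s\n", i, holders[i]);    // result->obj_at_put(i, ...)
      }
    }
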
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Jun 14 07:27:22 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Fri Jun 14 16:33:34 2013 -0700
@@ -1885,21 +1885,6 @@
   // Note: Needs platform-dependent factoring.
   bool status = true;
 
-#if ( (defined(COMPILER2) && defined(SPARC)))
-  // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
-  // on sparc doesn't require generation of a stub as is the case on, e.g.,
-  // x86.  Normally, VM_Version_init must be called from init_globals in
-  // init.cpp, which is called by the initial java thread *after* arguments
-  // have been parsed.  VM_Version_init gets called twice on sparc.
-  extern void VM_Version_init();
-  VM_Version_init();
-  if (!VM_Version::has_v9()) {
-    jio_fprintf(defaultStream::error_stream(),
-                "V8 Machine detected, Server requires V9\n");
-    status = false;
-  }
-#endif /* COMPILER2 && SPARC */
-
   // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
   // builds so the cost of stack banging can be measured.
 #if (defined(PRODUCT) && defined(SOLARIS))