8031319: PPC64: Some fixes in ppc and aix coding.
author: goetz
Tue, 07 Jan 2014 17:24:59 +0100
changeset 22867 309bcf262a19
parent 22866 5e47c3568183
child 22868 7f6eb436873b
8031319: PPC64: Some fixes in ppc and aix coding. Reviewed-by: kvn
hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp
hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp
hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp
hotspot/src/cpu/ppc/vm/nativeInst_ppc.hpp
hotspot/src/cpu/ppc/vm/ppc.ad
hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp
hotspot/src/os/aix/vm/os_aix.cpp
hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp
--- a/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Tue Jan 07 17:24:59 2014 +0100
@@ -1513,6 +1513,7 @@
   // Resize frame to get rid of a potential extension.
   __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
 
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
   // Load target address of the runtime stub.
   __ load_const(R12_scratch2, (StubRoutines::throw_StackOverflowError_entry()));
   __ mtctr(R12_scratch2);
@@ -2150,6 +2151,7 @@
   //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
   //      registers using the stack and resume the calling method with a pending excp.
 
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
   __ load_const(R3_ARG1, (StubRoutines::throw_StackOverflowError_entry()));
   __ mtctr(R3_ARG1);
   __ bctr();
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Tue Jan 07 17:24:59 2014 +0100
@@ -206,7 +206,7 @@
   // The relocation points to the second instruction, the ori,
   // and the ori reads and writes the same register dst.
   const int dst = inv_rta_field(inst2);
-  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be addi reading and writing dst");
+  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
   // Now, find the preceding addis which writes to dst.
   int inst1 = 0;
   address inst1_addr = inst2_addr - BytesPerInstWord;
@@ -222,8 +222,7 @@
   int xd = (data >>  0) & 0xffff;
 
   set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
-  set_imm((int *)inst2_addr, (short)(xd));
-
+  set_imm((int *)inst2_addr,        (xd)); // unsigned int
   return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
 }
 
@@ -237,7 +236,7 @@
   // The relocation points to the second instruction, the ori,
   // and the ori reads and writes the same register dst.
   const int dst = inv_rta_field(inst2);
-  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be addi reading and writing dst");
+  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
   // Now, find the preceding lis which writes to dst.
   int inst1 = 0;
   address inst1_addr = inst2_addr - BytesPerInstWord;
@@ -996,10 +995,10 @@
 
       bool has_env = (fd != NULL && fd->env() != NULL);
       return branch_to(R11, /*and_link=*/true,
-                                /*save toc=*/false,
-                                /*restore toc=*/false,
-                                /*load toc=*/true,
-                                /*load env=*/has_env);
+                            /*save toc=*/false,
+                            /*restore toc=*/false,
+                            /*load toc=*/true,
+                            /*load env=*/has_env);
     } else {
       // It's a friend function. Load the entry point and don't care about
       // toc and env. Use an optimizable call instruction, but ensure the
@@ -1020,10 +1019,10 @@
       // so do a full call-c here.
       load_const(R11, (address)fd, R0);
       return branch_to(R11, /*and_link=*/true,
-                                /*save toc=*/false,
-                                /*restore toc=*/false,
-                                /*load toc=*/true,
-                                /*load env=*/true);
+                            /*save toc=*/false,
+                            /*restore toc=*/false,
+                            /*load toc=*/true,
+                            /*load env=*/true);
     } else {
       // it's a friend function, load the entry point and don't care about
       // toc and env.
@@ -1967,12 +1966,13 @@
   // Must fence, otherwise, preceding store(s) may float below cmpxchg.
   // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
   // CmpxchgX sets cr_reg to cmpX(current, displaced).
+  membar(Assembler::StoreStore);
   cmpxchgd(/*flag=*/flag,
            /*current_value=*/current_header,
            /*compare_value=*/displaced_header,
            /*exchange_value=*/box,
            /*where=*/oop,
-           MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
+           MacroAssembler::MemBarAcq,
            MacroAssembler::cmpxchgx_hint_acquire_lock(),
            noreg,
            &cas_failed);
@@ -2158,7 +2158,7 @@
   load_const_optimized(Rtmp, (address)byte_map_base, R0);
   srdi(Robj, Robj, CardTableModRefBS::card_shift);
   li(R0, 0); // dirty
-  if (UseConcMarkSweepGC) release();
+  if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
   stbx(R0, Rtmp, Robj);
 }
 
@@ -2399,15 +2399,17 @@
 
 
 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
-  if (src == noreg) src = dst;
+  Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
   if (Universe::narrow_klass_base() != 0) {
-    load_const(R0, Universe::narrow_klass_base());
-    sub(dst, src, R0);
+    load_const(R0, Universe::narrow_klass_base(), (dst != current) ? dst : noreg); // Use dst as temp if it is free.
+    sub(dst, current, R0);
+    current = dst;
   }
-  if (Universe::narrow_klass_shift() != 0 ||
-      Universe::narrow_klass_base() == 0 && src != dst) {  // Move required.
-    srdi(dst, src, Universe::narrow_klass_shift());
+  if (Universe::narrow_klass_shift() != 0) {
+    srdi(dst, current, Universe::narrow_klass_shift());
+    current = dst;
   }
+  mr_if_needed(dst, current); // Move may be required.
 }
 
 void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
--- a/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp	Tue Jan 07 17:24:59 2014 +0100
@@ -71,7 +71,7 @@
   if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
     // Yes we do, so get the destination from the trampoline stub.
     const address trampoline_stub_addr = destination;
-    destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
+    destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination(nm);
   }
 
   return destination;
@@ -371,8 +371,8 @@
   return instruction_addr;
 }
 
-address NativeCallTrampolineStub::destination() const {
-  CodeBlob* cb = CodeCache::find_blob(addr_at(0));
+address NativeCallTrampolineStub::destination(nmethod *nm) const {
+  CodeBlob* cb = nm ? nm : CodeCache::find_blob_unsafe(addr_at(0));
   address ctable = cb->content_begin();
 
   return *(address*)(ctable + destination_toc_offset());
--- a/hotspot/src/cpu/ppc/vm/nativeInst_ppc.hpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/nativeInst_ppc.hpp	Tue Jan 07 17:24:59 2014 +0100
@@ -378,7 +378,7 @@
 
  public:
 
-  address destination() const;
+  address destination(nmethod *nm = NULL) const;
   int destination_toc_offset() const;
 
   void set_destination(address new_destination);
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Tue Jan 07 17:24:59 2014 +0100
@@ -634,7 +634,6 @@
   R31_H, R31
 );
 
-// 64 bit registers used excluding r19.
 // Used to hold the TOC to avoid collisions with expanded DynamicCall
 // which uses r19 as inline cache internally and expanded LeafCall which uses
 // r2, r11 and r12 internally.
@@ -1421,13 +1420,11 @@
 
   // Optimized version for most common case.
   if (UsePower6SchedulerPPC64 &&
-      !method_is_frameless && Assembler::is_simm((int)(-(_abi(lr) + offset)), 16) &&
+      !method_is_frameless && Assembler::is_simm((int)(-offset), 16) &&
       !(false /* ConstantsALot TODO: PPC port*/)) {
     ___(or) mr(callers_sp, R1_SP);
-    ___(addi) addi(R1_SP, R1_SP, -offset);
-    ___stop; // Emulator won't recognize dependency.
-    ___(std) std(return_pc, _abi(lr) + offset, R1_SP);
-    ___(std) std(callers_sp, 0, R1_SP);
+    ___(std) std(return_pc, _abi(lr), R1_SP);
+    ___(stdu) stdu(R1_SP, -offset, R1_SP);
     return;
   }
 
@@ -1472,6 +1469,7 @@
 }
 #undef ___
 #undef ___stop
+#undef ___advance
 
 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
   // Variable size. determine dynamically.
@@ -2853,7 +2851,7 @@
     // StoreStore barrier conditionally.
     __ lwz(R0, 0, $releaseFieldAddr$$Register);
     __ cmpwi(CCR0, R0, 0);
-    __ beq_predict_taken(CCR0, skip_release);
+    __ beq_predict_taken(CCR0, skip_storestore);
 #endif
     __ li(R0, 0);
     __ membar(Assembler::StoreStore);
@@ -3518,8 +3516,9 @@
 #endif
     // If fails should store backlink before unextending.
 
-    if (ra_->C->env()->failing())
+    if (ra_->C->env()->failing()) {
       return;
+    }
   %}
 
   // Second node of expanded dynamic call - the call.
@@ -3940,11 +3939,6 @@
 // long branch? Not required.
 ins_attrib ins_short_branch(0);
 
-// This instruction does implicit checks at the given machine-instruction offset
-// (optional attribute).
-ins_attrib ins_implicit_check_offset(-1);  // TODO: PPC port
-
-ins_attrib ins_implicit_check_follows_matched_true_path(true);
 ins_attrib ins_is_TrapBasedCheckNode(true);
 
 // Number of constants.
--- a/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Tue Jan 07 17:24:59 2014 +0100
@@ -2027,6 +2027,11 @@
     StubRoutines::_forward_exception_entry          = generate_forward_exception();
     StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
     StubRoutines::_catch_exception_entry            = generate_catch_exception();
+
+    // Build this early so it's available for the interpreter.
+    StubRoutines::_throw_StackOverflowError_entry   =
+      generate_throw_exception("StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
   }
 
   void generate_all() {
@@ -2038,7 +2043,6 @@
     // Handle IncompatibleClassChangeError in itable stubs.
     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
-    StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),   false);
 
     StubRoutines::_handler_for_unsafe_access_entry         = generate_handler_for_unsafe_access();
 
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Tue Jan 07 17:24:59 2014 +0100
@@ -1152,8 +1152,19 @@
 bool os::getTimesSecs(double* process_real_time,
                       double* process_user_time,
                       double* process_system_time) {
-  Unimplemented();
-  return false;
+  struct tms ticks;
+  clock_t real_ticks = times(&ticks);
+
+  if (real_ticks == (clock_t) (-1)) {
+    return false;
+  } else {
+    double ticks_per_second = (double) clock_tics_per_sec;
+    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
+    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
+    *process_real_time = ((double) real_ticks) / ticks_per_second;
+
+    return true;
+  }
 }
 
 
@@ -2440,7 +2451,7 @@
   // trace
   if (Verbose && !addr) {
     if (requested_addr != NULL) {
-      warning("failed to shm-allocate 0x%llX bytes at with address 0x%p.", size, requested_addr);
+      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
     } else {
       warning("failed to shm-allocate 0x%llX bytes at any address.", size);
     }
@@ -2521,16 +2532,17 @@
 
 cleanup_mmap:
 
-  if (addr) {
-    if (Verbose) {
+  // trace
+  if (Verbose) {
+    if (addr) {
       fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
     }
-  }
-  else {
-    if (requested_addr != NULL) {
-      warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
-    } else {
-      warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
+    else {
+      if (requested_addr != NULL) {
+        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
+      } else {
+        warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
+      }
     }
   }
 
@@ -3359,7 +3371,46 @@
 
 static bool call_chained_handler(struct sigaction *actp, int sig,
                                  siginfo_t *siginfo, void *context) {
-  Unimplemented();
+  // Call the old signal handler
+  if (actp->sa_handler == SIG_DFL) {
+    // It's more reasonable to let jvm treat it as an unexpected exception
+    // instead of taking the default action.
+    return false;
+  } else if (actp->sa_handler != SIG_IGN) {
+    if ((actp->sa_flags & SA_NODEFER) == 0) {
+      // automatically block the signal
+      sigaddset(&(actp->sa_mask), sig);
+    }
+
+    sa_handler_t hand = NULL;
+    sa_sigaction_t sa = NULL;
+    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
+    // retrieve the chained handler
+    if (siginfo_flag_set) {
+      sa = actp->sa_sigaction;
+    } else {
+      hand = actp->sa_handler;
+    }
+
+    if ((actp->sa_flags & SA_RESETHAND) != 0) {
+      actp->sa_handler = SIG_DFL;
+    }
+
+    // try to honor the signal mask
+    sigset_t oset;
+    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
+
+    // call into the chained handler
+    if (siginfo_flag_set) {
+      (*sa)(sig, siginfo, context);
+    } else {
+      (*hand)(sig);
+    }
+
+    // restore the signal mask
+    pthread_sigmask(SIG_SETMASK, &oset, 0);
+  }
+  // Tell jvm's signal handler the signal is taken care of.
   return true;
 }
 
@@ -4041,7 +4092,23 @@
 }
 
 bool os::find(address addr, outputStream* st) {
-  Unimplemented();
+
+  st->print(PTR_FORMAT ": ", addr);
+
+  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
+  if (lib) {
+    lib->print(st);
+    return true;
+  } else {
+    lib = LoadedLibraries::find_for_data_address(addr);
+    if (lib) {
+      lib->print(st);
+      return true;
+    } else {
+      st->print_cr("(outside any module)");
+    }
+  }
+
   return false;
 }
 
@@ -4099,8 +4166,22 @@
 
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
-  Unimplemented();
-  return false;
+  DIR *dir = NULL;
+  struct dirent *ptr;
+
+  dir = opendir(path);
+  if (dir == NULL) return true;
+
+  /* Scan the directory */
+  bool result = true;
+  char buf[sizeof(struct dirent) + MAX_PATH];
+  while (result && (ptr = ::readdir(dir)) != NULL) {
+    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
+      result = false;
+    }
+  }
+  closedir(dir);
+  return result;
 }
 
 // This code originates from JDK's sysOpen and open64_w
@@ -4127,7 +4208,7 @@
   fd = ::open64(path, oflag, mode);
   if (fd == -1) return -1;
 
-  //If the open succeeded, the file might still be a directory
+  // If the open succeeded, the file might still be a directory.
   {
     struct stat64 buf64;
     int ret = ::fstat64(fd, &buf64);
@@ -4182,8 +4263,11 @@
 
 // create binary file, rewriting existing file if required
 int os::create_binary_file(const char* path, bool rewrite_existing) {
-  Unimplemented();
-  return 0;
+  int oflags = O_WRONLY | O_CREAT;
+  if (!rewrite_existing) {
+    oflags |= O_EXCL;
+  }
+  return ::open64(path, oflags, S_IREAD | S_IWRITE);
 }
 
 // return current position of file pointer
--- a/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Mon Jan 06 11:02:21 2014 +0100
+++ b/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Tue Jan 07 17:24:59 2014 +0100
@@ -53,62 +53,62 @@
 
 inline jlong Atomic::load(volatile jlong* src) { return *src; }
 
-/*
-  machine barrier instructions:
+//
+//   machine barrier instructions:
+//
+//   - ppc_sync            two-way memory barrier, aka fence
+//   - ppc_lwsync          orders  Store|Store,
+//                                  Load|Store,
+//                                  Load|Load,
+//                         but not Store|Load
+//   - ppc_eieio           orders memory accesses for device memory (only)
+//   - ppc_isync           invalidates speculatively executed instructions
+//                         From the POWER ISA 2.06 documentation:
+//                          "[...] an isync instruction prevents the execution of
+//                         instructions following the isync until instructions
+//                         preceding the isync have completed, [...]"
+//                         From IBM's AIX assembler reference:
+//                          "The isync [...] instructions causes the processor to
+//                         refetch any instructions that might have been fetched
+//                         prior to the isync instruction. The instruction isync
+//                         causes the processor to wait for all previous instructions
+//                         to complete. Then any instructions already fetched are
+//                         discarded and instruction processing continues in the
+//                         environment established by the previous instructions."
+//
+//   semantic barrier instructions:
+//   (as defined in orderAccess.hpp)
+//
+//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
+//                                 Load|Store
+//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
+//                                 Load|Load
+//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
+//                                 Load|Store,
+//                                 Load|Load,
+//                                Store|Load
+//
 
-  - ppc_sync            two-way memory barrier, aka fence
-  - ppc_lwsync          orders  Store|Store,
-                                 Load|Store,
-                                 Load|Load,
-                        but not Store|Load
-  - ppc_eieio           orders memory accesses for device memory (only)
-  - ppc_isync           invalidates speculatively executed instructions
-                        From the POWER ISA 2.06 documentation:
-                         "[...] an isync instruction prevents the execution of
-                        instructions following the isync until instructions
-                        preceding the isync have completed, [...]"
-                        From IBM's AIX assembler reference:
-                         "The isync [...] instructions causes the processor to
-                        refetch any instructions that might have been fetched
-                        prior to the isync instruction. The instruction isync
-                        causes the processor to wait for all previous instructions
-                        to complete. Then any instructions already fetched are
-                        discarded and instruction processing continues in the
-                        environment established by the previous instructions."
-
-  semantic barrier instructions:
-  (as defined in orderAccess.hpp)
-
-  - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
-                                Load|Store
-  - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
-                                Load|Load
-  - ppc_fence           orders Store|Store,       (maps to ppc_sync)
-                                Load|Store,
-                                Load|Load,
-                               Store|Load
-*/
-
-#define strasm_ppc_sync                       "\n  sync    \n"
-#define strasm_ppc_lwsync                     "\n  lwsync  \n"
-#define strasm_ppc_isync                      "\n  isync   \n"
-#define strasm_ppc_release                    strasm_ppc_lwsync
-#define strasm_ppc_acquire                    strasm_ppc_lwsync
-#define strasm_ppc_fence                      strasm_ppc_sync
-#define strasm_ppc_nobarrier                  ""
-#define strasm_ppc_nobarrier_clobber_memory   ""
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
 
 inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
 
   unsigned int result;
 
   __asm__ __volatile__ (
-    strasm_ppc_lwsync
+    strasm_lwsync
     "1: lwarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stwcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_ppc_isync
+    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
@@ -122,12 +122,12 @@
   long result;
 
   __asm__ __volatile__ (
-    strasm_ppc_lwsync
+    strasm_lwsync
     "1: ldarx   %0,  0, %2    \n"
     "   add     %0, %0, %1    \n"
     "   stdcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_ppc_isync
+    strasm_isync
     : /*%0*/"=&r" (result)
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
@@ -145,15 +145,15 @@
   unsigned int temp;
 
   __asm__ __volatile__ (
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     "1: lwarx   %0,  0, %2    \n"
     "   addic   %0, %0,  1    \n"
     "   stwcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     : /*%0*/"=&r" (temp), "=m" (*dest)
     : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_ppc_nobarrier_clobber_memory);
+    : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
@@ -162,15 +162,15 @@
   long temp;
 
   __asm__ __volatile__ (
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     "1: ldarx   %0,  0, %2    \n"
     "   addic   %0, %0,  1    \n"
     "   stdcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     : /*%0*/"=&r" (temp), "=m" (*dest)
     : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_ppc_nobarrier_clobber_memory);
+    : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
@@ -184,15 +184,15 @@
   unsigned int temp;
 
   __asm__ __volatile__ (
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     "1: lwarx   %0,  0, %2    \n"
     "   addic   %0, %0, -1    \n"
     "   stwcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     : /*%0*/"=&r" (temp), "=m" (*dest)
     : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_ppc_nobarrier_clobber_memory);
+    : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
@@ -201,15 +201,15 @@
   long temp;
 
   __asm__ __volatile__ (
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     "1: ldarx   %0,  0, %2    \n"
     "   addic   %0, %0, -1    \n"
     "   stdcx.  %0,  0, %2    \n"
     "   bne-    1b            \n"
-    strasm_ppc_nobarrier
+    strasm_nobarrier
     : /*%0*/"=&r" (temp), "=m" (*dest)
     : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_ppc_nobarrier_clobber_memory);
+    : "cc" strasm_nobarrier_clobber_memory);
 
 }
 
@@ -227,14 +227,14 @@
 
   __asm__ __volatile__ (
     /* lwsync */
-    strasm_ppc_lwsync
+    strasm_lwsync
     /* atomic loop */
     "1:                                                 \n"
     "   lwarx   %[old_value], %[dest], %[zero]          \n"
     "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
     /* isync */
-    strasm_ppc_sync
+    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -263,14 +263,14 @@
 
   __asm__ __volatile__ (
     /* lwsync */
-    strasm_ppc_lwsync
+    strasm_lwsync
     /* atomic loop */
     "1:                                                 \n"
     "   ldarx   %[old_value], %[dest], %[zero]          \n"
     "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
     /* isync */
-    strasm_ppc_sync
+    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -304,7 +304,7 @@
 
   __asm__ __volatile__ (
     /* fence */
-    strasm_ppc_sync
+    strasm_sync
     /* simple guard */
     "   lwz     %[old_value], 0(%[dest])                \n"
     "   cmpw    %[compare_value], %[old_value]          \n"
@@ -317,7 +317,7 @@
     "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
     /* acquire */
-    strasm_ppc_sync
+    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -348,7 +348,7 @@
 
   __asm__ __volatile__ (
     /* fence */
-    strasm_ppc_sync
+    strasm_sync
     /* simple guard */
     "   ld      %[old_value], 0(%[dest])                \n"
     "   cmpd    %[compare_value], %[old_value]          \n"
@@ -361,7 +361,7 @@
     "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
     /* acquire */
-    strasm_ppc_sync
+    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -389,13 +389,13 @@
   return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
 }
 
-#undef strasm_ppc_sync
-#undef strasm_ppc_lwsync
-#undef strasm_ppc_isync
-#undef strasm_ppc_release
-#undef strasm_ppc_acquire
-#undef strasm_ppc_fence
-#undef strasm_ppc_nobarrier
-#undef strasm_ppc_nobarrier_clobber_memory
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
 
 #endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP