Merge
author kvn
date Fri, 19 Jul 2013 13:32:53 -0700
changeset 22821 40a3c34a50a1
parent 22820 70660a78baaf
parent 18736 c37fce37f43e
child 22822 696e77cc8e7b
Merge
hotspot/src/os/bsd/vm/os_bsd.cpp
hotspot/src/os/linux/vm/os_linux.cpp
hotspot/src/share/vm/memory/klassInfoClosure.hpp
hotspot/src/share/vm/runtime/aprofiler.cpp
hotspot/src/share/vm/runtime/aprofiler.hpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/agent/src/os/linux/libproc.h	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/agent/src/os/linux/libproc.h	Fri Jul 19 13:32:53 2013 -0700
@@ -80,7 +80,7 @@
 *************************************************************************************/
 
 
-#if defined(sparc)  || defined(sparcv9)
+#if defined(sparc) || defined(sparcv9) || defined(ppc64)
 #define user_regs_struct  pt_regs
 #endif
 
--- a/hotspot/make/Makefile	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/make/Makefile	Fri Jul 19 13:32:53 2013 -0700
@@ -87,6 +87,7 @@
 # Typical C1/C2 targets made available with this Makefile
 C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1
 C2_VM_TARGETS=product  fastdebug  optimized  debug
+CORE_VM_TARGETS=productcore fastdebugcore optimizedcore debugcore
 ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero
 SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark
 MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1
@@ -136,6 +137,12 @@
 all_debugshark:     debugshark docs export_debug
 all_optimizedshark: optimizedshark docs export_optimized
 
+allcore:           all_productcore all_fastdebugcore
+all_productcore:   productcore docs export_product
+all_fastdebugcore: fastdebugcore docs export_fastdebug
+all_debugcore:     debugcore docs export_debug
+all_optimizedcore: optimizedcore docs export_optimized
+
 # Do everything
 world:         all create_jdk
 
@@ -154,6 +161,7 @@
 # Output directories
 C1_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1
 C2_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
+CORE_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_core
 MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
 ZERO_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_zero
 SHARK_DIR   =$(OUTPUTDIR)/$(VM_PLATFORM)_shark
@@ -167,6 +175,10 @@
 	$(CD) $(GAMMADIR)/make; \
 	$(MAKE) BUILD_DIR=$(C2_DIR) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT)
 
+$(CORE_VM_TARGETS):
+	$(CD) $(GAMMADIR)/make; \
+	$(MAKE) BUILD_DIR=$(CORE_DIR) BUILD_FLAVOR=$(@:%core=%) VM_TARGET=$@ generic_buildcore $(ALT_OUT)
+
 $(ZERO_VM_TARGETS):
 	$(CD) $(GAMMADIR)/make; \
 	$(MAKE) BUILD_DIR=$(ZERO_DIR) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ generic_buildzero $(ALT_OUT)
@@ -228,6 +240,20 @@
 		      $(MAKE_ARGS) $(VM_TARGET)
 endif
 
+generic_buildcore: $(HOTSPOT_SCRIPT)
+ifeq ($(HS_ARCH),ppc)
+  ifeq ($(ARCH_DATA_MODEL),64)
+	$(MKDIR) -p $(OUTPUTDIR)
+	$(CD) $(OUTPUTDIR); \
+		$(MAKE) -f $(ABS_OS_MAKEFILE) \
+			$(MAKE_ARGS) $(VM_TARGET)
+  else
+	@$(ECHO) "No ($(VM_TARGET)) for ppc ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)"
+  endif
+else
+	@$(ECHO) "No ($(VM_TARGET)) for $(HS_ARCH)"
+endif
+
 generic_buildzero: $(HOTSPOT_SCRIPT)
 	$(MKDIR) -p $(OUTPUTDIR)
 	$(CD) $(OUTPUTDIR); \
@@ -287,6 +313,7 @@
 DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
 C1_BUILD_DIR      =$(C1_DIR)/$(BUILD_FLAVOR)
 C2_BUILD_DIR      =$(C2_DIR)/$(BUILD_FLAVOR)
+CORE_BUILD_DIR    =$(CORE_DIR)/$(BUILD_FLAVOR)
 MINIMAL1_BUILD_DIR=$(MINIMAL1_DIR)/$(BUILD_FLAVOR)
 ZERO_BUILD_DIR    =$(ZERO_DIR)/$(BUILD_FLAVOR)
 SHARK_BUILD_DIR   =$(SHARK_DIR)/$(BUILD_FLAVOR)
@@ -448,6 +475,28 @@
 	$(install-file)
 endif
 
+# Core
+ifeq ($(JVM_VARIANT_CORE), true)
+# Common
+$(EXPORT_LIB_DIR)/%.jar:			$(CORE_BUILD_DIR)/../generated/%.jar
+	$(install-file)
+$(EXPORT_INCLUDE_DIR)/%:			$(CORE_BUILD_DIR)/../generated/jvmtifiles/%
+	$(install-file)
+# Unix
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(CORE_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(CORE_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):	$(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.debuginfo:		$(CORE_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.diz:			$(CORE_BUILD_DIR)/%.diz
+	$(install-file)
+endif
+
 # Shark
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
 # Common
@@ -510,6 +559,7 @@
 clean_build:
 	$(RM) -r $(C1_DIR)
 	$(RM) -r $(C2_DIR)
+	$(RM) -r $(CORE_DIR)
 	$(RM) -r $(ZERO_DIR)
 	$(RM) -r $(SHARK_DIR)
 	$(RM) -r $(MINIMAL1_DIR)
--- a/hotspot/make/defs.make	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/make/defs.make	Fri Jul 19 13:32:53 2013 -0700
@@ -259,7 +259,7 @@
 
   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
   # is not explicitly listed below, it is treated as x86.
-  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
+  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 zero,$(ARCH)))
   ARCH/       = x86
   ARCH/sparc  = sparc
   ARCH/sparc64= sparc
@@ -285,6 +285,11 @@
       BUILDARCH = sparcv9
     endif
   endif
+  ifeq ($(BUILDARCH), ppc)
+    ifdef LP64
+      BUILDARCH = ppc64
+    endif
+  endif
 
   # LIBARCH is 1:1 mapping from BUILDARCH
   LIBARCH         = $(LIBARCH/$(BUILDARCH))
@@ -293,12 +298,12 @@
   LIBARCH/sparc   = sparc
   LIBARCH/sparcv9 = sparcv9
   LIBARCH/ia64    = ia64
-  LIBARCH/ppc64   = ppc
+  LIBARCH/ppc64   = ppc64
   LIBARCH/ppc     = ppc
   LIBARCH/arm     = arm
   LIBARCH/zero    = $(ZERO_LIBARCH)
 
-  LP64_ARCH = sparcv9 amd64 ia64 zero
+  LP64_ARCH = sparcv9 amd64 ia64 ppc64 zero
 endif
 
 # Required make macro settings for all platforms
--- a/hotspot/make/linux/makefiles/buildtree.make	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/make/linux/makefiles/buildtree.make	Fri Jul 19 13:32:53 2013 -0700
@@ -193,6 +193,7 @@
 DATA_MODE/sparc = 32
 DATA_MODE/sparcv9 = 64
 DATA_MODE/amd64 = 64
+DATA_MODE/ppc64 = 64
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
--- a/hotspot/make/linux/makefiles/defs.make	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/make/linux/makefiles/defs.make	Fri Jul 19 13:32:53 2013 -0700
@@ -120,6 +120,15 @@
   HS_ARCH          = ppc
 endif
 
+# PPC64
+ifeq ($(ARCH), ppc64)
+  ARCH_DATA_MODEL  = 64
+  MAKE_ARGS        += LP64=1
+  PLATFORM         = linux-ppc64
+  VM_PLATFORM      = linux_ppc64
+  HS_ARCH          = ppc
+endif
+
 # On 32 bit linux we build server and client, on 64 bit just server.
 ifeq ($(JVM_VARIANTS),)
   ifeq ($(ARCH_DATA_MODEL), 32)
@@ -255,7 +264,7 @@
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
 
-ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
--- a/hotspot/make/linux/makefiles/gcc.make	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/make/linux/makefiles/gcc.make	Fri Jul 19 13:32:53 2013 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -181,6 +181,7 @@
 ifndef E500V2
 ARCHFLAG/ppc     =  -mcpu=powerpc
 endif
+ARCHFLAG/ppc64   =  -m64
 
 CFLAGS     += $(ARCHFLAG)
 AOUT_FLAGS += $(ARCHFLAG)
@@ -346,6 +347,7 @@
   DEBUG_CFLAGS/amd64 = -g
   DEBUG_CFLAGS/arm   = -g
   DEBUG_CFLAGS/ppc   = -g
+  DEBUG_CFLAGS/ppc64 = -g
   DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
@@ -361,6 +363,7 @@
     FASTDEBUG_CFLAGS/amd64 = -g
     FASTDEBUG_CFLAGS/arm   = -g
     FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS/ppc64 = -g
     FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
@@ -375,6 +378,7 @@
     OPT_CFLAGS/amd64 = -g
     OPT_CFLAGS/arm   = -g
     OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS/ppc64 = -g
     OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
     ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/make/linux/makefiles/ppc64.make	Fri Jul 19 13:32:53 2013 -0700
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Make C code aware that it is on a 64-bit platform.
+CFLAGS += -D_LP64=1
+
+# fixes `relocation truncated to fit' error for gcc 4.1.
+CFLAGS += -mminimal-toc
+
+# Use ppc64 instructions, but schedule for power5.
+CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+
+# let linker find external 64 bit libs.
+LFLAGS_VM += -L/lib64
+
+# specify lib format.
+LFLAGS_VM +=  -Wl,-melf64ppc
--- a/hotspot/make/linux/platform_ppc	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/make/linux/platform_ppc	Fri Jul 19 13:32:53 2013 -0700
@@ -2,11 +2,11 @@
 
 arch = ppc
 
-arch_model = ppc
+arch_model = ppc_32
 
 os_arch = linux_ppc
 
-os_arch_model = linux_ppc
+os_arch_model = linux_ppc_32
 
 lib_arch = ppc
 
@@ -14,4 +14,4 @@
 
 gnu_dis_arch = ppc
 
-sysdefs = -DLINUX -D_GNU_SOURCE -DPPC
+sysdefs = -DLINUX -D_GNU_SOURCE -DPPC32
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/make/linux/platform_ppc64	Fri Jul 19 13:32:53 2013 -0700
@@ -0,0 +1,17 @@
+os_family = linux
+
+arch = ppc
+
+arch_model = ppc_64
+
+os_arch = linux_ppc
+
+os_arch_model = linux_ppc_64
+
+lib_arch = ppc64
+
+compiler = gcc
+
+gnu_dis_arch = ppc64
+
+sysdefs = -DLINUX -D_GNU_SOURCE -DPPC64
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -205,7 +205,7 @@
 static char cpu_arch[] = "amd64";
 #elif defined(ARM)
 static char cpu_arch[] = "arm";
-#elif defined(PPC)
+#elif defined(PPC32)
 static char cpu_arch[] = "ppc";
 #elif defined(SPARC)
 #  ifdef _LP64
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -138,7 +138,7 @@
 
 // For diagnostics to print a message once. see run_periodic_checks
 static sigset_t check_signal_done;
-static bool check_signals = true;;
+static bool check_signals = true;
 
 static pid_t _initial_pid = 0;
 
@@ -256,8 +256,10 @@
 static char cpu_arch[] = "amd64";
 #elif defined(ARM)
 static char cpu_arch[] = "arm";
-#elif defined(PPC)
+#elif defined(PPC32)
 static char cpu_arch[] = "ppc";
+#elif defined(PPC64)
+static char cpu_arch[] = "ppc64";
 #elif defined(SPARC)
 #  ifdef _LP64
 static char cpu_arch[] = "sparcv9";
@@ -4621,7 +4623,7 @@
     // the future if the appropriate cleanup code can be added to the
     // VM_Exit VMOperation's doit method.
     if (atexit(perfMemory_exit_helper) != 0) {
-      warning("os::init2 atexit(perfMemory_exit_helper) failed");
+      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
     }
   }
 
@@ -4632,8 +4634,7 @@
 }
 
 // this is called at the end of vm_initialization
-void os::init_3(void)
-{
+void os::init_3(void) {
 #ifdef JAVASE_EMBEDDED
   // Start the MemNotifyThread
   if (LowMemoryProtection) {
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -36,7 +36,7 @@
 
   // Atomically copy 64 bits of data
   static void atomic_copy64(volatile void *src, volatile void *dst) {
-#if defined(PPC) && !defined(_LP64)
+#if defined(PPC32)
     double tmp;
     asm volatile ("lfd  %0, 0(%1)\n"
                   "stfd %0, 0(%2)\n"
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -36,7 +36,7 @@
 
   // Atomically copy 64 bits of data
   static void atomic_copy64(volatile void *src, volatile void *dst) {
-#if defined(PPC) && !defined(_LP64)
+#if defined(PPC32)
     double tmp;
     asm volatile ("lfd  %0, 0(%1)\n"
                   "stfd %0, 0(%2)\n"
--- a/hotspot/src/share/vm/adlc/main.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/adlc/main.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -243,6 +243,11 @@
   AD.addInclude(AD._CPP_file, "nativeInst_arm.hpp");
   AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
 #endif
+#ifdef TARGET_ARCH_ppc
+  AD.addInclude(AD._CPP_file, "assembler_ppc.inline.hpp");
+  AD.addInclude(AD._CPP_file, "nativeInst_ppc.hpp");
+  AD.addInclude(AD._CPP_file, "vmreg_ppc.inline.hpp");
+#endif
   AD.addInclude(AD._HPP_file, "memory/allocation.hpp");
   AD.addInclude(AD._HPP_file, "opto/machnode.hpp");
   AD.addInclude(AD._HPP_file, "opto/node.hpp");
--- a/hotspot/src/share/vm/code/relocInfo.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -582,6 +582,18 @@
   _static_call = address_from_scaled_offset(unpack_1_int(), base);
 }
 
+void trampoline_stub_Relocation::pack_data_to(CodeSection* dest ) {
+  short* p = (short*) dest->locs_end();
+  CodeSection* insts = dest->outer()->insts();
+  normalize_address(_owner, insts);
+  p = pack_1_int_to(p, scaled_offset(_owner, insts->start()));
+  dest->set_locs_end((relocInfo*) p);
+}
+
+void trampoline_stub_Relocation::unpack_data() {
+  address base = binding()->section_start(CodeBuffer::SECT_INSTS);
+  _owner = address_from_scaled_offset(unpack_1_int(), base);
+}
 
 void external_word_Relocation::pack_data_to(CodeSection* dest) {
   short* p = (short*) dest->locs_end();
@@ -811,6 +823,25 @@
   return NULL;
 }
 
+// Finds the trampoline address for a call. If no trampoline stub is
+// found, NULL is returned; the caller must handle that case.
+address trampoline_stub_Relocation::get_trampoline_for(address call, nmethod* code) {
+  // There are no relocations available when the code gets relocated
+  // because of CodeBuffer expansion.
+  if (code->relocation_size() == 0)
+    return NULL;
+
+  RelocIterator iter(code, call);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::trampoline_stub_type) {
+      if (iter.trampoline_stub_reloc()->owner() == call) {
+        return iter.addr();
+      }
+    }
+  }
+
+  return NULL;
+}
 
 void static_stub_Relocation::clear_inline_cache() {
   // Call stub is only used when calling the interpreted code.
@@ -975,6 +1006,12 @@
       tty->print(" | [static_call=" INTPTR_FORMAT "]", r->static_call());
       break;
     }
+  case relocInfo::trampoline_stub_type:
+    {
+      trampoline_stub_Relocation* r = (trampoline_stub_Relocation*) reloc();
+      tty->print(" | [trampoline owner=" INTPTR_FORMAT "]", r->owner());
+      break;
+    }
   }
   tty->cr();
 }
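
For orientation: pack_data_to above stores the owner as an offset scaled from the start of the instructions section, and unpack_data reverses it, so the call/stub association survives when the nmethod moves. A backend consumer might use the lookup roughly as follows (a minimal sketch; select_branch_target is a hypothetical helper, not part of this changeset):

    // Hypothetical sketch: pick the branch target when re-patching a call.
    // If the destination is out of near-branch range, branch to the
    // trampoline stub recorded for this call site instead.
    static address select_branch_target(address call, nmethod* nm,
                                        address dest, bool dest_in_range) {
      if (dest_in_range) {
        return dest;  // a near branch reaches the destination directly
      }
      address trampoline = trampoline_stub_Relocation::get_trampoline_for(call, nm);
      assert(trampoline != NULL, "far call needs a trampoline stub");
      return trampoline;  // the stub loads the full destination and jumps
    }
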
--- a/hotspot/src/share/vm/code/relocInfo.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/code/relocInfo.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -260,8 +260,8 @@
     poll_type               = 10, // polling instruction for safepoints
     poll_return_type        = 11, // polling instruction for safepoints at return
     metadata_type           = 12, // metadata that used to be oops
-    yet_unused_type_1       = 13, // Still unused
-    yet_unused_type_2       = 14, // Still unused
+    trampoline_stub_type    = 13, // stub-entry for trampoline
+    yet_unused_type_1       = 14, // Still unused
     data_prefix_tag         = 15, // tag for a prefix (carries data arguments)
     type_mask               = 15  // A mask which selects only the above values
   };
@@ -301,6 +301,7 @@
     visitor(poll) \
     visitor(poll_return) \
     visitor(section_word) \
+    visitor(trampoline_stub) \
 
 
  public:
@@ -1150,6 +1151,43 @@
  public:
 };
 
+// Trampoline Relocations.
+// A trampoline makes it possible to emit a short branch in the code even
+// when there is a chance that the branch cannot reach all possible code
+// locations. If the relocation finds that a branch is too far for the
+// instruction in the code, it can patch the branch to jump to the
+// trampoline, where there is sufficient space for a far branch. Needed on PPC.
+class trampoline_stub_Relocation : public Relocation {
+  relocInfo::relocType type() { return relocInfo::trampoline_stub_type; }
+
+ public:
+  static RelocationHolder spec(address static_call) {
+    RelocationHolder rh = newHolder();
+    return (new (rh) trampoline_stub_Relocation(static_call));
+  }
+
+ private:
+  address _owner;    // Address of the NativeCall that owns the trampoline.
+
+  trampoline_stub_Relocation(address owner) {
+    _owner = owner;
+  }
+
+  friend class RelocIterator;
+  trampoline_stub_Relocation() { }
+
+ public:
+
+  // Return the address of the NativeCall that owns the trampoline.
+  address owner() { return _owner; }
+
+  void pack_data_to(CodeSection * dest);
+  void unpack_data();
+
+  // Find the trampoline stub for a call.
+  static address get_trampoline_for(address call, nmethod* code);
+};
+
 class external_word_Relocation : public DataRelocation {
   relocInfo::relocType type() { return relocInfo::external_word_type; }
 
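
On the emission side, the new relocation is recorded while the trampoline stub itself is generated, so the RelocIterator walk in relocInfo.cpp can later match stub to call via owner(). Sketched below, assuming HotSpot's AbstractAssembler::relocate() entry point (emit_trampoline_stub is a hypothetical name; the PPC instruction sequence is elided):

    // Hypothetical emitter sketch: tie a trampoline stub to its owning call.
    static void emit_trampoline_stub(MacroAssembler* masm, address owning_call) {
      // Record the association before emitting the stub body.
      masm->relocate(trampoline_stub_Relocation::spec(owning_call));
      // ... emit the stub: load the 64-bit destination into a register,
      // move it to the count register, and branch via bctr ...
    }
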
--- a/hotspot/src/share/vm/code/vmreg.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/code/vmreg.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -47,8 +47,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
 #endif
 
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -961,7 +961,7 @@
 // Initialize the compilation queue
 void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
   EXCEPTION_MARK;
-#if !defined(ZERO) && !defined(SHARK)
+#if !defined(ZERO) && !defined(SHARK) && !defined(PPC64)
   assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
 #endif // !ZERO && !SHARK
   if (c2_compiler_count > 0) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 
 /*
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
+#include "oops/oop.psgc.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -45,8 +45,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "interp_masm_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "interp_masm_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "interp_masm_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "interp_masm_ppc_64.hpp"
 #endif
 
 // This file contains the platform-independent parts
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -30,12 +30,13 @@
 #include "interpreter/bytecodeInterpreter.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
-#include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/methodCounters.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/biasedLocking.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -197,6 +198,10 @@
               !THREAD->pop_frame_in_process()) {                                 \
             goto handle_Pop_Frame;                                               \
           }                                                                      \
+          if (THREAD->jvmti_thread_state() &&                                    \
+              THREAD->jvmti_thread_state()->is_earlyret_pending()) {             \
+            goto handle_Early_Return;                                            \
+          }                                                                      \
           opcode = *pc;                                                          \
         }                                                                        \
       }                                                                          \
@@ -411,21 +416,25 @@
         CACHE_LOCALS();
 
 // Call the VM don't check for pending exceptions
-#define CALL_VM_NOCHECK(func)                                     \
-          DECACHE_STATE();                                        \
-          SET_LAST_JAVA_FRAME();                                  \
-          func;                                                   \
-          RESET_LAST_JAVA_FRAME();                                \
-          CACHE_STATE();                                          \
-          if (THREAD->pop_frame_pending() &&                      \
-              !THREAD->pop_frame_in_process()) {                  \
-            goto handle_Pop_Frame;                                \
-          }
+#define CALL_VM_NOCHECK(func)                                      \
+        DECACHE_STATE();                                           \
+        SET_LAST_JAVA_FRAME();                                     \
+        func;                                                      \
+        RESET_LAST_JAVA_FRAME();                                   \
+        CACHE_STATE();                                             \
+        if (THREAD->pop_frame_pending() &&                         \
+            !THREAD->pop_frame_in_process()) {                     \
+          goto handle_Pop_Frame;                                   \
+        }                                                          \
+        if (THREAD->jvmti_thread_state() &&                        \
+            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
+          goto handle_Early_Return;                                \
+        }
 
 // Call the VM and check for pending exceptions
-#define CALL_VM(func, label) {                                    \
-          CALL_VM_NOCHECK(func);                                  \
-          if (THREAD->has_pending_exception()) goto label;        \
+#define CALL_VM(func, label) {                                     \
+          CALL_VM_NOCHECK(func);                                   \
+          if (THREAD->has_pending_exception()) goto label;         \
         }
 
 /*
@@ -502,8 +511,6 @@
   interpreterState orig = istate;
 #endif
 
-  static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier
-
   register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
   register address          pc = istate->bcp();
   register jubyte opcode;
@@ -511,12 +518,9 @@
   register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
 #ifdef LOTS_OF_REGS
   register JavaThread*      THREAD = istate->thread();
-  register volatile jbyte*  BYTE_MAP_BASE = _byte_map_base;
 #else
 #undef THREAD
 #define THREAD istate->thread()
-#undef BYTE_MAP_BASE
-#define BYTE_MAP_BASE _byte_map_base
 #endif
 
 #ifdef USELABELS
@@ -629,9 +633,6 @@
 #ifdef VM_JVMTI
       _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
 #endif
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
-      _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
       return;
     }
     break;
@@ -679,114 +680,97 @@
 
       // lock method if synchronized
       if (METHOD->is_synchronized()) {
-          // oop rcvr = locals[0].j.r;
-          oop rcvr;
-          if (METHOD->is_static()) {
-            rcvr = METHOD->constants()->pool_holder()->java_mirror();
-          } else {
-            rcvr = LOCALS_OBJECT(0);
-            VERIFY_OOP(rcvr);
-          }
-          // The initial monitor is ours for the taking
-          BasicObjectLock* mon = &istate->monitor_base()[-1];
-          oop monobj = mon->obj();
-          assert(mon->obj() == rcvr, "method monitor mis-initialized");
-
-          bool success = UseBiasedLocking;
-          if (UseBiasedLocking) {
-            markOop mark = rcvr->mark();
-            if (mark->has_bias_pattern()) {
-              // The bias pattern is present in the object's header. Need to check
-              // whether the bias owner and the epoch are both still current.
-              intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark;
-              xx = (intptr_t) rcvr->klass()->prototype_header() ^ xx;
-              intptr_t yy = (xx & ~((int) markOopDesc::age_mask_in_place));
-              if (yy != 0 ) {
-                // At this point we know that the header has the bias pattern and
-                // that we are not the bias owner in the current epoch. We need to
-                // figure out more details about the state of the header in order to
-                // know what operations can be legally performed on the object's
-                // header.
-
-                // If the low three bits in the xor result aren't clear, that means
-                // the prototype header is no longer biased and we have to revoke
-                // the bias on this object.
-
-                if (yy & markOopDesc::biased_lock_mask_in_place == 0 ) {
-                  // Biasing is still enabled for this data type. See whether the
-                  // epoch of the current bias is still valid, meaning that the epoch
-                  // bits of the mark word are equal to the epoch bits of the
-                  // prototype header. (Note that the prototype header's epoch bits
-                  // only change at a safepoint.) If not, attempt to rebias the object
-                  // toward the current thread. Note that we must be absolutely sure
-                  // that the current epoch is invalid in order to do this because
-                  // otherwise the manipulations it performs on the mark word are
-                  // illegal.
-                  if (yy & markOopDesc::epoch_mask_in_place == 0) {
-                    // The epoch of the current bias is still valid but we know nothing
-                    // about the owner; it might be set or it might be clear. Try to
-                    // acquire the bias of the object using an atomic operation. If this
-                    // fails we will go in to the runtime to revoke the object's bias.
-                    // Note that we first construct the presumed unbiased header so we
-                    // don't accidentally blow away another thread's valid bias.
-                    intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place |
-                                                           markOopDesc::age_mask_in_place |
-                                                           markOopDesc::epoch_mask_in_place);
-                    if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) {
-                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
-                    }
-                  } else {
-                    try_rebias:
-                    // At this point we know the epoch has expired, meaning that the
-                    // current "bias owner", if any, is actually invalid. Under these
-                    // circumstances _only_, we are allowed to use the current header's
-                    // value as the comparison value when doing the cas to acquire the
-                    // bias in the current epoch. In other words, we allow transfer of
-                    // the bias from one thread to another directly in this situation.
-                    xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
-                    if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->prototype_header(),
-                                            (intptr_t*) rcvr->mark_addr(),
-                                            (intptr_t) mark) != (intptr_t) mark) {
-                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
-                    }
-                  }
-                } else {
-                  try_revoke_bias:
-                  // The prototype mark in the klass doesn't have the bias bit set any
-                  // more, indicating that objects of this data type are not supposed
-                  // to be biased any more. We are going to try to reset the mark of
-                  // this object to the prototype value and fall through to the
-                  // CAS-based locking scheme. Note that if our CAS fails, it means
-                  // that another thread raced us for the privilege of revoking the
-                  // bias of this particular object, so it's okay to continue in the
-                  // normal locking code.
-                  //
-                  xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
-                  if (Atomic::cmpxchg_ptr(rcvr->klass()->prototype_header(),
-                                          (intptr_t*) rcvr->mark_addr(),
-                                          mark) == mark) {
-                    // (*counters->revoked_lock_entry_count_addr())++;
-                  success = false;
-                  }
-                }
+        // oop rcvr = locals[0].j.r;
+        oop rcvr;
+        if (METHOD->is_static()) {
+          rcvr = METHOD->constants()->pool_holder()->java_mirror();
+        } else {
+          rcvr = LOCALS_OBJECT(0);
+          VERIFY_OOP(rcvr);
+        }
+        // The initial monitor is ours for the taking
+        // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
+        BasicObjectLock* mon = &istate->monitor_base()[-1];
+        mon->set_obj(rcvr);
+        bool success = false;
+        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+        markOop mark = rcvr->mark();
+        intptr_t hash = (intptr_t) markOopDesc::no_hash;
+        // Implies UseBiasedLocking.
+        if (mark->has_bias_pattern()) {
+          uintptr_t thread_ident;
+          uintptr_t anticipated_bias_locking_value;
+          thread_ident = (uintptr_t)istate->thread();
+          anticipated_bias_locking_value =
+            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
+            ~((uintptr_t) markOopDesc::age_mask_in_place);
+
+          if (anticipated_bias_locking_value == 0) {
+            // Already biased towards this thread, nothing to do.
+            if (PrintBiasedLockingStatistics) {
+              (* BiasedLocking::biased_lock_entry_count_addr())++;
+            }
+            success = true;
+          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+            // Try to revoke bias.
+            markOop header = rcvr->klass()->prototype_header();
+            if (hash != markOopDesc::no_hash) {
+              header = header->copy_set_hash(hash);
+            }
+            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
+              if (PrintBiasedLockingStatistics)
+                (*BiasedLocking::revoked_lock_entry_count_addr())++;
+            }
+          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
+            // Try to rebias.
+            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
+            if (hash != markOopDesc::no_hash) {
+              new_header = new_header->copy_set_hash(hash);
+            }
+            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
+              if (PrintBiasedLockingStatistics) {
+                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
             } else {
-              cas_label:
-              success = false;
+              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
+            }
+            success = true;
+          } else {
+            // Try to bias towards thread in case object is anonymously biased.
+            markOop header = (markOop) ((uintptr_t) mark &
+                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
+                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
+            if (hash != markOopDesc::no_hash) {
+              header = header->copy_set_hash(hash);
+            }
+            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+            // Debugging hint.
+            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
+              if (PrintBiasedLockingStatistics) {
+                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
+              }
+            } else {
+              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
+            }
+            success = true;
+          }
+        }
+
+        // Traditional lightweight locking.
+        if (!success) {
+          markOop displaced = rcvr->mark()->set_unlocked();
+          mon->lock()->set_displaced_header(displaced);
+          bool call_vm = UseHeavyMonitors;
+          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
+            // Is it simple recursive case?
+            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+              mon->lock()->set_displaced_header(NULL);
+            } else {
+              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
             }
           }
-          if (!success) {
-            markOop displaced = rcvr->mark()->set_unlocked();
-            mon->lock()->set_displaced_header(displaced);
-            if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
-              // Is it simple recursive case?
-              if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
-                mon->lock()->set_displaced_header(NULL);
-              } else {
-                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
-              }
-            }
-          }
+        }
       }
       THREAD->clr_do_not_unlock();
 
@@ -808,7 +792,6 @@
     case popping_frame: {
       // returned from a java call to pop the frame, restart the call
       // clear the message so we don't confuse ourselves later
-      ShouldNotReachHere();  // we don't return this.
       assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
       istate->set_msg(no_request);
       THREAD->clr_pop_frame_in_process();
@@ -836,6 +819,10 @@
       if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
         goto handle_Pop_Frame;
       }
+      if (THREAD->jvmti_thread_state() &&
+          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
+        goto handle_Early_Return;
+      }
 
       if (THREAD->has_pending_exception()) goto handle_exception;
       // Update the pc by the saved amount of the invoke bytecode size
@@ -881,15 +868,84 @@
       BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
       assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
       entry->set_obj(lockee);
-
-      markOop displaced = lockee->mark()->set_unlocked();
-      entry->lock()->set_displaced_header(displaced);
-      if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
-        // Is it simple recursive case?
-        if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
-          entry->lock()->set_displaced_header(NULL);
+      bool success = false;
+      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+
+      markOop mark = lockee->mark();
+      intptr_t hash = (intptr_t) markOopDesc::no_hash;
+      // implies UseBiasedLocking
+      if (mark->has_bias_pattern()) {
+        uintptr_t thread_ident;
+        uintptr_t anticipated_bias_locking_value;
+        thread_ident = (uintptr_t)istate->thread();
+        anticipated_bias_locking_value =
+          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
+          ~((uintptr_t) markOopDesc::age_mask_in_place);
+
+        if  (anticipated_bias_locking_value == 0) {
+          // already biased towards this thread, nothing to do
+          if (PrintBiasedLockingStatistics) {
+            (* BiasedLocking::biased_lock_entry_count_addr())++;
+          }
+          success = true;
+        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+          // try revoke bias
+          markOop header = lockee->klass()->prototype_header();
+          if (hash != markOopDesc::no_hash) {
+            header = header->copy_set_hash(hash);
+          }
+          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+            if (PrintBiasedLockingStatistics) {
+              (*BiasedLocking::revoked_lock_entry_count_addr())++;
+            }
+          }
+        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
+          // try rebias
+          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
+          if (hash != markOopDesc::no_hash) {
+                new_header = new_header->copy_set_hash(hash);
+          }
+          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+            if (PrintBiasedLockingStatistics) {
+              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
+            }
+          } else {
+            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          }
+          success = true;
         } else {
-          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          // try to bias towards thread in case object is anonymously biased
+          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
+                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
+          if (hash != markOopDesc::no_hash) {
+            header = header->copy_set_hash(hash);
+          }
+          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+          // debugging hint
+          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+            if (PrintBiasedLockingStatistics) {
+              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
+            }
+          } else {
+            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          }
+          success = true;
+        }
+      }
+
+      // traditional lightweight locking
+      if (!success) {
+        markOop displaced = lockee->mark()->set_unlocked();
+        entry->lock()->set_displaced_header(displaced);
+        bool call_vm = UseHeavyMonitors;
+        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+          // Is it simple recursive case?
+          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+            entry->lock()->set_displaced_header(NULL);
+          } else {
+            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          }
         }
       }
       UPDATE_PC_AND_TOS(1, -1);
@@ -1600,8 +1656,11 @@
           ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
       CASE(_faload):
           ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
-      CASE(_aaload):
-          ARRAY_LOADTO32(T_OBJECT, oop,   INTPTR_FORMAT, STACK_OBJECT, 0);
+      CASE(_aaload): {
+          ARRAY_INTRO(-2);
+          SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
+          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+      }
       CASE(_baload):
           ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
       CASE(_caload):
@@ -1655,11 +1714,7 @@
               VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
             }
           }
-          oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
-          // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
-          *elem_loc = rhsObject;
-          // Mark the card
-          OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
+          ((objArrayOopDesc *) arrObj)->obj_at_put(index, rhsObject);
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
       }
       CASE(_bastore):
@@ -1700,14 +1755,87 @@
         }
         if (entry != NULL) {
           entry->set_obj(lockee);
-          markOop displaced = lockee->mark()->set_unlocked();
-          entry->lock()->set_displaced_header(displaced);
-          if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
-            // Is it simple recursive case?
-            if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
-              entry->lock()->set_displaced_header(NULL);
-            } else {
-              CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          int success = false;
+          uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+
+          markOop mark = lockee->mark();
+          intptr_t hash = (intptr_t) markOopDesc::no_hash;
+          // implies UseBiasedLocking
+          if (mark->has_bias_pattern()) {
+            uintptr_t thread_ident;
+            uintptr_t anticipated_bias_locking_value;
+            thread_ident = (uintptr_t)istate->thread();
+            anticipated_bias_locking_value =
+              (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
+              ~((uintptr_t) markOopDesc::age_mask_in_place);
+
+            if  (anticipated_bias_locking_value == 0) {
+              // already biased towards this thread, nothing to do
+              if (PrintBiasedLockingStatistics) {
+                (* BiasedLocking::biased_lock_entry_count_addr())++;
+              }
+              success = true;
+            }
+            else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+              // try revoke bias
+              markOop header = lockee->klass()->prototype_header();
+              if (hash != markOopDesc::no_hash) {
+                header = header->copy_set_hash(hash);
+              }
+              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+                if (PrintBiasedLockingStatistics)
+                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
+              }
+            }
+            else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
+              // try rebias
+              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
+              if (hash != markOopDesc::no_hash) {
+                new_header = new_header->copy_set_hash(hash);
+              }
+              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+                if (PrintBiasedLockingStatistics)
+                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
+              }
+              else {
+                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+              }
+              success = true;
+            }
+            else {
+              // try to bias towards thread in case object is anonymously biased
+              markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
+                                                              (uintptr_t)markOopDesc::age_mask_in_place |
+                                                              epoch_mask_in_place));
+              if (hash != markOopDesc::no_hash) {
+                header = header->copy_set_hash(hash);
+              }
+              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+              // debugging hint
+              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+                if (PrintBiasedLockingStatistics)
+                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
+              }
+              else {
+                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+              }
+              success = true;
+            }
+          }
+
+          // traditional lightweight locking
+          if (!success) {
+            markOop displaced = lockee->mark()->set_unlocked();
+            entry->lock()->set_displaced_header(displaced);
+            bool call_vm = UseHeavyMonitors;
+            if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+              // Is it simple recursive case?
+              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+                entry->lock()->set_displaced_header(NULL);
+              } else {
+                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+              }
             }
           }
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
@@ -1729,12 +1857,15 @@
             BasicLock* lock = most_recent->lock();
             markOop header = lock->displaced_header();
             most_recent->set_obj(NULL);
-            // If it isn't recursive we either must swap old header or call the runtime
-            if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
-                // restore object for the slow case
-                most_recent->set_obj(lockee);
-                CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
+            if (!lockee->mark()->has_bias_pattern()) {
+              bool call_vm = UseHeavyMonitors;
+              // If it isn't recursive we either must swap old header or call the runtime
+              if (header != NULL || call_vm) {
+                if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+                  // restore object for the slow case
+                  most_recent->set_obj(lockee);
+                  CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
+                }
               }
             }
             UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
@@ -1923,7 +2054,6 @@
             } else if (tos_type == atos) {
               VERIFY_OOP(STACK_OBJECT(-1));
               obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
-              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
             } else if (tos_type == btos) {
               obj->release_byte_field_put(field_offset, STACK_INT(-1));
             } else if (tos_type == ltos) {
@@ -1944,7 +2074,6 @@
             } else if (tos_type == atos) {
               VERIFY_OOP(STACK_OBJECT(-1));
               obj->obj_field_put(field_offset, STACK_OBJECT(-1));
-              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
             } else if (tos_type == btos) {
               obj->byte_field_put(field_offset, STACK_INT(-1));
             } else if (tos_type == ltos) {
@@ -2591,32 +2720,81 @@
     // No handler in this activation, unwind and try again
     THREAD->set_pending_exception(except_oop(), NULL, 0);
     goto handle_return;
-  }  /* handle_exception: */
-
-
+  }  // handle_exception:
 
   // Return from an interpreter invocation with the result of the interpretation
   // on the top of the Java Stack (or a pending exception)
 
-handle_Pop_Frame:
-
-  // We don't really do anything special here except we must be aware
-  // that we can get here without ever locking the method (if sync).
-  // Also we skip the notification of the exit.
-
-  istate->set_msg(popping_frame);
-  // Clear pending so while the pop is in process
-  // we don't start another one if a call_vm is done.
-  THREAD->clr_pop_frame_pending();
-  // Let interpreter (only) see the we're in the process of popping a frame
-  THREAD->set_pop_frame_in_process();
-
-handle_return:
-  {
+  handle_Pop_Frame: {
+
+    // We don't really do anything special here except we must be aware
+    // that we can get here without ever locking the method (if sync).
+    // Also we skip the notification of the exit.
+
+    istate->set_msg(popping_frame);
+    // Clear pending so while the pop is in process
+    // we don't start another one if a call_vm is done.
+    THREAD->clr_pop_frame_pending();
+    // Let the interpreter (only) see that we're in the process of popping a frame
+    THREAD->set_pop_frame_in_process();
+
+    goto handle_return;
+
+  } // handle_Pop_Frame
+
+  // ForceEarlyReturn ends a method, and returns to the caller with a return value
+  // given by the invoker of the early return.
+  handle_Early_Return: {
+
+    istate->set_msg(early_return);
+
+    // Clear expression stack.
+    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
+
+    JvmtiThreadState *ts = THREAD->jvmti_thread_state();
+
+    // Push the value to be returned.
+    switch (istate->method()->result_type()) {
+      case T_BOOLEAN:
+      case T_SHORT:
+      case T_BYTE:
+      case T_CHAR:
+      case T_INT:
+        SET_STACK_INT(ts->earlyret_value().i, 0);
+        MORE_STACK(1);
+        break;
+      case T_LONG:
+        SET_STACK_LONG(ts->earlyret_value().j, 1);
+        MORE_STACK(2);
+        break;
+      case T_FLOAT:
+        SET_STACK_FLOAT(ts->earlyret_value().f, 0);
+        MORE_STACK(1);
+        break;
+      case T_DOUBLE:
+        SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
+        MORE_STACK(2);
+        break;
+      case T_ARRAY:
+      case T_OBJECT:
+        SET_STACK_OBJECT(ts->earlyret_oop(), 0);
+        MORE_STACK(1);
+        break;
+    }
+
+    ts->clr_earlyret_value();
+    ts->set_earlyret_oop(NULL);
+    ts->clr_earlyret_pending();
+
+    // Fall through to handle_return.
+
+  } // handle_Early_Return
+
+  handle_return: {
     DECACHE_STATE();
 
-    bool suppress_error = istate->msg() == popping_frame;
-    bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
+    bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
+    bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
     Handle original_exception(THREAD, THREAD->pending_exception());
     Handle illegal_state_oop(THREAD, NULL);
 
@@ -2677,15 +2855,18 @@
           BasicLock* lock = end->lock();
           markOop header = lock->displaced_header();
           end->set_obj(NULL);
-          // If it isn't recursive we either must swap old header or call the runtime
-          if (header != NULL) {
-            if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
-              // restore object for the slow case
-              end->set_obj(lockee);
-              {
-                // Prevent any HandleMarkCleaner from freeing our live handles
-                HandleMark __hm(THREAD);
-                CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
+
+          if (!lockee->mark()->has_bias_pattern()) {
+            // If it isn't recursive we either must swap old header or call the runtime
+            if (header != NULL) {
+              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+                // restore object for the slow case
+                end->set_obj(lockee);
+                {
+                  // Prevent any HandleMarkCleaner from freeing our live handles
+                  HandleMark __hm(THREAD);
+                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
+                }
               }
             }
           }
@@ -2734,23 +2915,37 @@
               illegal_state_oop = THREAD->pending_exception();
               THREAD->clear_pending_exception();
             }
+          } else if (UseHeavyMonitors) {
+            {
+              // Prevent any HandleMarkCleaner from freeing our live handles.
+              HandleMark __hm(THREAD);
+              CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
+            }
+            if (THREAD->has_pending_exception()) {
+              if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
+              THREAD->clear_pending_exception();
+            }
           } else {
             BasicLock* lock = base->lock();
             markOop header = lock->displaced_header();
             base->set_obj(NULL);
-            // If it isn't recursive we either must swap old header or call the runtime
-            if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
-                // restore object for the slow case
-                base->set_obj(rcvr);
-                {
-                  // Prevent any HandleMarkCleaner from freeing our live handles
-                  HandleMark __hm(THREAD);
-                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
-                }
-                if (THREAD->has_pending_exception()) {
-                  if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
-                  THREAD->clear_pending_exception();
+
+            if (!rcvr->mark()->has_bias_pattern()) {
+              // If it isn't recursive we either must swap old header or call the runtime
+              if (header != NULL) {
+                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+                  // restore object for the slow case
+                  base->set_obj(rcvr);
+                  {
+                    // Prevent any HandleMarkCleaner from freeing our live handles
+                    HandleMark __hm(THREAD);
+                    CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
+                  }
+                  if (THREAD->has_pending_exception()) {
+                    if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
+                    THREAD->clear_pending_exception();
+                  }
                 }
               }
             }
@@ -2758,6 +2953,8 @@
         }
       }
     }
+    // Clear the do_not_unlock flag now.
+    THREAD->clr_do_not_unlock();
 
     //
     // Notify jvmti/jvmdi
@@ -2810,7 +3007,6 @@
         THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
       else
         THREAD->set_pending_exception(original_exception(), NULL, 0);
-      istate->set_return_kind((Bytecodes::Code)opcode);
       UPDATE_PC_AND_RETURN(0);
     }
 
@@ -2829,13 +3025,12 @@
                                 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
         THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
       }
-      THREAD->clr_pop_frame_in_process();
+    } else {
+      istate->set_msg(return_from_method);
     }
 
     // Normal return
     // Advance the pc and return to frame manager
-    istate->set_msg(return_from_method);
-    istate->set_return_kind((Bytecodes::Code)opcode);
     UPDATE_PC_AND_RETURN(1);
   } /* handle_return: */
 
@@ -3110,7 +3305,6 @@
   tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
   tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
   tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
-  tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind);
   tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
   tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
   tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
@@ -3129,9 +3323,9 @@
 }
 
 extern "C" {
-    void PI(uintptr_t arg) {
-        ((BytecodeInterpreter*)arg)->print();
-    }
+  void PI(uintptr_t arg) {
+    ((BytecodeInterpreter*)arg)->print();
+  }
 }
 #endif // PRODUCT
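For context on the early_return machinery introduced above: it services JVMTI's ForceEarlyReturn family, which an agent holding the can_force_early_return capability can invoke. A minimal, hypothetical agent snippet (not part of this change) showing the call that ultimately drives the interpreter into handle_Early_Return:

#include <jvmti.h>

// Hypothetical agent helper: force the topmost frame of 'thread' to return
// the int value 42. Requires the can_force_early_return capability; the
// interpreter then pushes ts->earlyret_value() and unwinds via handle_return.
static jvmtiError force_int_return(jvmtiEnv* jvmti, jthread thread) {
  return jvmti->ForceEarlyReturnInt(thread, 42);
}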
 
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -66,27 +66,26 @@
 typedef class BytecodeInterpreter* interpreterState;
 
 struct call_message {
-    class Method* _callee;    /* method to call during call_method request */
-    address   _callee_entry_point;   /* address to jump to for call_method request */
-    int       _bcp_advance;          /* size of the invoke bytecode operation */
+  class Method* _callee;           // method to call during call_method request
+  address _callee_entry_point;     // address to jump to for call_method request
+  int _bcp_advance;                // size of the invoke bytecode operation
 };
 
 struct osr_message {
-    address _osr_buf;                 /* the osr buffer */
-    address _osr_entry;               /* the entry to the osr method */
+  address _osr_buf;                 // the osr buffer
+  address _osr_entry;               // the entry to the osr method
 };
 
 struct osr_result {
-  nmethod* nm;                       /* osr nmethod */
-  address return_addr;               /* osr blob return address */
+  nmethod* nm;                      // osr nmethod
+  address return_addr;              // osr blob return address
 };
 
 // Result returned to frame manager
 union frame_manager_message {
-    call_message _to_call;            /* describes callee */
-    Bytecodes::Code _return_kind;     /* i_return, a_return, ... */
-    osr_message _osr;                 /* describes the osr */
-    osr_result _osr_result;           /* result of OSR request */
+  call_message _to_call;            // describes callee
+  osr_message _osr;                 // describes the osr
+  osr_result _osr_result;           // result of OSR request
 };
 
 class BytecodeInterpreter : StackObj {
@@ -115,7 +114,8 @@
          more_monitors,             // need a new monitor
          throwing_exception,        // unwind stack and rethrow
          popping_frame,             // unwind call and retry call
-         do_osr                     // request this invocation be OSR's
+         do_osr,                    // request this invocation be OSR'd
+         early_return               // early return as requested by JVMTI
     };
 
 private:
@@ -216,8 +216,6 @@
 inline int bcp_advance() { return _result._to_call._bcp_advance; }
 inline void set_bcp_advance(int count) { _result._to_call._bcp_advance = count; }
 
-inline void set_return_kind(Bytecodes::Code kind) { _result._return_kind = kind; }
-
 inline interpreterState prev() { return _prev_link; }
 
 inline intptr_t* stack() { return _stack; }
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -43,8 +43,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "interp_masm_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "interp_masm_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "interp_masm_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "interp_masm_ppc_64.hpp"
 #endif
 
 #ifndef CC_INTERP
@@ -373,8 +376,8 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "templateTable_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "templateTable_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "templateTable_ppc_32.hpp"
 #endif
 
 };
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -35,6 +35,9 @@
 #ifdef TARGET_ARCH_arm
 # include "c2_globals_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_ppc
+# include "c2_globals_ppc.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "c2_globals_linux.hpp"
 #endif
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -40,8 +40,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 
--- a/hotspot/src/share/vm/opto/compile.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -80,8 +80,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 
--- a/hotspot/src/share/vm/opto/gcm.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -50,9 +50,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
+#endif
 
 // Portions of code courtesy of Clifford Click
 
--- a/hotspot/src/share/vm/opto/generateOptoStub.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -214,7 +214,7 @@
 #if defined(SPARC)
   store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
 #endif /* defined(SPARC) */
-#ifdef IA64
+#if (defined(IA64) && !defined(AIX))
   Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
   if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
   store_to_memory(NULL, adr_last_Java_fp,    null(),    T_ADDRESS, NoAlias);
--- a/hotspot/src/share/vm/opto/lcm.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -45,8 +45,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 // Optimization - Graph Style
--- a/hotspot/src/share/vm/opto/locknode.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/locknode.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -43,8 +43,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 //------------------------------BoxLockNode------------------------------------
--- a/hotspot/src/share/vm/opto/matcher.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/matcher.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -53,8 +53,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
--- a/hotspot/src/share/vm/opto/output.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -1065,7 +1065,7 @@
   // Compute prolog code size
   _method_size = 0;
   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
-#ifdef IA64
+#if defined(IA64) && !defined(AIX)
   if (save_argument_registers()) {
     // 4815101: this is a stub with implicit and unknown precision fp args.
     // The usual spill mechanism can only generate stfd's in this case, which
--- a/hotspot/src/share/vm/opto/output.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/output.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -42,8 +42,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 class Arena;
--- a/hotspot/src/share/vm/opto/regmask.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/regmask.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -40,8 +40,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 #define RM_SIZE _RM_SIZE /* a constant private to the class RegMask */
--- a/hotspot/src/share/vm/opto/regmask.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/regmask.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -43,8 +43,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
 
 // Some fun naming (textual) substitutions:
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -83,8 +83,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 
@@ -999,7 +1002,7 @@
   nm = CodeCache::find_nmethod(pc);
   assert(nm != NULL, "No NMethod found");
   if (nm->is_native_method()) {
-    fatal("Native mathod should not have path to exception handling");
+    fatal("Native method should not have path to exception handling");
   } else {
     // we are switching to old paradigm: search for exception handler in caller_frame
     // instead in exception handler of caller_frame.sender()
@@ -1028,7 +1031,7 @@
     }
 
     // If we are forcing an unwind because of stack overflow then deopt is
-    // irrelevant sice we are throwing the frame away anyway.
+    // irrelevant since we are throwing the frame away anyway.
 
     if (deopting && !force_unwind) {
       handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
@@ -1071,7 +1074,7 @@
 // Note we enter without the usual JRT wrapper. We will call a helper routine that
 // will do the normal VM entry. We do it this way so that we can see if the nmethod
 // we looked up the handler for has been deoptimized in the meantime. If it has been
-// we must not use the handler and instread return the deopt blob.
+// we must not use the handler and instead return the deopt blob.
 address OptoRuntime::handle_exception_C(JavaThread* thread) {
 //
 // We are in Java not VM and in debug mode we have a NoHandleMark
--- a/hotspot/src/share/vm/prims/forte.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/prims/forte.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -70,7 +70,7 @@
 // Native interfaces for use by Forte tools.
 
 
-#ifndef IA64
+#if !defined(IA64) && !defined(PPC64)
 
 class vframeStreamForte : public vframeStreamCommon {
  public:
@@ -624,16 +624,16 @@
 #endif // !_WINDOWS
 
 } // end extern "C"
-#endif // !IA64
+#endif // !IA64 && !PPC64
 
 void Forte::register_stub(const char* name, address start, address end) {
-#if !defined(_WINDOWS) && !defined(IA64)
+#if !defined(_WINDOWS) && !defined(IA64) && !defined(PPC64)
   assert(pointer_delta(end, start, sizeof(jbyte)) < INT_MAX,
          "Code size exceeds maximum range");
 
   collector_func_load((char*)name, NULL, NULL, start,
     pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
-#endif // !_WINDOWS && !IA64
+#endif // !_WINDOWS && !IA64 && !PPC64
 }
 
 #else // INCLUDE_JVMTI
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -81,10 +81,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
+#endif // COMPILER2
 
 bool DeoptimizationMarker::_is_active = false;
 
--- a/hotspot/src/share/vm/runtime/frame.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/frame.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -918,7 +918,7 @@
     cld_f->do_cld(m->method_holder()->class_loader_data());
   }
 
-#if !defined(PPC) || defined(ZERO)
+#if !defined(PPC32) || defined(ZERO)
   if (m->is_native()) {
 #ifdef CC_INTERP
     interpreterState istate = get_interpreterState();
@@ -927,11 +927,11 @@
     f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
 #endif /* CC_INTERP */
   }
-#else // PPC
+#else // PPC32
   if (m->is_native() && m->is_static()) {
     f->do_oop(interpreter_frame_mirror_addr());
   }
-#endif // PPC
+#endif // PPC32
 
   int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
 
--- a/hotspot/src/share/vm/runtime/frame.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/frame.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -46,10 +46,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
+#endif // COMPILER2
 #ifdef ZERO
 #ifdef TARGET_ARCH_zero
 # include "stack_zero.hpp"
@@ -347,7 +350,7 @@
   void interpreter_frame_set_method(Method* method);
   Method** interpreter_frame_method_addr() const;
   ConstantPoolCache** interpreter_frame_cache_addr() const;
-#ifdef PPC
+#ifdef PPC32
   oop* interpreter_frame_mirror_addr() const;
 #endif
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -167,7 +167,6 @@
 define_pd_global(intx, OnStackReplacePercentage,     0);
 define_pd_global(bool, ResizeTLAB,                   false);
 define_pd_global(intx, FreqInlineSize,               0);
-define_pd_global(intx, InlineSmallCode,              0);
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
 define_pd_global(intx, InlineClassNatives,           true);
 define_pd_global(intx, InlineUnsafeOps,              true);
@@ -3152,7 +3151,8 @@
           "disable this feature")                                           \
                                                                             \
   /* code cache parameters */                                               \
-  develop(uintx, CodeCacheSegmentSize, 64,                                  \
+  /* ppc64 has large code-entry alignment. */                               \
+  develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64),                  \
           "Code cache segment size (in bytes) - smallest unit of "          \
           "allocation")                                                     \
                                                                             \
@@ -3617,7 +3617,7 @@
           NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)),                           \
           "Address to allocate shared memory region for class data")        \
                                                                             \
-  diagnostic(bool, EnableInvokeDynamic, true,                               \
+  diagnostic(bool, EnableInvokeDynamic, true PPC64_ONLY(&& false),          \
           "support JSR 292 (method handles, invokedynamic, "                \
           "anonymous classes")                                              \
                                                                             \
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -54,7 +54,7 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-#if defined(__GNUC__) && !defined(IA64)
+#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define ATTR __attribute__((noinline))
 #else
--- a/hotspot/src/share/vm/runtime/os.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -1020,7 +1020,7 @@
 // if C stack is walkable beyond current frame. The check for fp() is not
 // necessary on Sparc, but it's harmless.
 bool os::is_first_C_frame(frame* fr) {
-#if defined(IA64) && !defined(_WIN32)
+#if (defined(IA64) && !defined(AIX)) && !defined(_WIN32)
   // On IA64 we have to check if the callers bsp is still valid
   // (i.e. within the register stack bounds).
   // Notice: this only works for threads created by the VM and only if
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -406,7 +406,7 @@
 
 #endif
 
-#if defined(__SOFTFP__) || defined(PPC)
+#if defined(__SOFTFP__) || defined(PPC32)
 double SharedRuntime::dsqrt(double f) {
   return sqrt(f);
 }
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -140,7 +140,7 @@
   static double dabs(double f);
 #endif
 
-#if defined(__SOFTFP__) || defined(PPC)
+#if defined(__SOFTFP__) || defined(PPC32)
   static double dsqrt(double f);
 #endif
 
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -114,8 +114,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "stubRoutines_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "stubRoutines_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "stubRoutines_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "stubRoutines_ppc_64.hpp"
 #endif
 
 
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -53,7 +53,7 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-#if defined(__GNUC__)
+#if defined(__GNUC__) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define ATTR __attribute__((noinline))
 #else
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -311,6 +311,9 @@
 void Thread::record_stack_base_and_size() {
   set_stack_base(os::current_stack_base());
   set_stack_size(os::current_stack_size());
+  if (is_Java_thread()) {
+    ((JavaThread*) this)->set_stack_overflow_limit();
+  }
   // CR 7190089: on Solaris, primordial thread's stack is adjusted
   // in initialize_thread(). Without the adjustment, stack size is
   // incorrect if stack is set to unlimited (ulimit -s unlimited).
--- a/hotspot/src/share/vm/runtime/thread.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -893,7 +893,11 @@
 
  private:
 
-  StackGuardState        _stack_guard_state;
+  StackGuardState  _stack_guard_state;
+
+  // Precomputed limit of the stack, used in stack overflow checks.
+  // Loading it from this field keeps the stack overflow check in assembly simple.
+  address          _stack_overflow_limit;
 
   // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
   // used to temp. parsing values into and out of the runtime system during exception handling for compiled
@@ -1301,6 +1305,14 @@
   // and reguard if possible.
   bool reguard_stack(void);
 
+  address stack_overflow_limit() { return _stack_overflow_limit; }
+  void set_stack_overflow_limit() {
+    _stack_overflow_limit = _stack_base - _stack_size +
+                            ((StackShadowPages +
+                              StackYellowPages +
+                              StackRedPages) * os::vm_page_size());
+  }
+
   // Misc. accessors/mutators
   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
@@ -1335,6 +1347,7 @@
   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
+  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
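To make the new field concrete: the limit sits (red + yellow + shadow) guard pages above the low end of the stack, so generated code can test for overflow with one load and one compare instead of recomputing the bound each time. A self-contained sketch of the arithmetic and the resulting check (simplified model, not HotSpot code):

#include <cstddef>

typedef unsigned char* address;

// Model of set_stack_overflow_limit(): the stack grows down, so its low end
// is base - size, and the usable limit sits the guard area above that.
address compute_stack_overflow_limit(address stack_base, size_t stack_size,
                                     size_t shadow_pages, size_t yellow_pages,
                                     size_t red_pages, size_t page_size) {
  return stack_base - stack_size +
         (shadow_pages + yellow_pages + red_pages) * page_size;
}

// The stack bang in generated code then degenerates to a single compare
// against the cached limit (loaded via stack_overflow_limit_offset()).
inline bool stack_has_room(address proposed_sp, address limit) {
  return proposed_sp > limit;
}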
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -198,10 +198,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
+#endif // COMPILER2
 
 // Note: the cross-product of (c1, c2, product, nonproduct, ...),
 // (nonstatic, static), and (unchecked, checked) has not been taken.
--- a/hotspot/src/share/vm/runtime/vm_version.cpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vm_version.cpp	Fri Jul 19 13:32:53 2013 -0700
@@ -186,7 +186,8 @@
                  IA64_ONLY("ia64")               \
                  AMD64_ONLY("amd64")             \
                  ARM_ONLY("arm")                 \
-                 PPC_ONLY("ppc")                 \
+                 PPC32_ONLY("ppc")               \
+                 PPC64_ONLY("ppc64")             \
                  SPARC_ONLY("sparc")
 #endif // ZERO
 
@@ -248,7 +249,7 @@
       #define FLOAT_ARCH_STR "-e500v2"
     #elif defined(ARM)
       #define FLOAT_ARCH_STR "-vfp"
-    #elif defined(PPC)
+    #elif defined(PPC32)
       #define FLOAT_ARCH_STR "-hflt"
     #else
       #define FLOAT_ARCH_STR ""
--- a/hotspot/src/share/vm/utilities/macros.hpp	Thu Jul 18 03:38:10 2013 -0700
+++ b/hotspot/src/share/vm/utilities/macros.hpp	Fri Jul 19 13:32:53 2013 -0700
@@ -320,7 +320,11 @@
 #define NOT_IA32(code) code
 #endif
 
-#ifdef IA64
+// This is a REALLY BIG HACK: on AIX, <sys/systemcfg.h> unconditionally defines IA64.
+// At least on AIX 7.1 this is a real problem, because 'systemcfg.h' is indirectly
+// included by 'pthread.h' and other common system headers.
+
+#if defined(IA64) && !defined(AIX)
 #define IA64_ONLY(code) code
 #define NOT_IA64(code)
 #else
@@ -344,14 +348,34 @@
 #define NOT_SPARC(code) code
 #endif
 
-#ifdef PPC
+#if defined(PPC32) || defined(PPC64)
+#ifndef PPC
+#define PPC
+#endif
 #define PPC_ONLY(code) code
 #define NOT_PPC(code)
 #else
+#undef PPC
 #define PPC_ONLY(code)
 #define NOT_PPC(code) code
 #endif
 
+#ifdef PPC32
+#define PPC32_ONLY(code) code
+#define NOT_PPC32(code)
+#else
+#define PPC32_ONLY(code)
+#define NOT_PPC32(code) code
+#endif
+
+#ifdef PPC64
+#define PPC64_ONLY(code) code
+#define NOT_PPC64(code)
+#else
+#define PPC64_ONLY(code)
+#define NOT_PPC64(code) code
+#endif
+
 #ifdef E500V2
 #define E500V2_ONLY(code) code
 #define NOT_E500V2(code)
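Taken together with the globals.hpp hunk above, these macro pairs let a single flag table carry per-architecture defaults: PPC64_ONLY(code) expands to code on ppc64 builds and to nothing elsewhere. A small illustration of how the two flag sites expand (the lowercase names are stand-ins for the real flags, shown only to make the expansion visible):

// On a ppc64 build, PPC64 is defined, so PPC64_ONLY(code) yields code:
#define PPC64_ONLY(code) code

const unsigned code_cache_segment_size = 64 PPC64_ONLY(+64);        // == 128
const bool     enable_invoke_dynamic   = true PPC64_ONLY(&& false); // == false

// On any other build, PPC64_ONLY(code) expands to nothing, leaving the
// generic defaults 64 and true in place.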