8233078: fix minimal VM build on Linux ppc64(le)
author mbaesken
date Mon, 04 Nov 2019 09:54:00 +0100
changeset 58904 1f7981ef8779
parent 58903 eeb1c0da2126
child 58905 75eedcd30a2a
8233078: fix minimal VM build on Linux ppc64(le)
Reviewed-by: mdoerr, lucy
src/hotspot/cpu/ppc/c1_globals_ppc.hpp
src/hotspot/cpu/ppc/disassembler_ppc.cpp
src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
src/hotspot/cpu/ppc/vm_version_ppc.cpp
--- a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp	Mon Nov 04 09:40:35 2019 +0100
+++ b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp	Mon Nov 04 09:54:00 2019 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,8 +48,12 @@
 define_pd_global(intx,     FreqInlineSize,               325 );
 define_pd_global(bool,     ResizeTLAB,                   true);
 define_pd_global(uintx,    ReservedCodeCacheSize,        32*M);
+define_pd_global(uintx,    NonProfiledCodeHeapSize,      13*M );
+define_pd_global(uintx,    ProfiledCodeHeapSize,         14*M );
+define_pd_global(uintx,    NonNMethodCodeHeapSize,       5*M );
 define_pd_global(uintx,    CodeCacheExpansionSize,       32*K);
 define_pd_global(uintx,    CodeCacheMinBlockLength,      1);
+define_pd_global(uintx,    CodeCacheMinimumUseSpace,     400*K);
 define_pd_global(size_t,   MetaspaceSize,                12*M);
 define_pd_global(bool,     NeverActAsServerClassMachine, true);
 define_pd_global(size_t,   NewSizeThreadIncrease,        16*K);
--- a/src/hotspot/cpu/ppc/disassembler_ppc.cpp	Mon Nov 04 09:40:35 2019 +0100
+++ b/src/hotspot/cpu/ppc/disassembler_ppc.cpp	Mon Nov 04 09:54:00 2019 +0100
@@ -27,8 +27,6 @@
 #include "code/codeCache.hpp"
 #include "compiler/disassembler.hpp"
 #include "depChecker_ppc.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/parOopClosures.inline.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Mon Nov 04 09:40:35 2019 +0100
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Mon Nov 04 09:54:00 2019 +0100
@@ -50,6 +50,8 @@
 #if defined(COMPILER2) && (defined(AIX) || defined(LINUX))
 // Include Transactional Memory lock eliding optimization
 #define INCLUDE_RTM_OPT 1
+#else
+#define INCLUDE_RTM_OPT 0
 #endif
 
 #define SUPPORT_RESERVED_STACK_AREA
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Mon Nov 04 09:40:35 2019 +0100
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Mon Nov 04 09:54:00 2019 +0100
@@ -571,7 +571,6 @@
   __ bctr();
 }
 
-#ifdef COMPILER2
 static int reg2slot(VMReg r) {
   return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
 }
@@ -579,7 +578,6 @@
 static int reg2offset(VMReg r) {
   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 }
-#endif
 
 // ---------------------------------------------------------------------------
 // Read the array of BasicTypes from a signature, and compute where the
@@ -1305,7 +1303,6 @@
   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 }
 
-#ifdef COMPILER2
 // An oop arg. Must pass a handle not the oop itself.
 static void object_move(MacroAssembler* masm,
                         int frame_size_in_slots,
@@ -1813,8 +1810,6 @@
                                                  receiver_reg, member_reg, /*for_compiler_entry:*/ true);
 }
 
-#endif // COMPILER2
-
 // ---------------------------------------------------------------------------
 // Generate a native wrapper for a given method. The method takes arguments
 // in the Java compiled code convention, marshals them to the native
@@ -1851,7 +1846,6 @@
                                                 VMRegPair *in_regs,
                                                 BasicType ret_type,
                                                 address critical_entry) {
-#ifdef COMPILER2
   if (method->is_method_handle_intrinsic()) {
     vmIntrinsics::ID iid = method->intrinsic_id();
     intptr_t start = (intptr_t)__ pc();
@@ -2108,7 +2102,7 @@
 
   // Check ic: object class == cached class?
   if (!method_is_static) {
-  Register ic = as_Register(Matcher::inline_cache_reg_encode());
+  Register ic = R19_inline_cache_reg;
   Register receiver_klass = r_temp_1;
 
   __ cmpdi(CCR0, R3_ARG1, 0);
@@ -2638,12 +2632,10 @@
 
   // Handler for pending exceptions (out-of-line).
   // --------------------------------------------------------------------------
-
   // Since this is a native call, we know the proper exception handler
   // is the empty function. We just pop this frame and then jump to
   // forward_exception_entry.
   if (!is_critical_native) {
-  __ align(InteriorEntryAlignment);
   __ bind(handle_pending_exception);
 
   __ pop_frame();
@@ -2656,7 +2648,6 @@
   // --------------------------------------------------------------------------
 
   if (!method_is_static) {
-  __ align(InteriorEntryAlignment);
   __ bind(ic_miss);
 
   __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
@@ -2683,10 +2674,6 @@
   }
 
   return nm;
-#else
-  ShouldNotReachHere();
-  return NULL;
-#endif // COMPILER2
 }
 
 // This function returns the adjust size (in number of words) to a c2i adapter
@@ -2863,7 +2850,7 @@
   // We can't grab a free register here, because all registers may
   // contain live values, so let the RegisterSaver do the adjustment
   // of the return pc.
-  const int return_pc_adjustment_no_exception = -HandlerImpl::size_deopt_handler();
+  const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
 
   // Push the "unpack frame"
   // Save everything in sight.
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Mon Nov 04 09:40:35 2019 +0100
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Mon Nov 04 09:54:00 2019 +0100
@@ -3103,6 +3103,7 @@
                                                              STUB_ENTRY(checkcast_arraycopy));
 
     // fill routines
+#ifdef COMPILER2
     if (OptimizeFill) {
       StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
       StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
@@ -3111,6 +3112,7 @@
       StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
       StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
     }
+#endif
   }
 
   // Safefetch stubs.
@@ -3579,8 +3581,6 @@
     if (UseMultiplyToLenIntrinsic) {
       StubRoutines::_multiplyToLen = generate_multiplyToLen();
     }
-#endif
-
     if (UseSquareToLenIntrinsic) {
       StubRoutines::_squareToLen = generate_squareToLen();
     }
@@ -3595,6 +3595,7 @@
       StubRoutines::_montgomerySquare
         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
     }
+#endif
 
     if (UseAESIntrinsics) {
       StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Mon Nov 04 09:40:35 2019 +0100
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Mon Nov 04 09:54:00 2019 +0100
@@ -312,6 +312,7 @@
     FLAG_SET_DEFAULT(UseSHA, false);
   }
 
+#ifdef COMPILER2
   if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
     UseSquareToLenIntrinsic = true;
   }
@@ -327,6 +328,7 @@
   if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
     UseMontgomerySquareIntrinsic = true;
   }
+#endif
 
   if (UseVectorizedMismatchIntrinsic) {
     warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
@@ -373,9 +375,11 @@
     if (UseRTMDeopt) {
       FLAG_SET_DEFAULT(UseRTMDeopt, false);
     }
+#ifdef COMPILER2
     if (PrintPreciseRTMLockingStatistics) {
       FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
     }
+#endif
   }
 
   // This machine allows unaligned memory accesses