src/hotspot/cpu/x86/vm_version_x86.cpp
branch      datagramsocketimpl-branch
changeset   58678:9cf78a70fa4f
parent      54852:ddb27517396c
child       58679:9c3209ff7550
comparing 58677:13588c901957 with 58678:9cf78a70fa4f
    33 #include "runtime/os.hpp"
    34 #include "runtime/stubCodeGenerator.hpp"
    35 #include "utilities/virtualizationSupport.hpp"
    36 #include "vm_version_x86.hpp"
    37 
+   38 #include OS_HEADER_INLINE(os)
    39 
    40 int VM_Version::_cpu;
    41 int VM_Version::_model;
    42 int VM_Version::_stepping;
    43 VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
   379     __ movl(rax, 0xE0);
   380     __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
   381     __ cmpl(rax, 0xE0);
   382     __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
   383 
+  384     __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+  385     __ movl(rax, Address(rsi, 0));
+  386     __ cmpl(rax, 0x50654);              // If it is Skylake
+  387     __ jcc(Assembler::equal, legacy_setup);
   388     // If UseAVX is uninitialized or is set by the user to include EVEX
   389     if (use_evex) {
   390       // EVEX setup: run in lowest evex mode
   391       VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
   392       UseAVX = 3;
   467     __ movl(rax, 0xE0);
   468     __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
   469     __ cmpl(rax, 0xE0);
   470     __ jcc(Assembler::notEqual, legacy_save_restore);
   471 
+  472     __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+  473     __ movl(rax, Address(rsi, 0));
+  474     __ cmpl(rax, 0x50654);              // If it is Skylake
+  475     __ jcc(Assembler::equal, legacy_save_restore);
+  476 
   477     // If UseAVX is uninitialized or is set by the user to include EVEX
   478     if (use_evex) {
   479       // EVEX check: run in lowest evex mode
   480       VM_Version::set_evex_cpuFeatures(); // Enable temporarily to pass asserts
   481       UseAVX = 3;
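Both stub fragments above take the legacy path unless the 0xE0 mask of OS-enabled XCR0 state bits is set, and the inserted lines additionally bail out when CPUID leaf 1 reports EAX == 0x50654, i.e. an early Skylake server part. As a rough, standalone illustration of what those generated checks correspond to in plain C++ (the helpers read_xcr0 and decode_signature are hypothetical and not part of VM_Version; assumes a GCC/Clang x86-64 toolchain on a machine with XSAVE enabled):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: read XCR0 via xgetbv (ecx = 0 returns XCR0 in edx:eax).
static uint64_t read_xcr0() {
  uint32_t lo, hi;
  __asm__ volatile("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
  return ((uint64_t)hi << 32) | lo;
}

struct CpuSignature { uint32_t family, model, stepping; };

// Decode the CPUID leaf 1 EAX value that the stub compares against 0x50654.
static CpuSignature decode_signature(uint32_t eax) {
  CpuSignature s;
  s.stepping = eax & 0xF;
  s.model    = ((eax >> 4) & 0xF) | ((eax >> 12) & 0xF0); // model + extended model
  s.family   = (eax >> 8) & 0xF;                          // 0x6 for these parts
  return s;
}

int main() {
  // 0x50654 decodes to family 0x6, model 0x55, stepping 4 (Skylake server).
  CpuSignature s = decode_signature(0x50654);
  bool evex_state_enabled = (read_xcr0() & 0xE0) == 0xE0;  // same mask the stub tests
  printf("family=0x%x model=0x%x stepping=%u, xcr0 0xE0 bits %s\n",
         s.family, s.model, s.stepping, evex_state_enabled ? "set" : "not set");
  return 0;
}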
   616   guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
   617   // clflush_size is size in quadwords (8 bytes).
   618   guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
   619 #endif
   620 
+  621 #ifdef _LP64
+  622   // assigning this field effectively enables Unsafe.writebackMemory()
+  623   // by initializing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
+  624   // that is only implemented on x86_64 and only if the OS plays ball
+  625   if (os::supports_map_sync()) {
+  626     // publish data cache line flush size to generic field, otherwise
+  627     // let it default to zero thereby disabling writeback
+  628     _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
+  629   }
+  630 #endif
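The flush size published above comes from CPUID leaf 1: EBX bits 15:8 report the CLFLUSH line size in quadwords (8-byte units), so multiplying by 8 gives bytes (8 * 8 = 64 on typical x86_64 parts). A minimal standalone sketch of the same derivation, assuming a GCC/Clang toolchain with <cpuid.h> (not VM code):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax, ebx, ecx, edx;
  // Leaf 1, EBX bits 15:8 = CLFLUSH line size in quadwords.
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    unsigned clflush_quadwords = (ebx >> 8) & 0xFF;
    printf("data cache line flush size: %u bytes\n", clflush_quadwords * 8);
  }
  return 0;
}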
   631   // If the OS doesn't support SSE, we can't use this feature even if the HW does
   632   if (!os::supports_sse())
   633     _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);
   634 
   635   if (UseSSE < 4) {
   667       use_avx_limit = 0;
   668     }
   669   }
   670   if (FLAG_IS_DEFAULT(UseAVX)) {
   671     FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
+  672     if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
+  673       FLAG_SET_DEFAULT(UseAVX, 2);  // Set UseAVX=2 for Skylake
+  674     }
   675   } else if (UseAVX > use_avx_limit) {
   676     warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
   677     FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
   678   } else if (UseAVX < 0) {
   679     warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
  1069       }
  1070     }
  1071   }
  1072 #endif // COMPILER2 && ASSERT
  1073 
+ 1074   if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
+ 1075     if (!is_power_of_2(AVX3Threshold)) {
+ 1076       warning("AVX3Threshold must be a power of 2");
+ 1077       FLAG_SET_DEFAULT(AVX3Threshold, 4096);
+ 1078     }
+ 1079   }
+ 1080 
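The inserted guard rejects a user-supplied AVX3Threshold that is not a power of two and falls back to the 4096 default. The check behind is_power_of_2 is the usual single-bit test; a minimal sketch under that assumption (the helper name here is illustrative, the VM uses its own is_power_of_2 utility):

#include <cstdint>
#include <cstdio>

// A value v > 0 is a power of two exactly when it has a single bit set:
// v & (v - 1) clears the lowest set bit and must leave zero.
static bool is_power_of_2_sketch(int64_t v) {
  return v > 0 && (v & (v - 1)) == 0;
}

int main() {
  printf("%d %d %d\n",
         is_power_of_2_sketch(4096),   // 1: a valid AVX3Threshold
         is_power_of_2_sketch(1000),   // 0: would trigger the warning above
         is_power_of_2_sketch(0));     // 0
  return 0;
}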
  1081 #ifdef _LP64
  1082   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
  1083     UseMultiplyToLenIntrinsic = true;
  1084   }
  1085   if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {