author       iveresov
date         Fri, 03 Sep 2010 17:51:07 -0700
changeset    6453   970dc585ab63
parent       6418   6671edbd230e
child        7397   5b173b4ca846
permissions  -rw-r--r--
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_vm_version_x86.cpp.incl"


int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
int VM_Version::_cpuFeatures;
const char* VM_Version::_features_str = "";
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

static BufferBlob* stub_blob;
static const int stub_size = 400;

extern "C" {
  typedef void (*getPsrInfo_stub_t)(void*);
}
static getPsrInfo_stub_t getPsrInfo_stub = NULL;

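
// The stub generated below gathers raw CPUID output into the
// VM_Version::_cpuid_info record; VM_Version::initialize() creates it and
// get_processor_features() invokes it through getPsrInfo_stub.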
class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_getPsrInfo() {
    // Flags to test CPU type.
    const uint32_t EFL_AC = 0x40000;
    const uint32_t EFL_ID = 0x200000;
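    // EFL_AC (bit 18, alignment check) first appeared on the 486, and EFL_ID
    // (bit 21) can only be toggled when the CPUID instruction is implemented,
    // so flipping these EFLAGS bits distinguishes 386, 486 and CPUID-capable chips.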
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label ext_cpuid1, ext_cpuid5, done;

    StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve rbx, and flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
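    // Leaf 0xB is queried with ECX selecting the topology level (0 = threads,
    // 1 = cores, 2 = packages); EAX[4:0] and EBX[15:0] both read as zero when
    // the requested level does not exist, which is the validity check used below.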
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
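    // Sub-leaf 0 of cpuid(0x4) describes the L1 data cache; EAX[4:0] is the
    // cache type field and reads as zero when no such cache level is reported.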
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
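    // Leaf 0x1 returns the family/model/stepping signature in EAX and the
    // feature bits in ECX and EDX; feature_flags() decodes them later.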
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

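    // cpuid(0x80000000) reports the highest supported extended leaf in EAX;
    // the comparisons below pick how far into 0x80000001/0x80000005/0x80000008
    // the stub can safely go.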
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    //
    // Extended cpuid(0x80000008)
    //
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // return
    //
    __ bind(done);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};


void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _cpuFeatures = 0;
  _logical_processors_per_package = 1;

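  // The defaults above describe a pre-CPUID 486 and remain in effect when
  // Use486InstrsOnly suppresses the probe below.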
  if (!Use486InstrsOnly) {
    // Get raw processor info
    getPsrInfo_stub(&_cpuid_info);
    assert_is_initialized();
    _cpu = extended_cpu_family();
    _model = extended_cpu_model();
    _stepping = cpu_stepping();

    if (cpu_family() > 4) { // it supports CPUID
      _cpuFeatures = feature_flags();
      // Logical processors are only available on P4s and above,
      // and only if hyperthreading is available.
      _logical_processors_per_package = logical_processor_count();
    }
  }

  _supports_cx8 = supports_cmpxchg8();

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // in 64 bit the use of SSE2 is the minimum
  if (UseSSE < 2) UseSSE = 2;
#endif

  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _cpuFeatures &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _cpuFeatures &= ~CPU_SSE4_1;
    _cpuFeatures &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _cpuFeatures &= ~CPU_SSE3;
    _cpuFeatures &= ~CPU_SSSE3;
    _cpuFeatures &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _cpuFeatures &= ~CPU_SSE2;

  if (UseSSE < 1)
    _cpuFeatures &= ~CPU_SSE;

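  // Trimming the feature word keeps later supports_sse*() queries consistent
  // with both OS support and any UseSSE cap given on the command line.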
  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _cpuFeatures &= ~CPU_HT;
  }

  char buf[256];
  jio_snprintf(buf, sizeof(buf), "(%u cores per cpu, %u threads per core) family %d model %d stepping %d%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
               cores_per_cpu(), threads_per_core(),
               cpu_family(), _model, _stepping,
               (supports_cmov() ? ", cmov" : ""),
               (supports_cmpxchg8() ? ", cx8" : ""),
               (supports_fxsr() ? ", fxsr" : ""),
               (supports_mmx() ? ", mmx" : ""),
               (supports_sse() ? ", sse" : ""),
               (supports_sse2() ? ", sse2" : ""),
               (supports_sse3() ? ", sse3" : ""),
               (supports_ssse3()? ", ssse3": ""),
               (supports_sse4_1() ? ", sse4.1" : ""),
               (supports_sse4_2() ? ", sse4.2" : ""),
               (supports_popcnt() ? ", popcnt" : ""),
               (supports_mmx_ext() ? ", mmxext" : ""),
               (supports_3dnow() ? ", 3dnow" : ""),
               (supports_3dnow2() ? ", 3dnowext" : ""),
               (supports_lzcnt() ? ", lzcnt": ""),
               (supports_sse4a() ? ", sse4a": ""),
               (supports_ht() ? ", ht": ""));
  _features_str = strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  if( UseSSE > 4 ) UseSSE=4;
  if( UseSSE < 0 ) UseSSE=0;
  if( !supports_sse4_1() ) // Drop to 3 if no SSE4 support
    UseSSE = MIN2((intx)3,UseSSE);
  if( !supports_sse3() ) // Drop to 2 if no SSE3 support
    UseSSE = MIN2((intx)2,UseSSE);
  if( !supports_sse2() ) // Drop to 1 if no SSE2 support
    UseSSE = MIN2((intx)1,UseSSE);
  if( !supports_sse () ) // Drop to 0 if no SSE support
    UseSSE = 0;

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).

  if( is_amd() ) { // AMD cpus specific settings
    if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseAddressNop = true;
    }
    if( supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift) ) {
      // Use it on new AMD cpus starting from Opteron.
      UseNewLongLShift = true;
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      if( supports_sse4a() ) {
        UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
      } else {
        UseXmmLoadAndClearUpper = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse4a() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2F) ) {
      if( supports_sse4a() ) {
        UseXmmI2F = true;
      } else {
        UseXmmI2F = false;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmI2D) ) {
      if( supports_sse4a() ) {
        UseXmmI2D = true;
      } else {
        UseXmmI2D = false;
      }
    }

    // Use count leading zeros instruction if available.
    if (supports_lzcnt()) {
      if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
        UseCountLeadingZerosInstruction = true;
      }
    }
  }

  if( is_intel() ) { // Intel cpus specific settings
    if( FLAG_IS_DEFAULT(UseStoreImmI16) ) {
      UseStoreImmI16 = false; // don't use it on Intel cpus
    }
    if( cpu_family() == 6 || cpu_family() == 15 ) {
      if( FLAG_IS_DEFAULT(UseAddressNop) ) {
        // Use it on all Intel cpus starting from PentiumPro
        UseAddressNop = true;
      }
    }
    if( FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper) ) {
      UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
    }
    if( FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll) ) {
      if( supports_sse3() ) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if( cpu_family() == 6 && supports_sse3() ) { // New Intel cpus
#ifdef COMPILER2
      if( FLAG_IS_DEFAULT(MaxLoopPad) ) {
        // For new Intel cpus do the following optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
      }
      if( supports_sse4_2() && supports_ht() ) { // Newest Intel cpus
        if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
          UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
        }
      }
      if( supports_sse4_2() && UseSSE >= 4 ) {
        if( FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          UseSSE42Intrinsics = true;
        }
      }
    }
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  }

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

  assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
  assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");

  // set valid Prefetch instruction
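  // Values 0..3 select PREFETCHNTA, PREFETCHT0, PREFETCHT2 and PREFETCHW
  // respectively (see the PrintMiscellaneous reporting below); PREFETCHW is
  // a 3DNow! instruction, hence the supports_3dnow() checks.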
  if( ReadPrefetchInstr < 0 ) ReadPrefetchInstr = 0;
  if( ReadPrefetchInstr > 3 ) ReadPrefetchInstr = 3;
  if( ReadPrefetchInstr == 3 && !supports_3dnow() ) ReadPrefetchInstr = 0;
  if( !supports_sse() && supports_3dnow() ) ReadPrefetchInstr = 3;

  if( AllocatePrefetchInstr < 0 ) AllocatePrefetchInstr = 0;
  if( AllocatePrefetchInstr > 3 ) AllocatePrefetchInstr = 3;
  if( AllocatePrefetchInstr == 3 && !supports_3dnow() ) AllocatePrefetchInstr=0;
  if( !supports_sse() && supports_3dnow() ) AllocatePrefetchInstr = 3;

  // Allocation prefetch settings
  intx cache_line_size = L1_data_cache_line_size();
  if( cache_line_size > AllocatePrefetchStepSize )
    AllocatePrefetchStepSize = cache_line_size;
  if( FLAG_IS_DEFAULT(AllocatePrefetchLines) )
    AllocatePrefetchLines = 3; // Optimistic value
  assert(AllocatePrefetchLines > 0, "invalid value");
  if( AllocatePrefetchLines < 1 ) // set valid value in product VM
    AllocatePrefetchLines = 1; // Conservative value

  AllocatePrefetchDistance = allocate_prefetch_distance();
  AllocatePrefetchStyle = allocate_prefetch_style();

  if( is_intel() && cpu_family() == 6 && supports_sse3() ) {
    if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core
#ifdef _LP64
      AllocatePrefetchDistance = 384;
#else
      AllocatePrefetchDistance = 320;
#endif
    }
    if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
      AllocatePrefetchDistance = 192;
      AllocatePrefetchLines = 4;
#ifdef COMPILER2
      if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
      }
#endif
    }
  }
  assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");

#ifdef _LP64
  // Prefetch settings
  PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes();
  PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes();
  PrefetchFieldsAhead = prefetch_fields_ahead();
#endif

#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    tty->print_cr("UseSSE=%d",UseSSE);
    tty->print("Allocation: ");
    if (AllocatePrefetchStyle <= 0 || UseSSE == 0 && !supports_3dnow()) {
      tty->print_cr("no prefetching");
    } else {
      if (UseSSE == 0 && supports_3dnow()) {
        tty->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          tty->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          tty->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          tty->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          tty->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        tty->print_cr(" %d, %d lines with step %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
      } else {
        tty->print_cr(" %d, one line", AllocatePrefetchDistance);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      tty->print_cr("PrefetchCopyIntervalInBytes %d", PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      tty->print_cr("PrefetchScanIntervalInBytes %d", PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead);
    }
  }
#endif // !PRODUCT
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("getPsrInfo_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate getPsrInfo_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t,
                                   g.generate_getPsrInfo());

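  // The stub is invoked from get_processor_features() below, so it has to be
  // generated before the feature probe runs.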
  get_processor_features();
}