//
// Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// The argument count of each .inline directive is ignored by the compiler;
// it is set to the number of arguments purely as documentation.
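
// Each template below pairs with an extern "C" declaration in the C++
// sources; the compiler substitutes the template body at each call site.
// A minimal sketch of the C++ side for one of the templates (the wrapper
// shown is an illustrative assumption, not a copy of the HotSpot headers):
//
//   extern "C" jint _Atomic_add(jint add_value, volatile jint* dest);
//
//   inline jint Atomic::add(jint add_value, volatile jint* dest) {
//     return _Atomic_add(add_value, dest);
//   }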
|
// Get the raw thread ID from %fs:0
      .inline _raw_thread_id,0
      movq     %fs:0, %rax
      .end
|
// Get current sp
      .inline _get_current_sp,0
      .volatile
      movq     %rsp, %rax
      .end
|
// Get current fp
      .inline _get_current_fp,0
      .volatile
      movq     %rbp, %rax
      .end
|
// Support for os::rdtsc()
      .inline _raw_rdtsc,0
      rdtsc                          // counter into %edx:%eax
      salq     $32, %rdx             // shift the high half into place
      orq      %rdx, %rax            // full 64-bit tick count in %rax
      .end
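
// A minimal sketch of the matching C++ declaration (an illustrative
// assumption, not copied from the HotSpot headers):
//
//   extern "C" jlong _raw_rdtsc();
//   inline jlong os::rdtsc() { return _raw_rdtsc(); }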
|
// Support for jint Atomic::add(jint add_value, volatile jint* dest)
      .inline _Atomic_add,2
      movl     %edi, %eax            // save add_value for return
      lock
      xaddl    %edi, (%rsi)          // *dest += add_value; old *dest into %edi
      addl     %edi, %eax            // return old *dest + add_value, the new value
      .end
|
// Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
      .inline _Atomic_add_long,2
      movq     %rdi, %rax            // save add_value for return
      lock
      xaddq    %rdi, (%rsi)          // *dest += add_value; old *dest into %rdi
      addq     %rdi, %rax            // return old *dest + add_value, the new value
      .end
|
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
      .inline _Atomic_xchg,2
      xchgl    (%rsi), %edi          // xchg with a memory operand is implicitly locked
      movl     %edi, %eax            // return the old value
      .end
|
// Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest)
      .inline _Atomic_xchg_long,2
      xchgq    (%rsi), %rdi          // xchg with a memory operand is implicitly locked
      movq     %rdi, %rax            // return the old value
      .end
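
// The three compare-and-swap templates below share one pattern: load
// compare_value into the accumulator, then lock cmpxchg stores
// exchange_value to *dest only if *dest still equals compare_value.
// Either way, cmpxchg leaves the previous value of *dest in
// %al/%eax/%rax, which is exactly what Atomic::cmpxchg returns.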
|
// Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
//                                   volatile jbyte* dest,
//                                   jbyte compare_value)
      .inline _Atomic_cmpxchg_byte,3
      movb     %dl, %al              // compare_value
      lock
      cmpxchgb %dil, (%rsi)
      .end
|
// Support for jint Atomic::cmpxchg(jint exchange_value,
//                                  volatile jint* dest,
//                                  jint compare_value)
      .inline _Atomic_cmpxchg,3
      movl     %edx, %eax            // compare_value
      lock
      cmpxchgl %edi, (%rsi)
      .end
|
// Support for jlong Atomic::cmpxchg(jlong exchange_value,
//                                   volatile jlong* dest,
//                                   jlong compare_value)
      .inline _Atomic_cmpxchg_long,3
      movq     %rdx, %rax            // compare_value
      lock
      cmpxchgq %rdi, (%rsi)
      .end
|
// Support for u2 Bytes::swap_u2(u2 x)
      .inline _raw_swap_u2,1
      movw     %di, %ax
      rorw     $8, %ax               // rotating by 8 swaps the two bytes
      .end
|
// Support for u4 Bytes::swap_u4(u4 x)
      .inline _raw_swap_u4,1
      movl     %edi, %eax
      bswapl   %eax                  // reverse the four bytes
      .end
|
// Support for u8 Bytes::swap_u8(u8 x)
      .inline _raw_swap_u8,1
      movq     %rdi, %rax
      bswapq   %rax                  // reverse the eight bytes
      .end
|
// Support for void Prefetch::read
      .inline _Prefetch_read,2
      prefetcht0 (%rdi, %rsi, 1)     // prefetch *(loc + interval)
      .end
|
// Support for void Prefetch::write
// We use prefetcht0 because em64t doesn't support prefetchw;
// prefetchw is a 3DNow! instruction.
      .inline _Prefetch_write,2
      prefetcht0 (%rdi, %rsi, 1)     // prefetch *(loc + interval)
      .end
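
// A minimal sketch of the matching C++ side, which explains the two
// arguments (loc in %rdi, interval in %rsi). The wrapper and parameter
// types shown are illustrative assumptions:
//
//   extern "C" void _Prefetch_write(void* loc, intx interval);
//
//   inline void Prefetch::write(void* loc, intx interval) {
//     _Prefetch_write(loc, interval);
//   }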