/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/thread.hpp"


// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
//
// We manage concurrency on the FreeList with a CAS-based
// detach-modify-reattach idiom that avoids the ABA problems
// that would otherwise be present in a simple CAS-based
// push-pop implementation.  (push-one and pop-all)
//
// Caveat: Allocate() and Release() may be called from threads
// other than the thread associated with the Event!
// If we need to call Allocate() when running as the thread in
// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the 1st time the thread calls park().  unpark() calls to
// an unprovisioned thread would be ignored.  The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.

volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;

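// ParkEvent::Allocate below proceeds in four labeled steps:
//   1: Detach   -- CAS the entire FreeList to NULL, privatizing the whole chain.
//   2: Extract  -- pop the first element of the privatized list for the caller.
//   3: Reattach -- CAS the residual list back into FreeList.
//   4: Merge    -- if other threads pushed nodes while the list was detached,
//                  detach those arrivals and append them before retrying step 3.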
ParkEvent * ParkEvent::Allocate (Thread * t) {
  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
  ParkEvent * ev ;

  // Start by trying to recycle an existing but unassociated
  // ParkEvent from the global free list.
  for (;;) {
    ev = FreeList ;
    if (ev == NULL) break ;
    // 1: Detach - sequester or privatize the list
    // Tantamount to ev = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
      continue ;
    }

    // We've detached the list.  The list in-hand is now
    // local to this thread.  This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    ParkEvent * List = ev->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
      // 3: Try to reattach the residual list
      guarantee (List != NULL, "invariant") ;
      ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
      if (Arv == NULL) break ;

      // New nodes arrived.  Try to detach the recent arrivals.
      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
        continue ;
      }
      guarantee (Arv != NULL, "invariant") ;
      // 4: Merge Arv into List
      ParkEvent * Tail = List ;
      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
      Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (ev != NULL) {
    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new ParkEvent.
    // In rare cases an allocating thread might detach a long list --
    // installing null into FreeList -- and then stall or be obstructed.
    // A 2nd thread calling Allocate() would see FreeList == null.
    // The list held privately by the 1st thread is unavailable to the 2nd thread.
    // In that case the 2nd thread would have to materialize a new ParkEvent,
    // even though free ParkEvents existed in the system.  In this case we end up
    // with more ParkEvents in circulation than we need, but the race is
    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
    // is equal to the maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the freelist
    // can be transiently inaccessible.  At worst we may end up with the
    // # of ParkEvents in circulation slightly above the ideal.
    // Note that if we didn't have the TSM/immortal constraint, then
    // when reattaching, above, we could trim the list.
    ev = new ParkEvent () ;
    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  }
  ev->reset() ;                     // courtesy to caller
  ev->AssociatedWith = t ;          // Associate ev with t
  ev->FreeNext       = NULL ;
  return ev ;
}

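// Release pushes a single, privately-owned node back onto FreeList with a
// simple CAS loop.  Unlike a naive pop, push-one is not exposed to ABA:
// even if the observed head is popped and re-pushed between the read and
// the CAS, linking ev in front of whatever head the CAS confirms is still
// correct.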
void ParkEvent::Release (ParkEvent * ev) {
  if (ev == NULL) return ;
  guarantee (ev->FreeNext == NULL, "invariant") ;
  ev->AssociatedWith = NULL ;
  for (;;) {
    // Push ev onto FreeList
    // The mechanism is "half" lock-free.
    ParkEvent * List = FreeList ;
    ev->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
  }
}

// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.

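// The raw block from AllocateHeap is over-allocated by 256 bytes and the
// result is rounded up to a 256-byte boundary, so the low-order byte of
// every ParkEvent address is 0.  The original, unaligned pointer is never
// retained or freed -- ParkEvents are immortal, and operator delete below
// deliberately aborts.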
void * ParkEvent::operator new (size_t sz) throw() {
  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}

void ParkEvent::operator delete (void * a) {
  // ParkEvents are type-stable and immortal ...
  ShouldNotReachHere();
}


// 6399321 As a temporary measure we copied & modified the ParkEvent::
// allocate() and release() code for use by Parkers.  The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.

volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;

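// Parker::Allocate mirrors ParkEvent::Allocate above, with two differences:
// a Parker must always be associated with a JavaThread (t != NULL), and a
// freshly constructed Parker is handed out without the 256-byte alignment
// guarantee or the reset() courtesy call that ParkEvents receive.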
Parker * Parker::Allocate (JavaThread * t) {
  guarantee (t != NULL, "invariant") ;
  Parker * p ;

  // Start by trying to recycle an existing but unassociated
  // Parker from the global free list.
  for (;;) {
    p = FreeList ;
    if (p == NULL) break ;
    // 1: Detach
    // Tantamount to p = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
      continue ;
    }

    // We've detached the list.  The list in-hand is now
    // local to this thread.  This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    Parker * List = p->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
      // 3: Try to reattach the residual list
      guarantee (List != NULL, "invariant") ;
      Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
      if (Arv == NULL) break ;

      // New nodes arrived.  Try to detach the recent arrivals.
      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
        continue ;
      }
      guarantee (Arv != NULL, "invariant") ;
      // 4: Merge Arv into List
      Parker * Tail = List ;
      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
      Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (p != NULL) {
    guarantee (p->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new Parker.
    // In rare cases an allocating thread might detach
    // a long list -- installing null into FreeList -- and
    // then stall.  Another thread calling Allocate() would see
    // FreeList == null and then invoke the ctor.  In this case we
    // end up with more Parkers in circulation than we need, but
    // the race is rare and the outcome is benign.
    // Ideally, the # of extant Parkers is equal to the
    // maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the
    // freelist can be transiently inaccessible.  At worst
    // we may end up with the # of Parkers in circulation
    // slightly above the ideal.
    p = new Parker() ;
  }
  p->AssociatedWith = t ;          // Associate p with t
  p->FreeNext       = NULL ;
  return p ;
}


void Parker::Release (Parker * p) {
  if (p == NULL) return ;
  guarantee (p->AssociatedWith != NULL, "invariant") ;
  guarantee (p->FreeNext == NULL, "invariant") ;
  p->AssociatedWith = NULL ;
  for (;;) {
    // Push p onto FreeList
    Parker * List = FreeList ;
    p->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
  }
}