/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_ptrQueue.cpp.incl"
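
// PtrQueue is a thread-local buffer of pointer entries used by the G1
// write barriers (e.g. the update and SATB queues). Entries are filled
// from the top of the buffer downwards: _index is the byte offset of the
// most recently written slot, so an empty buffer has _index == _sz and a
// full one has _index == 0. PtrQueueSet holds the state shared by all
// queues of one kind: a free list of buffers and the list of completed
// buffers awaiting processing.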

PtrQueue::PtrQueue(PtrQueueSet* qset_, bool perm) :
  _qset(qset_), _buf(NULL), _index(0), _active(false),
  _perm(perm), _lock(NULL)
{}
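
// Release the current buffer, if any: an empty buffer goes back to the
// free list, while a partially filled one has its unused (leading) slots
// NULLed out and is enqueued as a completed buffer. Permanent queues
// keep their buffer.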
void PtrQueue::flush() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}

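
// Conversions between byte offsets within a buffer and element indices.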
static int byte_index_to_index(int ind) {
  assert((ind % oopSize) == 0, "Invariant.");
  return ind / oopSize;
}

static int index_to_byte_index(int byte_ind) {
  return byte_ind * oopSize;
}
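
// Add ptr to the queue, assuming it has already been determined that the
// queue is active. If the current buffer is full (_index == 0), hand it
// off via handle_zero_index() and obtain a fresh buffer before storing.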
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(0 <= _index && _index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}
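
// Enqueue buf on the completed-buffer list of the associated set. The
// per-queue lock is dropped around the enqueue (which takes the set's
// monitor) and reacquired before returning, since the caller will unlock.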
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");
  _lock->unlock();
  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, for the normal
  // case.
  _lock->lock_without_safepoint_check();
}

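
// All shared state starts out empty; the monitor, free-list lock and
// thresholds are left NULL/zero here and are expected to be filled in by
// a later initialization call. A new set initially owns its own free list.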
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  _notify_when_complete(notify_when_complete),
  _sz(0),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_threshold(0), _process_completed(false),
  _buf_free_list(NULL), _buf_free_list_sz(0)
{
  _fl_owner = this;
}
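
// Return a buffer of _sz bytes, preferring the owner's free list and
// falling back to a fresh C-heap allocation that also reserves room for
// the BufferNode header in front of the buffer.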
void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  if (_fl_owner->_buf_free_list != NULL) {
    void** res = BufferNode::make_buffer_from_node(_fl_owner->_buf_free_list);
    _fl_owner->_buf_free_list = _fl_owner->_buf_free_list->next();
    _fl_owner->_buf_free_list_sz--;
    return res;
  } else {
    // Allocate space for the BufferNode in front of the buffer.
    char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size());
    return BufferNode::make_buffer_from_block(b);
  }
}

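// Return buf to the owner's free list; buffers are only released back to
// the C heap by reduce_free_list().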
void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode *node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

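// Trim the buffer free list; only the owning set may do this.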
void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  while (n > 0) {
    assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong.");
    void* b = BufferNode::make_block_from_node(_buf_free_list);
    _buf_free_list = _buf_free_list->next();
    FREE_C_HEAP_ARRAY(char, b);
    _buf_free_list_sz--;
    n--;
  }
}

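// Called when the current buffer is full (_index == 0). The full buffer is
// handed off: it is either processed directly by the calling mutator thread,
// or enqueued on the completed-buffer list (under the queue's lock when one
// is present). The queue then continues with the recycled buffer or with a
// newly allocated one.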
void PtrQueue::handle_zero_index() {
  assert(0 == _index, "Precondition.");
  // This thread records the full buffer and allocates a new one (while
  // holding the lock if there is one).
  if (_buf != NULL) {
    if (_lock) {
      locking_enqueue_completed_buffer(_buf);
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(0 <= _index && _index <= _sz, "Invariant.");
}

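// Java threads may process buf themselves instead of enqueueing it,
// depending on how full the completed-buffer queue already is. A true
// return means the buffer was processed and may be reused by the caller;
// otherwise the buffer has been enqueued and the caller needs a new one.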
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the
        // caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}

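// Append buf (with its fill index) to the completed-buffer list; once the
// processing threshold is reached, set the process-completed flag and,
// if requested, notify a waiter on _cbl_mon.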
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::new_from_buffer(buf);
  cbn->set_index(index);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= _process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
  debug_only(assert_completed_buffer_list_len_correct_locked());
}

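// Count the entries on the completed-buffer list; used by the consistency
// checks below.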
int PtrQueueSet::completed_buffers_list_length() {
  int n = 0;
  BufferNode* cbn = _completed_buffers_head;
  while (cbn != NULL) {
    n++;
    cbn = cbn->next();
  }
  return n;
}

void PtrQueueSet::assert_completed_buffer_list_len_correct() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  assert_completed_buffer_list_len_correct_locked();
}

void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
  guarantee(completed_buffers_list_length() == _n_completed_buffers,
            "Completed buffer length is wrong.");
}

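// sz is given in oop-sized entries; _sz is stored in bytes.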
void PtrQueueSet::set_buffer_size(size_t sz) {
  assert(_sz == 0 && sz > 0, "Should be called only once.");
  _sz = sz * oopSize;
}

// Merge lists of buffers. Notify the processing threads.
// The source queue is emptied as a result. The queues
// must share the monitor.
void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = src->_completed_buffers_head;
    _completed_buffers_tail = src->_completed_buffers_tail;
  } else {
    assert(_completed_buffers_head != NULL, "Well-formedness");
    if (src->_completed_buffers_head != NULL) {
      _completed_buffers_tail->set_next(src->_completed_buffers_head);
      _completed_buffers_tail = src->_completed_buffers_tail;
    }
  }
  _n_completed_buffers += src->_n_completed_buffers;

  src->_n_completed_buffers = 0;
  src->_completed_buffers_head = NULL;
  src->_completed_buffers_tail = NULL;

  assert((_completed_buffers_head == NULL && _completed_buffers_tail == NULL) ||
         (_completed_buffers_head != NULL && _completed_buffers_tail != NULL),
         "Sanity");
}

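// Set the process-completed flag and notify a waiter on _cbl_mon when the
// completed-buffer count has reached the processing threshold, or
// unconditionally when _max_completed_queue is zero.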
void PtrQueueSet::notify_if_necessary() {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  if (_n_completed_buffers >= _process_completed_threshold || _max_completed_queue == 0) {
    _process_completed = true;
    if (_notify_when_complete)
      _cbl_mon->notify();
  }
}