author       johnc
date         Tue, 13 Mar 2012 11:05:32 -0700
changeset    12270:9625585c6047
parent       10243:d00a21009f1f
child        13517:f158a0c702d4
permissions  -rw-r--r--
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

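// Maps an address to its containing HeapRegion. If the address lands in a
// "continues humongous" region, the starting humongous region is returned
// instead; the lookup may also yield NULL (e.g. for perm gen addresses, as
// noted in the body below).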
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

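// "Raw" variant of the lookup above: it asserts that the address lies within
// the reserved heap and does not redirect to the humongous start region.
// Used, for example, by dirty_young_block() below.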
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

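// Returns true iff obj lies in a region that is currently part of the
// collection set.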
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

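// Mutator-side allocation entry point for non-humongous requests. The fast
// path tries the current mutator alloc region lock-free (cf. 6994297: "G1:
// do first-level slow-path allocations with a CAS"); if that fails we fall
// back to attempt_allocation_slow(), which handles the locking and GC-retry
// cases (cf. 7023069: "G1: Introduce symmetric locking in the slow
// allocation path").
//
// Illustrative call pattern only (a sketch, not code from this file):
//
//   unsigned int gc_count_before;
//   HeapWord* obj = attempt_allocation(word_size, &gc_count_before);
//   if (obj == NULL) {
//     // caller escalates, e.g. by scheduling a collection and retrying
//   }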
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size, gc_count_before_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

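// GC-time allocation in the survivor alloc region. It first tries the region
// lock-free and, if that fails, retries under FreeList_lock. BOT updates are
// skipped here (bot_updates = false); see 7039627 ("G1: avoid BOT updates
// for survivor allocations and dirty survivor regions incrementally").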
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

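// GC-time allocation in the old alloc region, using the same locking pattern
// as above but with bot_updates = true: old regions keep their block offset
// table up to date, unlike the young/survivor cases, which skip it.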
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                       true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                       true /* bot_updates */);
  }
  return result;
}

// Dirties the cards that cover the block so that the post write barrier
// never queues anything when updating objects on this block. It is assumed
// (and in fact we assert) that the block belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}

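// Accessor for the per-worker reference-to-scan queue (presumably consumed
// by the parallel GC worker tasks; the queue set itself is declared in
// g1CollectedHeap.hpp).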
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

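// Queries against the concurrent marking bitmaps: "prev" refers to the
// bitmap of the last completed marking cycle, "next" to the one currently
// being built (an assumption based on the usual G1 naming; the definitive
// semantics are in concurrentMark.hpp).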
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP