/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "utilities/bitMap.inline.hpp"

inline bool G1ConcurrentMark::mark_in_next_bitmap(oop const obj) {
  HeapRegion* const hr = _g1h->heap_region_containing(obj);
  return mark_in_next_bitmap(hr, obj);
}

inline bool G1ConcurrentMark::mark_in_next_bitmap(HeapRegion* const hr, oop const obj) {
  assert(hr != NULL, "just checking");
  assert(hr->is_in_reserved(obj), "Attempting to mark object at " PTR_FORMAT " that is not contained in the given region %u", p2i(obj), hr->hrm_index());

  if (hr->obj_allocated_since_next_marking(obj)) {
    return false;
  }

  // Some callers may have stale objects to mark above nTAMS after humongous reclaim.
  // Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
  assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));

  HeapWord* const obj_addr = (HeapWord*)obj;
  // Dirty read to avoid CAS.
  if (_nextMarkBitMap->is_marked(obj_addr)) {
    return false;
  }

  return _nextMarkBitMap->par_mark(obj_addr);
}
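
// mark_in_next_bitmap() returns true only if this thread set the bit; it
// returns false if the object was allocated after nTAMS, was already marked,
// or another thread marked it first. Illustrative caller pattern (a sketch,
// not upstream code; see G1CMTask::make_reference_grey() below for the actual
// use in this file):
//
//   if (_cm->mark_in_next_bitmap(obj)) {
//     // This thread turned obj grey and is now responsible for scanning it,
//     // either directly or by pushing it on a task queue.
//   }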

#ifndef PRODUCT
template<typename Fn>
inline void G1CMMarkStack::iterate(Fn fn) const {
  assert_at_safepoint(true);

  size_t num_chunks = 0;

  TaskQueueEntryChunk* cur = _chunk_list;
  while (cur != NULL) {
    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);

    for (size_t i = 0; i < EntriesPerChunk; ++i) {
      if (cur->data[i].is_null()) {
        break;
      }
      fn(cur->data[i]);
    }
    cur = cur->next;
    num_chunks++;
  }
}
#endif
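
// Illustrative (hypothetical) use of the debug-only iteration above, e.g. from
// verification code. iterate() accepts any functor with operator()(G1TaskQueueEntry);
// the field name _global_mark_stack below is assumed for the sketch:
//
//   #ifndef PRODUCT
//   struct VerifyTaskEntryClosure {
//     void operator()(G1TaskQueueEntry entry) const {
//       guarantee(entry.is_array_slice() || oopDesc::is_oop(entry.obj()),
//                 "invalid entry on the global mark stack");
//     }
//   };
//   // ... at a safepoint:
//   VerifyTaskEntryClosure cl;
//   _global_mark_stack.iterate(cl);
//   #endif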

// It scans an object and visits its children.
inline void G1CMTask::scan_task_entry(G1TaskQueueEntry task_entry) { process_grey_task_entry<true>(task_entry); }

inline void G1CMTask::push(G1TaskQueueEntry task_entry) {
  assert(task_entry.is_array_slice() || _g1h->is_in_g1_reserved(task_entry.obj()), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
              _g1h->heap_region_containing(task_entry.obj())), "invariant");
  assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant");  // FIXME!!!
  assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()), "invariant");

  if (!_task_queue->push(task_entry)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(task_entry);
    assert(success, "invariant");
  }
}

inline bool G1CMTask::is_below_finger(oop obj, HeapWord* global_finger) const {
  // If obj is above the global finger, then the mark bitmap scan
  // will find it later, and no push is needed. Similarly, if we have
  // a current region and obj is between the local finger and the
  // end of the current region, then no push is needed. The tradeoff
  // of checking both vs only checking the global finger is that the
  // local check will be more accurate and so result in fewer pushes,
  // but may also be a little slower.
  HeapWord* objAddr = (HeapWord*)obj;
  if (_finger != NULL) {
    // We have a current region.

    // Finger and region values are all NULL or all non-NULL. We
    // use _finger to check since we immediately use its value.
    assert(_curr_region != NULL, "invariant");
    assert(_region_limit != NULL, "invariant");
    assert(_region_limit <= global_finger, "invariant");

    // True if obj is less than the local finger, or is between
    // the region limit and the global finger.
    if (objAddr < _finger) {
      return true;
    } else if (objAddr < _region_limit) {
      return false;
    } // Else check global finger.
  }
  // Check global finger.
  return objAddr < global_finger;
}
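
// Rough picture of the address checks above (a sketch, regions not to scale):
//
//   bottom        _finger         _region_limit               global_finger
//      |  scanned    |  still to scan   |  other claimed regions   |  unclaimed
//      |<-- push --->|<---- no push --->|<--------- push --------->|<- no push ->
//
// Addresses at or above the global finger will still be reached by the bitmap
// scan, and the part of the current region above the local finger will be
// scanned by this task; everything else below the global finger is pushed
// conservatively.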

template<bool scan>
inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
  assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
  assert(task_entry.is_array_slice() || _nextMarkBitMap->is_marked((HeapWord*)task_entry.obj()),
         "Any stolen object should be a slice or marked");

  if (scan) {
    if (task_entry.is_array_slice()) {
      _words_scanned += _objArray_processor.process_slice(task_entry.slice());
    } else {
      oop obj = task_entry.obj();
      if (G1CMObjArrayProcessor::should_be_sliced(obj)) {
        _words_scanned += _objArray_processor.process_obj(obj);
      } else {
        _words_scanned += obj->oop_iterate_size(_cm_oop_closure);
      }
    }
  }
  check_limits();
}
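
// Note on the scan path above: a large objArray is not scanned in one go.
// G1CMObjArrayProcessor (see g1ConcurrentMarkObjArrayProcessor.inline.hpp,
// included at the top of this file) scans a bounded chunk and pushes the
// remainder back as array-slice task entries, so check_limits() runs between
// slices rather than only after the whole array has been scanned.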

inline size_t G1CMTask::scan_objArray(objArrayOop obj, MemRegion mr) {
  obj->oop_iterate(_cm_oop_closure, mr);
  return mr.word_size();
}

inline void G1CMTask::make_reference_grey(oop obj) {
  if (!_cm->mark_in_next_bitmap(obj)) {
    return;
  }

  // No OrderAccess::store_load() is needed. It is implicit in the
  // CAS done in G1CMBitMap::par_mark() call in the routine above.
  HeapWord* global_finger = _cm->finger();

  // We only need to push a newly grey object on the mark
  // stack if it is in a section of memory the mark bitmap
  // scan has already examined. Mark bitmap scanning
  // maintains progress "fingers" for determining that.
  //
  // Notice that the global finger might be moving forward
  // concurrently. This is not a problem. In the worst case, we
  // mark the object while it is above the global finger and, by
  // the time we read the global finger, it has moved forward
  // past this object. In this case, the object will probably
  // be visited when a task is scanning the region and will also
  // be pushed on the stack. So, some duplicate work, but no
  // correctness problems.
  if (is_below_finger(obj, global_finger)) {
    G1TaskQueueEntry entry = G1TaskQueueEntry::from_oop(obj);
    if (obj->is_typeArray()) {
      // Immediately process arrays of primitive types, rather
      // than pushing on the mark stack. This keeps us from
      // adding humongous objects to the mark stack that might
      // be reclaimed before the entry is processed - see
      // selection of candidates for eager reclaim of humongous
      // objects. The cost of the additional type test is
      // mitigated by avoiding a trip through the mark stack,
      // by only doing a bookkeeping update and avoiding the
      // actual scan of the object - a typeArray contains no
      // references, and the metadata is built-in.
      process_grey_task_entry<false>(entry);
    } else {
      push(entry);
    }
  }
}

inline void G1CMTask::deal_with_reference(oop obj) {
  increment_refs_reached();
  if (obj == NULL) {
    return;
  }
  make_reference_grey(obj);
}
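
// Illustrative caller (a sketch, not code in this file): the concurrent marking
// oop closures funnel every reference they visit into deal_with_reference(),
// roughly like this (member names are assumptions for the sketch):
//
//   template <class T>
//   inline void do_oop_work(T* p) {   // member of a marking closure holding _task
//     oop obj = oopDesc::load_decode_heap_oop(p);
//     _task->deal_with_reference(obj);
//   }
//
// deal_with_reference() then counts the reference and greys the object if it
// is non-NULL.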

inline void G1ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->is_marked((HeapWord*) p), "sanity");
  _prevMarkBitMap->mark((HeapWord*) p);
}

bool G1ConcurrentMark::isPrevMarked(oop p) const {
  assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
  return _prevMarkBitMap->is_marked((HeapWord*)p);
}

inline bool G1ConcurrentMark::do_yield_check() {
  if (SuspendibleThreadSet::should_yield()) {
    SuspendibleThreadSet::yield();
    return true;
  } else {
    return false;
  }
}

#endif // SHARE_VM_GC_G1_G1CONCURRENTMARK_INLINE_HPP