  _seq_bottom(NULL)
{}

// Private methods.

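// Attempts to allocate a humongous object of the given word size,
// starting the search for a run of contiguous empty regions at
// region index ind. Returns the address of the new object, or NULL
// if no large-enough run was found.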
HeapWord*
HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
  assert(G1CollectedHeap::isHumongous(word_size),
         "Allocation size should be humongous");
  int cur = ind;
  int first = cur;
  size_t sumSizes = 0;
  while (cur < _regions.length() && sumSizes < word_size) {
    // Loop invariant:
    // For all i in [first, cur):
    //   _regions.at(i)->is_empty()
    //   && _regions.at(i) is contiguous with its predecessor, if any
    //   && sumSizes is the sum of the sizes of the regions in the interval
    //      [first, cur)
    HeapRegion* curhr = _regions.at(cur);
    if (curhr->is_empty()
        && (first == cur
            || (_regions.at(cur-1)->end() ==
                curhr->bottom()))) {
      sumSizes += curhr->capacity() / HeapWordSize;
    } else {
      first = cur + 1;
      sumSizes = 0;
    }
    cur++;
  }
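  // At this point either sumSizes >= word_size, and [first, cur) is a
  // contiguous run of empty regions large enough for the allocation,
  // or cur has run off the end of the sequence and the allocation
  // will fail below.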
  if (sumSizes >= word_size) {
    _alloc_search_start = cur;

    // We need to initialize the region(s) we just discovered. This is
    // a bit tricky given that it can happen concurrently with
    // refinement threads refining cards on these regions and
    // potentially wanting to refine the BOT as they are scanning
    // those cards (this can happen shortly after a cleanup; see CR
    // 6991377). So we have to set up the region(s) carefully and in
    // a specific order.

    // Currently, allocs_are_zero_filled() returns false. The zero
    // filling infrastructure will be going away soon (see CR 6977804).
    // So no need to do anything else here.
    bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
    assert(!zf, "not supported");

    // This will be the "starts humongous" region.
    HeapRegion* first_hr = _regions.at(first);
    {
      MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
      first_hr->set_zero_fill_allocated();
    }
    // The header of the new object will be placed at the bottom of
    // the first region.
    HeapWord* new_obj = first_hr->bottom();
    // This will be the new end of the first region in the series that
    // should also match the end of the last region in the series.
    // (Note: sumSizes = "region size" x "number of regions we found").
    HeapWord* new_end = new_obj + sumSizes;
    // This will be the new top of the first region that will reflect
    // this allocation.
    HeapWord* new_top = new_obj + word_size;

    // First, we need to zero the header of the space that we will be
    // allocating. When we update top further down, some refinement
    // threads might try to scan the region. By zeroing the header we
    // ensure that any thread that will try to scan the region will
    // come across the zero klass word and bail out.
    //
    // NOTE: It would not have been correct to have used
    // CollectedHeap::fill_with_object() and make the space look like
    // an int array. The thread that is doing the allocation will
    // later update the object header to a potentially different array
    // type and, for a very short period of time, the klass and length
    // fields will be inconsistent. This could cause a refinement
    // thread to calculate the object size incorrectly.
    Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
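    // (oopDesc::header_size() covers both the mark word and the klass
    // word, so the fill above zeroes the klass word that concurrent
    // scanners check.)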

    // We will set up the first region as "starts humongous". This
    // will also update the BOT covering all the regions to reflect
    // that there is a single object that starts at the bottom of the
    // first region.
    first_hr->set_startsHumongous(new_top, new_end);

    // Then, if there are any, we will set up the "continues
    // humongous" regions.
    HeapRegion* hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      {
        MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
        hr->set_zero_fill_allocated();
      }
      hr->set_continuesHumongous(first_hr);
    }
    // If we have "continues humongous" regions (hr != NULL), then the
    // end of the last one should match new_end.
    assert(hr == NULL || hr->end() == new_end, "sanity");

    // Up to this point no concurrent thread would have been able to
    // do any scanning on any region in this series. All the top
    // fields still point to bottom, so the intersection between
    // [bottom,top] and [card_start,card_end] will be empty. Before we
    // update the top fields, we'll do a storestore to make sure that
    // no thread sees the update to top before the zeroing of the
    // object header and the BOT initialization.
    OrderAccess::storestore();

    // Now that the BOT and the object header have been initialized,
    // we can update top of the "starts humongous" region.
    assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
           "new_top should be in this region");
    first_hr->set_top(new_top);

    // Now, we will update the top fields of the "continues humongous"
    // regions. The reason we need to do this is that, otherwise,
    // these regions would look empty and this would confuse parts of
    // G1. For example, the code that looks for a consecutive number
    // of empty regions will consider them empty and try to
    // re-allocate them. We can extend is_empty() to also include
    // !continuesHumongous(), but it is easier to just update the top
    // fields here.
    hr = NULL;
    for (int i = first + 1; i < cur; ++i) {
      hr = _regions.at(i);
      if ((i + 1) == cur) {
        // last continues humongous region
        assert(hr->bottom() < new_top && new_top <= hr->end(),
               "new_top should fall on this region");
        hr->set_top(new_top);
      } else {
        // not last one
        assert(new_top > hr->end(), "new_top should be above this region");
        hr->set_top(hr->end());
      }
    }
    // If we have continues humongous regions (hr != NULL), then the
    // end of the last one should match new_end and its top should
    // match new_top.
    assert(hr == NULL ||
           (hr->end() == new_end && hr->top() == new_top), "sanity");

    return new_obj;
  } else {
    // We failed to find a large-enough run of contiguous empty
    // regions, so the allocation fails.
    return NULL;
  }
}
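
// Prints the runs of contiguous empty regions in the sequence.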
void HeapRegionSeq::print_empty_runs() {
  int empty_run = 0;
  int n_empty = 0;
  int empty_run_start;
  for (int i = 0; i < _regions.length(); i++) {

    cur--;
  }
  return res;
}
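// Attempts a humongous allocation, starting the search at
// _alloc_search_start and wrapping around to region index 0 if that
// fails. Returns NULL if no suitable run of regions was found.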
HeapWord* HeapRegionSeq::obj_allocate(size_t word_size) {
  int cur = _alloc_search_start;
  // Make sure "cur" is a valid index.
  assert(cur >= 0, "Invariant.");
  HeapWord* res = alloc_obj_from_region_index(cur, word_size);
  if (res == NULL)
    res = alloc_obj_from_region_index(0, word_size);
  return res;
}
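
// Finds a run of num contiguous empty regions, scanning forward from
// region index from. Returns the index of the first region in the
// run, or -1 if no such run exists.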
int HeapRegionSeq::find_contiguous_from(int from, size_t num) {
  assert(num > 1, "pre-condition");
  assert(0 <= from && from <= _regions.length(),
         err_msg("from: %d should be valid and <= %d",
                 from, _regions.length()));

  int curr = from;
  int first = -1;
  size_t num_so_far = 0;
  while (curr < _regions.length() && num_so_far < num) {
    HeapRegion* curr_hr = _regions.at(curr);
    if (curr_hr->is_empty()) {
      if (first == -1) {
        first = curr;
        num_so_far = 1;
      } else {
        num_so_far += 1;
      }
    } else {
      first = -1;
      num_so_far = 0;
    }
    curr += 1;
  }

  assert(num_so_far <= num, "post-condition");
  if (num_so_far == num) {
    // we found enough space for the humongous object
    assert(from <= first && first < _regions.length(), "post-condition");
    assert(first < curr && (curr - first) == (int) num, "post-condition");
    for (int i = first; i < first + (int) num; ++i) {
      assert(_regions.at(i)->is_empty(), "post-condition");
    }
    return first;
  } else {
    // we failed to find enough space for the humongous object
    return -1;
  }
}
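
// As find_contiguous_from(), but starts the search at
// _alloc_search_start, retrying from index 0 if that fails, and on
// success advances _alloc_search_start past the regions found.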
int HeapRegionSeq::find_contiguous(size_t num) {
  assert(num > 1, "otherwise we should not be calling this");
  assert(0 <= _alloc_search_start && _alloc_search_start <= _regions.length(),
         err_msg("_alloc_search_start: %d should be valid and <= %d",
                 _alloc_search_start, _regions.length()));

  int start = _alloc_search_start;
  int res = find_contiguous_from(start, num);
  if (res == -1 && start != 0) {
    // Try starting from the beginning. If _alloc_search_start was 0,
    // no point in doing this again.
    res = find_contiguous_from(0, num);
  }
  if (res != -1) {
    assert(0 <= res && res < _regions.length(),
           err_msg("res: %d should be valid", res));
    _alloc_search_start = res + (int) num;
  }
  assert(0 < _alloc_search_start && _alloc_search_start <= _regions.length(),
         err_msg("_alloc_search_start: %d should be valid",
                 _alloc_search_start));
  return res;
}
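
// A sketch of the expected usage (an assumption: the caller, e.g.
// G1CollectedHeap, now performs the "starts humongous" / "continues
// humongous" set-up that alloc_obj_from_region_index() used to do):
//
//   int first = seq->find_contiguous(num_regions);
//   if (first != -1) {
//     // initialize regions [first, first + num_regions)
//   }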

void HeapRegionSeq::iterate(HeapRegionClosure* blk) {
  iterate_from((HeapRegion*)NULL, blk);