|
1 /* |
|
2 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #include "precompiled.hpp" |
|
26 #include "jvm.h" |
|
27 #include "classfile/classLoaderData.inline.hpp" |
|
28 #include "classfile/symbolTable.hpp" |
|
29 #include "classfile/systemDictionary.hpp" |
|
30 #include "classfile/systemDictionaryShared.hpp" |
|
31 #include "logging/log.hpp" |
|
32 #include "memory/metadataFactory.hpp" |
|
33 #include "memory/metaspace.hpp" |
|
34 #include "memory/metaspaceClosure.hpp" |
|
35 #include "memory/metaspaceShared.hpp" |
|
36 #include "memory/resourceArea.hpp" |
|
37 #include "memory/dynamicArchive.hpp" |
|
38 #include "oops/compressedOops.hpp" |
|
39 #include "oops/objArrayKlass.hpp" |
|
40 #include "prims/jvmtiRedefineClasses.hpp" |
|
41 #include "runtime/handles.inline.hpp" |
|
42 #include "runtime/os.inline.hpp" |
|
43 #include "runtime/sharedRuntime.hpp" |
|
44 #include "runtime/vmThread.hpp" |
|
45 #include "runtime/vmOperations.hpp" |
|
46 #include "utilities/bitMap.inline.hpp" |
|
47 |
|
48 #ifndef O_BINARY // if defined (Win32) use binary files. |
|
49 #define O_BINARY 0 // otherwise do nothing. |
|
50 #endif |
|
51 |
|
52 class DynamicArchiveBuilder : ResourceObj { |
|
53 CHeapBitMap _ptrmap; |
|
54 static unsigned my_hash(const address& a) { |
|
55 return primitive_hash<address>(a); |
|
56 } |
|
57 static bool my_equals(const address& a0, const address& a1) { |
|
58 return primitive_equals<address>(a0, a1); |
|
59 } |
|
60 typedef ResourceHashtable< |
|
61 address, address, |
|
62 DynamicArchiveBuilder::my_hash, // solaris compiler doesn't like: primitive_hash<address> |
|
63 DynamicArchiveBuilder::my_equals, // solaris compiler doesn't like: primitive_equals<address> |
|
64 16384, ResourceObj::C_HEAP> RelocationTable; |
|
65 RelocationTable _new_loc_table; |
|
66 |
|
67 intx _buffer_to_target_delta; |
|
68 |
|
69 DumpRegion* _current_dump_space; |
|
70 |
|
71 static size_t reserve_alignment() { |
|
72 return Metaspace::reserve_alignment(); |
|
73 } |
|
74 |
|
75 static const int _total_dump_regions = 3; |
|
76 int _num_dump_regions_used; |
|
77 |
|
78 public: |
|
79 void mark_pointer(address* ptr_loc) { |
|
80 if (is_in_buffer_space(ptr_loc)) { |
|
81 size_t idx = pointer_delta(ptr_loc, _alloc_bottom, sizeof(address)); |
|
82 _ptrmap.set_bit(idx); |
|
83 } |
|
84 } |
|
85 |
|
86 DumpRegion* current_dump_space() const { |
|
87 return _current_dump_space; |
|
88 } |
|
89 |
|
90 bool is_in_buffer_space(address p) const { |
|
91 return (_alloc_bottom <= p && p < (address)current_dump_space()->top()); |
|
92 } |
|
93 |
|
94 template <typename T> bool is_in_target_space(T target_obj) const { |
|
95 address buff_obj = address(target_obj) - _buffer_to_target_delta; |
|
96 return is_in_buffer_space(buff_obj); |
|
97 } |
|
98 |
|
99 template <typename T> bool is_in_buffer_space(T obj) const { |
|
100 return is_in_buffer_space(address(obj)); |
|
101 } |
|
102 |
|
103 template <typename T> T to_target_no_check(T obj) const { |
|
104 return (T)(address(obj) + _buffer_to_target_delta); |
|
105 } |
|
106 |
|
107 template <typename T> T to_target(T obj) const { |
|
108 assert(is_in_buffer_space(obj), "must be"); |
|
109 return (T)(address(obj) + _buffer_to_target_delta); |
|
110 } |
|
111 |
|
112 template <typename T> T get_new_loc(T obj) { |
|
113 address* pp = _new_loc_table.get((address)obj); |
|
114 if (pp == NULL) { |
|
115 // Excluded klasses are not copied |
|
116 return NULL; |
|
117 } else { |
|
118 return (T)*pp; |
|
119 } |
|
120 } |
|
121 |
|
122 address get_new_loc(MetaspaceClosure::Ref* ref) { |
|
123 return get_new_loc(ref->obj()); |
|
124 } |
|
125 |
|
126 template <typename T> bool has_new_loc(T obj) { |
|
127 address* pp = _new_loc_table.get((address)obj); |
|
128 return pp != NULL; |
|
129 } |
|
130 |
|
131 protected: |
|
132 enum FollowMode { |
|
133 make_a_copy, point_to_it, set_to_null |
|
134 }; |
|
135 |
|
136 public: |
|
137 void copy(MetaspaceClosure::Ref* ref, bool read_only) { |
|
138 int bytes = ref->size() * BytesPerWord; |
|
139 address old_obj = ref->obj(); |
|
140 address new_obj = copy_impl(ref, read_only, bytes); |
|
141 |
|
142 assert(new_obj != NULL, "must be"); |
|
143 assert(new_obj != old_obj, "must be"); |
|
144 bool isnew = _new_loc_table.put(old_obj, new_obj); |
|
145 assert(isnew, "must be"); |
|
146 } |
|
147 |
|
148 // Make a shallow copy of each eligible MetaspaceObj into the buffer. |
|
149 class ShallowCopier: public UniqueMetaspaceClosure { |
|
150 DynamicArchiveBuilder* _builder; |
|
151 bool _read_only; |
|
152 public: |
|
153 ShallowCopier(DynamicArchiveBuilder* shuffler, bool read_only) |
|
154 : _builder(shuffler), _read_only(read_only) {} |
|
155 |
|
156 virtual bool do_unique_ref(Ref* orig_obj, bool read_only) { |
|
157 // This method gets called on each *original* object |
|
158 // reachable from _builder->iterate_roots(). Each orig_obj is |
|
159 // called exactly once. |
|
160 FollowMode mode = _builder->follow_ref(orig_obj); |
|
161 |
|
162 if (mode == point_to_it) { |
|
163 if (read_only == _read_only) { |
|
164 log_debug(cds, dynamic)("ptr : " PTR_FORMAT " %s", p2i(orig_obj->obj()), |
|
165 MetaspaceObj::type_name(orig_obj->msotype())); |
|
166 address p = orig_obj->obj(); |
|
167 bool isnew = _builder->_new_loc_table.put(p, p); |
|
168 assert(isnew, "must be"); |
|
169 } |
|
170 return false; |
|
171 } |
|
172 |
|
173 if (mode == set_to_null) { |
|
174 log_debug(cds, dynamic)("nul : " PTR_FORMAT " %s", p2i(orig_obj->obj()), |
|
175 MetaspaceObj::type_name(orig_obj->msotype())); |
|
176 return false; |
|
177 } |
|
178 |
|
179 if (read_only == _read_only) { |
|
180 // Make a shallow copy of orig_obj in a buffer (maintained |
|
181 // by copy_impl in a subclass of DynamicArchiveBuilder). |
|
182 _builder->copy(orig_obj, read_only); |
|
183 } |
|
184 return true; |
|
185 } |
|
186 }; |
|
187 |
|
188 // Relocate all embedded pointer fields within a MetaspaceObj's shallow copy |
|
189 class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure { |
|
190 DynamicArchiveBuilder* _builder; |
|
191 public: |
|
192 ShallowCopyEmbeddedRefRelocator(DynamicArchiveBuilder* shuffler) |
|
193 : _builder(shuffler) {} |
|
194 |
|
195 // This method gets called on each *original* object reachable |
|
196 // from _builder->iterate_roots(). Each orig_obj is |
|
197 // called exactly once. |
|
198 virtual bool do_unique_ref(Ref* orig_ref, bool read_only) { |
|
199 FollowMode mode = _builder->follow_ref(orig_ref); |
|
200 |
|
201 if (mode == point_to_it) { |
|
202 // We did not make a copy of this object |
|
203 // and we have nothing to update |
|
204 assert(_builder->get_new_loc(orig_ref) == NULL || |
|
205 _builder->get_new_loc(orig_ref) == orig_ref->obj(), "must be"); |
|
206 return false; |
|
207 } |
|
208 |
|
209 if (mode == set_to_null) { |
|
210 // We did not make a copy of this object |
|
211 // and we have nothing to update |
|
212 assert(!_builder->has_new_loc(orig_ref->obj()), "must not be copied or pointed to"); |
|
213 return false; |
|
214 } |
|
215 |
|
216 // - orig_obj points to the original object. |
|
217 // - new_obj points to the shallow copy (created by ShallowCopier) |
|
218 // of orig_obj. new_obj is NULL if the orig_obj is excluded |
|
219 address orig_obj = orig_ref->obj(); |
|
220 address new_obj = _builder->get_new_loc(orig_ref); |
|
221 |
|
222 assert(new_obj != orig_obj, "must be"); |
|
223 #ifdef ASSERT |
|
224 if (new_obj == NULL) { |
|
225 if (orig_ref->msotype() == MetaspaceObj::ClassType) { |
|
226 Klass* k = (Klass*)orig_obj; |
|
227 assert(k->is_instance_klass() && |
|
228 SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k)), |
|
229 "orig_obj must be excluded Class"); |
|
230 } |
|
231 } |
|
232 #endif |
|
233 |
|
234 log_debug(cds, dynamic)("Relocating " PTR_FORMAT " %s", p2i(new_obj), |
|
235 MetaspaceObj::type_name(orig_ref->msotype())); |
|
236 if (new_obj != NULL) { |
|
237 EmbeddedRefUpdater updater(_builder, orig_obj, new_obj); |
|
238 orig_ref->metaspace_pointers_do(&updater); |
|
239 } |
|
240 |
|
241 return true; // keep recursing until every object is visited exactly once. |
|
242 } |
|
243 }; |
|
244 |
|
245 class EmbeddedRefUpdater: public MetaspaceClosure { |
|
246 DynamicArchiveBuilder* _builder; |
|
247 address _orig_obj; |
|
248 address _new_obj; |
|
249 public: |
|
250 EmbeddedRefUpdater(DynamicArchiveBuilder* shuffler, address orig_obj, address new_obj) : |
|
251 _builder(shuffler), _orig_obj(orig_obj), _new_obj(new_obj) {} |
|
252 |
|
253 // This method gets called once for each pointer field F of orig_obj. |
|
254 // We update new_obj->F to point to the new location of orig_obj->F. |
|
255 // |
|
256 // Example: Klass* 0x100 is copied to 0x400 |
|
257 // Symbol* 0x200 is copied to 0x500 |
|
258 // |
|
259 // Let orig_obj == 0x100; and |
|
260 // new_obj == 0x400; and |
|
261 // ((Klass*)orig_obj)->_name == 0x200; |
|
262 // Then this function effectively assigns |
|
263 // ((Klass*)new_obj)->_name = 0x500; |
|
264 virtual bool do_ref(Ref* ref, bool read_only) { |
|
265 address new_pointee = NULL; |
|
266 |
|
267 if (ref->not_null()) { |
|
268 address old_pointee = ref->obj(); |
|
269 |
|
270 FollowMode mode = _builder->follow_ref(ref); |
|
271 if (mode == point_to_it) { |
|
272 new_pointee = old_pointee; |
|
273 } else if (mode == set_to_null) { |
|
274 new_pointee = NULL; |
|
275 } else { |
|
276 new_pointee = _builder->get_new_loc(old_pointee); |
|
277 } |
|
278 } |
|
279 |
|
280 const char* kind = MetaspaceObj::type_name(ref->msotype()); |
|
281 // offset of this field inside the original object |
|
282 intx offset = (address)ref->addr() - _orig_obj; |
|
283 _builder->update_pointer((address*)(_new_obj + offset), new_pointee, kind, offset); |
|
284 |
|
285 // We can't mark the pointer here, because DynamicArchiveBuilder::sort_methods |
|
286 // may re-layout the [iv]tables, which would change the offset(s) in an InstanceKlass |
|
287 // that would contain pointers. Therefore, we must mark the pointers after |
|
288 // sort_methods(), using PointerMarker. |
|
289 return false; // Do not recurse. |
|
290 } |
|
291 }; |
|
292 |
|
293 class ExternalRefUpdater: public MetaspaceClosure { |
|
294 DynamicArchiveBuilder* _builder; |
|
295 |
|
296 public: |
|
297 ExternalRefUpdater(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} |
|
298 |
|
299 virtual bool do_ref(Ref* ref, bool read_only) { |
|
300 // ref is a pointer that lives OUTSIDE of the buffer, but points to an object inside the buffer |
|
301 if (ref->not_null()) { |
|
302 address new_loc = _builder->get_new_loc(ref); |
|
303 const char* kind = MetaspaceObj::type_name(ref->msotype()); |
|
304 _builder->update_pointer(ref->addr(), new_loc, kind, 0); |
|
305 _builder->mark_pointer(ref->addr()); |
|
306 } |
|
307 return false; // Do not recurse. |
|
308 } |
|
309 }; |
|
310 |
|
311 class PointerMarker: public UniqueMetaspaceClosure { |
|
312 DynamicArchiveBuilder* _builder; |
|
313 |
|
314 public: |
|
315 PointerMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} |
|
316 |
|
317 virtual bool do_unique_ref(Ref* ref, bool read_only) { |
|
318 if (_builder->is_in_buffer_space(ref->obj())) { |
|
319 EmbeddedRefMarker ref_marker(_builder); |
|
320 ref->metaspace_pointers_do(&ref_marker); |
|
321 return true; // keep recursing until every buffered object is visited exactly once. |
|
322 } else { |
|
323 return false; |
|
324 } |
|
325 } |
|
326 }; |
|
327 |
|
328 class EmbeddedRefMarker: public MetaspaceClosure { |
|
329 DynamicArchiveBuilder* _builder; |
|
330 |
|
331 public: |
|
332 EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} |
|
333 virtual bool do_ref(Ref* ref, bool read_only) { |
|
334 if (ref->not_null() && _builder->is_in_buffer_space(ref->obj())) { |
|
335 _builder->mark_pointer(ref->addr()); |
|
336 } |
|
337 return false; // Do not recurse. |
|
338 } |
|
339 }; |
|
340 |
|
341 void update_pointer(address* addr, address value, const char* kind, uintx offset, bool is_mso_pointer=true) { |
|
342 // Propagate the the mask bits to the new value -- see comments above MetaspaceClosure::obj() |
|
343 if (is_mso_pointer) { |
|
344 const uintx FLAG_MASK = 0x03; |
|
345 uintx mask_bits = uintx(*addr) & FLAG_MASK; |
|
346 value = (address)(uintx(value) | mask_bits); |
|
347 } |
|
348 |
|
349 if (*addr != value) { |
|
350 log_debug(cds, dynamic)("Update (%18s*) %3d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT, |
|
351 kind, int(offset), p2i(addr), p2i(*addr), p2i(value)); |
|
352 *addr = value; |
|
353 } |
|
354 } |
|
355 |
|
356 private: |
|
357 GrowableArray<Symbol*>* _symbols; // symbols to dump |
|
358 GrowableArray<InstanceKlass*>* _klasses; // klasses to dump |
|
359 |
|
360 void append(InstanceKlass* k) { _klasses->append(k); } |
|
361 void append(Symbol* s) { _symbols->append(s); } |
|
362 |
|
363 class GatherKlassesAndSymbols : public UniqueMetaspaceClosure { |
|
364 DynamicArchiveBuilder* _builder; |
|
365 bool _read_only; |
|
366 |
|
367 public: |
|
368 GatherKlassesAndSymbols(DynamicArchiveBuilder* builder) |
|
369 : _builder(builder) {} |
|
370 |
|
371 virtual bool do_unique_ref(Ref* ref, bool read_only) { |
|
372 if (_builder->follow_ref(ref) != make_a_copy) { |
|
373 return false; |
|
374 } |
|
375 if (ref->msotype() == MetaspaceObj::ClassType) { |
|
376 Klass* klass = (Klass*)ref->obj(); |
|
377 assert(klass->is_klass(), "must be"); |
|
378 if (klass->is_instance_klass()) { |
|
379 InstanceKlass* ik = InstanceKlass::cast(klass); |
|
380 assert(!SystemDictionaryShared::is_excluded_class(ik), "must be"); |
|
381 _builder->append(ik); |
|
382 _builder->_estimated_metsapceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for() |
|
383 } |
|
384 } else if (ref->msotype() == MetaspaceObj::SymbolType) { |
|
385 _builder->append((Symbol*)ref->obj()); |
|
386 } |
|
387 |
|
388 int bytes = ref->size() * BytesPerWord; |
|
389 _builder->_estimated_metsapceobj_bytes += bytes; |
|
390 |
|
391 return true; |
|
392 } |
|
393 }; |
|
394 |
|
395 FollowMode follow_ref(MetaspaceClosure::Ref *ref) { |
|
396 address obj = ref->obj(); |
|
397 if (MetaspaceShared::is_in_shared_metaspace(obj)) { |
|
398 // Don't dump existing shared metadata again. |
|
399 return point_to_it; |
|
400 } else if (ref->msotype() == MetaspaceObj::MethodDataType) { |
|
401 return set_to_null; |
|
402 } else { |
|
403 if (ref->msotype() == MetaspaceObj::ClassType) { |
|
404 Klass* klass = (Klass*)ref->obj(); |
|
405 assert(klass->is_klass(), "must be"); |
|
406 if (klass->is_instance_klass()) { |
|
407 InstanceKlass* ik = InstanceKlass::cast(klass); |
|
408 if (SystemDictionaryShared::is_excluded_class(ik)) { |
|
409 ResourceMark rm; |
|
410 log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name()); |
|
411 return set_to_null; |
|
412 } |
|
413 } else if (klass->is_array_klass()) { |
|
414 // Don't support archiving of array klasses for now. |
|
415 ResourceMark rm; |
|
416 log_debug(cds, dynamic)("Skipping class (array): %s", klass->external_name()); |
|
417 return set_to_null; |
|
418 } |
|
419 } |
|
420 |
|
421 return make_a_copy; |
|
422 } |
|
423 } |
|
424 |
|
425 address copy_impl(MetaspaceClosure::Ref* ref, bool read_only, int bytes) { |
|
426 if (ref->msotype() == MetaspaceObj::ClassType) { |
|
427 // Save a pointer immediate in front of an InstanceKlass, so |
|
428 // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo* |
|
429 // without building another hashtable. See RunTimeSharedClassInfo::get_for() |
|
430 // in systemDictionaryShared.cpp. |
|
431 address obj = ref->obj(); |
|
432 Klass* klass = (Klass*)obj; |
|
433 if (klass->is_instance_klass()) { |
|
434 SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); |
|
435 current_dump_space()->allocate(sizeof(address), BytesPerWord); |
|
436 } |
|
437 } |
|
438 address p = (address)current_dump_space()->allocate(bytes); |
|
439 address obj = ref->obj(); |
|
440 log_debug(cds, dynamic)("COPY: " PTR_FORMAT " ==> " PTR_FORMAT " %5d %s", |
|
441 p2i(obj), p2i(p), bytes, |
|
442 MetaspaceObj::type_name(ref->msotype())); |
|
443 memcpy(p, obj, bytes); |
|
444 |
|
445 intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p); |
|
446 if (cloned_vtable != NULL) { |
|
447 update_pointer((address*)p, (address)cloned_vtable, "vtb", 0, /*is_mso_pointer*/false); |
|
448 } |
|
449 |
|
450 return (address)p; |
|
451 } |
|
452 |
|
453 DynamicArchiveHeader *_header; |
|
454 address _alloc_bottom; |
|
455 address _last_verified_top; |
|
456 size_t _other_region_used_bytes; |
|
457 |
|
458 // Conservative estimate for number of bytes needed for: |
|
459 size_t _estimated_metsapceobj_bytes; // all archived MetsapceObj's. |
|
460 size_t _estimated_hashtable_bytes; // symbol table and dictionaries |
|
461 size_t _estimated_trampoline_bytes; // method entry trampolines |
|
462 |
|
463 size_t estimate_archive_size(); |
|
464 size_t estimate_trampoline_size(); |
|
465 size_t estimate_class_file_size(); |
|
466 address reserve_space_and_init_buffer_to_target_delta(); |
|
467 void init_header(address addr); |
|
468 void make_trampolines(); |
|
469 void make_klasses_shareable(); |
|
470 void sort_methods(InstanceKlass* ik) const; |
|
471 void set_symbols_permanent(); |
|
472 void relocate_buffer_to_target(); |
|
473 void write_archive(char* read_only_tables_start); |
|
474 |
|
475 void init_first_dump_space(address reserved_bottom) { |
|
476 address first_space_base = reserved_bottom; |
|
477 DumpRegion* rw_space = MetaspaceShared::read_write_dump_space(); |
|
478 MetaspaceShared::init_shared_dump_space(rw_space, first_space_base); |
|
479 _current_dump_space = rw_space; |
|
480 _last_verified_top = first_space_base; |
|
481 _num_dump_regions_used = 1; |
|
482 } |
|
483 |
|
484 public: |
|
485 DynamicArchiveBuilder() { |
|
486 _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, true, mtInternal); |
|
487 _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, true, mtInternal); |
|
488 |
|
489 _estimated_metsapceobj_bytes = 0; |
|
490 _estimated_hashtable_bytes = 0; |
|
491 _estimated_trampoline_bytes = 0; |
|
492 |
|
493 _num_dump_regions_used = 0; |
|
494 } |
|
495 |
|
496 void start_dump_space(DumpRegion* next) { |
|
497 address bottom = _last_verified_top; |
|
498 address top = (address)(current_dump_space()->top()); |
|
499 _other_region_used_bytes += size_t(top - bottom); |
|
500 |
|
501 MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs()); |
|
502 _current_dump_space = next; |
|
503 _num_dump_regions_used ++; |
|
504 |
|
505 _last_verified_top = (address)(current_dump_space()->top()); |
|
506 } |
|
507 |
|
508 void verify_estimate_size(size_t estimate, const char* which) { |
|
509 address bottom = _last_verified_top; |
|
510 address top = (address)(current_dump_space()->top()); |
|
511 size_t used = size_t(top - bottom) + _other_region_used_bytes; |
|
512 int diff = int(estimate) - int(used); |
|
513 |
|
514 log_info(cds)("%s estimate = %lu used = %lu; diff = %d bytes", which, estimate, used, diff); |
|
515 assert(diff >= 0, "Estimate is too small"); |
|
516 |
|
517 _last_verified_top = top; |
|
518 _other_region_used_bytes = 0; |
|
519 } |
|
520 |
|
521 // Do this before and after the archive dump to see if any corruption |
|
522 // is caused by dynamic dumping. |
|
523 void verify_universe(const char* info) { |
|
524 if (VerifyBeforeExit) { |
|
525 log_info(cds)("Verify %s", info); |
|
526 HandleMark hm; |
|
527 // Among other things, this ensures that Eden top is correct. |
|
528 Universe::heap()->prepare_for_verify(); |
|
529 Universe::verify(info); |
|
530 } |
|
531 } |
|
532 |
|
533 void doit() { |
|
534 verify_universe("Before CDS dynamic dump"); |
|
535 DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm); |
|
536 SystemDictionaryShared::check_excluded_classes(); |
|
537 |
|
538 { |
|
539 ResourceMark rm; |
|
540 GatherKlassesAndSymbols gatherer(this); |
|
541 |
|
542 SystemDictionaryShared::dumptime_classes_do(&gatherer); |
|
543 SymbolTable::metaspace_pointers_do(&gatherer); |
|
544 FileMapInfo::metaspace_pointers_do(&gatherer); |
|
545 |
|
546 gatherer.finish(); |
|
547 } |
|
548 |
|
549 // rw space starts ... |
|
550 address reserved_bottom = reserve_space_and_init_buffer_to_target_delta(); |
|
551 init_header(reserved_bottom); |
|
552 |
|
553 verify_estimate_size(sizeof(DynamicArchiveHeader), "header"); |
|
554 |
|
555 log_info(cds, dynamic)("Copying %d klasses and %d symbols", |
|
556 _klasses->length(), _symbols->length()); |
|
557 |
|
558 { |
|
559 assert(current_dump_space() == MetaspaceShared::read_write_dump_space(), |
|
560 "Current dump space is not rw space"); |
|
561 // shallow-copy RW objects, if necessary |
|
562 ResourceMark rm; |
|
563 ShallowCopier rw_copier(this, false); |
|
564 iterate_roots(&rw_copier); |
|
565 } |
|
566 |
|
567 // ro space starts ... |
|
568 DumpRegion* ro_space = MetaspaceShared::read_only_dump_space(); |
|
569 { |
|
570 start_dump_space(ro_space); |
|
571 |
|
572 // shallow-copy RO objects, if necessary |
|
573 ResourceMark rm; |
|
574 ShallowCopier ro_copier(this, true); |
|
575 iterate_roots(&ro_copier); |
|
576 } |
|
577 |
|
578 size_t bitmap_size = pointer_delta(current_dump_space()->top(), |
|
579 _alloc_bottom, sizeof(address)); |
|
580 _ptrmap.initialize(bitmap_size); |
|
581 |
|
582 { |
|
583 log_info(cds)("Relocating embedded pointers ... "); |
|
584 ResourceMark rm; |
|
585 ShallowCopyEmbeddedRefRelocator emb_reloc(this); |
|
586 iterate_roots(&emb_reloc); |
|
587 } |
|
588 |
|
589 { |
|
590 log_info(cds)("Relocating external roots ... "); |
|
591 ResourceMark rm; |
|
592 ExternalRefUpdater ext_reloc(this); |
|
593 iterate_roots(&ext_reloc); |
|
594 } |
|
595 |
|
596 verify_estimate_size(_estimated_metsapceobj_bytes, "MetaspaceObjs"); |
|
597 |
|
598 char* read_only_tables_start; |
|
599 { |
|
600 set_symbols_permanent(); |
|
601 |
|
602 // Write the symbol table and system dictionaries to the RO space. |
|
603 // Note that these tables still point to the *original* objects |
|
604 // (because they were not processed by ExternalRefUpdater), so |
|
605 // they would need to call DynamicArchive::original_to_target() to |
|
606 // get the correct addresses. |
|
607 assert(current_dump_space() == ro_space, "Must be RO space"); |
|
608 SymbolTable::write_to_archive(false); |
|
609 SystemDictionaryShared::write_to_archive(false); |
|
610 |
|
611 read_only_tables_start = ro_space->top(); |
|
612 WriteClosure wc(ro_space); |
|
613 SymbolTable::serialize_shared_table_header(&wc, false); |
|
614 SystemDictionaryShared::serialize_dictionary_headers(&wc, false); |
|
615 } |
|
616 |
|
617 verify_estimate_size(_estimated_hashtable_bytes, "Hashtables"); |
|
618 |
|
619 // mc space starts ... |
|
620 { |
|
621 start_dump_space(MetaspaceShared::misc_code_dump_space()); |
|
622 make_trampolines(); |
|
623 } |
|
624 |
|
625 verify_estimate_size(_estimated_trampoline_bytes, "Trampolines"); |
|
626 |
|
627 make_klasses_shareable(); |
|
628 |
|
629 { |
|
630 log_info(cds)("Final relocation of pointers ... "); |
|
631 ResourceMark rm; |
|
632 PointerMarker marker(this); |
|
633 iterate_roots(&marker); |
|
634 relocate_buffer_to_target(); |
|
635 } |
|
636 |
|
637 write_archive(read_only_tables_start); |
|
638 |
|
639 assert(_num_dump_regions_used == _total_dump_regions, "must be"); |
|
640 verify_universe("After CDS dynamic dump"); |
|
641 } |
|
642 |
|
643 void iterate_roots(MetaspaceClosure* it) { |
|
644 int i; |
|
645 int num_klasses = _klasses->length(); |
|
646 for (i = 0; i < num_klasses; i++) { |
|
647 it->push(&_klasses->at(i)); |
|
648 } |
|
649 |
|
650 int num_symbols = _symbols->length(); |
|
651 for (i = 0; i < num_symbols; i++) { |
|
652 it->push(&_symbols->at(i)); |
|
653 } |
|
654 |
|
655 _header->_shared_path_table.metaspace_pointers_do(it); |
|
656 |
|
657 // Do not call these again, as we have already collected all the classes and symbols |
|
658 // that we want to archive. Also, these calls would corrupt the tables when |
|
659 // ExternalRefUpdater is used. |
|
660 // |
|
661 // SystemDictionaryShared::dumptime_classes_do(it); |
|
662 // SymbolTable::metaspace_pointers_do(it); |
|
663 |
|
664 it->finish(); |
|
665 } |
|
666 }; |
|
667 |
|
668 size_t DynamicArchiveBuilder::estimate_archive_size() { |
|
669 // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's |
|
670 _estimated_hashtable_bytes = 0; |
|
671 _estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive(); |
|
672 _estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive(); |
|
673 |
|
674 _estimated_trampoline_bytes = estimate_trampoline_size(); |
|
675 |
|
676 size_t total = 0; |
|
677 |
|
678 total += _estimated_metsapceobj_bytes; |
|
679 total += _estimated_hashtable_bytes; |
|
680 total += _estimated_trampoline_bytes; |
|
681 |
|
682 // allow fragmentation at the end of each dump region |
|
683 total += _total_dump_regions * reserve_alignment(); |
|
684 |
|
685 return align_up(total, reserve_alignment()); |
|
686 } |
|
687 |
|
// Reserve a temporary dump buffer and compute _buffer_to_target_delta, the
// shift that converts a buffer address into its final mapped (run-time)
// address. Exits the VM if no buffer can be reserved.
address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
  size_t total = estimate_archive_size();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  size_t increment = align_up(1*G, reserve_alignment());
  // Start probing above the narrow-Klass encoding range so the buffer does not
  // collide with class space.
  char* addr = (char*)align_up(CompressedKlassPointers::base() + MetaspaceSize + increment,
                               reserve_alignment());

  // Probe upward in 1G steps until a reservation succeeds. The
  // "addr + increment > addr" term stops the loop when the next step would
  // wrap around the address space.
  ReservedSpace* rs = MetaspaceShared::reserve_shared_rs(
                        total, reserve_alignment(), large_pages, addr);
  while (!rs->is_reserved() && (addr + increment > addr)) {
    addr += increment;
    rs = MetaspaceShared::reserve_shared_rs(
           total, reserve_alignment(), large_pages, addr);
  }
  if (!rs->is_reserved()) {
    log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
    vm_direct_exit(0);
  }

  address buffer_base = (address)rs->base();
  log_info(cds, dynamic)("Reserved output buffer space at : " PTR_FORMAT " [%d bytes]",
                         p2i(buffer_base), (int)total);

  // At run time, we will mmap the dynamic archive at target_space_bottom.
  // However, at dump time, we may not be able to write into the target_space,
  // as it's occupied by dynamically loaded Klasses. So we allocate a buffer
  // at an arbitrary location chosen by the OS. We will write all the dynamically
  // archived classes into this buffer. At the final stage of dumping, we relocate
  // all pointers that are inside the buffer_space to point to their (runtime)
  // target location inside the target_space.
  address target_space_bottom =
    (address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment());
  _buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base);

  log_info(cds, dynamic)("Target archive space at : " PTR_FORMAT, p2i(target_space_bottom));
  log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta));

  return buffer_base;
}
|
727 |
|
728 void DynamicArchiveBuilder::init_header(address reserved_bottom) { |
|
729 _alloc_bottom = reserved_bottom; |
|
730 _last_verified_top = reserved_bottom; |
|
731 _other_region_used_bytes = 0; |
|
732 |
|
733 init_first_dump_space(reserved_bottom); |
|
734 |
|
735 FileMapInfo* mapinfo = new FileMapInfo(false); |
|
736 _header = (DynamicArchiveHeader*)mapinfo->_header; |
|
737 |
|
738 Thread* THREAD = Thread::current(); |
|
739 FileMapInfo* base_info = FileMapInfo::current_info(); |
|
740 int* crc = _header->_base_archive_crc; |
|
741 *crc++ = base_info->crc(); // base archive header crc |
|
742 for (int i = 0; i < MetaspaceShared::n_regions; i++) { |
|
743 *crc++ = base_info->space_crc(i); |
|
744 } |
|
745 _header->populate(base_info, os::vm_allocation_granularity()); |
|
746 } |
|
747 |
|
748 size_t DynamicArchiveBuilder::estimate_trampoline_size() { |
|
749 size_t total = 0; |
|
750 size_t each_method_bytes = |
|
751 align_up(SharedRuntime::trampoline_size(), BytesPerWord) + |
|
752 align_up(sizeof(AdapterHandlerEntry*), BytesPerWord); |
|
753 |
|
754 for (int i = 0; i < _klasses->length(); i++) { |
|
755 InstanceKlass* ik = _klasses->at(i); |
|
756 Array<Method*>* methods = ik->methods(); |
|
757 total += each_method_bytes * methods->length(); |
|
758 } |
|
759 return total; |
|
760 } |
|
761 |
|
762 void DynamicArchiveBuilder::make_trampolines() { |
|
763 for (int i = 0; i < _klasses->length(); i++) { |
|
764 InstanceKlass* ik = _klasses->at(i); |
|
765 Array<Method*>* methods = ik->methods(); |
|
766 for (int j = 0; j < methods->length(); j++) { |
|
767 Method* m = methods->at(j); |
|
768 address c2i_entry_trampoline = |
|
769 (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size()); |
|
770 m->set_from_compiled_entry(to_target(c2i_entry_trampoline)); |
|
771 AdapterHandlerEntry** adapter_trampoline = |
|
772 (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*)); |
|
773 *adapter_trampoline = NULL; |
|
774 m->set_adapter_trampoline(to_target(adapter_trampoline)); |
|
775 } |
|
776 } |
|
777 } |
|
778 |
|
779 void DynamicArchiveBuilder::make_klasses_shareable() { |
|
780 int i, count = _klasses->length(); |
|
781 |
|
782 for (i = 0; i < count; i++) { |
|
783 InstanceKlass* ik = _klasses->at(i); |
|
784 sort_methods(ik); |
|
785 } |
|
786 |
|
787 for (i = 0; i < count; i++) { |
|
788 InstanceKlass* ik = _klasses->at(i); |
|
789 ClassLoaderData *cld = ik->class_loader_data(); |
|
790 if (cld->is_boot_class_loader_data()) { |
|
791 ik->set_class_loader_type(ClassLoader::BOOT_LOADER); |
|
792 } |
|
793 else if (cld->is_platform_class_loader_data()) { |
|
794 ik->set_class_loader_type(ClassLoader::PLATFORM_LOADER); |
|
795 } |
|
796 else if (cld->is_system_class_loader_data()) { |
|
797 ik->set_class_loader_type(ClassLoader::APP_LOADER); |
|
798 } |
|
799 |
|
800 MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik); |
|
801 ik->remove_unshareable_info(); |
|
802 |
|
803 assert(ik->array_klasses() == NULL, "sanity"); |
|
804 |
|
805 if (log_is_enabled(Debug, cds, dynamic)) { |
|
806 ResourceMark rm; |
|
807 log_debug(cds, dynamic)("klasses[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(ik)), ik->external_name()); |
|
808 } |
|
809 } |
|
810 } |
|
811 |
|
// The address order of the copied Symbols may be different than when the original
// klasses were created. Re-sort all the tables. See Method::sort_methods().
//
// Recursion note: supertypes are sorted before the subclass (a klass's
// [iv]tables are re-initialized from its supertypes' method order). A NULL
// java_mirror doubles as the "already visited" marker, so remove_java_mirror()
// must happen before recursing — it is what terminates re-visits.
void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
  assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
  if (MetaspaceShared::is_in_shared_metaspace(ik)) {
    // We have reached a supertype that's already in the base archive
    return;
  }

  if (ik->java_mirror() == NULL) {
    // NULL mirror means this class has already been visited and methods are already sorted
    return;
  }
  ik->remove_java_mirror();

  if (log_is_enabled(Debug, cds, dynamic)) {
    ResourceMark rm;
    log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
  }

  // Make sure all supertypes have been sorted
  sort_methods(ik->java_super());
  Array<InstanceKlass*>* interfaces = ik->local_interfaces();
  int len = interfaces->length();
  for (int i = 0; i < len; i++) {
    sort_methods(interfaces->at(i));
  }

#ifdef ASSERT
  {
    // Every method name must be either in the base archive or in our buffer;
    // otherwise sorting by Symbol address would not be stable across the move.
    for (int m = 0; m < ik->methods()->length(); m++) {
      Symbol* name = ik->methods()->at(m)->name();
      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
    }
  }
#endif

  // Re-sort by (new) Symbol address, then rebuild the dispatch tables that
  // depend on method order. initialize_[vi]table take THREAD but are asserted
  // not to throw here.
  Thread* THREAD = Thread::current();
  Method::sort_methods(ik->methods());
  if (ik->default_methods() != NULL) {
    Method::sort_methods(ik->default_methods(), /*set_idnums=*/false);
  }
  ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
  ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
}
|
857 |
|
858 void DynamicArchiveBuilder::set_symbols_permanent() { |
|
859 int count = _symbols->length(); |
|
860 for (int i=0; i<count; i++) { |
|
861 Symbol* s = _symbols->at(i); |
|
862 s->set_permanent(); |
|
863 |
|
864 if (log_is_enabled(Trace, cds, dynamic)) { |
|
865 ResourceMark rm; |
|
866 log_trace(cds, dynamic)("symbols[%4i] = " PTR_FORMAT " %s", i, p2i(to_target(s)), s->as_quoted_ascii()); |
|
867 } |
|
868 } |
|
869 } |
|
870 |
|
871 class RelocateBufferToTarget: public BitMapClosure { |
|
872 DynamicArchiveBuilder *_builder; |
|
873 address* _buffer_bottom; |
|
874 intx _buffer_to_target_delta; |
|
875 public: |
|
876 RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) : |
|
877 _builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {} |
|
878 |
|
879 bool do_bit(size_t offset) { |
|
880 address* p = _buffer_bottom + offset; |
|
881 assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space"); |
|
882 |
|
883 address old_ptr = *p; |
|
884 if (_builder->is_in_buffer_space(old_ptr)) { |
|
885 address new_ptr = old_ptr + _buffer_to_target_delta; |
|
886 log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT, |
|
887 (int)offset, p2i(p), p2i(_builder->to_target(p)), |
|
888 p2i(old_ptr), p2i(new_ptr)); |
|
889 *p = new_ptr; |
|
890 } |
|
891 |
|
892 return true; // keep iterating |
|
893 } |
|
894 }; |
|
895 |
|
896 |
|
897 void DynamicArchiveBuilder::relocate_buffer_to_target() { |
|
898 RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta); |
|
899 _ptrmap.iterate(&patcher); |
|
900 |
|
901 Array<u8>* table = _header->_shared_path_table.table(); |
|
902 table = to_target(table); |
|
903 _header->_shared_path_table.set_table(table); |
|
904 } |
|
905 |
|
906 static void write_archive_info(FileMapInfo* dynamic_info, DynamicArchiveHeader *header) { |
|
907 dynamic_info->write_header(); |
|
908 dynamic_info->align_file_position(); |
|
909 dynamic_info->write_region(MetaspaceShared::rw, |
|
910 MetaspaceShared::read_write_dump_space()->base(), |
|
911 MetaspaceShared::read_write_dump_space()->used(), |
|
912 /*read_only=*/false,/*allow_exec=*/false); |
|
913 dynamic_info->write_region(MetaspaceShared::ro, |
|
914 MetaspaceShared::read_only_dump_space()->base(), |
|
915 MetaspaceShared::read_only_dump_space()->used(), |
|
916 /*read_only=*/true, /*allow_exec=*/false); |
|
917 dynamic_info->write_region(MetaspaceShared::mc, |
|
918 MetaspaceShared::misc_code_dump_space()->base(), |
|
919 MetaspaceShared::misc_code_dump_space()->used(), |
|
920 /*read_only=*/false,/*allow_exec=*/true); |
|
921 } |
|
922 |
|
// Write the dynamic archive to the file named by SharedDynamicArchivePath.
// 'read_only_tables_start' is the buffer address of the serialized tables
// placed in the ro region; its target address is recorded in the header.
//
// NOTE: the statement order below is load-bearing -- the header crc must be
// computed after the first (offset/crc-populating) write_archive_info() pass
// and before the second pass that actually writes the data.
void DynamicArchiveBuilder::write_archive(char* read_only_tables_start) {
  int num_klasses = _klasses->length();
  int num_symbols = _symbols->length();

  _header->_read_only_tables_start = to_target(read_only_tables_start);

  FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
  assert(dynamic_info != NULL, "Sanity");

  // Populate the file offsets, region crcs, etc. No data is written out.
  write_archive_info(dynamic_info, _header);

  // the header will no longer change. Compute its crc.
  dynamic_info->set_header_crc(dynamic_info->compute_header_crc());

  // Now write the archived data including the file offsets.
  const char* archive_name = Arguments::GetSharedDynamicArchivePath();
  dynamic_info->open_for_write(archive_name);
  write_archive_info(dynamic_info, _header);
  dynamic_info->close();

  // Log the archive's target address range and total size.
  address base = to_target(_alloc_bottom);
  address top = address(current_dump_space()->top()) + _buffer_to_target_delta;
  int file_size = int(top - base);

  log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT " [%d bytes header, %d bytes total]",
                         p2i(base), p2i(top), (int)_header->_header_size, file_size);
  log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
}
|
953 |
|
954 |
|
955 class VM_PopulateDynamicDumpSharedSpace: public VM_Operation { |
|
956 DynamicArchiveBuilder* _builder; |
|
957 public: |
|
958 VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : _builder(builder) {} |
|
959 VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; } |
|
960 void doit() { |
|
961 ResourceMark rm; |
|
962 if (SystemDictionaryShared::empty_dumptime_table()) { |
|
963 log_warning(cds, dynamic)("There is no class to be included in the dynamic archive."); |
|
964 return; |
|
965 } |
|
966 if (AllowArchivingWithJavaAgent) { |
|
967 warning("This archive was created with AllowArchivingWithJavaAgent. It should be used " |
|
968 "for testing purposes only and should not be used in a production environment"); |
|
969 } |
|
970 FileMapInfo::check_nonempty_dir_in_shared_path_table(); |
|
971 |
|
972 _builder->doit(); |
|
973 } |
|
974 }; |
|
975 |
|
976 |
|
977 void DynamicArchive::dump() { |
|
978 if (Arguments::GetSharedDynamicArchivePath() == NULL) { |
|
979 log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified"); |
|
980 return; |
|
981 } |
|
982 |
|
983 DynamicArchiveBuilder builder; |
|
984 _builder = &builder; |
|
985 VM_PopulateDynamicDumpSharedSpace op(&builder); |
|
986 VMThread::execute(&op); |
|
987 _builder = NULL; |
|
988 } |
|
989 |
|
990 address DynamicArchive::original_to_buffer_impl(address orig_obj) { |
|
991 assert(DynamicDumpSharedSpaces, "must be"); |
|
992 address buff_obj = _builder->get_new_loc(orig_obj); |
|
993 assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive"); |
|
994 assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced"); |
|
995 assert(_builder->is_in_buffer_space(buff_obj), "must be"); |
|
996 return buff_obj; |
|
997 } |
|
998 |
|
999 address DynamicArchive::buffer_to_target_impl(address buff_obj) { |
|
1000 assert(DynamicDumpSharedSpaces, "must be"); |
|
1001 assert(_builder->is_in_buffer_space(buff_obj), "must be"); |
|
1002 return _builder->to_target(buff_obj); |
|
1003 } |
|
1004 |
|
1005 address DynamicArchive::original_to_target_impl(address orig_obj) { |
|
1006 assert(DynamicDumpSharedSpaces, "must be"); |
|
1007 if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) { |
|
1008 // This happens when the top archive points to a Symbol* in the base archive. |
|
1009 return orig_obj; |
|
1010 } |
|
1011 address buff_obj = _builder->get_new_loc(orig_obj); |
|
1012 assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive"); |
|
1013 if (buff_obj == orig_obj) { |
|
1014 // We are storing a pointer to an original object into the dynamic buffer. E.g., |
|
1015 // a Symbol* that used by both the base and top archives. |
|
1016 assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be"); |
|
1017 return orig_obj; |
|
1018 } else { |
|
1019 return _builder->to_target(buff_obj); |
|
1020 } |
|
1021 } |
|
1022 |
|
1023 uintx DynamicArchive::object_delta_uintx(void* buff_obj) { |
|
1024 assert(DynamicDumpSharedSpaces, "must be"); |
|
1025 address target_obj = _builder->to_target_no_check(address(buff_obj)); |
|
1026 assert(uintx(target_obj) >= SharedBaseAddress, "must be"); |
|
1027 return uintx(target_obj) - SharedBaseAddress; |
|
1028 } |
|
1029 |
|
1030 bool DynamicArchive::is_in_target_space(void *obj) { |
|
1031 assert(DynamicDumpSharedSpaces, "must be"); |
|
1032 return _builder->is_in_target_space(obj); |
|
1033 } |
|
1034 |
|
1035 |
|
// Header of the currently-mapped dynamic archive; NULL when no dynamic
// archive is mapped (or after disable()).
static DynamicArchiveHeader *_dynamic_header = NULL;
// Active builder during a dynamic dump; set and cleared by DynamicArchive::dump().
DynamicArchiveBuilder* DynamicArchive::_builder = NULL;
|
1038 |
|
1039 void DynamicArchive::map_failed(FileMapInfo* mapinfo) { |
|
1040 if (mapinfo->_header != NULL) { |
|
1041 os::free(mapinfo->_header); |
|
1042 } |
|
1043 delete mapinfo; |
|
1044 } |
|
1045 |
|
1046 // Returns the top of the mapped address space |
|
1047 address DynamicArchive::map() { |
|
1048 assert(UseSharedSpaces, "Sanity"); |
|
1049 |
|
1050 // Create the dynamic archive map info |
|
1051 FileMapInfo* mapinfo; |
|
1052 const char* filename = Arguments::GetSharedDynamicArchivePath(); |
|
1053 struct stat st; |
|
1054 address result; |
|
1055 if ((filename != NULL) && (os::stat(filename, &st) == 0)) { |
|
1056 mapinfo = new FileMapInfo(false); |
|
1057 if (!mapinfo->open_for_read(filename)) { |
|
1058 result = NULL; |
|
1059 } |
|
1060 result = map_impl(mapinfo); |
|
1061 if (result == NULL) { |
|
1062 map_failed(mapinfo); |
|
1063 mapinfo->restore_shared_path_table(); |
|
1064 } |
|
1065 } else { |
|
1066 if (filename != NULL) { |
|
1067 log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s", filename); |
|
1068 } |
|
1069 result = NULL; |
|
1070 } |
|
1071 return result; |
|
1072 } |
|
1073 |
|
1074 address DynamicArchive::map_impl(FileMapInfo* mapinfo) { |
|
1075 |
|
1076 |
|
1077 // Read header |
|
1078 if (!mapinfo->initialize(false)) { |
|
1079 return NULL; |
|
1080 } |
|
1081 |
|
1082 _dynamic_header = (DynamicArchiveHeader*)mapinfo->header(); |
|
1083 |
|
1084 int regions[] = {MetaspaceShared::rw, |
|
1085 MetaspaceShared::ro, |
|
1086 MetaspaceShared::mc}; |
|
1087 |
|
1088 size_t len = sizeof(regions)/sizeof(int); |
|
1089 char* saved_base[] = {NULL, NULL, NULL}; |
|
1090 char* top = mapinfo->map_regions(regions, saved_base, len); |
|
1091 if (top == NULL) { |
|
1092 mapinfo->unmap_regions(regions, saved_base, len); |
|
1093 FileMapInfo::fail_continue("Unable to use dynamic archive. Failed map_region for using -Xshare:on."); |
|
1094 return NULL; |
|
1095 } |
|
1096 |
|
1097 if (!validate(mapinfo)) { |
|
1098 return NULL; |
|
1099 } |
|
1100 |
|
1101 if (_dynamic_header == NULL) { |
|
1102 return NULL; |
|
1103 } |
|
1104 |
|
1105 intptr_t* buffer = (intptr_t*)_dynamic_header->_read_only_tables_start; |
|
1106 ReadClosure rc(&buffer); |
|
1107 SymbolTable::serialize_shared_table_header(&rc, false); |
|
1108 SystemDictionaryShared::serialize_dictionary_headers(&rc, false); |
|
1109 |
|
1110 return (address)top; |
|
1111 } |
|
1112 |
|
1113 bool DynamicArchive::validate(FileMapInfo* dynamic_info) { |
|
1114 // Check if the recorded base archive matches with the current one |
|
1115 FileMapInfo* base_info = FileMapInfo::current_info(); |
|
1116 DynamicArchiveHeader* dynamic_header = (DynamicArchiveHeader*)dynamic_info->header(); |
|
1117 int* crc = dynamic_header->_base_archive_crc; |
|
1118 |
|
1119 // Check the header crc |
|
1120 if (*crc++ != base_info->crc()) { |
|
1121 FileMapInfo::fail_continue("Archive header checksum verification failed."); |
|
1122 return false; |
|
1123 } |
|
1124 |
|
1125 // Check each space's crc |
|
1126 for (int i = 0; i < MetaspaceShared::n_regions; i++) { |
|
1127 if (*crc++ != base_info->space_crc(i)) { |
|
1128 FileMapInfo::fail_continue("Archive region #%d checksum verification failed.", i); |
|
1129 return false; |
|
1130 } |
|
1131 } |
|
1132 |
|
1133 // Validate the dynamic archived shared path table, and set the global |
|
1134 // _shared_path_table to that. |
|
1135 if (!dynamic_info->validate_shared_path_table()) { |
|
1136 return false; |
|
1137 } |
|
1138 return true; |
|
1139 } |
|
1140 |
|
1141 bool DynamicArchive::is_mapped() { |
|
1142 return (_dynamic_header != NULL); |
|
1143 } |
|
1144 |
|
1145 void DynamicArchive::disable() { |
|
1146 _dynamic_header = NULL; |
|
1147 } |