  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);

  // Like allocate_noinit, but the block returned by a successful allocation
  // is guaranteed initialized to zeros.
  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);

- // Same as common_mem version, except memory is allocated in the permanent area
- // If there is no permanent area, revert to common_mem_allocate_noinit
- inline static HeapWord* common_permanent_mem_allocate_noinit(size_t size, TRAPS);
-
- // Same as common_mem version, except memory is allocated in the permanent area
- // If there is no permanent area, revert to common_mem_allocate_init
- inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);

  // Helper functions for (VM) allocation.
  inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
  inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
                                                             HeapWord* objPtr);
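// Illustrative sketch (not part of this changeset): roughly how the zeroing
// variant can be layered on the raw one. This is a simplified assumption; the
// actual common_mem_allocate_init() also takes care of object header setup and
// gap filling on the slow paths.
HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);  // may GC or throw OOME
  Copy::zero_to_words(obj, size);  // satisfy the "initialized to zeros" contract
  return obj;
}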
  ...

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

- virtual size_t permanent_capacity() const = 0;
- virtual size_t permanent_used() const = 0;
-
  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
- // that the vm uses internally for bookkeeping or temporary storage (e.g.,
- // perm gen space or, in the case of the young gen, one of the survivor
+ // that the vm uses internally for bookkeeping or temporary storage
+ // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

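// Illustrative sketch (not part of this changeset): max_capacity() is what
// ultimately backs java.lang.Runtime.maxMemory(); the wrapper below is an
// assumed simplification of the JVM_MaxMemory entry point in jvm.cpp.
jlong example_max_memory() {
  size_t n = Universe::heap()->max_capacity();  // reserved space usable for Java objects
  return (jlong)n;                              // the real code converts with an overflow check
}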
  // Returns "TRUE" if "p" points into the reserved area of the heap.
  bool is_in_reserved(const void* p) const {
  ...

  // use to assertion checking only.
  virtual bool is_in(const void* p) const = 0;

  bool is_in_or_null(const void* p) const {
    return p == NULL || is_in(p);
  }
|
240 |
|
241 bool is_in_place(Metadata** p) { |
|
242 return !Universe::heap()->is_in(p); |
|
243 } |
|
244 bool is_in_place(oop* p) { return Universe::heap()->is_in(p); } |
|
245 bool is_in_place(narrowOop* p) { |
|
246 oop o = oopDesc::load_decode_heap_oop_not_null(p); |
|
247 return Universe::heap()->is_in((const void*)o); |
251 } |
248 } |
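// Illustrative sketch (not part of this changeset): is_in()/is_in_or_null() are
// intended for assertion checking; a typical (assumed) use site looks like this.
void example_verify_oop_location(oop obj) {
  assert(Universe::heap()->is_in_or_null((void*)obj),
         "oop must be NULL or point into the committed heap");
}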

  // Let's define some terms: a "closed" subset of a heap is one that
  //
  // 1) contains all currently-allocated objects, and
  ...

  bool is_in_closed_subset_or_null(const void* p) const {
    return p == NULL || is_in_closed_subset(p);
  }

- // XXX is_permanent() and is_in_permanent() should be better named
- // to distinguish one from the other.
-
- // Returns "TRUE" if "p" is allocated as "permanent" data.
- // If the heap does not use "permanent" data, returns the same
- // value is_in_reserved() would return.
- // NOTE: this actually returns true if "p" is in reserved space
- // for the space not that it is actually allocated (i.e. in committed
- // space). If you need the more conservative answer use is_permanent().
- virtual bool is_in_permanent(const void *p) const = 0;

#ifdef ASSERT
  // Returns true if "p" is in the part of the
  // heap being collected.
  virtual bool is_in_partial_collection(const void *p) = 0;
#endif

- bool is_in_permanent_or_null(const void *p) const {
-   return p == NULL || is_in_permanent(p);
- }
-
- // Returns "TRUE" if "p" is in the committed area of "permanent" data.
- // If the heap does not use "permanent" data, returns the same
- // value is_in() would return.
- virtual bool is_permanent(const void *p) const = 0;
-
- bool is_permanent_or_null(const void *p) const {
-   return p == NULL || is_permanent(p);
- }
-
  // An object is scavengable if its location may move during a scavenge.
  // (A scavenge is a GC which is not a full GC.)
  virtual bool is_scavengable(const void *p) = 0;

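// Illustrative sketch (not part of this changeset): an assumed use of
// is_scavengable(), in the spirit of the code cache's scavenge-root tracking:
// only objects that may move in a young collection need to be tracked as roots.
bool example_needs_scavenge_root(oop referent) {
  return Universe::heap()->is_scavengable((void*)referent);
}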
  // Returns "TRUE" if "p" is a method oop in the
  // current heap, with high probability. This predicate
  // is not stable, in general.
- bool is_valid_method(oop p) const;
+ bool is_valid_method(Method* p) const;

  void set_gc_cause(GCCause::Cause v) {
    if (UsePerfData) {
      _gc_lastcause = _gc_cause;
      _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
  ...

  uint n_par_threads() { return _n_par_threads; }

  // May be overridden to set additional parallelism.
  virtual void set_par_threads(uint t) { _n_par_threads = t; };

- // Preload classes into the shared portion of the heap, and then dump
- // that data to a file so that it can be loaded directly by another
- // VM (then terminate).
- virtual void preload_and_dump(TRAPS) { ShouldNotReachHere(); }
-
  // Allocate and initialize instances of Class
  static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS);

  // General obj/array allocation facilities.
  inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
  inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
  inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);

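// Illustrative sketch (not part of this changeset): a typical caller of
// obj_allocate(), modeled loosely on InstanceKlass::allocate_instance() but
// simplified; the helper name is an assumption.
oop example_allocate_instance(KlassHandle k, TRAPS) {
  int size = InstanceKlass::cast(k())->size_helper();       // instance size in words
  return CollectedHeap::obj_allocate(k, size, CHECK_NULL);  // may GC or throw OOME
}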
- // Special obj/array allocation facilities.
- // Some heaps may want to manage "permanent" data uniquely. These default
- // to the general routines if the heap does not support such handling.
- inline static oop permanent_obj_allocate(KlassHandle klass, int size, TRAPS);
- // permanent_obj_allocate_no_klass_install() does not do the installation of
- // the klass pointer in the newly created object (as permanent_obj_allocate()
- // above does). This allows for a delay in the installation of the klass
- // pointer that is needed during the create of klassKlass's. The
- // method post_allocation_install_obj_klass() is used to install the
- // klass pointer.
- inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
-                                                           int size,
-                                                           TRAPS);
- inline static void post_allocation_install_obj_klass(KlassHandle klass, oop obj);
- inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);
+ inline static void post_allocation_install_obj_klass(KlassHandle klass,
+                                                      oop obj);

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
- // The permanent allocation method should default to mem_allocate if
- // permanent memory isn't supported. mem_allocate() should never be
+ // mem_allocate() should never be
  // called to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
- virtual HeapWord* permanent_mem_allocate(size_t size) = 0;

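// Illustrative sketch (not part of this changeset): how a slow-path caller might
// consume mem_allocate()'s gc_overhead_limit_was_exceeded out-parameter; the
// error-reporting policy shown here is an assumption for illustration.
HeapWord* example_raw_allocate(size_t word_size, TRAPS) {
  bool gc_overhead_limit_was_exceeded = false;
  HeapWord* mem = Universe::heap()->mem_allocate(word_size,
                                                 &gc_overhead_limit_was_exceeded);
  if (mem == NULL) {
    const char* msg = gc_overhead_limit_was_exceeded ? "GC overhead limit exceeded"
                                                     : "Java heap space";
    THROW_MSG_0(vmSymbols::java_lang_OutOfMemoryError(), msg);
  }
  return mem;
}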
  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  ...

  // If the CollectedHeap was asked to defer a store barrier above,
  // this informs it to flush such a deferred store barrier to the
  // remembered set.
  virtual void flush_deferred_store_barrier(JavaThread* thread);

- // Can a compiler elide a store barrier when it writes
- // a permanent oop into the heap? Applies when the compiler
- // is storing x to the heap, where x->is_perm() is true.
- virtual bool can_elide_permanent_oop_store_barriers() const = 0;
-
  // Does this heap support heap inspection (+PrintClassHistogram?)
  virtual bool supports_heap_inspection() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

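// Illustrative sketch (not part of this changeset): roughly how an explicit GC
// request reaches collect(); the DisableExplicitGC guard mirrors the usual
// System.gc() handling, but this wrapper itself is an assumption.
void example_explicit_gc() {
  if (!DisableExplicitGC) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);
  }
}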
|
+ // Perform a full collection
+ virtual void do_full_collection(bool clear_all_soft_refs) = 0;
+
  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
- virtual void collect_as_vm_thread(GCCause::Cause cause) = 0;
+ virtual void collect_as_vm_thread(GCCause::Cause cause);
|
+ // Callback from VM_CollectForMetadataAllocation operation.
+ MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
+                                              size_t size,
+                                              Metaspace::MetadataType mdtype);

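// Illustrative sketch (not part of this changeset): the kind of slow path that
// would use the new callback after a metaspace allocation failure; the helper
// name and shape are assumptions, not code from the change.
MetaWord* example_metadata_slow_path(ClassLoaderData* loader_data,
                                     size_t word_size,
                                     Metaspace::MetadataType mdtype) {
  // Collecting can unload classes and free metaspace, so let the heap run a
  // GC (via VM_CollectForMetadataAllocation) and retry the allocation for us.
  return Universe::heap()->satisfy_failed_metadata_allocation(loader_data,
                                                              word_size, mdtype);
}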
  // Returns the barrier set for this heap
  BarrierSet* barrier_set() { return _barrier_set; }

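// Illustrative sketch (not part of this changeset): an assumed query of the
// heap's barrier set, e.g. to decide whether card-marking write barriers apply.
bool example_uses_card_marking() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  return bs != NULL && bs->kind() == BarrierSet::CardTableModRef;
}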
  // Returns "true" iff there is a stop-world GC in progress. (I assume
  ...

  virtual AdaptiveSizePolicy* size_policy() = 0;

  // Return the CollectorPolicy for the heap
  virtual CollectorPolicy* collector_policy() const = 0;

|
+ void oop_iterate_no_header(OopClosure* cl);
+
  // Iterate over all the ref-containing fields of all objects, calling
- // "cl.do_oop" on each. This includes objects in permanent memory.
- virtual void oop_iterate(OopClosure* cl) = 0;
+ // "cl.do_oop" on each.
+ virtual void oop_iterate(ExtendedOopClosure* cl) = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
- // This includes objects in permanent memory.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  // Similar to object_iterate() except iterates only
  // over live objects.
  virtual void safe_object_iterate(ObjectClosure* cl) = 0;

- // Behaves the same as oop_iterate, except only traverses
- // interior pointers contained in permanent memory. If there
- // is no permanent memory, does nothing.
- virtual void permanent_oop_iterate(OopClosure* cl) = 0;
-
- // Behaves the same as object_iterate, except only traverses
- // object contained in permanent memory. If there is no
- // permanent memory, does nothing.
- virtual void permanent_object_iterate(ObjectClosure* cl) = 0;

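// Illustrative sketch (not part of this changeset): a minimal ObjectClosure
// driven through object_iterate(); the counting use case is an assumption in
// the spirit of heap inspection.
class ExampleCountObjectsClosure : public ObjectClosure {
 public:
  size_t _count;
  ExampleCountObjectsClosure() : _count(0) {}
  virtual void do_object(oop obj) { _count++; }
};

void example_count_heap_objects() {
  ExampleCountObjectsClosure cl;
  Universe::heap()->object_iterate(&cl);  // visits every object in the heap
}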
  // NOTE! There is no requirement that a collector implement these
  // functions.
  //
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,