@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -323,15 +323,15 @@
   // (c) cause == _g1_humongous_allocation
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have started.
-  volatile unsigned int _old_marking_cycles_started;
+  volatile uint _old_marking_cycles_started;
 
   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
   // concurrent cycles) we have completed.
-  volatile unsigned int _old_marking_cycles_completed;
+  volatile uint _old_marking_cycles_completed;
 
   bool _concurrent_cycle_started;
   bool _heap_summary_sent;
 
   // This is a non-product method that is helpful for testing. It is
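
A note on the hunk above: should_do_concurrent_full_gc() decides whether a
given GC cause should start a concurrent cycle instead of a STW full GC. A
minimal sketch of a body consistent with cause (c), assuming the truncated
causes (a) and (b) are _gc_locker and _java_lang_system_gc gated by the
GCLockerInvokesConcurrent and ExplicitGCInvokesConcurrent flags:

  // Sketch only -- a plausible implementation consistent with the comment,
  // not necessarily the exact product code.
  bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
    switch (cause) {
      case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
      case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
      case GCCause::_g1_humongous_allocation: return true;
      default:                                return false;
    }
  }
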
@@ -495,26 +495,26 @@
 
   // First-level mutator allocation attempt: try to allocate out of
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
-                                      unsigned int* gc_count_before_ret,
-                                      int* gclocker_retry_count_ret);
+                                      uint* gc_count_before_ret,
+                                      uint* gclocker_retry_count_ret);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
                                     AllocationContext_t context,
-                                    unsigned int* gc_count_before_ret,
-                                    int* gclocker_retry_count_ret);
+                                    uint* gc_count_before_ret,
+                                    uint* gclocker_retry_count_ret);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         unsigned int* gc_count_before_ret,
-                                         int* gclocker_retry_count_ret);
+                                         uint* gc_count_before_ret,
+                                         uint* gclocker_retry_count_ret);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
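
The three declarations in the hunk above form a layered allocation path: a
lock-free first level, a Heap_lock-taking second level, and a separate
humongous path. A sketch of how the inline first level could chain into the
slow path, assuming G1Allocator's mutator_alloc_region() accessor (the
product code adds lock asserts and card dirtying on success):

  // Sketch only: the first level CAS-bumps the current mutator alloc
  // region without the Heap_lock; on failure it falls back to the slow
  // path, which may schedule a collection pause.
  inline HeapWord*
  G1CollectedHeap::attempt_allocation(size_t word_size,
                                      uint* gc_count_before_ret,
                                      uint* gclocker_retry_count_ret) {
    assert(!is_humongous(word_size), "only for non-humongous allocations");
    AllocationContext_t context = AllocationContext::current();
    HeapWord* result =
      _allocator->mutator_alloc_region(context)->attempt_allocation(
        word_size, false /* bot_updates */);
    if (result == NULL) {
      result = attempt_allocation_slow(word_size, context,
                                       gc_count_before_ret,
                                       gclocker_retry_count_ret);
    }
    return result;
  }
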
@@ -684,11 +684,11 @@
   // the FullGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
   void increment_old_marking_cycles_completed(bool concurrent);
 
-  unsigned int old_marking_cycles_completed() {
+  uint old_marking_cycles_completed() {
     return _old_marking_cycles_completed;
   }
 
   void register_concurrent_cycle_start(const Ticks& start_time);
   void register_concurrent_cycle_end();
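
The notify side of the protocol described above lives in
increment_old_marking_cycles_completed(), which bumps the counter and
notifies FullGCCount_lock. The waiting Java thread's side would look
roughly like this sketch (wait_for_old_marking_cycle is an illustrative
name, not a method in this header):

  // Sketch only: block until the completed-cycle count passes the value
  // observed before the cycle was requested; notify_all() on
  // FullGCCount_lock wakes the waiters.
  static void wait_for_old_marking_cycle(G1CollectedHeap* g1h,
                                         uint completed_before) {
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    while (g1h->old_marking_cycles_completed() <= completed_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
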
@@ -743,11 +743,11 @@
   // gc_count_before (i.e., total_collections()) as a parameter since
   // it has to be read while holding the Heap_lock. Currently, both
   // methods that call do_collection_pause() release the Heap_lock
   // before the call, so it's easy to read gc_count_before just before.
   HeapWord* do_collection_pause(size_t word_size,
-                                unsigned int gc_count_before,
+                                uint gc_count_before,
                                 bool* succeeded,
                                 GCCause::Cause gc_cause);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
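
A caller-side sketch of the gc_count_before handshake described in the
comment above (allocate_with_pause_sketch is a hypothetical member, not
part of this header):

  // Sketch only: sample total_collections() while holding the Heap_lock,
  // release it, then hand the sample to do_collection_pause() so the
  // scheduled VM operation can detect that another thread's GC already
  // ran in the window and bail out.
  HeapWord* G1CollectedHeap::allocate_with_pause_sketch(size_t word_size) {
    uint gc_count_before;
    {
      MutexLockerEx x(Heap_lock);
      gc_count_before = total_collections();
    } // Heap_lock released before the pause is scheduled.
    bool succeeded;
    HeapWord* result = do_collection_pause(word_size, gc_count_before,
                                           &succeeded,
                                           GCCause::_g1_inc_collection_pause);
    // result != NULL: the pause also satisfied this allocation.
    // !succeeded: the pause was skipped (e.g., a stale gc_count_before);
    // callers typically retry the whole allocation attempt.
    return result;
  }
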
@@ -979,11 +979,11 @@
   // Time stamp to validate the regions recorded in the cache
   // used by G1CollectedHeap::start_cset_region_for_worker().
   // The heap region entry for a given worker is valid iff
   // the associated time stamp value matches the current value
   // of G1CollectedHeap::_gc_time_stamp.
-  unsigned int* _worker_cset_start_region_time_stamp;
+  uint* _worker_cset_start_region_time_stamp;
 
   enum G1H_process_roots_tasks {
     G1H_PS_filter_satb_buffers,
     G1H_PS_refProcessor_oops_do,
     // Leave this one last.
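
Closing note on the last hunk: the validity rule in the comment (an entry is
valid iff its stamp equals the current _gc_time_stamp) lets the whole
per-worker cache be invalidated at once by bumping a single counter. A
sketch of the lookup, where _worker_cset_start_region is assumed to be the
companion array of cached HeapRegion* values and compute_start_cset_region()
stands in for the recompute step:

  // Sketch only: timestamp-validated per-worker cache lookup.
  HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
    uint gc_time_stamp = get_gc_time_stamp();
    if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
      // Stamp matches the current GC: the cached region is still valid.
      return _worker_cset_start_region[worker_i];
    }
    HeapRegion* result = compute_start_cset_region(worker_i); // hypothetical
    _worker_cset_start_region[worker_i] = result;
    // Publish the region before the stamp so a racing reader never sees
    // a fresh stamp paired with a stale region.
    OrderAccess::storestore();
    _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
    return result;
  }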