                      _min_old_size, _initial_old_size, _max_old_size);

  DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
}

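// Allocation slow path for mutator threads. Tries a lock-free allocation
// in the young generation first, then a locked allocation under Heap_lock,
// and finally hands the request to the VM thread for a collection, looping
// until the request is satisfied or clearly cannot be.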
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                                bool is_tlab,
                                                bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
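  // gclocker_stalled_count tracks how often this request has stalled waiting
  // for a JNI critical section to clear; once it exceeds
  // GCLockerRetryAllocationCount we give up rather than stall forever.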
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // Discard any handles allocated in each iteration.

    // First allocation attempt is lock-free.
    Generation *young = gch->young_gen();
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before; // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectorPolicy::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

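      // Some thread is inside a JNI critical region and a GC has been
      // requested; no collection can start until the region is exited.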
      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL; // Caller will retry allocating individual object.
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = gch->total_collections();
    }

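    // Allocation failed with the GC locker inactive; request a collection
    // from the VM thread. gc_count_before lets the VM operation detect a
    // collection that another thread has already completed in the interim,
    // in which case the prologue fails and we simply retry the loop.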
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue; // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done. If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown. Clear gc_overhead_limit_exceeded
      // so that the exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
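        // We are discarding any block the VM operation handed back, so
        // plug it with a dummy object to keep the heap parsable.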
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectorPolicy::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

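// Try to satisfy the allocation by expanding the heap rather than
// collecting: first the old generation (if its policy accepts this
// request), then the young generation. Returns NULL if neither helps.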
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  Generation *old = gch->old_gen();
  if (old->should_allocate(size, is_tlab)) {
    result = old->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    Generation *young = gch->young_gen();
    if (young->should_allocate(size, is_tlab)) {
      result = young->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

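// Called after an allocation has failed: escalate from an incremental
// collection, to a full collection, and finally to a full collection that
// clears all soft references and fully compacts the heap, attempting the
// allocation again after each stage.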
HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result; // Could be null if we are out of space.
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false,                     // full
                       false,                     // clear_all_soft_refs
                       size,                      // size
                       is_tlab,                   // is_tlab
                       GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true,                      // full
                       false,                     // clear_all_soft_refs
                       size,                      // size
                       is_tlab,                   // is_tlab
                       GenCollectedHeap::OldGen); // max_generation
  }

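  // The collection above may have freed space; retry the allocation in
  // any generation.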
  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true,                      // full
                       true,                      // clear_all_soft_refs
                       size,                      // size
                       is_tlab,                   // is_tlab
                       GenCollectedHeap::OldGen); // max_generation
  }

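  // Final attempt, after a fully compacting collection that cleared
  // soft references.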
  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

801 |
|
802 // Return true if any of the following is true: |
|
803 // . the allocation won't fit into the current young gen heap |
|
804 // . gc locker is occupied (jni critical section) |
|
805 // . heap memory is tight -- the most recent previous collection |
|
806 // was a full collection because a partial collection (would |
|
807 // have) failed and is likely to fail again |
|
808 bool GenCollectorPolicy::should_try_older_generation_allocation( |
|
809 size_t word_size) const { |
|
810 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
|
811 size_t young_capacity = gch->young_gen()->capacity_before_gc(); |
|
812 return (word_size > heap_word_size(young_capacity)) |
|
813 || GCLocker::is_active_and_needs_gc() |
|
814 || gch->incremental_collection_failed(); |
|
815 } |
|
816 |
|
817 |
|
//
// MarkSweepPolicy methods
//

void MarkSweepPolicy::initialize_alignments() {