hotspot/src/share/vm/memory/permGen.cpp
changeset 386 7f121b1192f2
parent 1 489c9b5090e2
child 670 ddf3e9583f2f
diff -r c7bc1fed1d90 -r 7f121b1192f2 hotspot/src/share/vm/memory/permGen.cpp
--- a/hotspot/src/share/vm/memory/permGen.cpp
+++ b/hotspot/src/share/vm/memory/permGen.cpp
@@ -23,10 +23,74 @@
  */

 #include "incls/_precompiled.incl"
 #include "incls/_permGen.cpp.incl"

+HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
+  MutexLocker ml(Heap_lock);
+  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
+  GCCause::Cause prev_cause = GCCause::_no_gc;
+
+  for (;;) {
+    HeapWord* obj = gen->allocate(size, false);
+    if (obj != NULL) {
+      return obj;
+    }
+    if (gen->capacity() < _capacity_expansion_limit ||
+        prev_cause != GCCause::_no_gc) {
+      obj = gen->expand_and_allocate(size, false);
+    }
+    if (obj == NULL && prev_cause != GCCause::_last_ditch_collection) {
+      if (GC_locker::is_active_and_needs_gc()) {
+        // If this thread is not in a jni critical section, we stall
+        // the requestor until the critical section has cleared and
+        // a GC is allowed. When the critical section clears, a GC is
+        // initiated by the last thread exiting the critical section; so
+        // we retry the allocation sequence from the beginning of the loop,
+        // rather than causing more, now probably unnecessary, GC attempts.
+        JavaThread* jthr = JavaThread::current();
+        if (!jthr->in_critical()) {
+          MutexUnlocker mul(Heap_lock);
+          // Wait for JNI critical section to be exited
+          GC_locker::stall_until_clear();
+          continue;
+        } else {
+          if (CheckJNICalls) {
+            fatal("Possible deadlock due to allocating while"
+                  " in jni critical section");
+          }
+          return NULL;
+        }
+      }
+
+      // Read the GC count while holding the Heap_lock
+      unsigned int gc_count_before      = SharedHeap::heap()->total_collections();
+      unsigned int full_gc_count_before = SharedHeap::heap()->total_full_collections();
+      {
+        MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
+        VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
+                                               next_cause);
+        VMThread::execute(&op);
+        if (!op.prologue_succeeded() || op.gc_locked()) {
+          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
+          continue;  // retry and/or stall as necessary
+        }
+        obj = op.result();
+        assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
+               "result not in heap");
+        if (obj != NULL) {
+          return obj;
+        }
+      }
+      prev_cause = next_cause;
+      next_cause = GCCause::_last_ditch_collection;
+    } else {
+      return obj;
+    }
+  }
+}
+
 CompactingPermGen::CompactingPermGen(ReservedSpace rs,
                                      ReservedSpace shared_rs,
                                      size_t initial_byte_size,
                                      GenRemSet* remset,
                                      PermanentGenerationSpec* perm_spec)
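The helper added above centralizes the perm-gen allocation retry protocol: try a plain allocation, expand (only below _capacity_expansion_limit until a collection has run), then schedule a full collection, and finally a last-ditch collection before giving up. The following is a minimal toy model of that escalation; ToyGen, Cause, and the sizes are hypothetical stand-ins, and the locking, GC_locker stall, and VM operation are deliberately omitted:

#include <cstddef>
#include <cstdio>

enum Cause { NO_GC, PERM_GEN_FULL, LAST_DITCH };

struct ToyGen {
  char   heap[256];
  size_t capacity        = 64;   // currently committed size
  size_t used            = 64;   // start full so allocation must escalate
  size_t expansion_limit = 64;   // stands in for _capacity_expansion_limit

  char* allocate(size_t size) {
    if (used + size > capacity) return nullptr;
    char* p = heap + used;
    used += size;
    return p;
  }
  char* expand_and_allocate(size_t size) {
    if (capacity + 32 <= sizeof(heap)) capacity += 32;
    return allocate(size);
  }
  void collect(Cause cause) {
    // A real collection reclaims unreachable objects; a last-ditch one
    // also clears soft references. Here we just free some space.
    used = (cause == LAST_DITCH) ? 0 : used / 2;
  }
};

char* mem_allocate_in_gen(ToyGen* gen, size_t size) {
  Cause next_cause = PERM_GEN_FULL;
  Cause prev_cause = NO_GC;
  for (;;) {
    char* obj = gen->allocate(size);
    if (obj != nullptr) return obj;
    // Expand only below the soft limit, except after a collection has
    // already run (prev_cause != NO_GC): then expansion is a last resort.
    if (gen->capacity < gen->expansion_limit || prev_cause != NO_GC) {
      obj = gen->expand_and_allocate(size);
    }
    if (obj == nullptr && prev_cause != LAST_DITCH) {
      gen->collect(next_cause);   // in HotSpot: a VM operation, see below
      prev_cause = next_cause;
      next_cause = LAST_DITCH;    // escalate if the next pass fails too
    } else {
      return obj;                 // success, or out of options: NULL
    }
  }
}

int main() {
  ToyGen g;
  char* p = mem_allocate_in_gen(&g, 16);
  std::printf("allocation %s\n", p != nullptr ? "succeeded" : "failed");
  return 0;
}

The prev_cause/next_cause pair bounds the work: each failing request gets at most one ordinary and one last-ditch collection before NULL is returned. In HotSpot the two collection passes run as VM_GenCollectForPermanentAllocation operations on the VM thread rather than as the direct collect() call above.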
@@ -42,44 +106,11 @@

   _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion;
 }

 HeapWord* CompactingPermGen::mem_allocate(size_t size) {
-  MutexLocker ml(Heap_lock);
-  HeapWord* obj = _gen->allocate(size, false);
-  bool tried_collection = false;
-  bool tried_expansion = false;
-  while (obj == NULL) {
-    if (_gen->capacity() >= _capacity_expansion_limit || tried_expansion) {
-      // Expansion limit reached, try collection before expanding further
-      // For now we force a full collection, this could be changed
-      SharedHeap::heap()->collect_locked(GCCause::_permanent_generation_full);
-      obj = _gen->allocate(size, false);
-      tried_collection = true;
-      tried_expansion =  false;    // ... following the collection:
-                                   // the collection may have shrunk the space.
-    }
-    if (obj == NULL && !tried_expansion) {
-      obj = _gen->expand_and_allocate(size, false);
-      tried_expansion = true;
-    }
-    if (obj == NULL && tried_collection && tried_expansion) {
-      // We have not been able to allocate despite a collection and
-      // an attempted space expansion. We now make a last-ditch collection
-      // attempt that will try to reclaim as much space as possible (for
-      // example by aggressively clearing all soft refs).
-      SharedHeap::heap()->collect_locked(GCCause::_last_ditch_collection);
-      obj = _gen->allocate(size, false);
-      if (obj == NULL) {
-        // An expansion attempt is necessary since the previous
-        // collection may have shrunk the space.
-        obj = _gen->expand_and_allocate(size, false);
-      }
-      break;
-    }
-  }
-  return obj;
-}
+  return mem_allocate_in_gen(size, _gen);
 }

 void CompactingPermGen::compute_new_size() {
   size_t desired_capacity = align_size_up(_gen->used(), MinPermHeapExpansion);
   if (desired_capacity < PermSize) {
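The stall path in the new helper relies on the GC_locker handshake: an allocating thread that is not itself inside a JNI critical section drops the Heap_lock and blocks until the last thread leaves its critical section, then retries from the top of the loop via continue. Below is a self-contained sketch of that handshake using standard C++ threads rather than HotSpot's Mutex and GC_locker; all names and the fixed sleeps are illustrative only:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex              heap_lock;   // stands in for HotSpot's Heap_lock
std::condition_variable cleared;
int  critical_depth = 0;             // threads inside a "JNI critical" section
bool needs_gc       = false;         // set by a stalled allocator

void enter_critical() {
  std::lock_guard<std::mutex> g(heap_lock);
  ++critical_depth;
}

void leave_critical() {
  std::lock_guard<std::mutex> g(heap_lock);
  if (--critical_depth == 0 && needs_gc) {
    needs_gc = false;        // the last thread out would initiate the GC here
    cleared.notify_all();    // wake stalled allocators so they retry
  }
}

// Parallels GC_locker::stall_until_clear(): wait, with the lock released,
// until the critical section has emptied and the pending GC has run.
void stall_until_clear(std::unique_lock<std::mutex>& lk) {
  needs_gc = true;
  cleared.wait(lk, [] { return critical_depth == 0 && !needs_gc; });
}

int main() {
  std::thread worker([] {
    enter_critical();
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    leave_critical();
  });
  std::this_thread::sleep_for(std::chrono::milliseconds(10));  // let it enter

  std::unique_lock<std::mutex> lk(heap_lock);
  if (critical_depth > 0) {   // allocation failed and a GC is blocked
    stall_until_clear(lk);
  }
  std::printf("critical section clear; allocation can retry\n");
  worker.join();
  return 0;
}

In the changeset itself the retry is driven by the for (;;) loop in mem_allocate_in_gen: stall_until_clear() returns, the MutexUnlocker re-takes the Heap_lock, and continue restarts the allocation sequence from the beginning.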