hotspot/src/share/vm/code/codeCache.cpp
changeset 27420 04e6f914cce1
parent 27410 dd80df7cfa2b
child 27642 8c9eff693145
27419:a934f24b4dcf 27420:04e6f914cce1
    42 #include "runtime/handles.inline.hpp"
    42 #include "runtime/handles.inline.hpp"
    43 #include "runtime/arguments.hpp"
    43 #include "runtime/arguments.hpp"
    44 #include "runtime/icache.hpp"
    44 #include "runtime/icache.hpp"
    45 #include "runtime/java.hpp"
    45 #include "runtime/java.hpp"
    46 #include "runtime/mutexLocker.hpp"
    46 #include "runtime/mutexLocker.hpp"
       
    47 #include "runtime/sweeper.hpp"
    47 #include "runtime/compilationPolicy.hpp"
    48 #include "runtime/compilationPolicy.hpp"
    48 #include "services/memoryService.hpp"
    49 #include "services/memoryService.hpp"
    49 #include "trace/tracing.hpp"
    50 #include "trace/tracing.hpp"
    50 #include "utilities/xmlstream.hpp"
    51 #include "utilities/xmlstream.hpp"
    51 #ifdef COMPILER1
    52 #ifdef COMPILER1
   190     FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
   191     FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
   191     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
   192     FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
   192   }
   193   }
   193 
   194 
   194   // Make sure we have enough space for VM internal code
   195   // Make sure we have enough space for VM internal code
   195   uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace;
   196   uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
   196   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
   197   if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
   197     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
   198     vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
   198   }
   199   }
   199   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
   200   guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");
   200 
   201 
   201   // Align reserved sizes of CodeHeaps
   202   // Align reserved sizes of CodeHeaps
   202   size_t non_method_size    = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
   203   size_t non_method_size   = ReservedCodeSpace::allocation_align_size_up(NonNMethodCodeHeapSize);
   203   size_t profiled_size      = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
   204   size_t profiled_size     = ReservedCodeSpace::allocation_align_size_up(ProfiledCodeHeapSize);
   204   size_t non_profiled_size  = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
   205   size_t non_profiled_size = ReservedCodeSpace::allocation_align_size_up(NonProfiledCodeHeapSize);
   205 
   206 
   206   // Compute initial sizes of CodeHeaps
   207   // Compute initial sizes of CodeHeaps
   207   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
   208   size_t init_non_method_size   = MIN2(InitialCodeCacheSize, non_method_size);
   208   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
   209   size_t init_profiled_size     = MIN2(InitialCodeCacheSize, profiled_size);
   209   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
   210   size_t init_non_profiled_size = MIN2(InitialCodeCacheSize, non_profiled_size);
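
The hunk above shows the heap-sizing ergonomics: one branch folds the non-profiled heap into the non-nmethod heap and zeroes it, the minimum non-nmethod size check no longer adds CodeCacheMinimumFreeSpace, a guarantee() keeps the three segments within ReservedCodeCacheSize, and each heap's initial committed size is capped at InitialCodeCacheSize via MIN2(). A minimal standalone sketch of that arithmetic; the names mirror the HotSpot globals, but the concrete byte values are illustrative assumptions, not values taken from this changeset:

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main() {
    // Illustrative values; the real defaults are flag- and platform-dependent.
    size_t reserved     = 240u * 1024 * 1024;           // cf. ReservedCodeCacheSize
    size_t non_nmethod  =   6u * 1024 * 1024;           // cf. NonNMethodCodeHeapSize
    size_t profiled     = (reserved - non_nmethod) / 2; // cf. ProfiledCodeHeapSize
    size_t non_profiled = reserved - non_nmethod - profiled;
    size_t initial      =   2u * 1024 * 1024;           // cf. InitialCodeCacheSize

    // Mirrors the guarantee(): the three segments must fit in the reservation.
    if (non_nmethod + profiled + non_profiled > reserved) {
      fprintf(stderr, "heap sizes exceed the reserved code cache\n");
      return 1;
    }

    // Mirrors the MIN2() calls: each heap starts committed at no more than
    // InitialCodeCacheSize and grows toward its reserved size on demand.
    size_t init_profiled = std::min(initial, profiled);
    printf("profiled heap: reserved %zu bytes, initial %zu bytes\n",
           profiled, init_profiled);
    return 0;
  }
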
   346 
   347 
   347 CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
   348 CodeBlob* CodeCache::next_blob(CodeBlob* cb) {
   348   return next_blob(get_code_heap(cb), cb);
   349   return next_blob(get_code_heap(cb), cb);
   349 }
   350 }
   350 
   351 
   351 CodeBlob* CodeCache::allocate(int size, int code_blob_type, bool is_critical) {
   352 /**
   352   // Do not seize the CodeCache lock here--if the caller has not
   353  * Do not seize the CodeCache lock here--if the caller has not
   353   // already done so, we are going to lose bigtime, since the code
   354  * already done so, we are going to lose bigtime, since the code
   354   // cache will contain a garbage CodeBlob until the caller can
   355  * cache will contain a garbage CodeBlob until the caller can
   355   // run the constructor for the CodeBlob subclass he is busy
   356  * run the constructor for the CodeBlob subclass he is busy
   356   // instantiating.
   357  * instantiating.
   357   assert_locked_or_safepoint(CodeCache_lock);
   358  */
   358   assert(size > 0, "allocation request must be reasonable");
   359 CodeBlob* CodeCache::allocate(int size, int code_blob_type) {
       
   360   // Possibly wakes up the sweeper thread.
       
   361   NMethodSweeper::notify(code_blob_type);
       
   362   assert_locked_or_safepoint(CodeCache_lock);
       
   363   assert(size > 0, err_msg_res("Code cache allocation request must be > 0 but is %d", size));
   359   if (size <= 0) {
   364   if (size <= 0) {
   360     return NULL;
   365     return NULL;
   361   }
   366   }
   362   CodeBlob* cb = NULL;
   367   CodeBlob* cb = NULL;
   363 
   368 
   364   // Get CodeHeap for the given CodeBlobType
   369   // Get CodeHeap for the given CodeBlobType
   365   CodeHeap* heap = get_code_heap(code_blob_type);
   370   CodeHeap* heap = get_code_heap(code_blob_type);
   366   assert(heap != NULL, "heap is null");
   371   assert(heap != NULL, "heap is null");
   367 
   372 
   368   while (true) {
   373   while (true) {
   369     cb = (CodeBlob*)heap->allocate(size, is_critical);
   374     cb = (CodeBlob*)heap->allocate(size);
   370     if (cb != NULL) break;
   375     if (cb != NULL) break;
   371     if (!heap->expand_by(CodeCacheExpansionSize)) {
   376     if (!heap->expand_by(CodeCacheExpansionSize)) {
   372       // Expansion failed
   377       // Expansion failed
   373       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
   378       if (SegmentedCodeCache && (code_blob_type == CodeBlobType::NonNMethod)) {
   374         // Fallback solution: Store non-nmethod code in the non-profiled code heap
   379         // Fallback solution: Store non-nmethod code in the non-profiled code heap.
   375         return allocate(size, CodeBlobType::MethodNonProfiled, is_critical);
    380         // Note that in the sweeper, we check the reverse_free_ratio of the non-profiled
       
    381         // code heap and force stack scanning if less than 10% of the code heap is free.
       
   382         return allocate(size, CodeBlobType::MethodNonProfiled);
   376       }
   383       }
       
   384       MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       
   385       CompileBroker::handle_full_code_cache(code_blob_type);
   377       return NULL;
   386       return NULL;
   378     }
   387     }
   379     if (PrintCodeCacheExtension) {
   388     if (PrintCodeCacheExtension) {
   380       ResourceMark rm;
   389       ResourceMark rm;
   381       if (SegmentedCodeCache) {
   390       if (SegmentedCodeCache) {
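
The hunk above reworks CodeCache::allocate(): the is_critical flag is dropped, the sweeper is notified up front, and a full code cache is reported to the CompileBroker before returning NULL. The control flow itself is a retry loop: try the segment's heap, expand it by CodeCacheExpansionSize on failure, and fall back to a sibling segment once the heap cannot grow. A minimal standalone sketch of that pattern; SimpleHeap and the step size are illustrative stand-ins, not the CodeHeap API:

  #include <cstddef>
  #include <cstdio>
  #include <cstdlib>

  struct SimpleHeap {
    size_t used = 0, committed = 0, reserved = 0;
    void* allocate(size_t size) {
      if (used + size > committed) return NULL;  // no committed room left
      used += size;
      return malloc(size);  // stand-in for carving a block out of the heap
    }
    bool expand_by(size_t step) {
      if (committed + step > reserved) return false;  // reservation exhausted
      committed += step;
      return true;
    }
  };

  // Try 'heap', expanding as needed; fall back to 'fallback' when it is full.
  void* allocate_in(SimpleHeap& heap, SimpleHeap* fallback, size_t size) {
    while (true) {
      void* p = heap.allocate(size);
      if (p != NULL) return p;
      if (!heap.expand_by(64 * 1024)) {  // cf. CodeCacheExpansionSize
        if (fallback != NULL) return allocate_in(*fallback, NULL, size);
        return NULL;  // the caller would report a full code cache here
      }
    }
  }

  int main() {
    SimpleHeap non_nmethod;               // deliberately tiny reservation
    non_nmethod.reserved = 64 * 1024;
    SimpleHeap non_profiled;              // roomy fallback segment
    non_profiled.reserved = 1024 * 1024;
    void* p = allocate_in(non_nmethod, &non_profiled, 128 * 1024);
    printf("allocated via fallback: %s\n", p != NULL ? "yes" : "no");
    free(p);
    return 0;
  }
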
   769   }
   778   }
   770   return max_cap;
   779   return max_cap;
   771 }
   780 }
   772 
   781 
   773 /**
   782 /**
   774  * Returns true if a CodeHeap is full and sets code_blob_type accordingly.
       
   775  */
       
   776 bool CodeCache::is_full(int* code_blob_type) {
       
   777   FOR_ALL_HEAPS(heap) {
       
   778     if ((*heap)->unallocated_capacity() < CodeCacheMinimumFreeSpace) {
       
   779       *code_blob_type = (*heap)->code_blob_type();
       
   780       return true;
       
   781     }
       
   782   }
       
   783   return false;
       
   784 }
       
   785 
       
   786 /**
       
   787  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
   783  * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap
   788  * is free, reverse_free_ratio() returns 4.
   784  * is free, reverse_free_ratio() returns 4.
   789  */
   785  */
   790 double CodeCache::reverse_free_ratio(int code_blob_type) {
   786 double CodeCache::reverse_free_ratio(int code_blob_type) {
   791   CodeHeap* heap = get_code_heap(code_blob_type);
   787   CodeHeap* heap = get_code_heap(code_blob_type);
   792   if (heap == NULL) {
   788   if (heap == NULL) {
   793     return 0;
   789     return 0;
   794   }
   790   }
   795   double unallocated_capacity = (double)(heap->unallocated_capacity() - CodeCacheMinimumFreeSpace);
   791 
       
    792   double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0.
   796   double max_capacity = (double)heap->max_capacity();
   793   double max_capacity = (double)heap->max_capacity();
   797   return max_capacity / unallocated_capacity;
   794   double result = max_capacity / unallocated_capacity;
       
   795   assert (max_capacity >= unallocated_capacity, "Must be");
       
   796   assert (result >= 1.0, err_msg_res("reverse_free_ratio must be at least 1. It is %f", result));
       
   797   return result;
   798 }
   798 }
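
The reworked function above clamps the unallocated capacity to at least 1.0 instead of subtracting CodeCacheMinimumFreeSpace, so the ratio is always defined, and it asserts the result is at least 1. A small standalone check of the stated example (25% free gives 4) and of the 10% threshold mentioned in the allocate() fallback comment; the threshold use is paraphrased from that comment, not from sweeper code shown in this changeset:

  #include <algorithm>
  #include <cassert>
  #include <cstdio>

  // Same shape as the function above: max capacity over unallocated capacity,
  // clamped to avoid division by zero.
  double reverse_free_ratio(double max_capacity, double unallocated) {
    unallocated = std::max(unallocated, 1.0);
    return max_capacity / unallocated;
  }

  int main() {
    // 25% (1/4) of a 100-unit heap is free -> ratio 4, matching the comment.
    printf("%.1f\n", reverse_free_ratio(100.0, 25.0));  // prints 4.0
    // Less than 10% free -> ratio above 10; per the fallback comment in
    // allocate(), the sweeper forces stack scanning in that regime.
    assert(reverse_free_ratio(100.0, 9.0) > 10.0);
    return 0;
  }
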
   799 
   799 
   800 size_t CodeCache::bytes_allocated_in_freelists() {
   800 size_t CodeCache::bytes_allocated_in_freelists() {
   801   size_t allocated_bytes = 0;
   801   size_t allocated_bytes = 0;
   802   FOR_ALL_HEAPS(heap) {
   802   FOR_ALL_HEAPS(heap) {