hotspot/src/share/vm/opto/graphKit.cpp
changeset 27150 5a09b3a7b974
parent 26166 4b49fd58bbd9
child 27637 cf68c0af6882
27149:9246fc481aa3 27150:5a09b3a7b974
  3824   }
  3824   }
  3825 
  3825 
  3826   // Final sync IdealKit and GraphKit.
  3826   // Final sync IdealKit and GraphKit.
  3827   final_sync(ideal);
  3827   final_sync(ideal);
  3828 }
  3828 }
       
  3829 /*
       
  3830  * Determine if the G1 pre-barrier can be removed. The pre-barrier is
       
  3831  * required by SATB to make sure all objects live at the start of the
       
  3832  * marking are kept alive; all reference updates therefore need to record the
       
  3833  * previous reference value before the new one is written.
       
  3834  *
       
  3835  * If the previous value is NULL there is no need to save the old value.
       
  3836  * References that are NULL are filtered during runtime by the barrier
       
  3837  * code to avoid unnecessary queuing.
       
  3838  *
       
  3839  * However, in the case of newly allocated objects it might be possible to
       
  3840  * prove that the reference about to be overwritten is NULL at compile
       
  3841  * time and avoid adding the barrier code completely.
       
  3842  *
       
  3843  * The compiler needs to determine that the object in which a field is about
       
  3844  * to be written is newly allocated, and that no prior store to the same field
       
  3845  * has happened since the allocation.
       
  3846  *
       
  3847  * Returns true if the pre-barrier can be removed
       
  3848  */
       
  3849 bool GraphKit::g1_can_remove_pre_barrier(PhaseTransform* phase, Node* adr,
       
  3850                                          BasicType bt, uint adr_idx) {
       
  3851   intptr_t offset = 0;
       
  3852   Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
       
  3853   AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);
       
  3854 
       
  3855   if (offset == Type::OffsetBot) {
       
  3856     return false; // cannot unalias unless there are precise offsets
       
  3857   }
       
  3858 
       
  3859   if (alloc == NULL) {
       
  3860     return false; // No allocation found
       
  3861   }
       
  3862 
       
  3863   intptr_t size_in_bytes = type2aelembytes(bt);
       
  3864 
       
  3865   Node* mem = memory(adr_idx); // start searching here...
       
  3866 
       
  3867   for (int cnt = 0; cnt < 50; cnt++) {
       
  3868 
       
  3869     if (mem->is_Store()) {
       
  3870 
       
  3871       Node* st_adr = mem->in(MemNode::Address);
       
  3872       intptr_t st_offset = 0;
       
  3873       Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
       
  3874 
       
  3875       if (st_base == NULL) {
       
  3876         break; // inscrutable pointer
       
  3877       }
       
  3878 
       
  3879       // We have found a store with the same base and offset as ours, so break
       
  3880       if (st_base == base && st_offset == offset) {
       
  3881         break;
       
  3882       }
       
  3883 
       
  3884       if (st_offset != offset && st_offset != Type::OffsetBot) {
       
  3885         const int MAX_STORE = BytesPerLong;
       
  3886         if (st_offset >= offset + size_in_bytes ||
       
  3887             st_offset <= offset - MAX_STORE ||
       
  3888             st_offset <= offset - mem->as_Store()->memory_size()) {
       
  3889           // Success:  The offsets are provably independent.
       
  3890           // (You may ask, why not just test st_offset != offset and be done?
       
  3891           // The answer is that stores of different sizes can co-exist
       
  3892           // in the same sequence of RawMem effects.  We sometimes initialize
       
  3893           // a whole 'tile' of array elements with a single jint or jlong.)
       
  3894           mem = mem->in(MemNode::Memory);
       
  3895           continue; // advance through independent store memory
       
  3896         }
       
  3897       }
       
  3898 
       
  3899       if (st_base != base
       
  3900           && MemNode::detect_ptr_independence(base, alloc, st_base,
       
  3901                                               AllocateNode::Ideal_allocation(st_base, phase),
       
  3902                                               phase)) {
       
  3903         // Success:  The bases are provably independent.
       
  3904         mem = mem->in(MemNode::Memory);
       
  3905         continue; // advance through independent store memory
       
  3906       }
       
  3907     } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
       
  3908 
       
  3909       InitializeNode* st_init = mem->in(0)->as_Initialize();
       
  3910       AllocateNode* st_alloc = st_init->allocation();
       
  3911 
       
  3912       // Make sure that we are looking at the same allocation site.
       
  3913       // The alloc variable is guaranteed not to be null here by the earlier check.
       
  3914       if (alloc == st_alloc) {
       
  3915         // Check that the initialization is storing NULL so that no previous store
       
  3916         // has been moved up to directly write a reference
       
  3917         Node* captured_store = st_init->find_captured_store(offset,
       
  3918                                                             type2aelembytes(T_OBJECT),
       
  3919                                                             phase);
       
  3920         if (captured_store == NULL || captured_store == st_init->zero_memory()) {
       
  3921           return true;
       
  3922         }
       
  3923       }
       
  3924     }
       
  3925 
       
  3926     // Unless there is an explicit 'continue', we must bail out here,
       
  3927     // because 'mem' is an inscrutable memory state (e.g., a call).
       
  3928     break;
       
  3929   }
       
  3930 
       
  3931   return false;
       
  3932 }
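The offset-independence test inside the loop above can be read on its own. Below is a minimal standalone sketch of that check, assuming hypothetical names (offsets_provably_independent, store_size) and MAX_STORE fixed at 8 bytes (BytesPerLong); it only illustrates the reasoning used by g1_can_remove_pre_barrier and is not HotSpot code.

#include <cstdint>

// Returns true when a store of store_size bytes at st_offset provably cannot
// overlap a field of size_in_bytes bytes at offset (hypothetical helper).
static bool offsets_provably_independent(std::intptr_t st_offset, int store_size,
                                         std::intptr_t offset,    int size_in_bytes) {
  const int MAX_STORE = 8; // BytesPerLong: widest raw-memory store considered
  return st_offset >= offset + size_in_bytes   // other store starts at or after our field ends
      || st_offset <= offset - MAX_STORE       // other store lies too far below to reach our field
      || st_offset <= offset - store_size;     // other store, given its actual size, ends before our field

}

As the comment in the loop notes, testing st_offset != offset alone would not be enough: raw-memory stores of different sizes can coexist, for example a single jint or jlong initializing a whole tile of array elements, and such a wider store could still cover the field being checked.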
  3829 
  3933 
  3830 // G1 pre/post barriers
  3934 // G1 pre/post barriers
  3831 void GraphKit::g1_write_barrier_pre(bool do_load,
  3935 void GraphKit::g1_write_barrier_pre(bool do_load,
  3832                                     Node* obj,
  3936                                     Node* obj,
  3833                                     Node* adr,
  3937                                     Node* adr,
  3844     // We need to generate the load of the previous value
  3948     // We need to generate the load of the previous value
  3845     assert(obj != NULL, "must have a base");
  3949     assert(obj != NULL, "must have a base");
  3846     assert(adr != NULL, "where are we loading from?");
  3950     assert(adr != NULL, "where are loading from?");
  3847     assert(pre_val == NULL, "loaded already?");
  3951     assert(pre_val == NULL, "loaded already?");
  3848     assert(val_type != NULL, "need a type");
  3952     assert(val_type != NULL, "need a type");
       
  3953 
       
  3954     if (use_ReduceInitialCardMarks()
       
  3955         && g1_can_remove_pre_barrier(&_gvn, adr, bt, alias_idx)) {
       
  3956       return;
       
  3957     }
       
  3958 
  3849   } else {
  3959   } else {
  3850     // In this case both val_type and alias_idx are unused.
  3960     // In this case both val_type and alias_idx are unused.
  3851     assert(pre_val != NULL, "must be loaded already");
  3961     assert(pre_val != NULL, "must be loaded already");
  3852     // Nothing to be done if pre_val is null.
  3962     // Nothing to be done if pre_val is null.
  3853     if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
  3963     if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
  3923     } __ end_if();  // (pre_val != NULL)
  4033     } __ end_if();  // (pre_val != NULL)
  3924   } __ end_if();  // (!marking)
  4034   } __ end_if();  // (!marking)
  3925 
  4035 
  3926   // Final sync IdealKit and GraphKit.
  4036   // Final sync IdealKit and GraphKit.
  3927   final_sync(ideal);
  4037   final_sync(ideal);
       
  4038 }
       
  4039 
       
  4040 /*
       
  4041  * G1, similar to any GC with a Young Generation, requires a way to keep track of
       
  4042  * references from Old Generation to Young Generation to make sure all live
       
  4043  * objects are found. G1 also needs to keep track of object references
       
  4044  * between different regions to enable evacuation of old regions, which is done
       
  4045  * as part of mixed collections. References are tracked in remembered sets, which
       
  4046  * are continuously updated as references are written, with the help of the
       
  4047  * post-barrier.
       
  4048  *
       
  4049  * To reduce the number of updates to the remembered set, the post-barrier
       
  4050  * filters out updates to fields in objects located in the Young Generation or in
       
  4051  * the same region as the reference, updates where NULL is being written, and
       
  4052  * updates where the card is already marked as dirty by an earlier write.
       
  4053  *
       
  4054  * Under certain circumstances it is possible to avoid generating the
       
  4055  * post-barrier completely, if it can be proven at compile time that
       
  4056  * the object is newly allocated and that no safepoint exists between the
       
  4057  * allocation and the store.
       
  4058  *
       
  4059  * In the case of slow allocation, the allocation code must handle the barrier
       
  4060  * as part of the allocation if the allocated object is not located
       
  4061  * in the nursery; this would happen for humongous objects. This is similar to
       
  4062  * how CMS is required to handle this case; see the comments for the methods
       
  4063  * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier.
       
  4064  * A deferred card mark is required for these objects and is handled in the above
       
  4065  * mentioned methods.
       
  4066  *
       
  4067  * Returns true if the post-barrier can be removed
       
  4068  */
       
  4069 bool GraphKit::g1_can_remove_post_barrier(PhaseTransform* phase, Node* store,
       
  4070                                           Node* adr) {
       
  4071   intptr_t      offset = 0;
       
  4072   Node*         base   = AddPNode::Ideal_base_and_offset(adr, phase, offset);
       
  4073   AllocateNode* alloc  = AllocateNode::Ideal_allocation(base, phase);
       
  4074 
       
  4075   if (offset == Type::OffsetBot) {
       
  4076     return false; // cannot unalias unless there are precise offsets
       
  4077   }
       
  4078 
       
  4079   if (alloc == NULL) {
       
  4080     return false; // No allocation found
       
  4081   }
       
  4082 
       
  4083   // Start search from Store node
       
  4084   Node* mem = store->in(MemNode::Control);
       
  4085   if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
       
  4086 
       
  4087     InitializeNode* st_init = mem->in(0)->as_Initialize();
       
  4088     AllocateNode*  st_alloc = st_init->allocation();
       
  4089 
       
  4090     // Make sure we are looking at the same allocation
       
  4091     if (alloc == st_alloc) {
       
  4092       return true;
       
  4093     }
       
  4094   }
       
  4095 
       
  4096   return false;
  3928 }
  4097 }
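To make the filtering described in the comment above concrete, here is a minimal standalone sketch of the checks the G1 post-barrier performs at runtime, assuming hypothetical names, a power-of-two region size, and a card value of 0 for "dirty"; the Young Generation filter (done with a special card value in the real barrier) is omitted. This is an illustration, not the barrier code C2 emits.

#include <cstdint>

static const std::uintptr_t kRegionBytes = std::uintptr_t(1) << 20; // assumed region size, power of two
static const std::int8_t    kDirtyCard   = 0;                       // assumed value of an already-dirty card

// Returns true only when the remembered set has to be updated for a store of
// new_val into the field at field_addr, whose covering card byte is *card.
static bool post_barrier_needs_rset_update(std::uintptr_t field_addr,
                                           std::uintptr_t new_val,
                                           const volatile std::int8_t* card) {
  if (new_val == 0)                          return false; // writing NULL: nothing to track
  if ((field_addr ^ new_val) < kRegionBytes) return false; // field and new value are in the same region
  if (*card == kDirtyCard)                   return false; // card already dirtied by an earlier write
  return true;                                             // cross-region store: dirty the card and enqueue it
}

g1_can_remove_post_barrier above goes one step further: when the compiler can already prove that the store hits a freshly allocated object and that no safepoint lies between the allocation and the store, even this filtering code is left out entirely.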
  3929 
  4098 
  3930 //
  4099 //
  3931 // Update the card table and add card address to the queue
  4100 // Update the card table and add card address to the queue
  3932 //
  4101 //
  3974   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
  4143   if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
  3975     // Must be NULL
  4144     // Must be NULL
  3976     const Type* t = val->bottom_type();
  4145     const Type* t = val->bottom_type();
  3977     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
  4146     assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
  3978     // No post barrier if writing NULL
  4147     // No post barrier if writing NULL
       
  4148     return;
       
  4149   }
       
  4150 
       
  4151   if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) {
       
  4152     // We can skip marks on a freshly-allocated object in Eden.
       
  4153     // Keep this code in sync with new_store_pre_barrier() in runtime.cpp.
       
  4154     // That routine informs GC to take appropriate compensating steps,
       
  4155     // upon a slow-path allocation, so as to make this card-mark
       
  4156     // elision safe.
       
  4157     return;
       
  4158   }
       
  4159 
       
  4160   if (use_ReduceInitialCardMarks()
       
  4161       && g1_can_remove_post_barrier(&_gvn, oop_store, adr)) {
  3979     return;
  4162     return;
  3980   }
  4163   }
  3981 
  4164 
  3982   if (!use_precise) {
  4165   if (!use_precise) {
  3983     // All card marks for a (non-array) instance are in one place:
  4166     // All card marks for a (non-array) instance are in one place: