src/hotspot/share/opto/parse1.cpp
branch:    datagramsocketimpl-branch
changeset: 58678:9cf78a70fa4f
parent:    54640:2f4393ec54d4
child:     58679:9c3209ff7550
comparing 58677:13588c901957 with 58678:9cf78a70fa4f
@@ -521,14 +521,10 @@
   }
 
 #ifdef ASSERT
   if (depth() == 1) {
     assert(C->is_osr_compilation() == this->is_osr_parse(), "OSR in sync");
-    if (C->tf() != tf()) {
-      assert(C->env()->system_dictionary_modification_counter_changed(),
-             "Must invalidate if TypeFuncs differ");
-    }
   } else {
     assert(!this->is_osr_parse(), "no recursive OSR");
   }
 #endif
 
@@ -582,10 +578,15 @@
       decrement_age();
     }
   }
 
   if (depth() == 1 && !failing()) {
+    if (C->clinit_barrier_on_entry()) {
+      // Add check to deoptimize the nmethod once the holder class is fully initialized
+      clinit_deopt();
+    }
+
     // Add check to deoptimize the nmethod if RTM state was changed
     rtm_deopt();
   }
 
   // Check for bailouts during method entry or RTM state check setup.
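Note: C->clinit_barrier_on_entry() / clinit_deopt() (defined further down in this file) add a guard at bci 0 of the top-level compiled method so that, as the comment says, the nmethod deoptimizes once the holder class becomes fully initialized. The standalone C++ sketch below is illustrative only, not HotSpot code, and every name in it is made up; it only shows the shape of such an entry guard: once the compile-time assumption ("holder still being initialized") stops holding, the compiled entry bails out, which in the VM is an uncommon trap followed by recompilation without the barrier.

// Illustrative sketch of an entry guarded by the holder's initialization state (not HotSpot code).
#include <atomic>
#include <cstdio>

enum class InitState { being_initialized, fully_initialized };      // simplified two-state model
std::atomic<InitState> holder_state{InitState::being_initialized};  // stand-in for the holder klass

void uncommon_trap() {
  // The real VM deoptimizes the nmethod here; the sketch just reports it.
  std::puts("deoptimize: holder fully initialized, re-enter without the barrier");
}

void compiled_entry() {
  // Guard emitted at method entry (bci 0) under the "still initializing" assumption.
  if (holder_state.load(std::memory_order_acquire) == InitState::fully_initialized) {
    uncommon_trap();
    return;
  }
  // ... method body compiled while the holder class was being initialized ...
}

int main() {
  compiled_entry();                                      // assumption holds, body runs
  holder_state.store(InitState::fully_initialized);
  compiled_entry();                                      // assumption broken -> "deoptimize"
  return 0;
}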
@@ -976,29 +977,32 @@
   //    publishes the reference to the newly constructed object. Rather
   //    than wait for the publication, we simply block the writes here.
   //    Rather than put a barrier on only those writes which are required
   //    to complete, we force all writes to complete.
   //
-  // 2. On PPC64, also add MemBarRelease for constructors which write
-  //    volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
-  //    is set on PPC64, no sync instruction is issued after volatile
-  //    stores. We want to guarantee the same behavior as on platforms
-  //    with total store order, although this is not required by the Java
-  //    memory model. So as with finals, we add a barrier here.
+  // 2. Experimental VM option is used to force the barrier if any field
+  //    was written out in the constructor.
   //
-  // 3. Experimental VM option is used to force the barrier if any field
-  //    was written out in the constructor.
+  // 3. On processors which are not CPU_MULTI_COPY_ATOMIC (e.g. PPC64),
+  //    support_IRIW_for_not_multiple_copy_atomic_cpu selects that
+  //    MemBarVolatile is used before volatile load instead of after volatile
+  //    store, so there's no barrier after the store.
+  //    We want to guarantee the same behavior as on platforms with total store
+  //    order, although this is not required by the Java memory model.
+  //    In this case, we want to enforce visibility of volatile field
+  //    initializations which are performed in constructors.
+  //    So as with finals, we add a barrier here.
   //
   // "All bets are off" unless the first publication occurs after a
   // normal return from the constructor.  We do not attempt to detect
   // such unusual early publications.  But no barrier is needed on
   // exceptional returns, since they cannot publish normally.
   //
   if (method()->is_initializer() &&
-        (wrote_final() ||
-           PPC64_ONLY(wrote_volatile() ||)
-           (AlwaysSafeConstructors && wrote_fields()))) {
+       (wrote_final() ||
+         (AlwaysSafeConstructors && wrote_fields()) ||
+         (support_IRIW_for_not_multiple_copy_atomic_cpu && wrote_volatile()))) {
     _exits.insert_mem_bar(Op_MemBarRelease, alloc_with_final());
 
     // If Memory barrier is created for final fields write
     // and allocation node does not escape the initialize method,
     // then barrier introduced by allocation node can be removed.
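For context: the comment block above explains why the parser's exit path inserts a MemBarRelease when a constructor wrote final fields (or all fields under AlwaysSafeConstructors, or volatile fields on non-multi-copy-atomic CPUs): the initializing stores must complete before the reference to the new object can be published. As a rough analogy only, and not HotSpot code, the C++ sketch below uses a release fence in the same role; every name in it is made up.

// Analogy: release-before-publish, the ordering the constructor-exit barrier enforces (not HotSpot code).
#include <atomic>
#include <cassert>
#include <thread>

struct Box { int final_like_field = 0; };

std::atomic<Box*> published{nullptr};

void construct_and_publish() {
  Box* b = new Box;
  b->final_like_field = 42;                               // initializing store in the "constructor"
  std::atomic_thread_fence(std::memory_order_release);    // plays the role of MemBarRelease
  published.store(b, std::memory_order_relaxed);          // publication of the reference
}

void reader() {
  Box* b = published.load(std::memory_order_relaxed);
  if (b != nullptr) {
    std::atomic_thread_fence(std::memory_order_acquire);  // reader-side pairing for the analogy
    assert(b->final_like_field == 42);                    // the initializing store is visible
  }
}

int main() {
  std::thread t1(construct_and_publish);
  std::thread t2(reader);
  t1.join();
  t2.join();
  delete published.load();
  return 0;
}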
@@ -1033,23 +1037,16 @@
 
   if (tf()->range()->cnt() > TypeFunc::Parms) {
     const Type* ret_type = tf()->range()->field_at(TypeFunc::Parms);
     Node*       ret_phi  = _gvn.transform( _exits.argument(0) );
     if (!_exits.control()->is_top() && _gvn.type(ret_phi)->empty()) {
-      // In case of concurrent class loading, the type we set for the
-      // ret_phi in build_exits() may have been too optimistic and the
-      // ret_phi may be top now.
-      // Otherwise, we've encountered an error and have to mark the method as
-      // not compilable. Just using an assertion instead would be dangerous
-      // as this could lead to an infinite compile loop in non-debug builds.
-      {
-        if (C->env()->system_dictionary_modification_counter_changed()) {
-          C->record_failure(C2Compiler::retry_class_loading_during_parsing());
-        } else {
-          C->record_method_not_compilable("Can't determine return type.");
-        }
-      }
+      // If the type we set for the ret_phi in build_exits() is too optimistic and
+      // the ret_phi is top now, there's an extremely small chance that it may be due to class
+      // loading.  It could also be due to an error, so mark this method as not compilable because
+      // otherwise this could lead to an infinite compile loop.
+      // In any case, this code path is rarely (and never in my testing) reached.
+      C->record_method_not_compilable("Can't determine return type.");
       return;
     }
     if (ret_type->isa_int()) {
       BasicType ret_bt = method()->return_type()->basic_type();
       ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
@@ -1190,11 +1187,11 @@
 //-----------------------------do_method_entry--------------------------------
 // Emit any code needed in the pseudo-block before BCI zero.
 // The main thing to do is lock the receiver of a synchronized method.
 void Parse::do_method_entry() {
   set_parse_bci(InvocationEntryBci); // Pseudo-BCP
-  set_sp(0);                      // Java Stack Pointer
+  set_sp(0);                         // Java Stack Pointer
 
   NOT_PRODUCT( count_compiled_calls(true/*at_method_entry*/, false/*is_inline*/); )
 
   if (C->env()->dtrace_method_probes()) {
     make_dtrace_method_entry(method());
@@ -2100,15 +2097,28 @@
   }
 
   set_control( _gvn.transform(result_rgn) );
 }
 
+// Add check to deoptimize once holder klass is fully initialized.
+void Parse::clinit_deopt() {
+  assert(C->has_method(), "only for normal compilations");
+  assert(depth() == 1, "only for main compiled method");
+  assert(is_normal_parse(), "no barrier needed on osr entry");
+  assert(!method()->holder()->is_not_initialized(), "initialization should have been started");
+
+  set_parse_bci(0);
+
+  Node* holder = makecon(TypeKlassPtr::make(method()->holder()));
+  guard_klass_being_initialized(holder);
+}
+
 // Add check to deoptimize if RTM state is not ProfileRTM
 void Parse::rtm_deopt() {
 #if INCLUDE_RTM_OPT
   if (C->profile_rtm()) {
-    assert(C->method() != NULL, "only for normal compilations");
+    assert(C->has_method(), "only for normal compilations");
     assert(!C->method()->method_data()->is_empty(), "MDO is needed to record RTM state");
     assert(depth() == 1, "generate check only for main compiled method");
 
     // Set starting bci for uncommon trap.
     set_parse_bci(is_osr_parse() ? osr_bci() : 0);
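Aside: rtm_deopt() follows the same pattern as clinit_deopt() above: the method is compiled under an assumption (here, that the RTM state recorded in its MethodData is still ProfileRTM), and a check placed at the chosen entry bci triggers an uncommon trap if that assumption no longer holds, so the method can be recompiled under the updated RTM policy. A compact illustrative sketch, not HotSpot code and with made-up names:

// Illustrative guard on a per-method profiling state (not HotSpot code).
#include <cstdio>

enum class RTMState { ProfileRTM, Other };                 // simplified; the VM has several states
volatile RTMState mdo_rtm_state = RTMState::ProfileRTM;    // stand-in for the MethodData entry

bool rtm_guard_passes() {
  // Emitted at the entry bci chosen above (0, or osr_bci() for OSR compilations).
  return mdo_rtm_state == RTMState::ProfileRTM;            // otherwise: uncommon trap / deoptimize
}

int main() {
  std::printf("guard passes: %d\n", rtm_guard_passes());
  mdo_rtm_state = RTMState::Other;
  std::printf("guard passes: %d\n", rtm_guard_passes());   // state changed -> would deoptimize
  return 0;
}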