hotspot/src/share/vm/opto/library_call.cpp
changeset 13886 8d82c4dfa722
parent 13728 882756847a04
child 13895 f6dfe4123709
equal deleted inserted replaced
13885:6b056026ecad 13886:8d82c4dfa722
     62      62 
     63      63 // Local helper class for LibraryIntrinsic:
     64      64 class LibraryCallKit : public GraphKit {
     65      65  private:
     66      66   LibraryIntrinsic* _intrinsic;   // the library intrinsic being called
             67 
             68   const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false);
     67      69 
     68      70  public:
     69      71   LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic)
     70      72     : GraphKit(caller),
     71      73       _intrinsic(intrinsic)
    239     241                                     BasicType basic_elem_type,
    240     242                                     bool disjoint_bases,
    241     243                                     Node* src,  Node* src_offset,
    242     244                                     Node* dest, Node* dest_offset,
    243     245                                     Node* copy_length, bool dest_uninitialized);
    244             bool inline_unsafe_CAS(BasicType type);
            246   typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
            247   bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
    245     248   bool inline_unsafe_ordered_store(BasicType type);
    246     249   bool inline_fp_conversions(vmIntrinsics::ID id);
    247     250   bool inline_numberOfLeadingZeros(vmIntrinsics::ID id);
    248     251   bool inline_numberOfTrailingZeros(vmIntrinsics::ID id);
    249     252   bool inline_bitCount(vmIntrinsics::ID id);
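For orientation, a sketch of how the three new LoadStoreKind values line up with the Unsafe intrinsics dispatched further down in this changeset (the mapping is taken from the dispatch switch below; the standalone enum copy here is illustrative only, not part of the change):

    // Illustrative copy of the kinds and the intrinsics they serve:
    enum LoadStoreKindSketch {
      LS_xadd,    // fetch-and-add:     _getAndAddInt, _getAndAddLong
      LS_xchg,    // atomic exchange:   _getAndSetInt, _getAndSetLong, _getAndSetObject
      LS_cmpxchg  // compare-and-swap:  _compareAndSwapInt, _compareAndSwapLong, _compareAndSwapObject
    };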
    288     291     switch (id) {
    289     292     case vmIntrinsics::_indexOf:
    290     293     case vmIntrinsics::_compareTo:
    291     294     case vmIntrinsics::_equals:
    292     295     case vmIntrinsics::_equalsC:
            296     case vmIntrinsics::_getAndAddInt:
            297     case vmIntrinsics::_getAndAddLong:
            298     case vmIntrinsics::_getAndSetInt:
            299     case vmIntrinsics::_getAndSetLong:
            300     case vmIntrinsics::_getAndSetObject:
    293     301       break;  // InlineNatives does not control String.compareTo
    294     302     case vmIntrinsics::_Reference_get:
    295     303       break;  // InlineNatives does not control Reference.get
    296     304     default:
    297     305       return NULL;
    366     374     // Use the intrinsic version of Reference.get() so that the value in
    367     375     // the referent field can be registered by the G1 pre-barrier code.
    368     376     // Also add memory barrier to prevent commoning reads from this field
    369     377     // across safepoint since GC can change its value.
    370     378     break;
            379 
            380   case vmIntrinsics::_compareAndSwapObject:
            381 #ifdef _LP64
            382     if (!UseCompressedOops && !Matcher::match_rule_supported(Op_CompareAndSwapP)) return NULL;
            383 #endif
            384     break;
            385 
            386   case vmIntrinsics::_compareAndSwapLong:
            387     if (!Matcher::match_rule_supported(Op_CompareAndSwapL)) return NULL;
            388     break;
            389 
            390   case vmIntrinsics::_getAndAddInt:
            391     if (!Matcher::match_rule_supported(Op_GetAndAddI)) return NULL;
            392     break;
            393 
            394   case vmIntrinsics::_getAndAddLong:
            395     if (!Matcher::match_rule_supported(Op_GetAndAddL)) return NULL;
            396     break;
            397 
            398   case vmIntrinsics::_getAndSetInt:
            399     if (!Matcher::match_rule_supported(Op_GetAndSetI)) return NULL;
            400     break;
            401 
            402   case vmIntrinsics::_getAndSetLong:
            403     if (!Matcher::match_rule_supported(Op_GetAndSetL)) return NULL;
            404     break;
            405 
            406   case vmIntrinsics::_getAndSetObject:
            407 #ifdef _LP64
            408     if (!UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
            409     if (UseCompressedOops && !Matcher::match_rule_supported(Op_GetAndSetN)) return NULL;
            410     break;
            411 #else
            412     if (!Matcher::match_rule_supported(Op_GetAndSetP)) return NULL;
            413     break;
            414 #endif
    371     415 
    372     416  default:
    373     417     assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility");
    374     418     assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?");
    375     419     break;
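Each new case above follows the same availability idiom: the intrinsic is generated only if the backend matcher advertises the ideal opcode that parsing would emit. A minimal sketch of the predicate encoded by the _getAndSetObject case, rewritten as a hypothetical standalone helper (the helper name is invented; Matcher::match_rule_supported, UseCompressedOops, and the Op_* opcodes are the ones used above):

    static bool get_and_set_object_supported() {
    #ifdef _LP64
      // With compressed oops the matcher must handle the narrow-oop node.
      return UseCompressedOops ? Matcher::match_rule_supported(Op_GetAndSetN)
                               : Matcher::match_rule_supported(Op_GetAndSetP);
    #else
      return Matcher::match_rule_supported(Op_GetAndSetP);
    #endif
    }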
    618     662     return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static);
    619     663   case vmIntrinsics::_prefetchWriteStatic:
    620     664     return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static);
    621     665 
    622     666   case vmIntrinsics::_compareAndSwapObject:
    623             return inline_unsafe_CAS(T_OBJECT);
            667     return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg);
    624     668   case vmIntrinsics::_compareAndSwapInt:
    625             return inline_unsafe_CAS(T_INT);
            669     return inline_unsafe_load_store(T_INT, LS_cmpxchg);
    626     670   case vmIntrinsics::_compareAndSwapLong:
    627             return inline_unsafe_CAS(T_LONG);
            671     return inline_unsafe_load_store(T_LONG, LS_cmpxchg);
    628     672 
    629     673   case vmIntrinsics::_putOrderedObject:
    630     674     return inline_unsafe_ordered_store(T_OBJECT);
    631     675   case vmIntrinsics::_putOrderedInt:
    632     676     return inline_unsafe_ordered_store(T_INT);
    633     677   case vmIntrinsics::_putOrderedLong:
    634     678     return inline_unsafe_ordered_store(T_LONG);
            679 
            680   case vmIntrinsics::_getAndAddInt:
            681     return inline_unsafe_load_store(T_INT, LS_xadd);
            682   case vmIntrinsics::_getAndAddLong:
            683     return inline_unsafe_load_store(T_LONG, LS_xadd);
            684   case vmIntrinsics::_getAndSetInt:
            685     return inline_unsafe_load_store(T_INT, LS_xchg);
            686   case vmIntrinsics::_getAndSetLong:
            687     return inline_unsafe_load_store(T_LONG, LS_xchg);
            688   case vmIntrinsics::_getAndSetObject:
            689     return inline_unsafe_load_store(T_OBJECT, LS_xchg);
    635     690 
    636     691   case vmIntrinsics::_currentThread:
    637     692     return inline_native_currentThread();
    638     693   case vmIntrinsics::_isInterrupted:
    639     694     return inline_native_isInterrupted();
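For contrast, a sketch (not from the changeset) of what fetch-and-add means semantically: before the LS_xadd path existed, the equivalent effect had to be expressed as a load/CAS retry loop, which the new GetAndAddI/GetAndAddL nodes let a platform collapse into a single atomic instruction (e.g. LOCK XADD on x86). The sketch uses the GCC/Clang __sync builtin purely for illustration:

    #include <cstdint>

    // Returns the old value, like Unsafe.getAndAddInt.
    static int32_t get_and_add_int_sketch(volatile int32_t* addr, int32_t delta) {
      int32_t old;
      do {
        old = *addr;  // speculative read of the current value
      } while (!__sync_bool_compare_and_swap(addr, old, old + delta));
      return old;
    }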
   2299    2354 
   2300    2355 
   2301    2356 // Interpret Unsafe.fieldOffset cookies correctly:
   2302    2357 extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset);
   2303    2358 
           2359 const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) {
           2360   // Attempt to infer a sharper value type from the offset and base type.
           2361   ciKlass* sharpened_klass = NULL;
           2362 
           2363   // See if it is an instance field, with an object type.
           2364   if (alias_type->field() != NULL) {
           2365     assert(!is_native_ptr, "native pointer op cannot use a java address");
           2366     if (alias_type->field()->type()->is_klass()) {
           2367       sharpened_klass = alias_type->field()->type()->as_klass();
           2368     }
           2369   }
           2370 
           2371   // See if it is a narrow oop array.
           2372   if (adr_type->isa_aryptr()) {
           2373     if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
           2374       const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
           2375       if (elem_type != NULL) {
           2376         sharpened_klass = elem_type->klass();
           2377       }
           2378     }
           2379   }
           2380 
           2381   if (sharpened_klass != NULL) {
           2382     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
           2383 
           2384 #ifndef PRODUCT
           2385     if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
           2386       tty->print("  from base type:  ");   adr_type->dump();
           2387       tty->print("  sharpened value: ");   tjp->dump();
           2388     }
           2389 #endif
           2390     // Sharpen the value type.
           2391     return tjp;
           2392   }
           2393   return NULL;
           2394 }
           2395 
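A worked example of what the new helper buys, with hypothetical values that follow the two tests in the function:

    // Hypothetical walkthrough: Unsafe oop load from a String[] element.
    //   adr_type->isa_aryptr() != NULL
    //   adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()
    //   elem_type is the java.lang.String oop type
    //   => returns TypeOopPtr::make_from_klass(<String klass>), a sharper
    //      value_type than the coarse oop type the callers start with.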
   2304    2396 bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
   2305    2397   if (callee()->is_static())  return false;  // caller must have the capability!
   2306    2398 
   2307    2399 #ifndef PRODUCT
   2308    2400   {
   2428    2520   // from this field across safepoint since GC can change its value.
   2429    2521   bool need_read_barrier = !is_native_ptr && !is_store &&
   2430    2522                            offset != top() && heap_base_oop != top();
   2431    2523 
   2432    2524   if (!is_store && type == T_OBJECT) {
   2433            // Attempt to infer a sharper value type from the offset and base type.
           2525     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type, is_native_ptr);
   2434            ciKlass* sharpened_klass = NULL;
           2526     if (tjp != NULL) {
   2435        
   2436            // See if it is an instance field, with an object type.
   2437            if (alias_type->field() != NULL) {
   2438              assert(!is_native_ptr, "native pointer op cannot use a java address");
   2439              if (alias_type->field()->type()->is_klass()) {
   2440                sharpened_klass = alias_type->field()->type()->as_klass();
   2441              }
   2442            }
   2443        
   2444            // See if it is a narrow oop array.
   2445            if (adr_type->isa_aryptr()) {
   2446              if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) {
   2447                const TypeOopPtr *elem_type = adr_type->is_aryptr()->elem()->isa_oopptr();
   2448                if (elem_type != NULL) {
   2449                  sharpened_klass = elem_type->klass();
   2450                }
   2451              }
   2452            }
   2453        
   2454            if (sharpened_klass != NULL) {
   2455              const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
   2456        
   2457              // Sharpen the value type.
   2458    2527       value_type = tjp;
   2459        
   2460        #ifndef PRODUCT
   2461              if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
   2462                tty->print("  from base type:  ");   adr_type->dump();
   2463                tty->print("  sharpened value: "); value_type->dump();
   2464              }
   2465        #endif
   2466    2528     }
   2467    2529   }
   2468    2530 
   2469    2531   // Null check on self without removing any arguments.  The argument
   2470    2532   // null check technically happens in the wrong place, which can lead to
   2671    2733   set_i_o(_gvn.transform(prefetch));
   2672    2734 
   2673    2735   return true;
   2674    2736 }
   2675    2737 
   2676            //----------------------------inline_unsafe_CAS----------------------------
           2738 //----------------------------inline_unsafe_load_store----------------------------
   2677    2739 
   2678            bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
           2740 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) {
   2679    2741   // This basic scheme here is the same as inline_unsafe_access, but
   2680    2742   // differs in enough details that combining them would make the code
   2681    2743   // overly confusing.  (This is a true fact! I originally combined
   2682    2744   // them, but even I was confused by it!) As much code/comments as
   2683    2745   // possible are retained from inline_unsafe_access though to make
   2684    2746   // the correspondences clearer. - dl
   2685    2747 
   2686    2748   if (callee()->is_static())  return false;  // caller must have the capability!
   2687    2749 
   2688    2750 #ifndef PRODUCT
           2751   BasicType rtype;
   2689    2752   {
   2690    2753     ResourceMark rm;
   2691            // Check the signatures.
   2692    2754     ciSignature* sig = signature();
           2755     rtype = sig->return_type()->basic_type();
           2756     if (kind == LS_xadd || kind == LS_xchg) {
           2757       // Check the signatures.
   2693    2758 #ifdef ASSERT
   2694            BasicType rtype = sig->return_type()->basic_type();
           2759       assert(rtype == type, "get and set must return the expected type");
   2695            assert(rtype == T_BOOLEAN, "CAS must return boolean");
           2760       assert(sig->count() == 3, "get and set has 3 arguments");
   2696            assert(sig->count() == 4, "CAS has 4 arguments");
           2761       assert(sig->type_at(0)->basic_type() == T_OBJECT, "get and set base is object");
   2697            assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
           2762       assert(sig->type_at(1)->basic_type() == T_LONG, "get and set offset is long");
   2698            assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
           2763       assert(sig->type_at(2)->basic_type() == type, "get and set must take expected type as new value/delta");
   2699    2764 #endif // ASSERT
           2765     } else if (kind == LS_cmpxchg) {
           2766       // Check the signatures.
           2767 #ifdef ASSERT
           2768       assert(rtype == T_BOOLEAN, "CAS must return boolean");
           2769       assert(sig->count() == 4, "CAS has 4 arguments");
           2770       assert(sig->type_at(0)->basic_type() == T_OBJECT, "CAS base is object");
           2771       assert(sig->type_at(1)->basic_type() == T_LONG, "CAS offset is long");
           2772 #endif // ASSERT
           2773     } else {
           2774       ShouldNotReachHere();
           2775     }
   2700    2776   }
   2701    2777 #endif //PRODUCT
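Paraphrasing the asserts above, the two Java-level shapes being verified are (written informally; T stands for the int/long/Object flavor of the intrinsic):

    // LS_xadd / LS_xchg : T       op(Object base, long offset, T newValueOrDelta)  // 3 args, returns T
    // LS_cmpxchg        : boolean op(Object base, long offset, T expected, T x)    // 4 args, returns boolean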
   2702    2778 
   2703    2779   // number of stack slots per value argument (1 or 2)
   2704    2780   int type_words = type2size[type];
   2705    2781 
   2706            // Cannot inline wide CAS on machines that don't support it natively
   2707            if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8())
   2708              return false;
   2709        
   2710    2782   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
   2711    2783 
   2712            // Argument words:  "this" plus oop plus offset plus oldvalue plus newvalue;
           2784   // Argument words:  "this" plus oop plus offset (plus oldvalue) plus newvalue/delta;
   2713            int nargs = 1 + 1 + 2  + type_words + type_words;
           2785   int nargs = 1 + 1 + 2  + ((kind == LS_cmpxchg) ? type_words : 0) + type_words;
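A worked instance of the slot arithmetic (the numbers follow directly from the expression above; type2size is 1 for int, 2 for long):

    // _compareAndSwapLong: type_words = 2
    //   nargs = 1 (this) + 1 (base oop) + 2 (long offset) + 2 (oldval) + 2 (newval) = 8
    // _getAndAddInt: type_words = 1, and no oldval slot outside LS_cmpxchg
    //   nargs = 1 (this) + 1 (base oop) + 2 (long offset) + 0 + 1 (delta) = 5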
   2714    2786 
   2715            // pop arguments: newval, oldval, offset, base, and receiver
           2787   // pop arguments: newval, offset, base, and receiver
   2716    2788   debug_only(int saved_sp = _sp);
   2717    2789   _sp += nargs;
   2718    2790   Node* newval   = (type_words == 1) ? pop() : pop_pair();
   2719            Node* oldval   = (type_words == 1) ? pop() : pop_pair();
           2791   Node* oldval   = (kind == LS_cmpxchg) ? ((type_words == 1) ? pop() : pop_pair()) : NULL;
   2720    2792   Node *offset   = pop_pair();
   2721    2793   Node *base     = pop();
   2722    2794   Node *receiver = pop();
   2723    2795   assert(saved_sp == _sp, "must have correct argument count");
   2738    2810   // 32-bit machines ignore the high half of long offsets
   2739    2811   offset = ConvL2X(offset);
   2740    2812   Node* adr = make_unsafe_address(base, offset);
   2741    2813   const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
   2742    2814 
   2743            // (Unlike inline_unsafe_access, there seems no point in trying
           2815   // For CAS, unlike inline_unsafe_access, there seems no point in
   2744            // to refine types. Just use the coarse types here.
           2816   // trying to refine types. Just use the coarse types here.
   2745    2817   const Type *value_type = Type::get_const_basic_type(type);
   2746    2818   Compile::AliasType* alias_type = C->alias_type(adr_type);
   2747    2819   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
           2820 
           2821   if (kind == LS_xchg && type == T_OBJECT) {
           2822     const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
           2823     if (tjp != NULL) {
           2824       value_type = tjp;
           2825     }
           2826   }
           2827 
   2748    2828   int alias_idx = C->get_alias_index(adr_type);
   2749    2829 
   2750            // Memory-model-wise, a CAS acts like a little synchronized block,
           2830   // Memory-model-wise, a LoadStore acts like a little synchronized
   2751            // so needs barriers on each side.  These don't translate into
           2831   // block, so needs barriers on each side.  These don't translate
   2752            // actual barriers on most machines, but we still need rest of
           2832   // into actual barriers on most machines, but we still need rest of
   2753    2833   // compiler to respect ordering.
   2754    2834 
   2755    2835   insert_mem_bar(Op_MemBarRelease);
   2756    2836   insert_mem_bar(Op_MemBarCPUOrder);
   2757    2837 
   2760    2840   //          dependency which will confuse the scheduler.
   2761    2841   Node *mem = memory(alias_idx);
   2762    2842 
   2763    2843   // For now, we handle only those cases that actually exist: ints,
   2764    2844   // longs, and Object. Adding others should be straightforward.
   2765            Node* cas;
           2845   Node* load_store;
   2766    2846   switch(type) {
   2767    2847   case T_INT:
   2768            cas = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval));
           2848     if (kind == LS_xadd) {
           2849       load_store = _gvn.transform(new (C, 4) GetAndAddINode(control(), mem, adr, newval, adr_type));
           2850     } else if (kind == LS_xchg) {
           2851       load_store = _gvn.transform(new (C, 4) GetAndSetINode(control(), mem, adr, newval, adr_type));
           2852     } else if (kind == LS_cmpxchg) {
           2853       load_store = _gvn.transform(new (C, 5) CompareAndSwapINode(control(), mem, adr, newval, oldval));
           2854     } else {
           2855       ShouldNotReachHere();
           2856     }
   2769    2857     break;
   2770    2858   case T_LONG:
   2771            cas = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
           2859     if (kind == LS_xadd) {
           2860       load_store = _gvn.transform(new (C, 4) GetAndAddLNode(control(), mem, adr, newval, adr_type));
           2861     } else if (kind == LS_xchg) {
           2862       load_store = _gvn.transform(new (C, 4) GetAndSetLNode(control(), mem, adr, newval, adr_type));
           2863     } else if (kind == LS_cmpxchg) {
           2864       load_store = _gvn.transform(new (C, 5) CompareAndSwapLNode(control(), mem, adr, newval, oldval));
           2865     } else {
           2866       ShouldNotReachHere();
           2867     }
   2772    2868     break;
   2773    2869   case T_OBJECT:
   2774    2870     // Transformation of a value which could be NULL pointer (CastPP #NULL)
   2775    2871     // could be delayed during Parse (for example, in adjust_map_after_if()).
   2776    2872     // Execute transformation here to avoid barrier generation in such case.
   2777    2873     if (_gvn.type(newval) == TypePtr::NULL_PTR)
   2778    2874       newval = _gvn.makecon(TypePtr::NULL_PTR);
   2779    2875 
   2780    2876     // Reference stores need a store barrier.
   2781            // (They don't if CAS fails, but it isn't worth checking.)
   2782    2877     pre_barrier(true /* do_load*/,
   2783    2878                 control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
   2784    2879                 NULL /* pre_val*/,
   2785    2880                 T_OBJECT);
   2786    2881 #ifdef _LP64
   2787    2882     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
   2788    2883       Node *newval_enc = _gvn.transform(new (C, 2) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
   2789            Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
           2884       if (kind == LS_xchg) {
   2790            cas = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
           2885         load_store = _gvn.transform(new (C, 4) GetAndSetNNode(control(), mem, adr,
   2791                                                              newval_enc, oldval_enc));
           2886                                                               newval_enc, adr_type, value_type->make_narrowoop()));
           2887       } else {
           2888         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
           2889         Node *oldval_enc = _gvn.transform(new (C, 2) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
           2890         load_store = _gvn.transform(new (C, 5) CompareAndSwapNNode(control(), mem, adr,
           2891                                                                    newval_enc, oldval_enc));
           2892       }
   2792    2893     } else
   2793    2894 #endif
   2794    2895     {
   2795            cas = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
           2896       if (kind == LS_xchg) {
   2796            }
           2897         load_store = _gvn.transform(new (C, 4) GetAndSetPNode(control(), mem, adr, newval, adr_type, value_type->is_oopptr()));
   2797            post_barrier(control(), cas, base, adr, alias_idx, newval, T_OBJECT, true);
           2898       } else {
           2899         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
           2900         load_store = _gvn.transform(new (C, 5) CompareAndSwapPNode(control(), mem, adr, newval, oldval));
           2901       }
           2902     }
           2903     post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true);
   2798    2904     break;
   2799    2905   default:
   2800    2906     ShouldNotReachHere();
   2801    2907     break;
   2802    2908   }
   2803    2909 
   2804            // SCMemProjNodes represent the memory state of CAS. Their main
           2910   // SCMemProjNodes represent the memory state of a LoadStore. Their
   2805            // role is to prevent CAS nodes from being optimized away when their
           2911   // main role is to prevent LoadStore nodes from being optimized away
   2806            // results aren't used.
           2912   // when their results aren't used.
   2807            Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
           2913   Node* proj = _gvn.transform( new (C, 1) SCMemProjNode(load_store));
   2808    2914   set_memory(proj, alias_idx);
   2809    2915 
   2810    2916   // Add the trailing membar surrounding the access
   2811    2917   insert_mem_bar(Op_MemBarCPUOrder);
   2812    2918   insert_mem_bar(Op_MemBarAcquire);
   2813    2919 
   2814            push(cas);
           2920 #ifdef _LP64
           2921   if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
           2922     load_store = _gvn.transform(new (C, 2) DecodeNNode(load_store, load_store->bottom_type()->make_ptr()));
           2923   }
           2924 #endif
           2925 
           2926   assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
           2927   push_node(load_store->bottom_type()->basic_type(), load_store);
   2815    2928   return true;
   2816    2929 }
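Summarizing the function just read: for every kind, the emitted IR has the same sandwich shape. A sketch of the node sequence, assembled from the insert_mem_bar and transform calls above:

    // MemBarRelease
    // MemBarCPUOrder
    // GetAndAdd{I,L} | GetAndSet{I,L,P,N} | CompareAndSwap{I,L,P,N}
    // SCMemProj           (new memory state; keeps the LoadStore node alive)
    // MemBarCPUOrder
    // MemBarAcquire
    // DecodeN             (only LS_xchg on T_OBJECT with compressed oops)
    // push_node(result)   (T for xadd/xchg, boolean for cmpxchg)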
   2817    2930 
   2818    2931 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) {
   2819    2932   // This is another variant of inline_unsafe_access, differing in