8033380: Experimental VM flag to enforce access atomicity
author shade
date Mon, 03 Mar 2014 15:54:45 +0400
changeset 23189 27cf1316709b
parent 23188 21757c31b2c9
child 23190 e8bbf9cd711e
8033380: Experimental VM flag to enforce access atomicity
Summary: -XX:+AlwaysAtomicAccesses to unconditionally enforce the access atomicity.
Reviewed-by: roland, kvn, iveresov
hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
hotspot/src/share/vm/c1/c1_Runtime1.cpp
hotspot/src/share/vm/opto/parse3.cpp
hotspot/src/share/vm/runtime/globals.hpp
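For context: the Java Language Specification (17.7) permits a non-volatile long or double access to be split into two 32-bit halves, so a racy reader can observe a "torn" value mixing halves from two different writes; this flag forces such accesses down the atomic (volatile-style) code paths instead. A minimal sketch of the effect being suppressed (the class name and values are illustrative only, tearing is merely permitted and typically only observable on 32-bit VMs, and the JIT is also free to hoist the racy read):

  public class TearingDemo {
      // Non-volatile 64-bit field: JLS 17.7 allows its accesses to be
      // split into two 32-bit halves on some (notably 32-bit) VMs.
      static long shared;

      public static void main(String[] args) {
          Thread writer = new Thread(() -> {
              while (true) {
                  shared = 0L;   // halves: 0x00000000 / 0x00000000
                  shared = -1L;  // halves: 0xFFFFFFFF / 0xFFFFFFFF
              }
          });
          writer.setDaemon(true);
          writer.start();
          while (true) {
              long v = shared;
              if (v != 0L && v != -1L) {
                  // Mixed halves from two different writes: a torn read.
                  System.out.println("torn read: 0x" + Long.toHexString(v));
                  return;
              }
          }
      }
  }

Since the flag is declared with experimental() (see the globals.hpp hunk below), it presumably has to be unlocked on the command line:

  java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses TearingDemo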
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jan 28 15:05:46 2014 +0100
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp	Mon Mar 03 15:54:45 2014 +0400
@@ -1734,7 +1734,8 @@
                 (info ? new CodeEmitInfo(info) : NULL));
   }
 
-  if (is_volatile && !needs_patching) {
+  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
+  if (needs_atomic_access && !needs_patching) {
     volatile_field_store(value.result(), address, info);
   } else {
     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
@@ -1807,7 +1808,8 @@
     address = generate_address(object.result(), x->offset(), field_type);
   }
 
-  if (is_volatile && !needs_patching) {
+  bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
+  if (needs_atomic_access && !needs_patching) {
     volatile_field_load(address, reg, info);
   } else {
     LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Tue Jan 28 15:05:46 2014 +0100
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Mon Mar 03 15:54:45 2014 +0400
@@ -809,11 +809,10 @@
   int bci = vfst.bci();
   Bytecodes::Code code = caller_method()->java_code_at(bci);
 
-#ifndef PRODUCT
   // this is used by assertions in the access_field_patching_id
   BasicType patch_field_type = T_ILLEGAL;
-#endif // PRODUCT
   bool deoptimize_for_volatile = false;
+  bool deoptimize_for_atomic = false;
   int patch_field_offset = -1;
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
@@ -839,11 +838,24 @@
     // is the path for patching field offsets.  load_klass is only
     // used for patching references to oops which don't need special
     // handling in the volatile case.
+
     deoptimize_for_volatile = result.access_flags().is_volatile();
 
-#ifndef PRODUCT
+    // If we are patching a field which should be atomic, then
+    // the generated code is not correct either, so we force a
+    // deoptimization. Only T_LONG and T_DOUBLE fields need to be
+    // covered, as access atomicity can only be broken for them.
+
+    // Strictly speaking, the deoptimization on 64-bit platforms
+    // is unnecessary, and T_LONG stores on 32-bit platforms need
+    // to be handled by special patching code when AlwaysAtomicAccesses
+    // becomes a product feature. For now, we still go for the
+    // deoptimization, for consistency with volatile
+    // accesses.
+
     patch_field_type = result.field_type();
-#endif
+    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
+
   } else if (load_klass_or_mirror_patch_id) {
     Klass* k = NULL;
     switch (code) {
@@ -918,13 +930,19 @@
     ShouldNotReachHere();
   }
 
-  if (deoptimize_for_volatile) {
-    // At compile time we assumed the field wasn't volatile but after
-    // loading it turns out it was volatile so we have to throw the
+  if (deoptimize_for_volatile || deoptimize_for_atomic) {
+    // At compile time we assumed the field wasn't volatile/atomic but after
+    // loading it turns out it was volatile/atomic so we have to throw the
     // compiled code out and let it be regenerated.
     if (TracePatching) {
-      tty->print_cr("Deoptimizing for patching volatile field reference");
+      if (deoptimize_for_volatile) {
+        tty->print_cr("Deoptimizing for patching volatile field reference");
+      }
+      if (deoptimize_for_atomic) {
+        tty->print_cr("Deoptimizing for patching atomic field reference");
+      }
     }
+
     // It's possible the nmethod was invalidated in the last
     // safepoint, but if it's still alive then make it not_entrant.
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
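To watch this new deoptimization path fire, a debug build should suffice: TracePatching is, as far as I can tell, a develop-level C1 flag and therefore absent from product builds, and -XX:TieredStopAtLevel=1 keeps execution on C1. An illustrative invocation, reusing the TearingDemo class sketched above:

  java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses \
       -XX:TieredStopAtLevel=1 -XX:+TracePatching TearingDemo

When a not-yet-resolved long/double field access gets patched, this should print the new "Deoptimizing for patching atomic field reference" message.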
--- a/hotspot/src/share/vm/opto/parse3.cpp	Tue Jan 28 15:05:46 2014 +0100
+++ b/hotspot/src/share/vm/opto/parse3.cpp	Mon Mar 03 15:54:45 2014 +0400
@@ -233,7 +233,8 @@
   // Build the load.
   //
   MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
-  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
+  bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
+  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, needs_atomic_access);
 
   // Adjust Java stack
   if (type2size[bt] == 1)
@@ -314,7 +315,8 @@
     }
     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
   } else {
-    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
+    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
+    store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
   }
 
   // If reference is volatile, prevent following volatiles ops from
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Jan 28 15:05:46 2014 +0100
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Mar 03 15:54:45 2014 +0400
@@ -3864,6 +3864,9 @@
           "Allocation less than this value will be allocated "              \
           "using malloc. Larger allocations will use mmap.")                \
                                                                             \
+  experimental(bool, AlwaysAtomicAccesses, false,                           \
+          "Accesses to all variables should always be atomic")              \
+                                                                            \
   product(bool, EnableTracing, false,                                       \
           "Enable event-based tracing")                                     \
                                                                             \
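A quick way to confirm the flag is registered with the expected kind and value (output format approximate):

  java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses \
       -XX:+PrintFlagsFinal -version | grep AlwaysAtomicAccesses
      bool AlwaysAtomicAccesses   := true   {experimental}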