89 // Read once. |
89 // Read once. |
90 volatile Bytecodes::Code c = bytecode_1(); |
90 volatile Bytecodes::Code c = bytecode_1(); |
91 assert(c == 0 || c == code || code == 0, "update must be consistent"); |
91 assert(c == 0 || c == code || code == 0, "update must be consistent"); |
92 #endif |
92 #endif |
93 // Need to flush pending stores here before bytecode is written. |
93 // Need to flush pending stores here before bytecode is written. |
94 OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift)); |
94 OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift)); |
95 } |
95 } |
96 |
96 |
97 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { |
97 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { |
98 #ifdef ASSERT |
98 #ifdef ASSERT |
99 // Read once. |
99 // Read once. |
100 volatile Bytecodes::Code c = bytecode_2(); |
100 volatile Bytecodes::Code c = bytecode_2(); |
101 assert(c == 0 || c == code || code == 0, "update must be consistent"); |
101 assert(c == 0 || c == code || code == 0, "update must be consistent"); |
102 #endif |
102 #endif |
103 // Need to flush pending stores here before bytecode is written. |
103 // Need to flush pending stores here before bytecode is written. |
104 OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift)); |
104 OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift)); |
105 } |
105 } |
106 |
106 |
107 // Sets f1, ordering with previous writes. |
107 // Sets f1, ordering with previous writes. |
108 void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { |
108 void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { |
109 assert(f1 != NULL, ""); |
109 assert(f1 != NULL, ""); |
110 OrderAccess::release_store_ptr((HeapWord*) &_f1, f1); |
110 OrderAccess::release_store(&_f1, f1); |
111 } |
|
112 |
|
113 // Sets flags, but only if the value was previously zero. |
|
114 bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) { |
|
115 intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0); |
|
116 return (result == 0); |
|
117 } |
111 } |
118 |
112 |
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// current value of _flags is 0, otherwise another thread may have
// updated it and we don't want to overwrite that value. Don't
// bother trying to update it once it's nonzero but always make
// sure that the final parameter size agrees with what was passed.
156 if (_flags == 0) { |
150 if (_flags == 0) { |
157 Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0); |
151 intx newflags = (value & parameter_size_mask); |
|
152 Atomic::cmpxchg(newflags, &_flags, (intx)0); |
158 } |
153 } |
159 guarantee(parameter_size() == value, |
154 guarantee(parameter_size() == value, |
160 "size must not change: parameter_size=%d, value=%d", parameter_size(), value); |
155 "size must not change: parameter_size=%d, value=%d", parameter_size(), value); |
161 } |
156 } |
162 |
157 |