--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -144,7 +144,7 @@
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
- return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
+ return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
size_t ZPlatformAddressMetadataShift() {
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -437,7 +437,8 @@
// for which we do not support MP and so membars are not necessary. This ARMv5 code will
// be removed in the future.
- // Support for jint Atomic::add(jint add_value, volatile jint *dest)
+ // Implementation of atomic_add(jint add_value, volatile jint* dest)
+ // used by Atomic::add(volatile jint* dest, jint add_value)
//
// Arguments :
//
@@ -487,7 +488,8 @@
return start;
}
- // Support for jint Atomic::xchg(jint exchange_value, volatile jint *dest)
+ // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+ // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
//
// Arguments :
//
@@ -535,7 +537,8 @@
return start;
}
- // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+ // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
+ // used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
//
// Arguments :
//
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -883,7 +883,7 @@
//
// markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
- // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+ // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
@@ -921,7 +921,7 @@
std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
- // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+ // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// Store stack address of the BasicObjectLock (this is monitor) into object.
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -997,7 +997,7 @@
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
- // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+ // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {
@@ -1030,7 +1030,7 @@
cmpdi(CCR0, displaced_header, 0);
beq(CCR0, free_slot); // recursive unlock
- // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+ // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -374,7 +374,7 @@
// Finally patch out the jump.
volatile juint *jump_addr = (volatile juint*)instr_addr;
// Release not needed because caller uses invalidate_range after copying the remaining bytes.
- //OrderAccess::release_store(jump_addr, *((juint*)code_buffer));
+ //Atomic::release_store(jump_addr, *((juint*)code_buffer));
*jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
}
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -976,7 +976,7 @@
//
// markWord displaced_header = obj->mark().set_unlocked();
// monitor->lock()->set_displaced_header(displaced_header);
- // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+ // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// // We stored the monitor address into the object's mark word.
// } else if (THREAD->is_lock_owned((address)displaced_header))
// // Simple recursive case.
@@ -1011,7 +1011,7 @@
z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
BasicLock::displaced_header_offset_in_bytes(), monitor);
- // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+ // if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
// Store stack address of the BasicObjectLock (this is monitor) into object.
add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1082,7 +1082,7 @@
// if ((displaced_header = monitor->displaced_header()) == NULL) {
// // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
// monitor->set_obj(NULL);
- // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+ // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
// } else {
@@ -1123,7 +1123,7 @@
BasicLock::displaced_header_offset_in_bytes()));
z_bre(done); // displaced_header == 0 -> goto done
- // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
+ // } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
// // We swapped the unlocked mark in displaced_header into the object's mark word.
// monitor->set_obj(NULL);
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -585,7 +585,8 @@
return start;
}
- // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+ // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+ // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
//
// Arguments:
//
@@ -622,7 +623,8 @@
}
- // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+ // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
+ // used by Atomic::cmpxchg(volatile jint* dest, jint compare_value, jint exchange_value)
//
// Arguments:
//
@@ -646,7 +648,8 @@
return start;
}
- // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+ // Implementation of jlong atomic_cmpxchg_long(jlong exchange_value, volatile jlong *dest, jlong compare_value)
+ // used by Atomic::cmpxchg(volatile jlong *dest, jlong compare_value, jlong exchange_value)
//
// Arguments:
//
@@ -679,7 +682,8 @@
}
- // Support for jint Atomic::add(jint add_value, volatile jint* dest).
+ // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+ // used by Atomic::add(volatile jint* dest, jint add_value)
//
// Arguments:
//
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -144,7 +144,7 @@
const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZUtils::round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
const size_t address_offset_bits = log2_intptr(address_offset);
- return MIN2(MAX2(address_offset_bits, min_address_offset_bits), max_address_offset_bits);
+ return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
}
size_t ZPlatformAddressMetadataShift() {
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -430,7 +430,8 @@
//----------------------------------------------------------------------------------------------------
- // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
+ // Implementation of int32_t atomic_xchg(int32_t exchange_value, volatile int32_t* dest)
+ // used by Atomic::xchg(volatile int32_t* dest, int32_t exchange_value)
//
// xchg exists as far back as 8086, lock needed for MP only
// Stack layout immediately after call:
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -552,7 +552,8 @@
return start;
}
- // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
+ // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
+ // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
//
// Arguments :
// c_rarg0: exchange_value
@@ -571,7 +572,8 @@
return start;
}
- // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
+ // Implementation of intptr_t atomic_xchg(jlong exchange_value, volatile jlong* dest)
+ // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
//
// Arguments :
// c_rarg0: exchange_value
@@ -668,7 +670,8 @@
return start;
}
- // Support for jint atomic::add(jint add_value, volatile jint* dest)
+ // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
+ // used by Atomic::add(volatile jint* dest, jint add_value)
//
// Arguments :
// c_rarg0: add_value
@@ -690,7 +693,8 @@
return start;
}
- // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+ // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
+ // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
//
// Arguments :
// c_rarg0: add_value
--- a/src/hotspot/os/aix/os_aix.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/aix/os_aix.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1084,7 +1084,7 @@
if (now <= prev) {
return prev; // same or retrograde time;
}
- jlong obsv = Atomic::cmpxchg(now, &max_real_time, prev);
+ jlong obsv = Atomic::cmpxchg(&max_real_time, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
@@ -1794,7 +1794,7 @@
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
- if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+ if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
--- a/src/hotspot/os/bsd/os_bsd.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/bsd/os_bsd.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -51,7 +51,6 @@
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/semaphore.hpp"
@@ -931,7 +930,7 @@
if (now <= prev) {
return prev; // same or retrograde time;
}
- const uint64_t obsv = Atomic::cmpxchg(now, &Bsd::_max_abstime, prev);
+ const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
@@ -1834,7 +1833,7 @@
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
- if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+ if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
@@ -1895,7 +1894,7 @@
}
char buf[PATH_MAX + 1];
- int num = Atomic::add(1, &cnt);
+ int num = Atomic::add(&cnt, 1);
snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
@@ -3209,7 +3208,7 @@
static volatile int next_processor_id = 0;
static inline volatile int* get_apic_to_processor_mapping() {
- volatile int* mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
+ volatile int* mapping = Atomic::load_acquire(&apic_to_processor_mapping);
if (mapping == NULL) {
// Calculate possible number space for APIC ids. This space is not necessarily
// in the range [0, number_of_processors).
@@ -3238,9 +3237,9 @@
mapping[i] = -1;
}
- if (!Atomic::replace_if_null(mapping, &apic_to_processor_mapping)) {
+ if (!Atomic::replace_if_null(&apic_to_processor_mapping, mapping)) {
FREE_C_HEAP_ARRAY(int, mapping);
- mapping = OrderAccess::load_acquire(&apic_to_processor_mapping);
+ mapping = Atomic::load_acquire(&apic_to_processor_mapping);
}
}
@@ -3264,12 +3263,14 @@
int processor_id = Atomic::load(&mapping[apic_id]);
while (processor_id < 0) {
- if (Atomic::cmpxchg(-2, &mapping[apic_id], -1)) {
- Atomic::store(Atomic::add(1, &next_processor_id) - 1, &mapping[apic_id]);
+ if (Atomic::cmpxchg(&mapping[apic_id], -1, -2) == -1) {
+ Atomic::store(&mapping[apic_id], Atomic::add(&next_processor_id, 1) - 1);
}
processor_id = Atomic::load(&mapping[apic_id]);
}
+ assert(processor_id >= 0 && processor_id < os::processor_count(), "invalid processor id");
+
return (uint)processor_id;
}
#endif
--- a/src/hotspot/os/linux/os_linux.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -2752,7 +2752,7 @@
for (;;) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
- if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+ if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
@@ -2813,7 +2813,7 @@
}
char buf[PATH_MAX+1];
- int num = Atomic::add(1, &cnt);
+ int num = Atomic::add(&cnt, 1);
snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
--- a/src/hotspot/os/posix/os_posix.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/posix/os_posix.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1900,7 +1900,7 @@
// atomically decrement _event
for (;;) {
v = _event;
- if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+ if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");
@@ -1940,7 +1940,7 @@
// atomically decrement _event
for (;;) {
v = _event;
- if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
+ if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");
@@ -1998,7 +1998,7 @@
// but only in the correctly written condition checking loops of ObjectMonitor,
// Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep
- if (Atomic::xchg(1, &_event) >= 0) return;
+ if (Atomic::xchg(&_event, 1) >= 0) return;
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
@@ -2046,7 +2046,7 @@
// Return immediately if a permit is available.
// We depend on Atomic::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
- if (Atomic::xchg(0, &_counter) > 0) return;
+ if (Atomic::xchg(&_counter, 0) > 0) return;
Thread* thread = Thread::current();
assert(thread->is_Java_thread(), "Must be JavaThread");
--- a/src/hotspot/os/solaris/os_solaris.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/solaris/os_solaris.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1024,7 +1024,7 @@
if (now <= prev) {
return prev; // same or retrograde time;
}
- const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
+ const hrtime_t obsv = Atomic::cmpxchg(&max_hrtime, prev, now);
assert(obsv >= prev, "invariant"); // Monotonicity
// If the CAS succeeded then we're done and return "now".
// If the CAS failed and the observed value "obsv" is >= now then
@@ -1984,7 +1984,7 @@
while (true) {
for (int i = 0; i < Sigexit + 1; i++) {
jint n = pending_signals[i];
- if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+ if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
@@ -4710,7 +4710,7 @@
int v;
for (;;) {
v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+ if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee(v >= 0, "invariant");
if (v == 0) {
@@ -4748,7 +4748,7 @@
int v;
for (;;) {
v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+ if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee(v >= 0, "invariant");
if (v != 0) return OS_OK;
@@ -4797,7 +4797,7 @@
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
- if (Atomic::xchg(1, &_Event) >= 0) return;
+ if (Atomic::xchg(&_Event, 1) >= 0) return;
// If the thread associated with the event was parked, wake it.
// Wait for the thread assoc with the PlatformEvent to vacate.
@@ -4896,7 +4896,7 @@
// Return immediately if a permit is available.
// We depend on Atomic::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
- if (Atomic::xchg(0, &_counter) > 0) return;
+ if (Atomic::xchg(&_counter, 0) > 0) return;
// Optional fast-exit: Check interrupt before trying to wait
Thread* thread = Thread::current();
--- a/src/hotspot/os/windows/os_windows.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/windows/os_windows.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -2096,7 +2096,7 @@
while (true) {
for (int i = 0; i < NSIG + 1; i++) {
jint n = pending_signals[i];
- if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+ if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
return i;
}
}
@@ -3747,15 +3747,15 @@
// The first thread that reached this point, initializes the critical section.
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
- } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
+ } else if (Atomic::load_acquire(&process_exiting) == 0) {
if (what != EPT_THREAD) {
// Atomically set process_exiting before the critical section
// to increase the visibility between racing threads.
- Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
+ Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
}
EnterCriticalSection(&crit_sect);
- if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
+ if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
// Remove from the array those handles of the threads that have completed exiting.
for (i = 0, j = 0; i < handle_count; ++i) {
res = WaitForSingleObject(handles[i], 0 /* don't wait */);
@@ -3868,7 +3868,7 @@
}
if (!registered &&
- OrderAccess::load_acquire(&process_exiting) != 0 &&
+ Atomic::load_acquire(&process_exiting) != 0 &&
process_exiting != GetCurrentThreadId()) {
// Some other thread is about to call exit(), so we don't let
// the current unregistered thread proceed to exit() or _endthreadex()
@@ -5136,7 +5136,7 @@
int v;
for (;;) {
v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+ if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return OS_OK;
@@ -5198,7 +5198,7 @@
int v;
for (;;) {
v = _Event;
- if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
+ if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
}
guarantee((v == 0) || (v == 1), "invariant");
if (v != 0) return;
@@ -5236,7 +5236,7 @@
// from the first park() call after an unpark() call which will help
// shake out uses of park() and unpark() without condition variables.
- if (Atomic::xchg(1, &_Event) >= 0) return;
+ if (Atomic::xchg(&_Event, 1) >= 0) return;
::SetEvent(_ParkHandle);
}
--- a/src/hotspot/os/windows/threadCritical_windows.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os/windows/threadCritical_windows.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -56,7 +56,7 @@
if (lock_owner != current_thread) {
// Grab the lock before doing anything.
- while (Atomic::cmpxchg(0, &lock_count, -1) != -1) {
+ while (Atomic::cmpxchg(&lock_count, -1, 0) != -1) {
if (initialized) {
DWORD ret = WaitForSingleObject(lock_event, INFINITE);
assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject");
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -30,6 +30,7 @@
#error "Atomic currently only implemented for PPC64"
#endif
+#include "orderAccess_aix_ppc.hpp"
#include "utilities/debug.hpp"
// Implementation of class atomic
@@ -95,13 +96,13 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -126,8 +127,8 @@
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -152,8 +153,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
@@ -191,8 +192,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg doesn't necessarily do an acquire
@@ -231,9 +232,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
@@ -301,9 +302,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
@@ -351,9 +352,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
@@ -399,4 +400,15 @@
return old_value;
}
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
+ template <typename T>
+ T operator()(const volatile T* p) const {
+ T t = Atomic::load(p);
+ // Use twi-isync for load_acquire (faster than lwsync).
+ __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
+ return t;
+ }
+};
+
#endif // OS_CPU_AIX_PPC_ATOMIC_AIX_PPC_HPP
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -64,8 +64,6 @@
#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
@@ -78,13 +76,6 @@
inline void OrderAccess::cross_modify_fence()
{ inlasm_isync(); }
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
- template <typename T>
- T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -31,13 +31,13 @@
struct Atomic::PlatformAdd
: Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
+ template<typename D, typename I>
+ D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -51,8 +51,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
@@ -64,9 +64,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(1 == sizeof(T));
__asm__ volatile ( "lock cmpxchgb %1,(%3)"
@@ -78,9 +78,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "lock cmpxchgl %1,(%3)"
@@ -92,8 +92,8 @@
#ifdef AMD64
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -107,8 +107,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
@@ -120,9 +120,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ( "lock cmpxchgq %1,(%3)"
@@ -142,12 +142,12 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
- return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+ return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
template<>
@@ -161,12 +161,62 @@
template<>
template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
- T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+ T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // AMD64
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgb (%2),%0"
+ : "=q" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgw (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgl (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+
+#ifdef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+#endif // AMD64
+
#endif // OS_CPU_BSD_X86_ATOMIC_BSD_X86_HPP
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s Mon Nov 25 15:16:29 2019 +0000
@@ -633,9 +633,9 @@
ret
- # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+ # Implementation of int64_t _Atomic_cmpxchg_long(int64_t exchange_value,
# volatile int64_t* dest,
- # int64_t compare_value)
+ # int64_t compare_value)
#
.p2align 4,,15
ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -665,4 +665,3 @@
movl 8(%esp), %eax # dest
fistpll (%eax)
ret
-
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -64,54 +64,4 @@
__asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
}
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgb (%2),%0"
- : "=q" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgw (%2),%0"
- : "=r" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgl (%2),%0"
- : "=r" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgq (%2), %0"
- : "=r" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-#endif // AMD64
-
#endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -163,22 +163,22 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
#ifdef ARM
- return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
+ return add_using_helper<int>(arm_add_and_fetch, dest, add_value);
#else
#ifdef M68K
- return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
+ return add_using_helper<int>(m68k_add_and_fetch, dest, add_value);
#else
return __sync_add_and_fetch(dest, add_value);
#endif // M68K
@@ -186,8 +186,8 @@
}
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -197,15 +197,15 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
- return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
+ return xchg_using_helper<int>(arm_lock_test_and_set, dest, exchange_value);
#else
#ifdef M68K
- return xchg_using_helper<int>(m68k_lock_test_and_set, exchange_value, dest);
+ return xchg_using_helper<int>(m68k_lock_test_and_set, dest, exchange_value);
#else
// __sync_lock_test_and_set is a bizarrely named atomic exchange
// operation. Note that some platforms only support this with the
@@ -224,8 +224,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T result = __sync_lock_test_and_set (dest, exchange_value);
@@ -239,16 +239,16 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
- return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
+ return cmpxchg_using_helper<int>(arm_compare_and_swap, dest, compare_value, exchange_value);
#else
#ifdef M68K
- return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
+ return cmpxchg_using_helper<int>(m68k_compare_and_swap, dest, compare_value, exchange_value);
#else
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
@@ -257,9 +257,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -276,8 +276,8 @@
template<>
template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
- T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+ T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -32,16 +32,12 @@
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
-#define FULL_MEM_BARRIER __sync_synchronize()
-#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
FULL_MEM_BARRIER;
return res;
@@ -50,8 +46,8 @@
template<size_t byte_size>
template<typename T>
-inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
@@ -61,9 +57,9 @@
template<size_t byte_size>
template<typename T>
-inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
if (order == memory_order_relaxed) {
@@ -81,4 +77,25 @@
}
}
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+ template <typename T>
+ T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
+};
+
#endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -37,6 +37,10 @@
inline void OrderAccess::loadstore() { acquire(); }
inline void OrderAccess::storeload() { fence(); }
+#define FULL_MEM_BARRIER __sync_synchronize()
+#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
+
inline void OrderAccess::acquire() {
READ_MEM_BARRIER;
}
@@ -51,25 +55,4 @@
inline void OrderAccess::cross_modify_fence() { }
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
- template <typename T>
- T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
-};
-
#endif // OS_CPU_LINUX_AARCH64_ORDERACCESS_LINUX_AARCH64_HPP
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -54,8 +54,8 @@
template<>
template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
- T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+ T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
(*os::atomic_store_long_func)(
PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
@@ -70,27 +70,27 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
- return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+ return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
}
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
- return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
+ return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
}
@@ -119,22 +119,22 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
- return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+ return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, dest, compare_value, exchange_value);
}
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
- return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+ return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, dest, compare_value, exchange_value);
}
#endif // OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -30,6 +30,7 @@
#error "Atomic currently only implemented for PPC64"
#endif
+#include "orderAccess_linux_ppc.hpp"
#include "utilities/debug.hpp"
// Implementation of class atomic
@@ -95,13 +96,13 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -126,8 +127,8 @@
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -152,8 +153,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
// Note that xchg doesn't necessarily do an acquire
// (see synchronizer.cpp).
@@ -191,8 +192,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
// Note that xchg doesn't necessarily do an acquire
@@ -231,9 +232,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
@@ -301,9 +302,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
@@ -351,9 +352,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
@@ -399,4 +400,16 @@
return old_value;
}
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+ template <typename T>
+ T operator()(const volatile T* p) const {
+ T t = Atomic::load(p);
+ // Use twi-isync for load_acquire (faster than lwsync).
+ __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory");
+ return t;
+ }
+};
+
#endif // OS_CPU_LINUX_PPC_ATOMIC_LINUX_PPC_HPP
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -68,8 +68,6 @@
#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
inline void OrderAccess::loadload() { inlasm_lwsync(); }
inline void OrderAccess::storestore() { inlasm_lwsync(); }
@@ -82,17 +80,9 @@
inline void OrderAccess::cross_modify_fence()
{ inlasm_isync(); }
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
- template <typename T>
- T operator()(const volatile T* p) const { T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
#undef inlasm_sync
#undef inlasm_lwsync
#undef inlasm_eieio
#undef inlasm_isync
-#undef inlasm_acquire_reg
#endif // OS_CPU_LINUX_PPC_ORDERACCESS_LINUX_PPC_HPP
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -78,13 +78,13 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I inc,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -137,8 +137,8 @@
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I inc,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -208,8 +208,8 @@
// replacement succeeded.
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order unused) const {
STATIC_ASSERT(4 == sizeof(T));
T old;
@@ -232,8 +232,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order unused) const {
STATIC_ASSERT(8 == sizeof(T));
T old;
@@ -289,9 +289,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T cmp_val,
+ T xchg_val,
atomic_memory_order unused) const {
STATIC_ASSERT(4 == sizeof(T));
T old;
@@ -313,9 +313,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T cmp_val,
+ T xchg_val,
atomic_memory_order unused) const {
STATIC_ASSERT(8 == sizeof(T));
T old;
@@ -335,4 +335,11 @@
return old;
}
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+ template <typename T>
+ T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; }
+};
+
#endif // OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -76,13 +76,6 @@
inline void OrderAccess::fence() { inlasm_zarch_sync(); }
inline void OrderAccess::cross_modify_fence() { inlasm_zarch_sync(); }
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
- template <typename T>
- T operator()(const volatile T* p) const { T t = *p; inlasm_zarch_acquire(); return t; }
-};
-
#undef inlasm_compiler_barrier
#undef inlasm_zarch_sync
#undef inlasm_zarch_release
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -31,13 +31,13 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -59,8 +59,8 @@
}
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -83,8 +83,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T rv = exchange_value;
@@ -98,8 +98,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T rv = exchange_value;
@@ -124,9 +124,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T rv;
@@ -140,9 +140,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T rv;
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -31,13 +31,13 @@
struct Atomic::PlatformAdd
: Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -51,8 +51,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "xchgl (%2),%0"
@@ -64,9 +64,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(1 == sizeof(T));
__asm__ volatile ("lock cmpxchgb %1,(%3)"
@@ -78,9 +78,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ("lock cmpxchgl %1,(%3)"
@@ -93,8 +93,8 @@
#ifdef AMD64
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -108,7 +108,7 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value, T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("xchgq (%2),%0"
@@ -120,9 +120,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order /* order */) const {
STATIC_ASSERT(8 == sizeof(T));
__asm__ __volatile__ ("lock cmpxchgq %1,(%3)"
@@ -142,12 +142,12 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
- return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+ return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}
template<>
@@ -161,12 +161,62 @@
template<>
template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
- T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+ T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
#endif // AMD64
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgb (%2),%0"
+ : "=q" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgw (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgl (%2),%0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+
+#ifdef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm__ volatile ( "xchgq (%2), %0"
+ : "=r" (v)
+ : "0" (v), "r" (p)
+ : "memory");
+ }
+};
+#endif // AMD64
+
#endif // OS_CPU_LINUX_X86_ATOMIC_LINUX_X86_HPP
--- a/src/hotspot/os_cpu/linux_x86/linux_x86_32.s Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_x86/linux_x86_32.s Mon Nov 25 15:16:29 2019 +0000
@@ -1,4 +1,4 @@
-#
+#
# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
@@ -19,15 +19,15 @@
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
-#
+#
-
+
# NOTE WELL! The _Copy functions are called directly
# from server-compiler-generated code via CallLeafNoFP,
# which means that they *must* either not use floating
# point or use it in the same manner as does the server
# compiler.
-
+
.globl _Copy_conjoint_bytes
.globl _Copy_arrayof_conjoint_bytes
.globl _Copy_conjoint_jshorts_atomic
@@ -174,7 +174,7 @@
leal -1(%esi,%ecx),%eax # from + count - 1
jbe acb_CopyRight
cmpl %eax,%edi
- jbe acb_CopyLeft
+ jbe acb_CopyLeft
# copy from low to high
acb_CopyRight:
cmpl $3,%ecx
@@ -262,7 +262,7 @@
leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
jbe cs_CopyRight
cmpl %eax,%edi
- jbe cs_CopyLeft
+ jbe cs_CopyLeft
# copy from low to high
cs_CopyRight:
# align source address at dword address boundary
@@ -283,7 +283,7 @@
jbe 2f # <= 32 dwords
# copy aligned dwords
rep; smovl
- jmp 4f
+ jmp 4f
# copy aligned dwords
2: subl %esi,%edi
.p2align 4,,15
@@ -349,7 +349,7 @@
leal -2(%esi,%ecx,2),%eax # from + count*2 - 2
jbe acs_CopyRight
cmpl %eax,%edi
- jbe acs_CopyLeft
+ jbe acs_CopyLeft
acs_CopyRight:
movl %ecx,%eax # word count
sarl %ecx # dword count
@@ -358,10 +358,10 @@
jbe 2f # <= 32 dwords
# copy aligned dwords
rep; smovl
- jmp 4f
+ jmp 4f
# copy aligned dwords
.space 5
-2: subl %esi,%edi
+2: subl %esi,%edi
.p2align 4,,15
3: movl (%esi),%edx
movl %edx,(%edi,%esi,1)
@@ -428,7 +428,7 @@
leal -4(%esi,%ecx,4),%eax # from + count*4 - 4
jbe ci_CopyRight
cmpl %eax,%edi
- jbe ci_CopyLeft
+ jbe ci_CopyLeft
ci_CopyRight:
cmpl $32,%ecx
jbe 2f # <= 32 dwords
@@ -471,7 +471,7 @@
popl %edi
popl %esi
ret
-
+
# Support for void Copy::conjoint_jlongs_atomic(jlong* from,
# jlong* to,
# size_t count)
@@ -537,7 +537,7 @@
je 5f
cmpl $33,%ecx
jae 3f
-1: subl %esi,%edi
+1: subl %esi,%edi
.p2align 4,,15
2: movl (%esi),%edx
movl %edx,(%edi,%esi,1)
@@ -545,7 +545,7 @@
subl $1,%ecx
jnz 2b
addl %esi,%edi
- jmp 5f
+ jmp 5f
3: smovl # align to 8 bytes, we know we are 4 byte aligned to start
subl $1,%ecx
4: .p2align 4,,15
@@ -612,9 +612,9 @@
ret
- # Support for jlong Atomic::cmpxchg(jlong exchange_value,
- # volatile jlong* dest,
- # jlong compare_value)
+ # Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+ # jlong compare_value,
+ # jlong exchange_value)
#
.p2align 4,,15
.type _Atomic_cmpxchg_long,@function
@@ -643,4 +643,3 @@
movl 8(%esp), %eax # dest
fistpll (%eax)
ret
-
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -66,54 +66,4 @@
#endif
}
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgb (%2),%0"
- : "=q" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgw (%2),%0"
- : "=r" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgl (%2),%0"
- : "=r" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm__ volatile ( "xchgq (%2), %0"
- : "=r" (v)
- : "0" (v), "r" (p)
- : "memory");
- }
-};
-#endif // AMD64
-
#endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -34,13 +34,13 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -49,8 +49,8 @@
}
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -59,8 +59,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
// __sync_lock_test_and_set is a bizarrely named atomic exchange
@@ -78,8 +78,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T result = __sync_lock_test_and_set (dest, exchange_value);
@@ -93,9 +93,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -103,9 +103,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
@@ -122,8 +122,8 @@
template<>
template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
- T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+ T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -30,12 +30,12 @@
// Implement ADD using a CAS loop.
template<size_t byte_size>
struct Atomic::PlatformAdd {
- template<typename I, typename D>
- inline D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+ template<typename D, typename I>
+ inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
D old_value = *dest;
while (true) {
D new_value = old_value + add_value;
- D result = cmpxchg(new_value, dest, old_value);
+ D result = cmpxchg(dest, old_value, new_value);
if (result == old_value) break;
old_value = result;
}
@@ -45,8 +45,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
__asm__ volatile ( "swap [%2],%0"
@@ -58,13 +58,13 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T old_value = *dest;
while (true) {
- T result = cmpxchg(exchange_value, dest, old_value);
+ T result = cmpxchg(dest, old_value, exchange_value);
if (result == old_value) break;
old_value = result;
}
@@ -77,9 +77,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
T rv;
@@ -93,9 +93,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
T rv;
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -44,14 +44,14 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
// Not using add_using_helper; see comment for cmpxchg.
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -62,8 +62,8 @@
// Not using add_using_helper; see comment for cmpxchg.
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
@@ -74,8 +74,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
@@ -87,8 +87,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
@@ -104,9 +104,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
return PrimitiveConversions::cast<T>(
@@ -117,9 +117,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
return PrimitiveConversions::cast<T>(
@@ -130,9 +130,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
--- a/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/solaris_x86/solaris_x86_64.il Mon Nov 25 15:16:29 2019 +0000
@@ -49,7 +49,8 @@
orq %rdx, %rax
.end
- // Support for jint Atomic::add(jint add_value, volatile jint* dest)
+ // Implementation of jint _Atomic_add(jint add_value, volatile jint* dest)
+ // used by Atomic::add(volatile jint* dest, jint add_value)
.inline _Atomic_add,2
movl %edi, %eax // save add_value for return
lock
@@ -57,7 +58,8 @@
addl %edi, %eax
.end
- // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
+ // Implementation of jlong _Atomic_add(jlong add_value, volatile jlong* dest)
+ // used by Atomic::add(volatile jlong* dest, jlong add_value)
.inline _Atomic_add_long,2
movq %rdi, %rax // save add_value for return
lock
@@ -65,39 +67,41 @@
addq %rdi, %rax
.end
- // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
+ // Implementation of jint _Atomic_xchg(jint exchange_value, volatile jint* dest)
+ // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
.inline _Atomic_xchg,2
xchgl (%rsi), %edi
movl %edi, %eax
.end
- // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest).
+ // Implementation of jlong _Atomic_xchg(jlong exchange_value, volatile jlong* dest)
+ // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
.inline _Atomic_xchg_long,2
xchgq (%rsi), %rdi
movq %rdi, %rax
.end
- // Support for jbyte Atomic::cmpxchg(jbyte exchange_value,
- // volatile jbyte *dest,
- // jbyte compare_value)
+ // Support for jbyte Atomic::cmpxchg(volatile jbyte *dest,
+ // jbyte compare_value,
+ // jbyte exchange_value)
.inline _Atomic_cmpxchg_byte,3
movb %dl, %al // compare_value
lock
cmpxchgb %dil, (%rsi)
.end
- // Support for jint Atomic::cmpxchg(jint exchange_value,
- // volatile jint *dest,
- // jint compare_value)
+ // Support for jint Atomic::cmpxchg(volatile jint *dest,
+ // jint compare_value,
+ // jint exchange_value)
.inline _Atomic_cmpxchg,3
movl %edx, %eax // compare_value
lock
cmpxchgl %edi, (%rsi)
.end
- // Support for jlong Atomic::cmpxchg(jlong exchange_value,
- // volatile jlong* dest,
- // jlong compare_value)
+ // Support for jlong Atomic::cmpxchg(volatile jlong* dest,
+ // jlong compare_value,
+ // jlong exchange_value)
.inline _Atomic_cmpxchg_long,3
movq %rdx, %rax // compare_value
lock
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -27,6 +27,17 @@
#include "runtime/os.hpp"
+// Note that in MSVC, volatile memory accesses are explicitly
+// guaranteed to have acquire release semantics (w.r.t. compiler
+// reordering) and therefore does not even need a compiler barrier
+// for normal acquire release accesses. And all generalized
+// bound calls like release_store go through Atomic::load
+// and Atomic::store which do volatile memory accesses.
+template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
+template<> inline void ScopedFence<RELEASE_X>::prefix() { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
+
// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
@@ -46,33 +57,33 @@
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
- template<typename I, typename D>
- D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
};
#ifdef AMD64
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
- return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
+ return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
}
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
- return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
+ return add_using_helper<int64_t>(os::atomic_add_long_func, dest, add_value);
}
#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \
template<> \
template<typename T> \
- inline T Atomic::PlatformXchg<ByteSize>::operator()(T exchange_value, \
- T volatile* dest, \
+ inline T Atomic::PlatformXchg<ByteSize>::operator()(T volatile* dest, \
+ T exchange_value, \
atomic_memory_order order) const { \
STATIC_ASSERT(ByteSize == sizeof(T)); \
- return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
+ return xchg_using_helper<StubType>(StubName, dest, exchange_value); \
}
DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
@@ -80,15 +91,15 @@
#undef DEFINE_STUB_XCHG
-#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
- template<> \
- template<typename T> \
- inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T exchange_value, \
- T volatile* dest, \
- T compare_value, \
+#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \
+ template<> \
+ template<typename T> \
+ inline T Atomic::PlatformCmpxchg<ByteSize>::operator()(T volatile* dest, \
+ T compare_value, \
+ T exchange_value, \
atomic_memory_order order) const { \
- STATIC_ASSERT(ByteSize == sizeof(T)); \
- return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
+ STATIC_ASSERT(ByteSize == sizeof(T)); \
+ return cmpxchg_using_helper<StubType>(StubName, dest, compare_value, exchange_value); \
}
DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
@@ -100,8 +111,8 @@
#else // !AMD64
template<>
-template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
@@ -116,8 +127,8 @@
template<>
template<typename T>
-inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
// alternative for InterlockedExchange
@@ -130,9 +141,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(1 == sizeof(T));
// alternative for InterlockedCompareExchange
@@ -146,9 +157,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
// alternative for InterlockedCompareExchange
@@ -162,9 +173,9 @@
template<>
template<typename T>
-inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(8 == sizeof(T));
int32_t ex_lo = (int32_t)exchange_value;
@@ -202,8 +213,8 @@
template<>
template<typename T>
-inline void Atomic::PlatformStore<8>::operator()(T store_value,
- T volatile* dest) const {
+inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
+ T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
volatile T* src = &store_value;
__asm {
@@ -218,4 +229,45 @@
#pragma warning(default: 4035) // Enables warnings reporting missing return statement
+#ifndef AMD64
+template<>
+struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm {
+ mov edx, p;
+ mov al, v;
+ xchg al, byte ptr [edx];
+ }
+ }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm {
+ mov edx, p;
+ mov ax, v;
+ xchg ax, word ptr [edx];
+ }
+ }
+};
+
+template<>
+struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ __asm {
+ mov edx, p;
+ mov eax, v;
+ xchg eax, dword ptr [edx];
+ }
+ }
+};
+#endif // AMD64
+
#endif // OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -39,17 +39,6 @@
_ReadWriteBarrier();
}
-// Note that in MSVC, volatile memory accesses are explicitly
-// guaranteed to have acquire release semantics (w.r.t. compiler
-// reordering) and therefore does not even need a compiler barrier
-// for normal acquire release accesses. And all generalized
-// bound calls like release_store go through OrderAccess::load
-// and OrderAccess::store which do volatile memory accesses.
-template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
-template<> inline void ScopedFence<RELEASE_X>::prefix() { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
inline void OrderAccess::loadload() { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore() { compiler_barrier(); }
@@ -74,45 +63,4 @@
__cpuid(regs, 0);
}
-#ifndef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm {
- mov edx, p;
- mov al, v;
- xchg al, byte ptr [edx];
- }
- }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm {
- mov edx, p;
- mov ax, v;
- xchg ax, word ptr [edx];
- }
- }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
- template <typename T>
- void operator()(T v, volatile T* p) const {
- __asm {
- mov edx, p;
- mov eax, v;
- xchg eax, dword ptr [edx];
- }
- }
-};
-#endif // AMD64
-
#endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -347,7 +347,7 @@
AOTCompiledMethod *aot = new AOTCompiledMethod(code, mh(), meta, metadata_table, metadata_size, state_adr, this, name, code_id, _aot_id);
assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
_code_to_aot[code_id]._aot = aot; // Should set this first
- if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
+ if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
_code_to_aot[code_id]._aot = NULL; // Clean
} else { // success
// Publish method
@@ -410,7 +410,7 @@
AOTCompiledMethod* aot = new AOTCompiledMethod(entry, NULL, meta, metadata_table, metadata_size, state_adr, this, full_name, code_id, i);
assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
_code_to_aot[code_id]._aot = aot;
- if (Atomic::cmpxchg(in_use, &_code_to_aot[code_id]._state, not_set) != not_set) {
+ if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, in_use) != not_set) {
fatal("stab '%s' code state is %d", full_name, _code_to_aot[code_id]._state);
}
// Adjust code buffer boundaries only for stubs because they are last in the buffer.
@@ -721,7 +721,7 @@
for (int i = 0; i < methods_cnt; ++i) {
int code_id = indexes[i];
// Invalidate aot code.
- if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
+ if (Atomic::cmpxchg(&_code_to_aot[code_id]._state, not_set, invalid) != not_set) {
if (_code_to_aot[code_id]._state == in_use) {
AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
assert(aot != NULL, "aot should be set");
--- a/src/hotspot/share/classfile/classLoader.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/classfile/classLoader.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -29,11 +29,11 @@
#include "runtime/orderAccess.hpp"
// Next entry in class path
-inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); }
+inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); }
inline void ClassPathEntry::set_next(ClassPathEntry* next) {
// may have unlocked readers, so ensure visibility.
- OrderAccess::release_store(&_next, next);
+ Atomic::release_store(&_next, next);
}
inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
--- a/src/hotspot/share/classfile/classLoaderData.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/classfile/classLoaderData.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -187,11 +187,11 @@
oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
if (_head == NULL || _head->_size == Chunk::CAPACITY) {
Chunk* next = new Chunk(_head);
- OrderAccess::release_store(&_head, next);
+ Atomic::release_store(&_head, next);
}
oop* handle = &_head->_data[_head->_size];
NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, o);
- OrderAccess::release_store(&_head->_size, _head->_size + 1);
+ Atomic::release_store(&_head->_size, _head->_size + 1);
return handle;
}
@@ -214,10 +214,10 @@
}
void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
- Chunk* head = OrderAccess::load_acquire(&_head);
+ Chunk* head = Atomic::load_acquire(&_head);
if (head != NULL) {
// Must be careful when reading size of head
- oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
+ oops_do_chunk(f, head, Atomic::load_acquire(&head->_size));
for (Chunk* c = head->_next; c != NULL; c = c->_next) {
oops_do_chunk(f, c, c->_size);
}
@@ -273,7 +273,7 @@
return;
}
int new_claim = old_claim & ~claim;
- if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+ if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
return;
}
}
@@ -286,7 +286,7 @@
return false;
}
int new_claim = old_claim | claim;
- if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+ if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) {
return true;
}
}
@@ -326,7 +326,7 @@
void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
- for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+ for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
klass_closure->do_klass(k);
assert(k != k->next_link(), "no loops!");
}
@@ -334,7 +334,7 @@
void ClassLoaderData::classes_do(void f(Klass * const)) {
// Lock-free access requires load_acquire
- for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+ for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
f(k);
assert(k != k->next_link(), "no loops!");
}
@@ -342,7 +342,7 @@
void ClassLoaderData::methods_do(void f(Method*)) {
// Lock-free access requires load_acquire
- for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+ for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
InstanceKlass::cast(k)->methods_do(f);
}
@@ -351,7 +351,7 @@
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
// Lock-free access requires load_acquire
- for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+ for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
// Do not filter ArrayKlass oops here...
if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
#ifdef ASSERT
@@ -366,7 +366,7 @@
void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
// Lock-free access requires load_acquire
- for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+ for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k->is_instance_klass()) {
f(InstanceKlass::cast(k));
}
@@ -465,7 +465,7 @@
k->set_next_link(old_value);
// Link the new item into the list, making sure the linked class is stable
// since the list can be walked without a lock
- OrderAccess::release_store(&_klasses, k);
+ Atomic::release_store(&_klasses, k);
if (k->is_array_klass()) {
ClassLoaderDataGraph::inc_array_classes(1);
} else {
@@ -552,7 +552,7 @@
ModuleEntryTable* ClassLoaderData::modules() {
// Lazily create the module entry table at first request.
// Lock-free access requires load_acquire.
- ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
+ ModuleEntryTable* modules = Atomic::load_acquire(&_modules);
if (modules == NULL) {
MutexLocker m1(Module_lock);
// Check if _modules got allocated while we were waiting for this lock.
@@ -562,7 +562,7 @@
{
MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
// Ensure _modules is stable, since it is examined without a lock
- OrderAccess::release_store(&_modules, modules);
+ Atomic::release_store(&_modules, modules);
}
}
}
@@ -752,7 +752,7 @@
// The reason for the delayed allocation is because some class loaders are
// simply for delegating with no metadata of their own.
// Lock-free access requires load_acquire.
- ClassLoaderMetaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
+ ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace);
if (metaspace == NULL) {
MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag);
// Check if _metaspace got allocated while we were waiting for this lock.
@@ -768,7 +768,7 @@
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
}
// Ensure _metaspace is stable, since it is examined without a lock
- OrderAccess::release_store(&_metaspace, metaspace);
+ Atomic::release_store(&_metaspace, metaspace);
}
}
return metaspace;
@@ -969,7 +969,7 @@
bool ClassLoaderData::contains_klass(Klass* klass) {
// Lock-free access requires load_acquire
- for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
+ for (Klass* k = Atomic::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
if (k == klass) return true;
}
return false;
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -59,13 +59,13 @@
//
// Any ClassLoaderData added after or during walking the list are prepended to
// _head. Their claim mark need not be handled here.
- for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
+ for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) {
cld->clear_claim();
}
}
void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
- for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
+ for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != NULL; cld = cld->next()) {
cld->clear_claim(claim);
}
}
@@ -220,7 +220,7 @@
// First install the new CLD to the Graph.
cld->set_next(_head);
- OrderAccess::release_store(&_head, cld);
+ Atomic::release_store(&_head, cld);
// Next associate with the class_loader.
if (!is_unsafe_anonymous) {
@@ -676,7 +676,7 @@
while (head != NULL) {
Klass* next = next_klass_in_cldg(head);
- Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
+ Klass* old_head = Atomic::cmpxchg(&_next_klass, head, next);
if (old_head == head) {
return head; // Won the CAS.
--- a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -50,21 +50,21 @@
}
void ClassLoaderDataGraph::inc_instance_classes(size_t count) {
- Atomic::add(count, &_num_instance_classes);
+ Atomic::add(&_num_instance_classes, count);
}
void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
assert(count <= _num_instance_classes, "Sanity");
- Atomic::sub(count, &_num_instance_classes);
+ Atomic::sub(&_num_instance_classes, count);
}
void ClassLoaderDataGraph::inc_array_classes(size_t count) {
- Atomic::add(count, &_num_array_classes);
+ Atomic::add(&_num_array_classes, count);
}
void ClassLoaderDataGraph::dec_array_classes(size_t count) {
assert(count <= _num_array_classes, "Sanity");
- Atomic::sub(count, &_num_array_classes);
+ Atomic::sub(&_num_array_classes, count);
}
bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
--- a/src/hotspot/share/classfile/stringTable.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/classfile/stringTable.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -214,11 +214,11 @@
}
size_t StringTable::item_added() {
- return Atomic::add((size_t)1, &_items_count);
+ return Atomic::add(&_items_count, (size_t)1);
}
size_t StringTable::add_items_to_clean(size_t ndead) {
- size_t total = Atomic::add((size_t)ndead, &_uncleaned_items_count);
+ size_t total = Atomic::add(&_uncleaned_items_count, (size_t)ndead);
log_trace(stringtable)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
_uncleaned_items_count, ndead, total);
@@ -226,7 +226,7 @@
}
void StringTable::item_removed() {
- Atomic::add((size_t)-1, &_items_count);
+ Atomic::add(&_items_count, (size_t)-1);
}
double StringTable::get_load_factor() {
--- a/src/hotspot/share/classfile/symbolTable.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/classfile/symbolTable.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -189,8 +189,8 @@
}
}
-void SymbolTable::reset_has_items_to_clean() { Atomic::store(false, &_has_items_to_clean); }
-void SymbolTable::mark_has_items_to_clean() { Atomic::store(true, &_has_items_to_clean); }
+void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); }
+void SymbolTable::mark_has_items_to_clean() { Atomic::store(&_has_items_to_clean, true); }
bool SymbolTable::has_items_to_clean() { return Atomic::load(&_has_items_to_clean); }
void SymbolTable::item_added() {
@@ -724,7 +724,7 @@
bdt.done(jt);
}
- Atomic::add(stdc._processed, &_symbols_counted);
+ Atomic::add(&_symbols_counted, stdc._processed);
log_debug(symboltable)("Cleaned " SIZE_FORMAT " of " SIZE_FORMAT,
stdd._deleted, stdc._processed);
--- a/src/hotspot/share/code/codeCache.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/code/codeCache.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -749,7 +749,7 @@
for (;;) {
ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
entry->set_purge_list_next(purge_list_head);
- if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) {
+ if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
break;
}
}
--- a/src/hotspot/share/code/compiledMethod.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/code/compiledMethod.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -113,7 +113,7 @@
//-----------------------------------------------------------------------------
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
- return OrderAccess::load_acquire(&_exception_cache);
+ return Atomic::load_acquire(&_exception_cache);
}
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
@@ -133,7 +133,7 @@
// next pointers always point at live ExceptionCaches, that are not removed due
// to concurrent ExceptionCache cleanup.
ExceptionCache* next = ec->next();
- if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
+ if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
CodeCache::release_exception_cache(ec);
}
continue;
@@ -143,7 +143,7 @@
new_entry->set_next(ec);
}
}
- if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
+ if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
return;
}
}
@@ -176,7 +176,7 @@
// Try to clean head; this is contended by concurrent inserts, that
// both lazily clean the head, and insert entries at the head. If
// the CAS fails, the operation is restarted.
- if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
+ if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
prev = NULL;
curr = exception_cache_acquire();
continue;
@@ -615,7 +615,7 @@
if (md != NULL && md->is_method()) {
Method* method = static_cast<Method*>(md);
if (!method->method_holder()->is_loader_alive()) {
- Atomic::store((Method*)NULL, r->metadata_addr());
+ Atomic::store(r->metadata_addr(), (Method*)NULL);
if (!r->metadata_is_immediate()) {
r->fix_metadata_relocation();
--- a/src/hotspot/share/code/compiledMethod.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/code/compiledMethod.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -61,7 +61,7 @@
// class ExceptionCache methods
-inline int ExceptionCache::count() { return OrderAccess::load_acquire(&_count); }
+inline int ExceptionCache::count() { return Atomic::load_acquire(&_count); }
address ExceptionCache::pc_at(int index) {
assert(index >= 0 && index < count(),"");
@@ -74,7 +74,7 @@
}
// increment_count is only called under lock, but there may be concurrent readers.
-inline void ExceptionCache::increment_count() { OrderAccess::release_store(&_count, _count + 1); }
+inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); }
#endif // SHARE_CODE_COMPILEDMETHOD_INLINE_HPP
--- a/src/hotspot/share/code/dependencyContext.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/code/dependencyContext.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -101,7 +101,7 @@
for (;;) {
nmethodBucket* head = Atomic::load(_dependency_context_addr);
new_head->set_next(head);
- if (Atomic::cmpxchg(new_head, _dependency_context_addr, head) == head) {
+ if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) {
break;
}
}
@@ -124,7 +124,7 @@
for (;;) {
nmethodBucket* purge_list_head = Atomic::load(&_purge_list);
b->set_purge_list_next(purge_list_head);
- if (Atomic::cmpxchg(b, &_purge_list, purge_list_head) == purge_list_head) {
+ if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) {
break;
}
}
@@ -260,7 +260,7 @@
#endif //PRODUCT
int nmethodBucket::decrement() {
- return Atomic::sub(1, &_count);
+ return Atomic::sub(&_count, 1);
}
// We use a monotonically increasing epoch counter to track the last epoch a given
@@ -272,7 +272,7 @@
if (last_cleanup >= cleaning_epoch) {
return false;
}
- return Atomic::cmpxchg(cleaning_epoch, _last_cleanup_addr, last_cleanup) == last_cleanup;
+ return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup;
}
// Retrieve the first nmethodBucket that has a dependent that does not correspond to
@@ -281,7 +281,7 @@
nmethodBucket* DependencyContext::dependencies_not_unloading() {
for (;;) {
// Need acquire becase the read value could come from a concurrent insert.
- nmethodBucket* head = OrderAccess::load_acquire(_dependency_context_addr);
+ nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr);
if (head == NULL || !head->get_nmethod()->is_unloading()) {
return head;
}
@@ -291,7 +291,7 @@
// Unstable load of head w.r.t. head->next
continue;
}
- if (Atomic::cmpxchg(head_next, _dependency_context_addr, head) == head) {
+ if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) {
// Release is_unloading entries if unlinking was claimed
DependencyContext::release(head);
}
@@ -300,7 +300,7 @@
// Relaxed accessors
void DependencyContext::set_dependencies(nmethodBucket* b) {
- Atomic::store(b, _dependency_context_addr);
+ Atomic::store(_dependency_context_addr, b);
}
nmethodBucket* DependencyContext::dependencies() {
@@ -313,7 +313,7 @@
void DependencyContext::cleaning_start() {
assert(SafepointSynchronize::is_at_safepoint(), "must be");
uint64_t epoch = ++_cleaning_epoch_monotonic;
- Atomic::store(epoch, &_cleaning_epoch);
+ Atomic::store(&_cleaning_epoch, epoch);
}
// The epilogue marks the end of dependency context cleanup by the GC,
@@ -323,7 +323,7 @@
// was called. That allows dependency contexts to be cleaned concurrently.
void DependencyContext::cleaning_end() {
uint64_t epoch = 0;
- Atomic::store(epoch, &_cleaning_epoch);
+ Atomic::store(&_cleaning_epoch, epoch);
}
// This function skips over nmethodBuckets in the list corresponding to
@@ -345,7 +345,7 @@
// Unstable load of next w.r.t. next->next
continue;
}
- if (Atomic::cmpxchg(next_next, &_next, next) == next) {
+ if (Atomic::cmpxchg(&_next, next, next_next) == next) {
// Release is_unloading entries if unlinking was claimed
DependencyContext::release(next);
}
@@ -358,7 +358,7 @@
}
void nmethodBucket::set_next(nmethodBucket* b) {
- Atomic::store(b, &_next);
+ Atomic::store(&_next, b);
}
nmethodBucket* nmethodBucket::purge_list_next() {
@@ -366,5 +366,5 @@
}
void nmethodBucket::set_purge_list_next(nmethodBucket* b) {
- Atomic::store(b, &_purge_list_next);
+ Atomic::store(&_purge_list_next, b);
}
--- a/src/hotspot/share/code/nmethod.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/code/nmethod.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -315,7 +315,7 @@
}
void ExceptionCache::set_next(ExceptionCache *ec) {
- Atomic::store(ec, &_next);
+ Atomic::store(&_next, ec);
}
//-----------------------------------------------------------------------------
@@ -1150,7 +1150,7 @@
// Ensure monotonicity of transitions.
return false;
}
- if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
+ if (Atomic::cmpxchg(&_state, old_state, new_state) == old_state) {
return true;
}
}
@@ -1849,7 +1849,7 @@
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
if ((_oops_do_mark_link == NULL) &&
- (Atomic::replace_if_null(mark_link(this, claim_weak_request_tag), &_oops_do_mark_link))) {
+ (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) {
oops_do_log_change("oops_do, mark weak request");
return true;
}
@@ -1863,7 +1863,7 @@
nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
- oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_done_tag), &_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag));
+ oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(NULL, claim_weak_request_tag), mark_link(this, claim_strong_done_tag));
if (old_next == NULL) {
oops_do_log_change("oops_do, mark strong done");
}
@@ -1874,7 +1874,7 @@
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak");
- oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(this, claim_strong_request_tag), &_oops_do_mark_link, next);
+ oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag));
if (old_next == next) {
oops_do_log_change("oops_do, mark strong request");
}
@@ -1885,7 +1885,7 @@
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done");
- oops_do_mark_link* old_next = Atomic::cmpxchg(mark_link(extract_nmethod(next), claim_strong_done_tag), &_oops_do_mark_link, next);
+ oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag));
if (old_next == next) {
oops_do_log_change("oops_do, mark weak done -> mark strong done");
return true;
@@ -1900,13 +1900,13 @@
extract_state(_oops_do_mark_link) == claim_strong_request_tag,
"must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link));
- nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+ nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
// Self-loop if needed.
if (old_head == NULL) {
old_head = this;
}
// Try to install end of list and weak done tag.
- if (Atomic::cmpxchg(mark_link(old_head, claim_weak_done_tag), &_oops_do_mark_link, mark_link(this, claim_weak_request_tag)) == mark_link(this, claim_weak_request_tag)) {
+ if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) {
oops_do_log_change("oops_do, mark weak done");
return NULL;
} else {
@@ -1917,7 +1917,7 @@
void nmethod::oops_do_add_to_list_as_strong_done() {
assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint");
- nmethod* old_head = Atomic::xchg(this, &_oops_do_mark_nmethods);
+ nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this);
// Self-loop if needed.
if (old_head == NULL) {
old_head = this;
--- a/src/hotspot/share/compiler/compileBroker.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/compiler/compileBroker.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1479,14 +1479,14 @@
assert(!is_osr, "can't be osr");
// Adapters, native wrappers and method handle intrinsics
// should be generated always.
- return Atomic::add(1, &_compilation_id);
+ return Atomic::add(&_compilation_id, 1);
} else if (CICountOSR && is_osr) {
- id = Atomic::add(1, &_osr_compilation_id);
+ id = Atomic::add(&_osr_compilation_id, 1);
if (CIStartOSR <= id && id < CIStopOSR) {
return id;
}
} else {
- id = Atomic::add(1, &_compilation_id);
+ id = Atomic::add(&_compilation_id, 1);
if (CIStart <= id && id < CIStop) {
return id;
}
@@ -1498,7 +1498,7 @@
#else
// CICountOSR is a develop flag and set to 'false' by default. In a product built,
// only _compilation_id is incremented.
- return Atomic::add(1, &_compilation_id);
+ return Atomic::add(&_compilation_id, 1);
#endif
}
--- a/src/hotspot/share/compiler/compileBroker.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/compiler/compileBroker.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -335,7 +335,7 @@
static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
static bool set_should_compile_new_jobs(jint new_state) {
// Return success if the current caller set it
- jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
+ jint old = Atomic::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state);
bool success = (old == (1-new_state));
if (success) {
if (new_state == run_compilation) {
@@ -350,7 +350,7 @@
static void disable_compilation_forever() {
UseCompiler = false;
AlwaysCompileLoopMethods = false;
- Atomic::xchg(jint(shutdown_compilation), &_should_compile_new_jobs);
+ Atomic::xchg(&_should_compile_new_jobs, jint(shutdown_compilation));
}
static bool is_compilation_disabled_forever() {
@@ -359,7 +359,7 @@
static void handle_full_code_cache(int code_blob_type);
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {
- jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0);
+ jint old = Atomic::cmpxchg(&_print_compilation_warning, 0, 1);
return old == 0;
}
// Return total compilation ticks
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -156,7 +156,7 @@
// Allocation successful, update counters
{
size_t last = _last_counter_update;
- if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
+ if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) {
_monitoring_support->update_counters();
}
}
@@ -164,7 +164,7 @@
// ...and print the occupancy line, if needed
{
size_t last = _last_heap_print;
- if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
+ if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) {
print_heap_info(used);
print_metaspace_info();
}
@@ -212,7 +212,7 @@
}
// Always honor boundaries
- size = MAX2(min_size, MIN2(_max_tlab_size, size));
+ size = clamp(size, min_size, _max_tlab_size);
// Always honor alignment
size = align_up(size, MinObjAlignment);
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -194,7 +194,7 @@
if (hr == NULL) {
return max_tlab;
} else {
- return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
+ return clamp(hr->free(), MinTLABSize, max_tlab);
}
}
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -28,6 +28,7 @@
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "utilities/numberSeq.hpp"
// Different defaults for different number of GC threads
@@ -144,17 +145,9 @@
void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
_recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
- if (_recent_avg_pause_time_ratio < 0.0 ||
- (_recent_avg_pause_time_ratio - 1.0 > 0.0)) {
- // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
- // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
- if (_recent_avg_pause_time_ratio < 0.0) {
- _recent_avg_pause_time_ratio = 0.0;
- } else {
- assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
- _recent_avg_pause_time_ratio = 1.0;
- }
- }
+
+ // Clamp the result to [0.0 ... 1.0] to filter out nonsensical results due to bad input.
+ _recent_avg_pause_time_ratio = clamp(_recent_avg_pause_time_ratio, 0.0, 1.0);
// Compute the ratio of just this last pause time to the entire time range stored
// in the vectors. Comparing this pause to the entire range, rather than only the
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -55,7 +55,7 @@
}
void G1BlockOffsetTable::set_offset_array_raw(size_t index, u_char offset) {
- Atomic::store(offset, &_offset_array[index]);
+ Atomic::store(&_offset_array[index], offset);
}
void G1BlockOffsetTable::set_offset_array(size_t index, u_char offset) {
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -158,19 +158,19 @@
}
G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
- return OrderAccess::load_acquire(&_table);
+ return Atomic::load_acquire(&_table);
}
void G1CodeRootSet::allocate_small_table() {
G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
- OrderAccess::release_store(&_table, temp);
+ Atomic::release_store(&_table, temp);
}
void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
for (;;) {
table->_purge_next = _purge_list;
- G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
+ G1CodeRootSetTable* old = Atomic::cmpxchg(&_purge_list, table->_purge_next, table);
if (old == table->_purge_next) {
break;
}
@@ -194,7 +194,7 @@
G1CodeRootSetTable::purge_list_append(_table);
- OrderAccess::release_store(&_table, temp);
+ Atomic::release_store(&_table, temp);
}
void G1CodeRootSet::purge() {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -3377,7 +3377,7 @@
BufferNode* next = Atomic::load(&_nodes);
while (next != NULL) {
BufferNode* node = next;
- next = Atomic::cmpxchg(node->next(), &_nodes, node);
+ next = Atomic::cmpxchg(&_nodes, node, node->next());
if (next == node) {
cl->apply_to_buffer(node, buffer_size, worker_id);
next = node->next();
@@ -4226,7 +4226,7 @@
HeapRegion* r = g1h->region_at(region_idx);
assert(!g1h->is_on_master_free_list(r), "sanity");
- Atomic::add(r->rem_set()->occupied_locked(), &_rs_length);
+ Atomic::add(&_rs_length, r->rem_set()->occupied_locked());
if (!is_young) {
g1h->hot_card_cache()->reset_card_counts(r);
@@ -4290,7 +4290,7 @@
// Claim serial work.
if (_serial_work_claim == 0) {
- jint value = Atomic::add(1, &_serial_work_claim) - 1;
+ jint value = Atomic::add(&_serial_work_claim, 1) - 1;
if (value == 0) {
double serial_time = os::elapsedTime();
do_serial_work();
@@ -4305,7 +4305,7 @@
bool has_non_young_time = false;
while (true) {
- size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
+ size_t end = Atomic::add(&_parallel_work_claim, chunk_size());
size_t cur = end - chunk_size();
if (cur >= _num_work_items) {
--- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -112,7 +112,7 @@
// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
- uint result = Atomic::add(_chunk_size, &_cur_claim_idx);
+ uint result = Atomic::add(&_cur_claim_idx, _chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
@@ -214,8 +214,8 @@
void update_totals(uint num_regions, size_t reclaimable_bytes) {
if (num_regions > 0) {
assert(reclaimable_bytes > 0, "invariant");
- Atomic::add(num_regions, &_num_regions_added);
- Atomic::add(reclaimable_bytes, &_reclaimable_bytes_added);
+ Atomic::add(&_num_regions_added, num_regions);
+ Atomic::add(&_reclaimable_bytes_added, reclaimable_bytes);
} else {
assert(reclaimable_bytes == 0, "invariant");
}
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -207,7 +207,7 @@
return NULL;
}
- size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
+ size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
if (cur_idx >= _chunk_capacity) {
return NULL;
}
@@ -280,7 +280,7 @@
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
- size_t idx = Atomic::add((size_t)1, &_num_root_regions) - 1;
+ size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -308,7 +308,7 @@
return NULL;
}
- size_t claimed_index = Atomic::add((size_t)1, &_claimed_root_regions) - 1;
+ size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
if (claimed_index < _num_root_regions) {
return &_root_regions[claimed_index];
}
@@ -1121,7 +1121,7 @@
virtual void work(uint worker_id) {
G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
_g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
- Atomic::add(update_cl.num_selected_for_rebuild(), &_total_selected_for_rebuild);
+ Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
}
uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
@@ -1611,7 +1611,7 @@
// we utilize all the worker threads we can.
bool processing_is_mt = rp->processing_is_mt();
uint active_workers = (processing_is_mt ? _g1h->workers()->active_workers() : 1U);
- active_workers = MAX2(MIN2(active_workers, _max_num_tasks), 1U);
+ active_workers = clamp(active_workers, 1u, _max_num_tasks);
// Parallel processing task executor.
G1CMRefProcTaskExecutor par_task_executor(_g1h, this,
@@ -1906,7 +1906,7 @@
HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
// Is the gap between reading the finger and doing the CAS too long?
- HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
+ HeapWord* res = Atomic::cmpxchg(&_finger, finger, end);
if (res == finger && curr_region != NULL) {
// we succeeded
HeapWord* bottom = curr_region->bottom();
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -41,6 +41,7 @@
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
+#include "utilities/quickSort.hpp"
G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
// Dirty card queues are always active, so we create them with their
@@ -226,21 +227,127 @@
return result;
}
+class G1RefineBufferedCards : public StackObj {
+ BufferNode* const _node;
+ CardTable::CardValue** const _node_buffer;
+ const size_t _node_buffer_size;
+ const uint _worker_id;
+ size_t* _total_refined_cards;
+ G1RemSet* const _g1rs;
+
+ static inline int compare_card(const CardTable::CardValue* p1,
+ const CardTable::CardValue* p2) {
+ return p2 - p1;
+ }
+
+ // Sorts the cards from start_index to _node_buffer_size in *decreasing*
+ // address order. Tests showed that this order is preferable to not sorting
+ // or increasing address order.
+ void sort_cards(size_t start_index) {
+ QuickSort::sort(&_node_buffer[start_index],
+ _node_buffer_size - start_index,
+ compare_card,
+ false);
+ }
+
+ // Returns the index to the first clean card in the buffer.
+ size_t clean_cards() {
+ const size_t start = _node->index();
+ assert(start <= _node_buffer_size, "invariant");
+
+ // Two-fingered compaction algorithm similar to the filtering mechanism in
+ // SATBMarkQueue. The main difference is that clean_card_before_refine()
+ // could change the buffer element in-place.
+ // We don't check for SuspendibleThreadSet::should_yield(), because
+ // cleaning and redirtying the cards is fast.
+ CardTable::CardValue** src = &_node_buffer[start];
+ CardTable::CardValue** dst = &_node_buffer[_node_buffer_size];
+ assert(src <= dst, "invariant");
+ for ( ; src < dst; ++src) {
+ // Search low to high for a card to keep.
+ if (_g1rs->clean_card_before_refine(src)) {
+ // Found keeper. Search high to low for a card to discard.
+ while (src < --dst) {
+ if (!_g1rs->clean_card_before_refine(dst)) {
+ *dst = *src; // Replace discard with keeper.
+ break;
+ }
+ }
+ // If discard search failed (src == dst), the outer loop will also end.
+ }
+ }
+
+ // dst points to the first retained clean card, or the end of the buffer
+ // if all the cards were discarded.
+ const size_t first_clean = dst - _node_buffer;
+ assert(first_clean >= start && first_clean <= _node_buffer_size, "invariant");
+ // Discarded cards are considered as refined.
+ *_total_refined_cards += first_clean - start;
+ return first_clean;
+ }
+
+ bool refine_cleaned_cards(size_t start_index) {
+ bool result = true;
+ size_t i = start_index;
+ for ( ; i < _node_buffer_size; ++i) {
+ if (SuspendibleThreadSet::should_yield()) {
+ redirty_unrefined_cards(i);
+ result = false;
+ break;
+ }
+ _g1rs->refine_card_concurrently(_node_buffer[i], _worker_id);
+ }
+ _node->set_index(i);
+ *_total_refined_cards += i - start_index;
+ return result;
+ }
+
+ void redirty_unrefined_cards(size_t start) {
+ for ( ; start < _node_buffer_size; ++start) {
+ *_node_buffer[start] = G1CardTable::dirty_card_val();
+ }
+ }
+
+public:
+ G1RefineBufferedCards(BufferNode* node,
+ size_t node_buffer_size,
+ uint worker_id,
+ size_t* total_refined_cards) :
+ _node(node),
+ _node_buffer(reinterpret_cast<CardTable::CardValue**>(BufferNode::make_buffer_from_node(node))),
+ _node_buffer_size(node_buffer_size),
+ _worker_id(worker_id),
+ _total_refined_cards(total_refined_cards),
+ _g1rs(G1CollectedHeap::heap()->rem_set()) {}
+
+ bool refine() {
+ size_t first_clean_index = clean_cards();
+ if (first_clean_index == _node_buffer_size) {
+ _node->set_index(first_clean_index);
+ return true;
+ }
+ // This fence serves two purposes. First, the cards must be cleaned
+ // before processing the contents. Second, we can't proceed with
+ // processing a region until after the read of the region's top in
+ // collect_and_clean_cards(), for synchronization with possibly concurrent
+ // humongous object allocation (see comment at the StoreStore fence before
+ // setting the regions' tops in humongous allocation path).
+ // It's okay that reading the region's top and reading the region's type are
+ // racy w.r.t. each other. We need both set, in any order, to proceed.
+ OrderAccess::fence();
+ sort_cards(first_clean_index);
+ return refine_cleaned_cards(first_clean_index);
+ }
+};
+
bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
uint worker_id,
size_t* total_refined_cards) {
- G1RemSet* rem_set = G1CollectedHeap::heap()->rem_set();
- size_t size = buffer_size();
- void** buffer = BufferNode::make_buffer_from_node(node);
- size_t i = node->index();
- assert(i <= size, "invariant");
- for ( ; (i < size) && !SuspendibleThreadSet::should_yield(); ++i) {
- CardTable::CardValue* cp = static_cast<CardTable::CardValue*>(buffer[i]);
- rem_set->refine_card_concurrently(cp, worker_id);
- }
- *total_refined_cards += (i - node->index());
- node->set_index(i);
- return i == size;
+ G1RefineBufferedCards buffered_cards(node,
+ buffer_size(),
+ worker_id,
+ total_refined_cards);
+ return buffered_cards.refine();
}
#ifndef ASSERT
--- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -29,17 +29,17 @@
#include "runtime/atomic.hpp"
inline void G1EvacStats::add_direct_allocated(size_t value) {
- Atomic::add(value, &_direct_allocated);
+ Atomic::add(&_direct_allocated, value);
}
inline void G1EvacStats::add_region_end_waste(size_t value) {
- Atomic::add(value, &_region_end_waste);
+ Atomic::add(&_region_end_waste, value);
Atomic::inc(&_regions_filled);
}
inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
- Atomic::add(used, &_failure_used);
- Atomic::add(waste, &_failure_waste);
+ Atomic::add(&_failure_used, used);
+ Atomic::add(&_failure_waste, waste);
}
#endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1FreeIdSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1FreeIdSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -79,7 +79,7 @@
index = head_index(old_head);
assert(index < _size, "invariant");
uintx new_head = make_head(_next[index], old_head);
- new_head = Atomic::cmpxchg(new_head, &_head, old_head);
+ new_head = Atomic::cmpxchg(&_head, old_head, new_head);
if (new_head == old_head) break;
old_head = new_head;
}
@@ -95,7 +95,7 @@
while (true) {
_next[index] = head_index(old_head);
uintx new_head = make_head(index, old_head);
- new_head = Atomic::cmpxchg(new_head, &_head, old_head);
+ new_head = Atomic::cmpxchg(&_head, old_head, new_head);
if (new_head == old_head) break;
old_head = new_head;
}
--- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -101,7 +101,7 @@
// Adjust the weak roots.
- if (Atomic::add(1u, &_references_done) == 1u) { // First incr claims task.
+ if (Atomic::add(&_references_done, 1u) == 1u) { // First incr claims task.
G1CollectedHeap::heap()->ref_processor_stw()->weak_oops_do(&_adjust);
}
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -53,7 +53,7 @@
assert(current->is_empty(), "Should have been cleared in phase 2.");
}
}
- current->reset_during_compaction();
+ current->reset_humongous_during_compaction();
}
return false;
}
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -68,7 +68,7 @@
return card_ptr;
}
// Otherwise, the card is hot.
- size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
+ size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
size_t masked_index = index & (_hot_cache_size - 1);
CardValue* current_ptr = _hot_cache[masked_index];
@@ -78,9 +78,9 @@
// card_ptr in favor of the other option, which would be starting over. This
// should be OK since card_ptr will likely be the older card already when/if
// this ever happens.
- CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
- &_hot_cache[masked_index],
- current_ptr);
+ CardValue* previous_ptr = Atomic::cmpxchg(&_hot_cache[masked_index],
+ current_ptr,
+ card_ptr);
return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
@@ -91,8 +91,8 @@
assert(!use_cache(), "cache should be disabled");
while (_hot_cache_par_claimed_idx < _hot_cache_size) {
- size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
- &_hot_cache_par_claimed_idx);
+ size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx,
+ _hot_cache_par_chunk_size);
size_t start_idx = end_idx - _hot_cache_par_chunk_size;
// The current worker has successfully claimed the chunk [start_idx..end_idx)
end_idx = MIN2(end_idx, _hot_cache_size);
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -261,7 +261,7 @@
virtual void work(uint worker_id) {
size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
while (true) {
- char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
+ char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
if (touch_addr < _start_addr || touch_addr >= _end_addr) {
break;
}
--- a/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -39,7 +39,7 @@
return false;
}
- return Atomic::cmpxchg(1, &_cleaning_claimed, 0) == 0;
+ return Atomic::cmpxchg(&_cleaning_claimed, 0, 1) == 0;
}
void JVMCICleaningTask::work(bool unloading_occurred) {
--- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -129,7 +129,7 @@
void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
assert(_collecting, "precondition");
- Atomic::add(buffer_size() - node->index(), &_entry_count);
+ Atomic::add(&_entry_count, buffer_size() - node->index());
_list.push(*node);
update_tail(node);
}
@@ -139,7 +139,7 @@
const G1BufferNodeList from = src->take_all_completed_buffers();
if (from._head != NULL) {
assert(from._tail != NULL, "invariant");
- Atomic::add(from._entry_count, &_entry_count);
+ Atomic::add(&_entry_count, from._entry_count);
_list.prepend(*from._head, *from._tail);
update_tail(from._tail);
}
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -46,7 +46,7 @@
inline void G1RegionMarkStatsCache::evict(uint idx) {
G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
if (cur->_stats._live_words != 0) {
- Atomic::add(cur->_stats._live_words, &_target[cur->_region_idx]._live_words);
+ Atomic::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
}
cur->clear();
}
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -177,9 +177,9 @@
return;
}
- bool marked_as_dirty = Atomic::cmpxchg(true, &_contains[region], false) == false;
+ bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
if (marked_as_dirty) {
- uint allocated = Atomic::add(1u, &_cur_idx) - 1;
+ uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
_buffer[allocated] = region;
}
}
@@ -255,7 +255,7 @@
void work(uint worker_id) {
while (_cur_dirty_regions < _regions->size()) {
- uint next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
+ uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
uint max = MIN2(next + _chunk_length, _regions->size());
for (uint i = next; i < max; i++) {
@@ -437,7 +437,7 @@
if (_collection_set_iter_state[region]) {
return false;
}
- return !Atomic::cmpxchg(true, &_collection_set_iter_state[region], false);
+ return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
}
bool has_cards_to_scan(uint region) {
@@ -447,7 +447,7 @@
uint claim_cards_to_scan(uint region, uint increment) {
assert(region < _max_regions, "Tried to access invalid region %u", region);
- return Atomic::add(increment, &_card_table_scan_state[region]) - increment;
+ return Atomic::add(&_card_table_scan_state[region], increment) - increment;
}
void add_dirty_region(uint const region) {
@@ -1137,7 +1137,7 @@
if (_initial_evacuation &&
p->fast_reclaim_humongous_candidates() > 0 &&
!_fast_reclaim_handled &&
- !Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {
+ !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) {
G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeER, worker_id);
@@ -1261,25 +1261,27 @@
#endif
}
-void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
- uint worker_id) {
+bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
assert(!_g1h->is_gc_active(), "Only call concurrently");
- // Construct the region representing the card.
+ CardValue* card_ptr = *card_ptr_addr;
+ // Find the start address represented by the card.
HeapWord* start = _ct->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1h->heap_region_containing_or_null(start);
// If this is a (stale) card into an uncommitted region, exit.
if (r == NULL) {
- return;
+ return false;
}
check_card_ptr(card_ptr, _ct);
// If the card is no longer dirty, nothing to do.
+ // We cannot load the card value before the "r == NULL" check, because G1
+ // could uncommit parts of the card table covering uncommitted regions.
if (*card_ptr != G1CardTable::dirty_card_val()) {
- return;
+ return false;
}
// This check is needed for some uncommon cases where we should
@@ -1302,7 +1304,7 @@
// enqueueing of the card and processing it here will have ensured
// we see the up-to-date region type here.
if (!r->is_old_or_humongous_or_archive()) {
- return;
+ return false;
}
// The result from the hot card cache insert call is either:
@@ -1321,7 +1323,7 @@
card_ptr = _hot_card_cache->insert(card_ptr);
if (card_ptr == NULL) {
// There was no eviction. Nothing to do.
- return;
+ return false;
} else if (card_ptr != orig_card_ptr) {
// Original card was inserted and an old card was evicted.
start = _ct->addr_for(card_ptr);
@@ -1331,8 +1333,9 @@
// ignored, as discussed earlier for the original card. The
// region could have been freed while in the cache.
if (!r->is_old_or_humongous_or_archive()) {
- return;
+ return false;
}
+ *card_ptr_addr = card_ptr;
} // Else we still have the original card.
}
@@ -1341,18 +1344,19 @@
// (part of) an object at the end of the allocated space and extend
// beyond the end of allocation.
- // Non-humongous objects are only allocated in the old-gen during
- // GC, so if region is old then top is stable. Humongous object
- // allocation sets top last; if top has not yet been set, this is
- // a stale card and we'll end up with an empty intersection. If
- // this is not a stale card, the synchronization between the
+ // Non-humongous objects are either allocated in the old regions during GC,
+ // or mapped in archive regions during startup. So if region is old or
+ // archive then top is stable.
+ // Humongous object allocation sets top last; if top has not yet been set,
+ // this is a stale card and we'll end up with an empty intersection.
+ // If this is not a stale card, the synchronization between the
// enqueuing of the card and processing it here will have ensured
// we see the up-to-date top here.
HeapWord* scan_limit = r->top();
if (scan_limit <= start) {
// If the trimmed region is empty, the card must be stale.
- return;
+ return false;
}
// Okay to clean and process the card now. There are still some
@@ -1360,13 +1364,26 @@
// as iteration failure.
*const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();
- // This fence serves two purposes. First, the card must be cleaned
- // before processing the contents. Second, we can't proceed with
- // processing until after the read of top, for synchronization with
- // possibly concurrent humongous object allocation. It's okay that
- // reading top and reading type were racy wrto each other. We need
- // both set, in any order, to proceed.
- OrderAccess::fence();
+ return true;
+}
+
+void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
+ const uint worker_id) {
+ assert(!_g1h->is_gc_active(), "Only call concurrently");
+ check_card_ptr(card_ptr, _ct);
+
+ // Construct the MemRegion representing the card.
+ HeapWord* start = _ct->addr_for(card_ptr);
+ // And find the region containing it.
+ HeapRegion* r = _g1h->heap_region_containing(start);
+ // This reload of the top is safe even though it happens after the full
+ // fence, because top is stable for old, archive and unfiltered humongous
+ // regions, so it must return the same value as the previous load when
+ // cleaning the card. Also cleaning the card and refinement of the card
+ // cannot span across a safepoint, so we don't need to worry about top being
+ // changed during a safepoint.
+ HeapWord* scan_limit = r->top();
+ assert(scan_limit > start, "sanity");
// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -113,10 +113,17 @@
G1GCPhaseTimes::GCParPhases coderoots_phase,
G1GCPhaseTimes::GCParPhases objcopy_phase);
- // Refine the card corresponding to "card_ptr". Safe to be called concurrently
- // to the mutator.
- void refine_card_concurrently(CardValue* card_ptr,
- uint worker_id);
+ // Two methods for concurrent refinement support, executed concurrently to
+ // the mutator:
+ // Cleans the card at "*card_ptr_addr" before refinement; returns true iff the
+ // card needs later refinement. Note that "*card_ptr_addr" could be updated to
+ // a different card due to use of hot card cache.
+ bool clean_card_before_refine(CardValue** const card_ptr_addr);
+ // Refine the region corresponding to "card_ptr". Must be called after
+ // being filtered by clean_card_before_refine(), and after proper
+ // fence/synchronization.
+ void refine_card_concurrently(CardValue* const card_ptr,
+ const uint worker_id);
// Print accumulated summary info from the start of the VM.
void print_summary_info();
--- a/src/hotspot/share/gc/g1/heapRegion.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -136,6 +136,11 @@
init_top_at_mark_start();
if (clear_space) clear(SpaceDecorator::Mangle);
+
+ _evacuation_failed = false;
+ _gc_efficiency = 0.0;
+ _recorded_rs_length = 0;
+ _predicted_elapsed_time_ms = 0.0;
}
void HeapRegion::clear_cardtable() {
@@ -233,8 +238,8 @@
HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetTable* bot,
MemRegion mr) :
- _bottom(NULL),
- _end(NULL),
+ _bottom(mr.start()),
+ _end(mr.end()),
_top(NULL),
_compaction_top(NULL),
_bot_part(bot, this),
@@ -245,30 +250,28 @@
_type(),
_humongous_start_region(NULL),
_evacuation_failed(false),
+ _index_in_opt_cset(InvalidCSetIndex),
_next(NULL), _prev(NULL),
#ifdef ASSERT
_containing_set(NULL),
#endif
- _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
- _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1),
- _surv_rate_group(NULL), _age_index(-1),
_prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
+ _prev_marked_bytes(0), _next_marked_bytes(0),
+ _young_index_in_cset(-1),
+ _surv_rate_group(NULL), _age_index(-1), _gc_efficiency(0.0),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0),
_node_index(G1NUMA::UnknownNodeIndex)
{
- _rem_set = new HeapRegionRemSet(bot, this);
-
- initialize(mr);
-}
-
-void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
- assert(_rem_set->is_empty(), "Remembered set must be empty");
-
assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
"invalid space boundaries");
- set_bottom(mr.start());
- set_end(mr.end());
+ _rem_set = new HeapRegionRemSet(bot, this);
+ initialize();
+}
+
+void HeapRegion::initialize(bool clear_space, bool mangle_space) {
+ assert(_rem_set->is_empty(), "Remembered set must be empty");
+
if (clear_space) {
clear(mangle_space);
}
@@ -755,7 +758,7 @@
if (p < the_end) {
// Look up top
HeapWord* addr_1 = p;
- HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
+ HeapWord* b_start_1 = block_start_const(addr_1);
if (b_start_1 != p) {
log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@@ -767,7 +770,7 @@
// Look up top + 1
HeapWord* addr_2 = p + 1;
if (addr_2 < the_end) {
- HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
+ HeapWord* b_start_2 = block_start_const(addr_2);
if (b_start_2 != p) {
log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@@ -781,7 +784,7 @@
size_t diff = pointer_delta(the_end, p) / 2;
HeapWord* addr_3 = p + diff;
if (addr_3 < the_end) {
- HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
+ HeapWord* b_start_3 = block_start_const(addr_3);
if (b_start_3 != p) {
log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
@@ -793,7 +796,7 @@
// Look up end - 1
HeapWord* addr_4 = the_end - 1;
- HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
+ HeapWord* b_start_4 = block_start_const(addr_4);
if (b_start_4 != p) {
log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
" yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
--- a/src/hotspot/share/gc/g1/heapRegion.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -67,8 +67,8 @@
class HeapRegion : public CHeapObj<mtGC> {
friend class VMStructs;
- HeapWord* _bottom;
- HeapWord* _end;
+ HeapWord* const _bottom;
+ HeapWord* const _end;
HeapWord* volatile _top;
HeapWord* _compaction_top;
@@ -84,10 +84,7 @@
HeapWord* _pre_dummy_top;
public:
- void set_bottom(HeapWord* value) { _bottom = value; }
HeapWord* bottom() const { return _bottom; }
-
- void set_end(HeapWord* value) { _end = value; }
HeapWord* end() const { return _end; }
void set_compaction_top(HeapWord* compaction_top) { _compaction_top = compaction_top; }
@@ -96,6 +93,15 @@
void set_top(HeapWord* value) { _top = value; }
HeapWord* top() const { return _top; }
+ // See the comment above in the declaration of _pre_dummy_top for an
+ // explanation of what it is.
+ void set_pre_dummy_top(HeapWord* pre_dummy_top) {
+ assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
+ _pre_dummy_top = pre_dummy_top;
+ }
+ HeapWord* pre_dummy_top() { return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top; }
+ void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
+
// Returns true iff the given the heap region contains the
// given address as part of an allocated object. This may
// be a potentially, so we restrict its use to assertion checks only.
@@ -118,6 +124,12 @@
private:
void reset_after_compaction() { set_top(compaction_top()); }
+ void clear(bool mangle_space);
+
+ HeapWord* block_start_const(const void* p) const;
+
+ void mangle_unused_area() PRODUCT_RETURN;
+
// Try to allocate at least min_word_size and up to desired_size from this region.
// Returns NULL if not possible, otherwise sets actual_word_size to the amount of
// space allocated.
@@ -130,27 +142,10 @@
// This version synchronizes with other calls to par_allocate_impl().
inline HeapWord* par_allocate_impl(size_t min_word_size, size_t desired_word_size, size_t* actual_word_size);
- void mangle_unused_area() PRODUCT_RETURN;
-
public:
- void object_iterate(ObjectClosure* blk);
+ HeapWord* block_start(const void* p);
- // See the comment above in the declaration of _pre_dummy_top for an
- // explanation of what it is.
- void set_pre_dummy_top(HeapWord* pre_dummy_top) {
- assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
- _pre_dummy_top = pre_dummy_top;
- }
-
- HeapWord* pre_dummy_top() {
- return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
- }
- void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
-
- void clear(bool mangle_space);
-
- HeapWord* block_start(const void* p);
- HeapWord* block_start_const(const void* p) const;
+ void object_iterate(ObjectClosure* blk);
// Allocation (return NULL if full). Assumes the caller has established
// mutually exclusive access to the HeapRegion.
@@ -161,35 +156,50 @@
HeapWord* allocate(size_t word_size);
HeapWord* par_allocate(size_t word_size);
- HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
+ inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
+ inline HeapWord* allocate_no_bot_updates(size_t word_size);
+ inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
- // MarkSweep support phase3
+ // Full GC support methods.
+
HeapWord* initialize_threshold();
HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
+ // Update heap region to be consistent after Full GC compaction.
+ void reset_humongous_during_compaction() {
+ assert(is_humongous(),
+ "should only be called for humongous regions");
+
+ zero_marked_bytes();
+ init_top_at_mark_start();
+ }
+ // Update heap region to be consistent after Full GC compaction.
+ void complete_compaction();
+
+ // All allocated blocks are occupied by objects in a HeapRegion
+ bool block_is_obj(const HeapWord* p) const;
+
+ // Returns whether the given object is dead based on TAMS and bitmap.
+ bool is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const;
+
+ // Returns the object size for all valid block starts
+ // and the amount of unallocated words if called on top()
+ size_t block_size(const HeapWord* p) const;
+
+ // Scans through the region using the bitmap to determine what
+ // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
+ template<typename ApplyToMarkedClosure>
+ inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
void reset_bot() {
_bot_part.reset_bot();
}
- void print_bot_on(outputStream* out) {
- _bot_part.print_on(out);
- }
-
private:
// The remembered set for this region.
HeapRegionRemSet* _rem_set;
- void report_region_type_change(G1HeapRegionTraceType::Type to);
-
- // Returns whether the given object address refers to a dead object, and either the
- // size of the object (if live) or the size of the block (if dead) in size.
- // May
- // - only called with obj < top()
- // - not called on humongous objects or archive regions
- inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
-
- // The index of this region in the heap region sequence.
- uint _hrm_index;
+ // Cached index of this region in the heap region sequence.
+ const uint _hrm_index;
HeapRegionType _type;
@@ -199,6 +209,12 @@
// True iff an attempt to evacuate an object in the region failed.
bool _evacuation_failed;
+ static const uint InvalidCSetIndex = UINT_MAX;
+
+ // The index in the optional regions array, if this region
+ // is considered optional during a mixed collection.
+ uint _index_in_opt_cset;
+
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
HeapRegion* _prev;
@@ -206,25 +222,6 @@
HeapRegionSetBase* _containing_set;
#endif // ASSERT
- // We use concurrent marking to determine the amount of live data
- // in each heap region.
- size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
- size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
-
- // The calculated GC efficiency of the region.
- double _gc_efficiency;
-
- static const uint InvalidCSetIndex = UINT_MAX;
-
- // The index in the optional regions array, if this region
- // is considered optional during a mixed collections.
- uint _index_in_opt_cset;
-
- // Data for young region survivor prediction.
- uint _young_index_in_cset;
- SurvRateGroup* _surv_rate_group;
- int _age_index;
-
// The start of the unmarked area. The unmarked area extends from this
// word until the top and/or end of the region, and is the part
// of the region for which no marking was done, i.e. objects may
@@ -234,18 +231,29 @@
HeapWord* _prev_top_at_mark_start;
HeapWord* _next_top_at_mark_start;
+ // We use concurrent marking to determine the amount of live data
+ // in each heap region.
+ size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
+ size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
+
void init_top_at_mark_start() {
assert(_prev_marked_bytes == 0 &&
_next_marked_bytes == 0,
"Must be called after zero_marked_bytes.");
- HeapWord* bot = bottom();
- _prev_top_at_mark_start = bot;
- _next_top_at_mark_start = bot;
+ _prev_top_at_mark_start = _next_top_at_mark_start = bottom();
}
+ // Data for young region survivor prediction.
+ uint _young_index_in_cset;
+ SurvRateGroup* _surv_rate_group;
+ int _age_index;
+
// Cached attributes used in the collection set policy information
- // The RSet length that was added to the total value
+ // The calculated GC efficiency of the region.
+ double _gc_efficiency;
+
+ // The remembered set length that was added to the total value
// for the collection set.
size_t _recorded_rs_length;
@@ -255,6 +263,15 @@
uint _node_index;
+ void report_region_type_change(G1HeapRegionTraceType::Type to);
+
+ // Returns whether the given object address refers to a dead object, and either the
+ // size of the object (if live) or the size of the block (if dead) in size.
+ // May
+ // - only called with obj < top()
+ // - not called on humongous objects or archive regions
+ inline bool is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const;
+
// Iterate over the references covered by the given MemRegion in a humongous
// object and apply the given closure to them.
// Humongous objects are allocated directly in the old-gen. So we need special
@@ -273,11 +290,15 @@
public:
HeapRegion(uint hrm_index, G1BlockOffsetTable* bot, MemRegion mr);
+ // If this region is a member of a HeapRegionManager, the index in that
+ // sequence, otherwise -1.
+ uint hrm_index() const { return _hrm_index; }
+
// Initializing the HeapRegion not only resets the data structure, but also
// resets the BOT for that heap region.
// The default values for clear_space means that we will do the clearing if
// there's clearing to be done ourselves. We also always mangle the space.
- void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
+ void initialize(bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
static int LogOfHRGrainBytes;
static int LogOfHRGrainWords;
@@ -292,7 +313,6 @@
~((1 << (size_t) LogOfHRGrainBytes) - 1);
}
-
// Returns whether a field is in the same region as the obj it points to.
template <typename T>
static bool is_in_same_region(T* p, oop obj) {
@@ -312,31 +332,6 @@
// up once during initialization time.
static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
- // All allocated blocks are occupied by objects in a HeapRegion
- bool block_is_obj(const HeapWord* p) const;
-
- // Returns whether the given object is dead based on TAMS and bitmap.
- bool is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const;
-
- // Returns the object size for all valid block starts
- // and the amount of unallocated words if called on top()
- size_t block_size(const HeapWord* p) const;
-
- // Scans through the region using the bitmap to determine what
- // objects to call size_t ApplyToMarkedClosure::apply(oop) for.
- template<typename ApplyToMarkedClosure>
- inline void apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure);
- // Update heap region to be consistent after compaction.
- void complete_compaction();
-
- inline HeapWord* par_allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* word_size);
- inline HeapWord* allocate_no_bot_updates(size_t word_size);
- inline HeapWord* allocate_no_bot_updates(size_t min_word_size, size_t desired_word_size, size_t* actual_size);
-
- // If this region is a member of a HeapRegionManager, the index in that
- // sequence, otherwise -1.
- uint hrm_index() const { return _hrm_index; }
-
// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
size_t live_bytes() {
@@ -378,6 +373,22 @@
void zero_marked_bytes() {
_prev_marked_bytes = _next_marked_bytes = 0;
}
+ // Get the start of the unmarked area in this region.
+ HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
+ HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
+
+ // Note the start or end of marking. This tells the heap region
+ // that the collector is about to start or has finished (concurrently)
+ // marking the heap.
+
+ // Notify the region that concurrent marking is starting. Initialize
+ // all fields related to the next marking info.
+ inline void note_start_of_marking();
+
+ // Notify the region that concurrent marking has finished. Copy the
+ // (now finalized) next marking info fields into the prev marking
+ // info fields.
+ inline void note_end_of_marking();
const char* get_type_str() const { return _type.get_str(); }
const char* get_short_type_str() const { return _type.get_short_str(); }
@@ -410,6 +421,18 @@
bool is_open_archive() const { return _type.is_open_archive(); }
bool is_closed_archive() const { return _type.is_closed_archive(); }
+ void set_free();
+
+ void set_eden();
+ void set_eden_pre_gc();
+ void set_survivor();
+
+ void move_to_old();
+ void set_old();
+
+ void set_open_archive();
+ void set_closed_archive();
+
// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
return _humongous_start_region;
@@ -442,11 +465,11 @@
// Getter and setter for the next and prev fields used to link regions into
// linked lists.
+ void set_next(HeapRegion* next) { _next = next; }
HeapRegion* next() { return _next; }
- HeapRegion* prev() { return _prev; }
- void set_next(HeapRegion* next) { _next = next; }
void set_prev(HeapRegion* prev) { _prev = prev; }
+ HeapRegion* prev() { return _prev; }
// Every region added to a set is tagged with a reference to that
// set. This is used for doing consistency checking to make sure that
@@ -480,22 +503,17 @@
// Clear the card table corresponding to this region.
void clear_cardtable();
- // Get the start of the unmarked area in this region.
- HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
- HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
-
- // Note the start or end of marking. This tells the heap region
- // that the collector is about to start or has finished (concurrently)
- // marking the heap.
+ // Returns the "evacuation_failed" property of the region.
+ bool evacuation_failed() { return _evacuation_failed; }
- // Notify the region that concurrent marking is starting. Initialize
- // all fields related to the next marking info.
- inline void note_start_of_marking();
+ // Sets the "evacuation_failed" property of the region.
+ void set_evacuation_failed(bool b) {
+ _evacuation_failed = b;
- // Notify the region that concurrent marking has finished. Copy the
- // (now finalized) next marking info fields into the prev marking
- // info fields.
- inline void note_end_of_marking();
+ if (b) {
+ _next_marked_bytes = 0;
+ }
+ }
// Notify the region that we are about to start processing
// self-forwarded objects during evac failure handling.
@@ -506,17 +524,6 @@
// objects during evac failure handling.
void note_self_forwarding_removal_end(size_t marked_bytes);
- void reset_during_compaction() {
- assert(is_humongous(),
- "should only be called for humongous regions");
-
- zero_marked_bytes();
- init_top_at_mark_start();
- }
-
- void calc_gc_efficiency(void);
- double gc_efficiency() const { return _gc_efficiency;}
-
uint index_in_opt_cset() const {
assert(has_index_in_opt_cset(), "Opt cset index not set.");
return _index_in_opt_cset;
@@ -525,6 +532,9 @@
void set_index_in_opt_cset(uint index) { _index_in_opt_cset = index; }
void clear_index_in_opt_cset() { _index_in_opt_cset = InvalidCSetIndex; }
+ void calc_gc_efficiency(void);
+ double gc_efficiency() const { return _gc_efficiency;}
+
uint young_index_in_cset() const { return _young_index_in_cset; }
void clear_young_index_in_cset() { _young_index_in_cset = 0; }
void set_young_index_in_cset(uint index) {
@@ -579,18 +589,6 @@
}
}
- void set_free();
-
- void set_eden();
- void set_eden_pre_gc();
- void set_survivor();
-
- void move_to_old();
- void set_old();
-
- void set_open_archive();
- void set_closed_archive();
-
// Determine if an object has been allocated since the last
// mark performed by the collector. This returns true iff the object
// is within the unmarked area of the region.
@@ -601,18 +599,6 @@
return (HeapWord *) obj >= next_top_at_mark_start();
}
- // Returns the "evacuation_failed" property of the region.
- bool evacuation_failed() { return _evacuation_failed; }
-
- // Sets the "evacuation_failed" property of the region.
- void set_evacuation_failed(bool b) {
- _evacuation_failed = b;
-
- if (b) {
- _next_marked_bytes = 0;
- }
- }
-
// Iterate over the objects overlapping the given memory region, applying cl
// to all references in the region. This is a helper for
// G1RemSet::refine_card*, and is tightly coupled with them.
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -61,7 +61,7 @@
size_t want_to_allocate = MIN2(available, desired_word_size);
if (want_to_allocate >= min_word_size) {
HeapWord* new_top = obj + want_to_allocate;
- HeapWord* result = Atomic::cmpxchg(new_top, &_top, obj);
+ HeapWord* result = Atomic::cmpxchg(&_top, obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
@@ -304,7 +304,7 @@
template <bool is_gc_active, class Closure>
HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
- Closure* cl) {
+ Closure* cl) {
assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -217,10 +217,8 @@
if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
G1CollectedHeap::heap()->hr_printer()->commit(hr);
}
- HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
- MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
- hr->initialize(mr);
+ hr->initialize();
hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
insert_into_free_list(at(i));
}
@@ -611,6 +609,6 @@
bool HeapRegionClaimer::claim_region(uint region_index) {
assert(region_index < _n_regions, "Invalid index.");
- uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
+ uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed);
return old_val == Unclaimed;
}
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -46,7 +46,7 @@
PerRegionTable* fl = _free_list;
while (fl != NULL) {
PerRegionTable* nxt = fl->next();
- PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
+ PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, nxt);
if (res == fl) {
fl->init(hr, true);
return fl;
@@ -219,7 +219,7 @@
// some mark bits may not yet seem cleared or a 'later' update
// performed by a concurrent thread could be undone when the
// zeroing becomes visible). This requires store ordering.
- OrderAccess::release_store(&_fine_grain_regions[ind], prt);
+ Atomic::release_store(&_fine_grain_regions[ind], prt);
_n_fine_entries++;
// Transfer from sparse to fine-grain.
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -190,7 +190,7 @@
// We need access in order to union things into the base table.
BitMap* bm() { return &_bm; }
- HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
+ HeapRegion* hr() const { return Atomic::load_acquire(&_hr); }
jint occupied() const {
// Overkill, but if we ever need it...
@@ -229,7 +229,7 @@
while (true) {
PerRegionTable* fl = _free_list;
last->set_next(fl);
- PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
+ PerRegionTable* res = Atomic::cmpxchg(&_free_list, fl, prt);
if (res == fl) {
return;
}
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -65,7 +65,7 @@
_bm.clear();
// Make sure that the bitmap clearing above has been finished before publishing
// this PRT to concurrent threads.
- OrderAccess::release_store(&_hr, hr);
+ Atomic::release_store(&_hr, hr);
}
template <class Closure>
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -38,9 +38,9 @@
static_field(HeapRegion, LogOfHRGrainBytes, int) \
\
nonstatic_field(HeapRegion, _type, HeapRegionType) \
- nonstatic_field(HeapRegion, _bottom, HeapWord*) \
+ nonstatic_field(HeapRegion, _bottom, HeapWord* const) \
nonstatic_field(HeapRegion, _top, HeapWord* volatile) \
- nonstatic_field(HeapRegion, _end, HeapWord*) \
+ nonstatic_field(HeapRegion, _end, HeapWord* const) \
nonstatic_field(HeapRegion, _compaction_top, HeapWord*) \
\
nonstatic_field(HeapRegionType, _tag, HeapRegionType::Tag volatile) \
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -167,8 +167,7 @@
// Adjust new generation size
const size_t eden_plus_survivors =
align_up(eden_size + 2 * survivor_size, alignment);
- size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
- min_gen_size());
+ size_t desired_size = clamp(eden_plus_survivors, min_gen_size(), gen_size_limit());
assert(desired_size <= gen_size_limit(), "just checking");
if (desired_size > orig_size) {
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -864,7 +864,7 @@
if (p != NULL) {
HeapWord* cur_top, *cur_chunk_top = p + size;
while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
- if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
+ if (Atomic::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) {
break;
}
}
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -194,7 +194,7 @@
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
- HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
+ HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
@@ -213,7 +213,7 @@
// Try to deallocate previous allocation. Returns true upon success.
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
HeapWord* expected_top = obj + size;
- return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
+ return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -90,7 +90,7 @@
bool end_bit_ok = _end_bits.par_set_bit(end_bit);
assert(end_bit_ok, "concurrency problem");
DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
- DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
+ DEBUG_ONLY(Atomic::add(&mark_bitmap_size, size));
return true;
}
return false;
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -355,7 +355,7 @@
new_size = gen_size_limit();
}
// Adjust according to our min and max
- new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
+ new_size = clamp(new_size, min_gen_size(), gen_size_limit());
assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
new_size = align_up(new_size, alignment);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -532,7 +532,7 @@
const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
DEBUG_ONLY(Atomic::inc(&add_obj_count);)
- DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
+ DEBUG_ONLY(Atomic::add(&add_obj_size, len);)
if (beg_region == end_region) {
// All in one region.
@@ -2449,7 +2449,7 @@
}
bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
- uint claimed = Atomic::add(1u, &_counter) - 1; // -1 is so that we start with zero
+ uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
if (claimed < _insert_index) {
reference = _backing_array[claimed];
return true;
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -536,7 +536,7 @@
{
assert(_dc_and_los < dc_claimed, "already claimed");
assert(_dc_and_los >= dc_one, "count would go negative");
- Atomic::add(dc_mask, &_dc_and_los);
+ Atomic::add(&_dc_and_los, dc_mask);
}
inline HeapWord* ParallelCompactData::RegionData::data_location() const
@@ -576,7 +576,7 @@
inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
{
assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
- Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
+ Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words));
}
inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
@@ -584,7 +584,7 @@
#ifdef ASSERT
HeapWord* tmp = _highest_ref;
while (addr > tmp) {
- tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
+ tmp = Atomic::cmpxchg(&_highest_ref, tmp, addr);
}
#endif // #ifdef ASSERT
}
@@ -592,7 +592,7 @@
inline bool ParallelCompactData::RegionData::claim()
{
const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
- const region_sz_t old = Atomic::cmpxchg(dc_claimed | los, &_dc_and_los, los);
+ const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los);
return old == los;
}
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -300,8 +300,7 @@
// Adjust new generation size
const size_t eden_plus_survivors =
align_up(eden_size + 2 * survivor_size, alignment);
- size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
- min_gen_size());
+ size_t desired_size = clamp(eden_plus_survivors, min_gen_size(), max_size());
assert(desired_size <= max_size(), "just checking");
if (desired_size > orig_size) {
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -392,7 +392,7 @@
size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before, alignment);
// Adjust new generation size
- desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
+ desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
assert(desired_new_size <= max_new_size, "just checking");
bool changed = false;
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -211,23 +211,23 @@
}
template <typename T>
- static T atomic_cmpxchg_in_heap(T new_value, T* addr, T compare_value) {
- return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+ static T atomic_cmpxchg_in_heap(T* addr, T compare_value, T new_value) {
+ return Raw::atomic_cmpxchg(addr, compare_value, new_value);
}
template <typename T>
- static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+ static T atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+ return Raw::atomic_cmpxchg_at(base, offset, compare_value, new_value);
}
template <typename T>
- static T atomic_xchg_in_heap(T new_value, T* addr) {
- return Raw::atomic_xchg(new_value, addr);
+ static T atomic_xchg_in_heap(T* addr, T new_value) {
+ return Raw::atomic_xchg(addr, new_value);
}
template <typename T>
- static T atomic_xchg_in_heap_at(T new_value, oop base, ptrdiff_t offset) {
- return Raw::atomic_xchg_at(new_value, base, offset);
+ static T atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, T new_value) {
+ return Raw::atomic_xchg_at(base, offset, new_value);
}
template <typename T>
@@ -261,21 +261,21 @@
}
template <typename T>
- static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
- return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+ static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
+ return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
}
- static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
- return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value);
+ static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
+ return Raw::oop_atomic_cmpxchg_at(base, offset, compare_value, new_value);
}
template <typename T>
- static oop oop_atomic_xchg_in_heap(oop new_value, T* addr) {
- return Raw::oop_atomic_xchg(new_value, addr);
+ static oop oop_atomic_xchg_in_heap(T* addr, oop new_value) {
+ return Raw::oop_atomic_xchg(addr, new_value);
}
- static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
- return Raw::oop_atomic_xchg_at(new_value, base, offset);
+ static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+ return Raw::oop_atomic_xchg_at(base, offset, new_value);
}
template <typename T>
@@ -297,13 +297,13 @@
}
template <typename T>
- static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
- return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+ static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
+ return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
}
template <typename T>
- static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
- return Raw::oop_atomic_xchg(new_value, addr);
+ static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+ return Raw::oop_atomic_xchg(addr, new_value);
}
// Clone barrier support
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -27,14 +27,14 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
volatile CardValue* byte = _card_table->byte_for(field);
if (_card_table->scanned_concurrently()) {
// Perform a releasing store if the card table is scanned concurrently
- OrderAccess::release_store(byte, CardTable::dirty_card_val());
+ Atomic::release_store(byte, CardTable::dirty_card_val());
} else {
*byte = CardTable::dirty_card_val();
}
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -139,7 +139,7 @@
if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
|| _ct->is_prev_youngergen_card_val(entry_val)) {
CardValue res =
- Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
+ Atomic::cmpxchg(entry, entry_val, CardTableRS::clean_card_val());
if (res == entry_val) {
break;
} else {
@@ -264,7 +264,7 @@
// Mark it as both cur and prev youngergen; card cleaning thread will
// eventually remove the previous stuff.
CardValue new_val = cur_youngergen_and_prev_nonclean_card;
- CardValue res = Atomic::cmpxchg(new_val, entry, entry_val);
+ CardValue res = Atomic::cmpxchg(entry, entry_val, new_val);
// Did the CAS succeed?
if (res == entry_val) return;
// Otherwise, retry, to see the new value.
--- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -51,7 +51,7 @@
// Signal thread has terminated
MonitorLocker ml(Terminator_lock);
- OrderAccess::release_store(&_has_terminated, true);
+ Atomic::release_store(&_has_terminated, true);
ml.notify_all();
}
@@ -60,7 +60,7 @@
assert(!has_terminated(), "Invalid state");
// Signal thread to terminate
- OrderAccess::release_store_fence(&_should_terminate, true);
+ Atomic::release_store_fence(&_should_terminate, true);
stop_service();
@@ -72,9 +72,9 @@
}
bool ConcurrentGCThread::should_terminate() const {
- return OrderAccess::load_acquire(&_should_terminate);
+ return Atomic::load_acquire(&_should_terminate);
}
bool ConcurrentGCThread::has_terminated() const {
- return OrderAccess::load_acquire(&_has_terminated);
+ return Atomic::load_acquire(&_has_terminated);
}
--- a/src/hotspot/share/gc/shared/gc_globals.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -210,10 +210,6 @@
"Number of threads concurrent gc will use") \
constraint(ConcGCThreadsConstraintFunc,AfterErgo) \
\
- product(uint, GCTaskTimeStampEntries, 200, \
- "Number of time stamp entries per gc worker thread") \
- range(1, max_jint) \
- \
product(bool, AlwaysTenure, false, \
"Always tenure objects in eden (ParallelGC only)") \
\
--- a/src/hotspot/share/gc/shared/genArguments.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/genArguments.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -229,7 +229,7 @@
// yield a size that is too small) and bound it by MaxNewSize above.
// Ergonomics plays here by previously calculating the desired
// NewSize and MaxNewSize.
- max_young_size = MIN2(MAX2(max_young_size, NewSize), MaxNewSize);
+ max_young_size = clamp(max_young_size, NewSize, MaxNewSize);
}
// Given the maximum young size, determine the initial and
@@ -260,7 +260,7 @@
// NewSize as the floor, because if NewRatio is overly large, the resulting
// size can be too small.
initial_young_size =
- MIN2(max_young_size, MAX2(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize));
+ clamp(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize, max_young_size);
}
}
@@ -285,7 +285,7 @@
// the minimum, maximum and initial sizes consistent
// with the young sizes and the overall heap sizes.
MinOldSize = GenAlignment;
- initial_old_size = MIN2(MaxOldSize, MAX2(InitialHeapSize - initial_young_size, MinOldSize));
+ initial_old_size = clamp(InitialHeapSize - initial_young_size, MinOldSize, MaxOldSize);
// MaxOldSize has already been made consistent above.
} else {
// OldSize has been explicitly set on the command line. Use it
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -79,9 +79,9 @@
template <typename T>
static void oop_store_in_heap(T* addr, oop value);
template <typename T>
- static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
+ static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
template <typename T>
- static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
+ static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -94,12 +94,12 @@
oop_store_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), value);
}
- static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
- return oop_atomic_xchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+ static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+ return oop_atomic_xchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
}
- static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
- return oop_atomic_cmpxchg_in_heap(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
+ static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
+ return oop_atomic_cmpxchg_in_heap(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
}
};
};
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -67,10 +67,10 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
-oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
bs->template write_ref_field_pre<decorators>(addr);
- oop result = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+ oop result = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
if (result == compare_value) {
bs->template write_ref_field_post<decorators>(addr, new_value);
}
@@ -80,10 +80,10 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline oop ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
-oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+oop_atomic_xchg_in_heap(T* addr, oop new_value) {
BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
bs->template write_ref_field_pre<decorators>(addr);
- oop result = Raw::oop_atomic_xchg(new_value, addr);
+ oop result = Raw::oop_atomic_xchg(addr, new_value);
bs->template write_ref_field_post<decorators>(addr, new_value);
return result;
}
--- a/src/hotspot/share/gc/shared/oopStorage.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -140,16 +140,16 @@
}
size_t OopStorage::ActiveArray::block_count_acquire() const {
- return OrderAccess::load_acquire(&_block_count);
+ return Atomic::load_acquire(&_block_count);
}
void OopStorage::ActiveArray::increment_refcount() const {
- int new_value = Atomic::add(1, &_refcount);
+ int new_value = Atomic::add(&_refcount, 1);
assert(new_value >= 1, "negative refcount %d", new_value - 1);
}
bool OopStorage::ActiveArray::decrement_refcount() const {
- int new_value = Atomic::sub(1, &_refcount);
+ int new_value = Atomic::sub(&_refcount, 1);
assert(new_value >= 0, "negative refcount %d", new_value);
return new_value == 0;
}
@@ -161,7 +161,7 @@
*block_ptr(index) = block;
// Use a release_store to ensure all the setup is complete before
// making the block visible.
- OrderAccess::release_store(&_block_count, index + 1);
+ Atomic::release_store(&_block_count, index + 1);
return true;
} else {
return false;
@@ -264,8 +264,8 @@
bool OopStorage::Block::is_safe_to_delete() const {
assert(is_empty(), "precondition");
OrderAccess::loadload();
- return (OrderAccess::load_acquire(&_release_refcount) == 0) &&
- (OrderAccess::load_acquire(&_deferred_updates_next) == NULL);
+ return (Atomic::load_acquire(&_release_refcount) == 0) &&
+ (Atomic::load_acquire(&_deferred_updates_next) == NULL);
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
@@ -307,7 +307,7 @@
assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
unsigned index = count_trailing_zeros(~allocated);
uintx new_value = allocated | bitmask_for_index(index);
- uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, allocated);
+ uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, allocated, new_value);
if (fetched == allocated) {
return get_pointer(index); // CAS succeeded; return entry for index.
}
@@ -514,7 +514,7 @@
// Update new_array refcount to account for the new reference.
new_array->increment_refcount();
// Install new_array, ensuring its initialization is complete first.
- OrderAccess::release_store(&_active_array, new_array);
+ Atomic::release_store(&_active_array, new_array);
// Wait for any readers that could read the old array from _active_array.
// Can't use GlobalCounter here, because this is called from allocate(),
// which may be called in the scope of a GlobalCounter critical section
@@ -532,7 +532,7 @@
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
- ActiveArray* result = OrderAccess::load_acquire(&_active_array);
+ ActiveArray* result = Atomic::load_acquire(&_active_array);
result->increment_refcount();
return result;
}
@@ -595,7 +595,7 @@
while (true) {
assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = old_allocated ^ releasing;
- uintx fetched = Atomic::cmpxchg(new_value, &_allocated_bitmask, old_allocated);
+ uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
if (fetched == old_allocated) break; // Successful update.
old_allocated = fetched; // Retry with updated bitmask.
}
@@ -614,12 +614,12 @@
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
- if (Atomic::replace_if_null(this, &_deferred_updates_next)) {
+ if (Atomic::replace_if_null(&_deferred_updates_next, this)) {
// Successfully claimed. Push, with self-loop for end-of-list.
Block* head = owner->_deferred_updates;
while (true) {
_deferred_updates_next = (head == NULL) ? this : head;
- Block* fetched = Atomic::cmpxchg(this, &owner->_deferred_updates, head);
+ Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this);
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
@@ -645,13 +645,13 @@
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is wrto pushes by release().
- Block* block = OrderAccess::load_acquire(&_deferred_updates);
+ Block* block = Atomic::load_acquire(&_deferred_updates);
while (true) {
if (block == NULL) return false;
// Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
if (block == tail) tail = NULL; // Handle self-loop end marker.
- Block* fetched = Atomic::cmpxchg(tail, &_deferred_updates, block);
+ Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail);
if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
}
@@ -724,7 +724,7 @@
}
// Release the contiguous entries that are in block.
block->release_entries(releasing, this);
- Atomic::sub(count, &_allocation_count);
+ Atomic::sub(&_allocation_count, count);
}
}
@@ -825,7 +825,7 @@
// Set the request flag false and return its old value.
// Needs to be atomic to avoid dropping a concurrent request.
// Can't use Atomic::xchg, which may not support bool.
- return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
+ return Atomic::cmpxchg(&needs_cleanup_requested, true, false);
}
// Record that cleanup is needed, without notifying the Service thread.
@@ -833,23 +833,23 @@
void OopStorage::record_needs_cleanup() {
// Set local flag first, else service thread could wake up and miss
// the request. This order may instead (rarely) unnecessarily notify.
- OrderAccess::release_store(&_needs_cleanup, true);
- OrderAccess::release_store_fence(&needs_cleanup_requested, true);
+ Atomic::release_store(&_needs_cleanup, true);
+ Atomic::release_store_fence(&needs_cleanup_requested, true);
}
bool OopStorage::delete_empty_blocks() {
// Service thread might have oopstorage work, but not for this object.
// Check for deferred updates even though that's not a service thread
// trigger; since we're here, we might as well process them.
- if (!OrderAccess::load_acquire(&_needs_cleanup) &&
- (OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
+ if (!Atomic::load_acquire(&_needs_cleanup) &&
+ (Atomic::load_acquire(&_deferred_updates) == NULL)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
- OrderAccess::release_store_fence(&_needs_cleanup, false);
+ Atomic::release_store_fence(&_needs_cleanup, false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
@@ -993,7 +993,7 @@
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
data->_processed += data->_segment_end - data->_segment_start;
- size_t start = OrderAccess::load_acquire(&_next_block);
+ size_t start = Atomic::load_acquire(&_next_block);
if (start >= _block_count) {
return finish_iteration(data); // No more blocks available.
}
@@ -1010,7 +1010,7 @@
// than a CAS loop on some platforms when there is contention.
// We can cope with the uncertainty by recomputing start/end from
// the result of the add, and dealing with potential overshoot.
- size_t end = Atomic::add(step, &_next_block);
+ size_t end = Atomic::add(&_next_block, step);
// _next_block may have changed, so recompute start from result of add.
start = end - step;
// _next_block may have changed so much that end has overshot.
--- a/src/hotspot/share/gc/shared/parallelCleaning.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -94,7 +94,7 @@
}
}
- } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
+ } while (Atomic::cmpxchg(&_claimed_nmethod, first, last.method()) != first);
}
void CodeCacheUnloadingTask::work(uint worker_id) {
@@ -130,7 +130,7 @@
return false;
}
- return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0;
+ return Atomic::cmpxchg(&_clean_klass_tree_claimed, 0, 1) == 0;
}
InstanceKlass* KlassCleaningTask::claim_next_klass() {
--- a/src/hotspot/share/gc/shared/plab.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/plab.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -135,7 +135,7 @@
// Calculates plab size for current number of gc worker threads.
size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
- return align_object_size(MIN2(MAX2(min_size(), _desired_net_plab_sz / no_of_gc_workers), max_size()));
+ return align_object_size(clamp(_desired_net_plab_sz / no_of_gc_workers, min_size(), max_size()));
}
// Compute desired plab size for one gc worker thread and latch result for later
--- a/src/hotspot/share/gc/shared/plab.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/plab.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -43,19 +43,19 @@
}
void PLABStats::add_allocated(size_t v) {
- Atomic::add(v, &_allocated);
+ Atomic::add(&_allocated, v);
}
void PLABStats::add_unused(size_t v) {
- Atomic::add(v, &_unused);
+ Atomic::add(&_unused, v);
}
void PLABStats::add_wasted(size_t v) {
- Atomic::add(v, &_wasted);
+ Atomic::add(&_wasted, v);
}
void PLABStats::add_undo_wasted(size_t v) {
- Atomic::add(v, &_undo_wasted);
+ Atomic::add(&_undo_wasted, v);
}
#endif // SHARE_GC_SHARED_PLAB_INLINE_HPP
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -55,7 +55,7 @@
restore();
// Only do the atomic add if the size is > 0.
if (stack_size > 0) {
- Atomic::add(stack_size, total_size_addr);
+ Atomic::add(total_size_addr, stack_size);
}
}
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/ptrQueue.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -150,7 +150,7 @@
// Decrement count after getting buffer from free list. This, along
// with incrementing count before adding to free list, ensures count
// never underflows.
- size_t count = Atomic::sub(1u, &_free_count);
+ size_t count = Atomic::sub(&_free_count, 1u);
assert((count + 1) != 0, "_free_count underflow");
}
return node;
@@ -182,7 +182,7 @@
const size_t trigger_transfer = 10;
// Add to pending list. Update count first so no underflow in transfer.
- size_t pending_count = Atomic::add(1u, &_pending_count);
+ size_t pending_count = Atomic::add(&_pending_count, 1u);
_pending_list.push(*node);
if (pending_count > trigger_transfer) {
try_transfer_pending();
@@ -197,7 +197,7 @@
bool BufferNode::Allocator::try_transfer_pending() {
// Attempt to claim the lock.
if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
- Atomic::cmpxchg(true, &_transfer_lock, false)) {
+ Atomic::cmpxchg(&_transfer_lock, false, true)) {
return false;
}
// Have the lock; perform the transfer.
@@ -212,19 +212,19 @@
last = next;
++count;
}
- Atomic::sub(count, &_pending_count);
+ Atomic::sub(&_pending_count, count);
// Wait for any in-progress pops, to avoid ABA for them.
GlobalCounter::write_synchronize();
// Add synchronized nodes to _free_list.
// Update count first so no underflow in allocate().
- Atomic::add(count, &_free_count);
+ Atomic::add(&_free_count, count);
_free_list.prepend(*first, *last);
log_trace(gc, ptrqueue, freelist)
("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
}
- OrderAccess::release_store(&_transfer_lock, false);
+ Atomic::release_store(&_transfer_lock, false);
return true;
}
@@ -236,7 +236,7 @@
if (node == NULL) break;
BufferNode::deallocate(node);
}
- size_t new_count = Atomic::sub(removed, &_free_count);
+ size_t new_count = Atomic::sub(&_free_count, removed);
log_debug(gc, ptrqueue, freelist)
("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
name(), removed, new_count);
@@ -258,4 +258,3 @@
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
_allocator->release(node);
}
-
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1031,7 +1031,7 @@
// The last ref must have its discovered field pointing to itself.
oop next_discovered = (current_head != NULL) ? current_head : obj;
- oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));
+ oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);
if (retest == NULL) {
// This thread just won the right to enqueue the object.
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -246,7 +246,7 @@
void ReferenceProcessorPhaseTimes::add_ref_cleared(ReferenceType ref_type, size_t count) {
ASSERT_REF_TYPE(ref_type);
- Atomic::add(count, &_ref_cleared[ref_type_2_index(ref_type)]);
+ Atomic::add(&_ref_cleared[ref_type_2_index(ref_type)], count);
}
void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) {
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -136,7 +136,7 @@
value += 2;
assert(value > old, "overflow");
if (value > threshold) value |= 1;
- value = Atomic::cmpxchg(value, cfptr, old);
+ value = Atomic::cmpxchg(cfptr, old, value);
} while (value != old);
}
@@ -149,7 +149,7 @@
old = value;
value -= 2;
if (value <= 1) value = 0;
- value = Atomic::cmpxchg(value, cfptr, old);
+ value = Atomic::cmpxchg(cfptr, old, value);
} while (value != old);
}
@@ -329,7 +329,7 @@
#endif // PRODUCT
void SATBMarkQueueSet::abandon_completed_buffers() {
- Atomic::store(size_t(0), &_count_and_process_flag);
+ Atomic::store(&_count_and_process_flag, size_t(0));
BufferNode* buffers_to_delete = _list.pop_all();
while (buffers_to_delete != NULL) {
BufferNode* bn = buffers_to_delete;
--- a/src/hotspot/share/gc/shared/space.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/space.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -554,7 +554,7 @@
HeapWord* obj = top();
if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
- HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
+ HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top);
// result can be one of two:
// the old top value: the exchange succeeded
// otherwise: the new value of the top is returned.
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -32,7 +32,7 @@
volatile size_t StringDedupQueue::_claimed_index = 0;
size_t StringDedupQueue::claim() {
- return Atomic::add(size_t(1), &_claimed_index) - 1;
+ return Atomic::add(&_claimed_index, size_t(1)) - 1;
}
void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -589,7 +589,7 @@
}
size_t StringDedupTable::claim_table_partition(size_t partition_size) {
- return Atomic::add(partition_size, &_claimed_index) - partition_size;
+ return Atomic::add(&_claimed_index, partition_size) - partition_size;
}
void StringDedupTable::verify() {
--- a/src/hotspot/share/gc/shared/taskqueue.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/taskqueue.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -57,7 +57,7 @@
{
// Use a width w: 1 <= w <= max_width
const unsigned int max_width = 40;
- const unsigned int w = MAX2(MIN2(width, max_width), 1U);
+ const unsigned int w = clamp(width, 1u, max_width);
if (line == 0) { // spaces equal in width to the header
const unsigned int hdr_width = w * last_stat_id + last_stat_id - 1;
@@ -244,7 +244,7 @@
return true;
}
expected_value = current_offered;
- } while ((current_offered = Atomic::cmpxchg(current_offered - 1, &_offered_termination, current_offered)) != expected_value);
+ } while ((current_offered = Atomic::cmpxchg(&_offered_termination, current_offered, current_offered - 1)) != expected_value);
assert(_offered_termination < _n_threads, "Invariant");
return false;
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -68,7 +68,7 @@
// assignment. However, casting to E& means that we trigger an
// unused-value warning. So, we cast the E& to void.
(void)const_cast<E&>(_elems[localBot] = t);
- OrderAccess::release_store(&_bottom, increment_index(localBot));
+ Atomic::release_store(&_bottom, increment_index(localBot));
TASKQUEUE_STATS_ONLY(stats.record_push());
return true;
}
@@ -89,7 +89,7 @@
// assignment. However, casting to E& means that we trigger an
// unused-value warning. So, we cast the E& to void.
(void) const_cast<E&>(_elems[localBot] = t);
- OrderAccess::release_store(&_bottom, increment_index(localBot));
+ Atomic::release_store(&_bottom, increment_index(localBot));
TASKQUEUE_STATS_ONLY(stats.record_push());
return true;
} else {
@@ -210,7 +210,7 @@
#ifndef CPU_MULTI_COPY_ATOMIC
OrderAccess::fence();
#endif
- uint localBot = OrderAccess::load_acquire(&_bottom);
+ uint localBot = Atomic::load_acquire(&_bottom);
uint n_elems = size(localBot, oldAge.top());
if (n_elems == 0) {
return false;
@@ -321,7 +321,7 @@
template <unsigned int N, MEMFLAGS F>
inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
- return Atomic::cmpxchg(new_age._data, &_data, old_age._data);
+ return Atomic::cmpxchg(&_data, old_age._data, new_age._data);
}
template<class E, MEMFLAGS F, unsigned int N>
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -132,7 +132,7 @@
(Universe::heap()->tlab_capacity(thread()) / HeapWordSize));
size_t new_size = alloc / _target_refills;
- new_size = MIN2(MAX2(new_size, min_size()), max_size());
+ new_size = clamp(new_size, min_size(), max_size());
size_t aligned_new_size = align_object_size(new_size);
@@ -251,6 +251,10 @@
(nof_threads * target_refills());
init_sz = align_object_size(init_sz);
}
+ // We can't use clamp() between min_size() and max_size() here because some
+ // options based on them may still be inconsistent and so it may assert;
+ // inconsistencies between those will be caught by following AfterMemoryInit
+ // constraint checking.
init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
return init_sz;
}
--- a/src/hotspot/share/gc/shared/workgroup.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shared/workgroup.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -153,7 +153,7 @@
// Wait for the coordinator to dispatch a task.
_start_semaphore->wait();
- uint num_started = Atomic::add(1u, &_started);
+ uint num_started = Atomic::add(&_started, 1u);
// Subtract one to get a zero-indexed worker id.
uint worker_id = num_started - 1;
@@ -164,7 +164,7 @@
void worker_done_with_task() {
// Mark that the worker is done with the task.
// The worker is not allowed to read the state variables after this line.
- uint not_finished = Atomic::sub(1u, &_not_finished);
+ uint not_finished = Atomic::sub(&_not_finished, 1u);
// The last worker signals to the coordinator that all work is completed.
if (not_finished == 0) {
@@ -426,7 +426,7 @@
assert(t < _n_tasks, "bad task id.");
uint old = _tasks[t];
if (old == 0) {
- old = Atomic::cmpxchg(1u, &_tasks[t], 0u);
+ old = Atomic::cmpxchg(&_tasks[t], 0u, 1u);
}
bool res = old == 0;
#ifdef ASSERT
@@ -443,7 +443,7 @@
uint old;
do {
old = observed;
- observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
+ observed = Atomic::cmpxchg(&_threads_completed, old, old+1);
} while (observed != old);
// If this was the last thread checking in, clear the tasks.
uint adjusted_thread_count = (n_threads == 0 ? 1 : n_threads);
@@ -471,7 +471,7 @@
bool SequentialSubTasksDone::try_claim_task(uint& t) {
t = _n_claimed;
while (t < _n_tasks) {
- uint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
+ uint res = Atomic::cmpxchg(&_n_claimed, t, t+1);
if (res == t) {
return true;
}
@@ -483,7 +483,7 @@
bool SequentialSubTasksDone::all_tasks_completed() {
uint complete = _n_completed;
while (true) {
- uint res = Atomic::cmpxchg(complete+1, &_n_completed, complete);
+ uint res = Atomic::cmpxchg(&_n_completed, complete, complete+1);
if (res == complete) {
break;
}
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -142,10 +142,10 @@
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;
template <typename T>
- static oop oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value);
+ static oop oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value);
template <typename T>
- static oop oop_atomic_xchg_in_heap_impl(oop new_value, T* addr);
+ static oop oop_atomic_xchg_in_heap_impl(T* addr, oop new_value);
public:
// Heap oop accesses. These accessors get resolved when
@@ -160,12 +160,12 @@
static void oop_store_in_heap_at(oop base, ptrdiff_t offset, oop value);
template <typename T>
- static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
- static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
+ static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
+ static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value);
template <typename T>
- static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
- static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+ static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
+ static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -184,10 +184,10 @@
static void oop_store_not_in_heap(T* addr, oop value);
template <typename T>
- static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
+ static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);
template <typename T>
- static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+ static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
};
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -99,12 +99,12 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
oop res;
oop expected = compare_value;
do {
compare_value = expected;
- res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+ res = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
expected = res;
} while ((compare_value != expected) && (resolve_forwarded(compare_value) == resolve_forwarded(expected)));
if (res != NULL) {
@@ -116,9 +116,9 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(oop new_value, T* addr, oop compare_value) {
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_impl(T* addr, oop compare_value, oop new_value) {
ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
- oop result = oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
+ oop result = oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
(result == compare_value) &&
@@ -130,23 +130,23 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
- oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, addr, compare_value);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
+ oop result = oop_atomic_cmpxchg_in_heap_impl(addr, compare_value, new_value);
keep_alive_if_weak(decorators, result);
return result;
}
template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
- oop result = oop_atomic_cmpxchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset), compare_value);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
+ oop result = oop_atomic_cmpxchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), compare_value, new_value);
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
return result;
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
- oop previous = Raw::oop_atomic_xchg(new_value, addr);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
+ oop previous = Raw::oop_atomic_xchg(addr, new_value);
if (previous != NULL) {
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(previous);
} else {
@@ -156,9 +156,9 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(oop new_value, T* addr) {
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_impl(T* addr, oop new_value) {
ShenandoahBarrierSet::barrier_set()->storeval_barrier(new_value);
- oop result = oop_atomic_xchg_not_in_heap(new_value, addr);
+ oop result = oop_atomic_xchg_not_in_heap(addr, new_value);
const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) &&
ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) {
@@ -169,15 +169,15 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
- oop result = oop_atomic_xchg_in_heap_impl(new_value, addr);
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
+ oop result = oop_atomic_xchg_in_heap_impl(addr, new_value);
keep_alive_if_weak(addr, result);
return result;
}
template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
- oop result = oop_atomic_xchg_in_heap_impl(new_value, AccessInternal::oop_field_addr<decorators>(base, offset));
+inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
+ oop result = oop_atomic_xchg_in_heap_impl(AccessInternal::oop_field_addr<decorators>(base, offset), new_value);
keep_alive_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), result);
return result;
}
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -147,7 +147,7 @@
resolved = _heap->evacuate_object(obj, _thread);
}
- Atomic::cmpxchg(resolved, p, obj);
+ Atomic::cmpxchg(p, obj, resolved);
}
}
}
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -81,7 +81,7 @@
int current = count++;
if ((current & stride_mask) == 0) {
process_block = (current >= _claimed_idx) &&
- (Atomic::cmpxchg(current + stride, &_claimed_idx, current) == current);
+ (Atomic::cmpxchg(&_claimed_idx, current, current + stride) == current);
}
if (process_block) {
if (cb->is_alive()) {
@@ -264,7 +264,7 @@
size_t max = (size_t)list->length();
while (_claimed < max) {
- size_t cur = Atomic::add(stride, &_claimed) - stride;
+ size_t cur = Atomic::add(&_claimed, stride) - stride;
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -146,7 +146,7 @@
while(index < num_regions) {
if (is_in(index)) {
- jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
+ jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1));
assert(cur >= (jint)saved_current, "Must move forward");
if (cur == saved_current) {
assert(is_in(index), "Invariant");
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -92,7 +92,7 @@
bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);
// This control loop iteration have seen this much allocations.
- size_t allocs_seen = Atomic::xchg<size_t>(0, &_allocs_seen);
+ size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);
// Choose which GC mode to run in. The block below should select a single mode.
GCMode mode = none;
@@ -593,7 +593,7 @@
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
assert(ShenandoahPacing, "should only call when pacing is enabled");
- Atomic::add(words, &_allocs_seen);
+ Atomic::add(&_allocs_seen, words);
}
void ShenandoahControlThread::set_forced_counters_update(bool value) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -38,7 +38,7 @@
}
void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
- while ((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
+ while ((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) != 0) {
os::naked_short_sleep(1);
}
// At this point we are sure that no threads can evacuate anything. Raise
@@ -48,7 +48,7 @@
}
void ShenandoahEvacOOMHandler::enter_evacuation() {
- jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+ jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
@@ -59,7 +59,7 @@
}
while (true) {
- jint other = Atomic::cmpxchg(threads_in_evac + 1, &_threads_in_evac, threads_in_evac);
+ jint other = Atomic::cmpxchg(&_threads_in_evac, threads_in_evac, threads_in_evac + 1);
if (other == threads_in_evac) {
// Success: caller may safely enter evacuation
DEBUG_ONLY(ShenandoahThreadLocalData::set_evac_allowed(Thread::current(), true));
@@ -79,7 +79,7 @@
void ShenandoahEvacOOMHandler::leave_evacuation() {
if (!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current())) {
- assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
+ assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) > 0, "sanity");
// NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive.
Atomic::dec(&_threads_in_evac);
} else {
@@ -96,10 +96,9 @@
assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
- jint threads_in_evac = OrderAccess::load_acquire(&_threads_in_evac);
+ jint threads_in_evac = Atomic::load_acquire(&_threads_in_evac);
while (true) {
- jint other = Atomic::cmpxchg((threads_in_evac - 1) | OOM_MARKER_MASK,
- &_threads_in_evac, threads_in_evac);
+ jint other = Atomic::cmpxchg(&_threads_in_evac, threads_in_evac, (threads_in_evac - 1) | OOM_MARKER_MASK);
if (other == threads_in_evac) {
// Success: wait for other threads to get out of the protocol and return.
wait_for_no_evac_threads();
@@ -113,8 +112,8 @@
void ShenandoahEvacOOMHandler::clear() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
- assert((OrderAccess::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
- OrderAccess::release_store_fence<jint>(&_threads_in_evac, 0);
+ assert((Atomic::load_acquire(&_threads_in_evac) & ~OOM_MARKER_MASK) == 0, "sanity");
+ Atomic::release_store_fence<jint>(&_threads_in_evac, 0);
}
ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -601,7 +601,7 @@
}
size_t ShenandoahHeap::used() const {
- return OrderAccess::load_acquire(&_used);
+ return Atomic::load_acquire(&_used);
}
size_t ShenandoahHeap::committed() const {
@@ -620,20 +620,20 @@
}
void ShenandoahHeap::increase_used(size_t bytes) {
- Atomic::add(bytes, &_used);
+ Atomic::add(&_used, bytes);
}
void ShenandoahHeap::set_used(size_t bytes) {
- OrderAccess::release_store_fence(&_used, bytes);
+ Atomic::release_store_fence(&_used, bytes);
}
void ShenandoahHeap::decrease_used(size_t bytes) {
assert(used() >= bytes, "never decrease heap size by more than we've left");
- Atomic::sub(bytes, &_used);
+ Atomic::sub(&_used, bytes);
}
void ShenandoahHeap::increase_allocated(size_t bytes) {
- Atomic::add(bytes, &_bytes_allocated_since_gc_start);
+ Atomic::add(&_bytes_allocated_since_gc_start, bytes);
}
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
@@ -1350,7 +1350,7 @@
size_t max = _heap->num_regions();
while (_index < max) {
- size_t cur = Atomic::add(stride, &_index) - stride;
+ size_t cur = Atomic::add(&_index, stride) - stride;
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
@@ -2114,11 +2114,11 @@
}
size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
- return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
+ return Atomic::load_acquire(&_bytes_allocated_since_gc_start);
}
void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
- OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
+ Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
}
void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -49,7 +49,7 @@
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
- size_t new_index = Atomic::add((size_t) 1, &_index);
+ size_t new_index = Atomic::add(&_index, (size_t) 1);
// get_region() provides the bounds-check and returns NULL on OOB.
return _heap->get_region(new_index - 1);
}
@@ -131,20 +131,20 @@
inline oop ShenandoahHeap::cas_oop(oop n, oop* addr, oop c) {
assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr));
- return (oop) Atomic::cmpxchg(n, addr, c);
+ return (oop) Atomic::cmpxchg(addr, c, n);
}
inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) {
assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
narrowOop val = CompressedOops::encode(n);
- return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, c));
+ return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, c, val));
}
inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) {
assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr));
narrowOop cmp = CompressedOops::encode(c);
narrowOop val = CompressedOops::encode(n);
- return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, cmp));
+ return CompressedOops::decode((narrowOop) Atomic::cmpxchg(addr, cmp, val));
}
template <class T>
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -305,7 +305,7 @@
}
void ShenandoahHeapRegion::clear_live_data() {
- OrderAccess::release_store_fence<size_t>(&_live_data, 0);
+ Atomic::release_store_fence(&_live_data, (size_t)0);
}
void ShenandoahHeapRegion::reset_alloc_metadata() {
@@ -351,7 +351,7 @@
}
size_t ShenandoahHeapRegion::get_live_data_words() const {
- return OrderAccess::load_acquire(&_live_data);
+ return Atomic::load_acquire(&_live_data);
}
size_t ShenandoahHeapRegion::get_live_data_bytes() const {
@@ -687,12 +687,12 @@
}
void ShenandoahHeapRegion::record_pin() {
- Atomic::add((size_t)1, &_critical_pins);
+ Atomic::add(&_critical_pins, (size_t)1);
}
void ShenandoahHeapRegion::record_unpin() {
assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", region_number());
- Atomic::sub((size_t)1, &_critical_pins);
+ Atomic::sub(&_critical_pins, (size_t)1);
}
size_t ShenandoahHeapRegion::pin_count() const {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -103,7 +103,7 @@
}
inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) {
- size_t new_live_data = Atomic::add(s, &_live_data);
+ size_t new_live_data = Atomic::add(&_live_data, s);
#ifdef ASSERT
size_t live_bytes = new_live_data * HeapWordSize;
size_t used_bytes = used();
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -76,7 +76,7 @@
jlong current = os::javaTimeMillis();
jlong last = _last_sample_millis;
if (current - last > ShenandoahRegionSamplingRate &&
- Atomic::cmpxchg(current, &_last_sample_millis, last) == last) {
+ Atomic::cmpxchg(&_last_sample_millis, last, current) == last) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
jlong status = 0;
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -97,7 +97,7 @@
while(index < num_regions) {
if (_set->is_in(index)) {
- jint cur = Atomic::cmpxchg((jint)(index + 1), &_current_index, saved_current);
+ jint cur = Atomic::cmpxchg(&_current_index, saved_current, (jint)(index + 1));
assert(cur >= (jint)saved_current, "Must move forward");
if (cur == saved_current) {
assert(_set->is_in(index), "Invariant");
--- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -132,7 +132,7 @@
}
void BinaryMagnitudeSeq::add(size_t val) {
- Atomic::add(val, &_sum);
+ Atomic::add(&_sum, val);
int mag = log2_intptr(val) + 1;
@@ -147,7 +147,7 @@
mag = BitsPerSize_t - 1;
}
- Atomic::add((size_t)1, &_mags[mag]);
+ Atomic::add(&_mags[mag], (size_t)1);
}
size_t BinaryMagnitudeSeq::level(int level) const {
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -178,12 +178,12 @@
size_t ShenandoahPacer::update_and_get_progress_history() {
if (_progress == -1) {
// First initialization, report some prior
- Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+ Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
return (size_t) (_heap->max_capacity() * 0.1);
} else {
// Record history, and reply historical data
_progress_history->add(_progress);
- Atomic::store((intptr_t)PACING_PROGRESS_ZERO, &_progress);
+ Atomic::store(&_progress, (intptr_t)PACING_PROGRESS_ZERO);
return (size_t) (_progress_history->avg() * HeapWordSize);
}
}
@@ -191,8 +191,8 @@
void ShenandoahPacer::restart_with(size_t non_taxable_bytes, double tax_rate) {
size_t initial = (size_t)(non_taxable_bytes * tax_rate) >> LogHeapWordSize;
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
- Atomic::xchg((intptr_t)initial, &_budget);
- Atomic::store(tax_rate, &_tax_rate);
+ Atomic::xchg(&_budget, (intptr_t)initial);
+ Atomic::store(&_tax_rate, tax_rate);
Atomic::inc(&_epoch);
}
@@ -210,7 +210,7 @@
return false;
}
new_val = cur - tax;
- } while (Atomic::cmpxchg(new_val, &_budget, cur) != cur);
+ } while (Atomic::cmpxchg(&_budget, cur, new_val) != cur);
return true;
}
@@ -223,7 +223,7 @@
}
intptr_t tax = MAX2<intptr_t>(1, words * Atomic::load(&_tax_rate));
- Atomic::add(tax, &_budget);
+ Atomic::add(&_budget, tax);
}
intptr_t ShenandoahPacer::epoch() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -47,13 +47,13 @@
inline void ShenandoahPacer::report_internal(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
- Atomic::add((intptr_t)words, &_budget);
+ Atomic::add(&_budget, (intptr_t)words);
}
inline void ShenandoahPacer::report_progress_internal(size_t words) {
assert(ShenandoahPacing, "Only be here when pacing is enabled");
STATIC_ASSERT(sizeof(size_t) <= sizeof(intptr_t));
- Atomic::add((intptr_t)words, &_progress);
+ Atomic::add(&_progress, (intptr_t)words);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHPACER_INLINE_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -47,19 +47,19 @@
}
void set() {
- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)SET);
+ Atomic::release_store_fence(&value, (ShenandoahSharedValue)SET);
}
void unset() {
- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
+ Atomic::release_store_fence(&value, (ShenandoahSharedValue)UNSET);
}
bool is_set() const {
- return OrderAccess::load_acquire(&value) == SET;
+ return Atomic::load_acquire(&value) == SET;
}
bool is_unset() const {
- return OrderAccess::load_acquire(&value) == UNSET;
+ return Atomic::load_acquire(&value) == UNSET;
}
void set_cond(bool val) {
@@ -74,7 +74,7 @@
if (is_set()) {
return false;
}
- ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)SET, &value, (ShenandoahSharedValue)UNSET);
+ ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET);
return old == UNSET; // success
}
@@ -82,7 +82,7 @@
if (!is_set()) {
return false;
}
- ShenandoahSharedValue old = Atomic::cmpxchg((ShenandoahSharedValue)UNSET, &value, (ShenandoahSharedValue)SET);
+ ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET);
return old == SET; // success
}
@@ -118,14 +118,14 @@
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
- ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+ ShenandoahSharedValue ov = Atomic::load_acquire(&value);
if ((ov & mask_val) != 0) {
// already set
return;
}
ShenandoahSharedValue nv = ov | mask_val;
- if (Atomic::cmpxchg(nv, &value, ov) == ov) {
+ if (Atomic::cmpxchg(&value, ov, nv) == ov) {
// successfully set
return;
}
@@ -136,14 +136,14 @@
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask;
while (true) {
- ShenandoahSharedValue ov = OrderAccess::load_acquire(&value);
+ ShenandoahSharedValue ov = Atomic::load_acquire(&value);
if ((ov & mask_val) == 0) {
// already unset
return;
}
ShenandoahSharedValue nv = ov & ~mask_val;
- if (Atomic::cmpxchg(nv, &value, ov) == ov) {
+ if (Atomic::cmpxchg(&value, ov, nv) == ov) {
// successfully unset
return;
}
@@ -151,7 +151,7 @@
}
void clear() {
- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)0);
+ Atomic::release_store_fence(&value, (ShenandoahSharedValue)0);
}
bool is_set(uint mask) const {
@@ -160,11 +160,11 @@
bool is_unset(uint mask) const {
assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
- return (OrderAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
+ return (Atomic::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0;
}
bool is_clear() const {
- return (OrderAccess::load_acquire(&value)) == 0;
+ return (Atomic::load_acquire(&value)) == 0;
}
void set_cond(uint mask, bool val) {
@@ -211,17 +211,17 @@
void set(T v) {
assert (v >= 0, "sanity");
assert (v < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
- OrderAccess::release_store_fence(&value, (ShenandoahSharedValue)v);
+ Atomic::release_store_fence(&value, (ShenandoahSharedValue)v);
}
T get() const {
- return (T)OrderAccess::load_acquire(&value);
+ return (T)Atomic::load_acquire(&value);
}
T cmpxchg(T new_value, T expected) {
assert (new_value >= 0, "sanity");
assert (new_value < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity");
- return (T)Atomic::cmpxchg((ShenandoahSharedValue)new_value, &value, (ShenandoahSharedValue)expected);
+ return (T)Atomic::cmpxchg(&value, (ShenandoahSharedValue)expected, (ShenandoahSharedValue)new_value);
}
volatile ShenandoahSharedValue* addr_of() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -304,7 +304,7 @@
return NULL;
}
- jint index = Atomic::add(1, &_claimed_index);
+ jint index = Atomic::add(&_claimed_index, 1);
if (index <= size) {
return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -139,7 +139,7 @@
// skip
break;
case ShenandoahVerifier::_verify_liveness_complete:
- Atomic::add((uint) obj->size(), &_ld[obj_reg->region_number()]);
+ Atomic::add(&_ld[obj_reg->region_number()], (uint) obj->size());
// fallthrough for fast failure for un-live regions:
case ShenandoahVerifier::_verify_liveness_conservative:
check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live(),
@@ -479,7 +479,7 @@
}
}
- Atomic::add(processed, &_processed);
+ Atomic::add(&_processed, processed);
}
};
@@ -518,7 +518,7 @@
_options);
while (true) {
- size_t v = Atomic::add(1u, &_claimed) - 1;
+ size_t v = Atomic::add(&_claimed, 1u) - 1;
if (v < _heap->num_regions()) {
ShenandoahHeapRegion* r = _heap->get_region(v);
if (!r->is_humongous() && !r->is_trash()) {
@@ -538,7 +538,7 @@
if (_heap->complete_marking_context()->is_marked((oop)obj)) {
verify_and_follow(obj, stack, cl, &processed);
}
- Atomic::add(processed, &_processed);
+ Atomic::add(&_processed, processed);
}
virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
@@ -571,7 +571,7 @@
}
}
- Atomic::add(processed, &_processed);
+ Atomic::add(&_processed, processed);
}
void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) {
@@ -756,12 +756,12 @@
if (r->is_humongous()) {
// For humongous objects, test if start region is marked live, and if so,
// all humongous regions in that chain have live data equal to their "used".
- juint start_live = OrderAccess::load_acquire(&ld[r->humongous_start_region()->region_number()]);
+ juint start_live = Atomic::load_acquire(&ld[r->humongous_start_region()->region_number()]);
if (start_live > 0) {
verf_live = (juint)(r->used() / HeapWordSize);
}
} else {
- verf_live = OrderAccess::load_acquire(&ld[r->region_number()]);
+ verf_live = Atomic::load_acquire(&ld[r->region_number()]);
}
size_t reg_live = r->get_live_data_words();
--- a/src/hotspot/share/gc/z/zArray.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -101,7 +101,7 @@
template <typename T, bool parallel>
inline bool ZArrayIteratorImpl<T, parallel>::next(T* elem) {
if (parallel) {
- const size_t next = Atomic::add(1u, &_next) - 1u;
+ const size_t next = Atomic::add(&_next, 1u) - 1u;
if (next < _array->size()) {
*elem = _array->at(next);
return true;
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -48,7 +48,7 @@
}
// Heal
- const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr);
+ const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
if (prev_addr == addr) {
// Success
return;
--- a/src/hotspot/share/gc/z/zBarrierSet.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -70,12 +70,12 @@
static oop oop_load_in_heap_at(oop base, ptrdiff_t offset);
template <typename T>
- static oop oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value);
- static oop oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value);
+ static oop oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value);
+ static oop oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value);
template <typename T>
- static oop oop_atomic_xchg_in_heap(oop new_value, T* addr);
- static oop oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset);
+ static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
+ static oop oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value);
template <typename T>
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
@@ -91,10 +91,10 @@
static oop oop_load_not_in_heap(T* addr);
template <typename T>
- static oop oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value);
+ static oop oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value);
template <typename T>
- static oop oop_atomic_xchg_not_in_heap(oop new_value, T* addr);
+ static oop oop_atomic_xchg_not_in_heap(T* addr, oop new_value);
};
};
--- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -132,16 +132,16 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(oop new_value, T* addr, oop compare_value) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
ZBarrier::load_barrier_on_oop_field(addr);
- return Raw::oop_atomic_cmpxchg_in_heap(new_value, addr, compare_value);
+ return Raw::oop_atomic_cmpxchg_in_heap(addr, compare_value, new_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_in_heap_at(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
@@ -150,25 +150,25 @@
// with the motivation that if you're doing Unsafe operations on a Reference.referent
// field, then you're on your own anyway.
ZBarrier::load_barrier_on_oop_field(field_addr(base, offset));
- return Raw::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+ return Raw::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(oop new_value, T* addr) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap(T* addr, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
- const oop o = Raw::oop_atomic_xchg_in_heap(new_value, addr);
+ const oop o = Raw::oop_atomic_xchg_in_heap(addr, new_value);
return ZBarrier::load_barrier_on_oop(o);
}
template <DecoratorSet decorators, typename BarrierSetT>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop new_value, oop base, ptrdiff_t offset) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_in_heap_at(oop base, ptrdiff_t offset, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
- const oop o = Raw::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+ const oop o = Raw::oop_atomic_xchg_in_heap_at(base, offset, new_value);
return ZBarrier::load_barrier_on_oop(o);
}
@@ -222,20 +222,20 @@
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(oop new_value, T* addr, oop compare_value) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_cmpxchg_not_in_heap(T* addr, oop compare_value, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
- return Raw::oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value);
+ return Raw::oop_atomic_cmpxchg_not_in_heap(addr, compare_value, new_value);
}
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
-inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(oop new_value, T* addr) {
+inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_atomic_xchg_not_in_heap(T* addr, oop new_value) {
verify_decorators_present<ON_STRONG_OOP_REF>();
verify_decorators_absent<AS_NO_KEEPALIVE>();
- return Raw::oop_atomic_xchg_not_in_heap(new_value, addr);
+ return Raw::oop_atomic_xchg_not_in_heap(addr, new_value);
}
#endif // SHARE_GC_Z_ZBARRIERSET_INLINE_HPP
--- a/src/hotspot/share/gc/z/zBitMap.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -55,7 +55,7 @@
inc_live = false;
return false;
}
- const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
+ const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val);
if (cur_val == old_val) {
// Success
const bm_word_t marked_mask = bit_mask(bit);
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -54,7 +54,7 @@
}
inline void ZForwarding::set_pinned() {
- Atomic::store(true, &_pinned);
+ Atomic::store(&_pinned, true);
}
inline bool ZForwarding::inc_refcount() {
@@ -63,7 +63,7 @@
while (refcount > 0) {
const uint32_t old_refcount = refcount;
const uint32_t new_refcount = old_refcount + 1;
- const uint32_t prev_refcount = Atomic::cmpxchg(new_refcount, &_refcount, old_refcount);
+ const uint32_t prev_refcount = Atomic::cmpxchg(&_refcount, old_refcount, new_refcount);
if (prev_refcount == old_refcount) {
return true;
}
@@ -76,7 +76,7 @@
inline bool ZForwarding::dec_refcount() {
assert(_refcount > 0, "Invalid state");
- return Atomic::sub(1u, &_refcount) == 0u;
+ return Atomic::sub(&_refcount, 1u) == 0u;
}
inline bool ZForwarding::retain_page() {
@@ -139,7 +139,7 @@
const ZForwardingEntry old_entry; // Empty
for (;;) {
- const ZForwardingEntry prev_entry = Atomic::cmpxchg(new_entry, entries() + *cursor, old_entry);
+ const ZForwardingEntry prev_entry = Atomic::cmpxchg(entries() + *cursor, old_entry, new_entry);
if (!prev_entry.populated()) {
// Success
return to_offset;
--- a/src/hotspot/share/gc/z/zHeap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zHeap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -352,12 +352,12 @@
ZRendezvousClosure cl;
Handshake::execute(&cl);
+ // Unblock resurrection of weak/phantom references
+ ZResurrection::unblock();
+
// Purge stale metadata and nmethods that were unlinked
_unload.purge();
- // Unblock resurrection of weak/phantom references
- ZResurrection::unblock();
-
// Enqueue Soft/Weak/Final/PhantomReferences. Note that this
// must be done after unblocking resurrection. Otherwise the
// Finalizer thread could call Reference.get() on the Finalizers
--- a/src/hotspot/share/gc/z/zHeuristics.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zHeuristics.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -46,7 +46,7 @@
ZPageSizeMedium = size;
ZPageSizeMediumShift = log2_intptr(ZPageSizeMedium);
ZObjectSizeLimitMedium = ZPageSizeMedium / 8;
- ZObjectAlignmentMediumShift = ZPageSizeMediumShift - 13;
+ ZObjectAlignmentMediumShift = (int)ZPageSizeMediumShift - 13;
ZObjectAlignmentMedium = 1 << ZObjectAlignmentMediumShift;
log_info(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
--- a/src/hotspot/share/gc/z/zLiveMap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -54,11 +54,11 @@
// Multiple threads can enter here, make sure only one of them
// resets the marking information while the others busy wait.
- for (uint32_t seqnum = OrderAccess::load_acquire(&_seqnum);
+ for (uint32_t seqnum = Atomic::load_acquire(&_seqnum);
seqnum != ZGlobalSeqNum;
- seqnum = OrderAccess::load_acquire(&_seqnum)) {
+ seqnum = Atomic::load_acquire(&_seqnum)) {
if ((seqnum != seqnum_initializing) &&
- (Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
+ (Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum)) {
// Reset marking information
_live_bytes = 0;
_live_objects = 0;
@@ -73,7 +73,7 @@
// before the update of the page seqnum, such that when the
// up-to-date seqnum is load acquired, the bit maps will not
// contain stale information.
- OrderAccess::release_store(&_seqnum, ZGlobalSeqNum);
+ Atomic::release_store(&_seqnum, ZGlobalSeqNum);
break;
}
--- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -39,7 +39,7 @@
}
inline bool ZLiveMap::is_marked() const {
- return OrderAccess::load_acquire(&_seqnum) == ZGlobalSeqNum;
+ return Atomic::load_acquire(&_seqnum) == ZGlobalSeqNum;
}
inline uint32_t ZLiveMap::live_objects() const {
@@ -121,8 +121,8 @@
}
inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) {
- Atomic::add(objects, &_live_objects);
- Atomic::add(bytes, &_live_bytes);
+ Atomic::add(&_live_objects, objects);
+ Atomic::add(&_live_bytes, bytes);
}
inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const {
--- a/src/hotspot/share/gc/z/zLock.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zLock.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -53,7 +53,7 @@
if (owner != thread) {
_lock.lock();
- Atomic::store(thread, &_owner);
+ Atomic::store(&_owner, thread);
}
_count++;
@@ -66,7 +66,7 @@
_count--;
if (_count == 0) {
- Atomic::store((Thread*)NULL, &_owner);
+ Atomic::store(&_owner, (Thread*)NULL);
_lock.unlock();
}
}
--- a/src/hotspot/share/gc/z/zMark.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zMark.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -487,7 +487,7 @@
// Flush before termination
if (!try_flush(&_work_nterminateflush)) {
// No more work available, skip further flush attempts
- Atomic::store(false, &_work_terminateflush);
+ Atomic::store(&_work_terminateflush, false);
}
// Don't terminate, regardless of whether we successfully
--- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -121,7 +121,7 @@
for (;;) {
decode_versioned_pointer(vstack, stack->next_addr(), &version);
T* const new_vstack = encode_versioned_pointer(stack, version + 1);
- T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
+ T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);
if (prev_vstack == vstack) {
// Success
break;
@@ -145,7 +145,7 @@
}
T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
- T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
+ T* const prev_vstack = Atomic::cmpxchg(&_head, vstack, new_vstack);
if (prev_vstack == vstack) {
// Success
return stack;
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -70,7 +70,7 @@
return 0;
}
- const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top);
+ const uintptr_t prev_top = Atomic::cmpxchg(&_top, top, new_top);
if (prev_top == top) {
// Success
return top;
@@ -110,8 +110,8 @@
// Increment top before end to make sure another
// thread can't steal out newly expanded space.
- addr = Atomic::add(size, &_top) - size;
- Atomic::add(expand_size, &_end);
+ addr = Atomic::add(&_top, size) - size;
+ Atomic::add(&_end, expand_size);
return addr;
}
--- a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -33,11 +33,11 @@
_nworking_stage1(0) {}
inline bool ZMarkTerminate::enter_stage(volatile uint* nworking_stage) {
- return Atomic::sub(1u, nworking_stage) == 0;
+ return Atomic::sub(nworking_stage, 1u) == 0;
}
inline void ZMarkTerminate::exit_stage(volatile uint* nworking_stage) {
- Atomic::add(1u, nworking_stage);
+ Atomic::add(nworking_stage, 1u);
}
inline bool ZMarkTerminate::try_exit_stage(volatile uint* nworking_stage) {
@@ -49,7 +49,7 @@
}
const uint new_nworking = nworking + 1;
- const uint prev_nworking = Atomic::cmpxchg(new_nworking, nworking_stage, nworking);
+ const uint prev_nworking = Atomic::cmpxchg(nworking_stage, nworking, new_nworking);
if (prev_nworking == nworking) {
// Success
return true;
--- a/src/hotspot/share/gc/z/zNMethod.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zNMethod.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -258,7 +258,7 @@
volatile bool _failed;
void set_failed() {
- Atomic::store(true, &_failed);
+ Atomic::store(&_failed, true);
}
void unlink(nmethod* nm) {
--- a/src/hotspot/share/gc/z/zNMethodData.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zNMethodData.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -78,7 +78,7 @@
}
ZNMethodDataOops* ZNMethodData::oops() const {
- return OrderAccess::load_acquire(&_oops);
+ return Atomic::load_acquire(&_oops);
}
ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {
--- a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -58,7 +58,7 @@
// Claim table partition. Each partition is currently sized to span
// two cache lines. This number is just a guess, but seems to work well.
const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
- const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size);
+ const size_t partition_start = MIN2(Atomic::add(&_claimed, partition_size) - partition_size, _size);
const size_t partition_end = MIN2(partition_start + partition_size, _size);
if (partition_start == partition_end) {
// End of table
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -63,7 +63,7 @@
ZPage* const page = ZHeap::heap()->alloc_page(type, size, flags);
if (page != NULL) {
// Increment used bytes
- Atomic::add(size, _used.addr());
+ Atomic::add(_used.addr(), size);
}
return page;
@@ -71,7 +71,7 @@
void ZObjectAllocator::undo_alloc_page(ZPage* page) {
// Increment undone bytes
- Atomic::add(page->size(), _undone.addr());
+ Atomic::add(_undone.addr(), page->size());
ZHeap::heap()->undo_alloc_page(page);
}
@@ -82,7 +82,7 @@
size_t size,
ZAllocationFlags flags) {
uintptr_t addr = 0;
- ZPage* page = OrderAccess::load_acquire(shared_page);
+ ZPage* page = Atomic::load_acquire(shared_page);
if (page != NULL) {
addr = page->alloc_object_atomic(size);
@@ -97,7 +97,7 @@
retry:
// Install new page
- ZPage* const prev_page = Atomic::cmpxchg(new_page, shared_page, page);
+ ZPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page);
if (prev_page != page) {
if (prev_page == NULL) {
// Previous page was retired, retry installing the new page
@@ -304,7 +304,7 @@
size_t ZObjectAllocator::remaining() const {
assert(ZThread::is_java(), "Should be a Java thread");
- const ZPage* const page = OrderAccess::load_acquire(shared_small_page_addr());
+ const ZPage* const page = Atomic::load_acquire(shared_small_page_addr());
if (page != NULL) {
return page->remaining();
}
--- a/src/hotspot/share/gc/z/zOopClosures.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zOopClosures.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -96,7 +96,7 @@
// oop here again (the object would be strongly live and we would
// not consider clearing such oops), so therefore we don't have an
// ABA problem here.
- Atomic::cmpxchg(oop(NULL), p, obj);
+ Atomic::cmpxchg(p, obj, oop(NULL));
}
}
--- a/src/hotspot/share/gc/z/zPage.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -255,7 +255,7 @@
return 0;
}
- const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr);
+ const uintptr_t prev_top = Atomic::cmpxchg(&_top, addr, new_top);
if (prev_top == addr) {
// Success
return ZAddress::good(addr);
@@ -299,7 +299,7 @@
return false;
}
- const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top);
+ const uintptr_t prev_top = Atomic::cmpxchg(&_top, old_top, new_top);
if (prev_top == old_top) {
// Success
return true;
--- a/src/hotspot/share/gc/z/zReferenceProcessor.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zReferenceProcessor.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -316,7 +316,7 @@
// Prepend discovered references to internal pending list
if (*list != NULL) {
- *p = Atomic::xchg(*list, _pending_list.addr());
+ *p = Atomic::xchg(_pending_list.addr(), *list);
if (*p == NULL) {
// First to prepend to list, record tail
_pending_list_tail = p;
--- a/src/hotspot/share/gc/z/zRelocationSet.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zRelocationSet.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -38,7 +38,7 @@
if (parallel) {
if (_next < nforwardings) {
- const size_t next = Atomic::add(1u, &_next) - 1u;
+ const size_t next = Atomic::add(&_next, 1u) - 1u;
if (next < nforwardings) {
*forwarding = _relocation_set->_forwardings[next];
return true;
--- a/src/hotspot/share/gc/z/zResurrection.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zResurrection.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -23,7 +23,7 @@
#include "precompiled.hpp"
#include "gc/z/zResurrection.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
@@ -35,8 +35,8 @@
}
void ZResurrection::unblock() {
- // We use a storestore barrier to make sure all healed
- // oops are visible before we unblock resurrection.
- OrderAccess::storestore();
- _blocked = false;
+ // No need for anything stronger than a relaxed store here.
+ // The preceding handshake makes sure that all non-strong
+ // oops have already been healed at this point.
+ Atomic::store(&_blocked, false);
}
--- a/src/hotspot/share/gc/z/zResurrection.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zResurrection.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -25,14 +25,10 @@
#define SHARE_GC_Z_ZRESURRECTION_INLINE_HPP
#include "gc/z/zResurrection.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
inline bool ZResurrection::is_blocked() {
- // We use a loadload barrier to make sure we are not
- // seeing oops from a time when resurrection was blocked.
- const bool blocked = _blocked;
- OrderAccess::loadload();
- return blocked;
+ return Atomic::load(&_blocked);
}
#endif // SHARE_GC_Z_ZRESURRECTION_INLINE_HPP
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -91,7 +91,7 @@
template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
void ZSerialOopsDo<T, F>::oops_do(ZRootsIteratorClosure* cl) {
- if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
+ if (!_claimed && Atomic::cmpxchg(&_claimed, false, true) == false) {
(_iter->*F)(cl);
}
}
@@ -118,7 +118,7 @@
template <typename T, void (T::*F)(BoolObjectClosure*, ZRootsIteratorClosure*)>
void ZSerialWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
- if (!_claimed && Atomic::cmpxchg(true, &_claimed, false) == false) {
+ if (!_claimed && Atomic::cmpxchg(&_claimed, false, true) == false) {
(_iter->*F)(is_alive, cl);
}
}
--- a/src/hotspot/share/gc/z/zStat.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/zStat.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -424,9 +424,9 @@
for (uint32_t i = 0; i < ncpus; i++) {
ZStatSamplerData* const cpu_data = get_cpu_local<ZStatSamplerData>(i);
if (cpu_data->_nsamples > 0) {
- const uint64_t nsamples = Atomic::xchg((uint64_t)0, &cpu_data->_nsamples);
- const uint64_t sum = Atomic::xchg((uint64_t)0, &cpu_data->_sum);
- const uint64_t max = Atomic::xchg((uint64_t)0, &cpu_data->_max);
+ const uint64_t nsamples = Atomic::xchg(&cpu_data->_nsamples, (uint64_t)0);
+ const uint64_t sum = Atomic::xchg(&cpu_data->_sum, (uint64_t)0);
+ const uint64_t max = Atomic::xchg(&cpu_data->_max, (uint64_t)0);
all._nsamples += nsamples;
all._sum += sum;
if (all._max < max) {
@@ -459,7 +459,7 @@
const uint32_t ncpus = ZCPU::count();
for (uint32_t i = 0; i < ncpus; i++) {
ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
- counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+ counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0);
}
ZStatSample(_sampler, counter);
@@ -481,7 +481,7 @@
const uint32_t ncpus = ZCPU::count();
for (uint32_t i = 0; i < ncpus; i++) {
ZStatCounterData* const cpu_data = get_cpu_local<ZStatCounterData>(i);
- all._counter += Atomic::xchg((uint64_t)0, &cpu_data->_counter);
+ all._counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0);
}
return all;
@@ -761,8 +761,8 @@
//
void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
ZStatSamplerData* const cpu_data = sampler.get();
- Atomic::add(1u, &cpu_data->_nsamples);
- Atomic::add(value, &cpu_data->_sum);
+ Atomic::add(&cpu_data->_nsamples, 1u);
+ Atomic::add(&cpu_data->_sum, value);
uint64_t max = cpu_data->_max;
for (;;) {
@@ -772,7 +772,7 @@
}
const uint64_t new_max = value;
- const uint64_t prev_max = Atomic::cmpxchg(new_max, &cpu_data->_max, max);
+ const uint64_t prev_max = Atomic::cmpxchg(&cpu_data->_max, max, new_max);
if (prev_max == max) {
// Success
break;
@@ -787,14 +787,14 @@
void ZStatInc(const ZStatCounter& counter, uint64_t increment) {
ZStatCounterData* const cpu_data = counter.get();
- const uint64_t value = Atomic::add(increment, &cpu_data->_counter);
+ const uint64_t value = Atomic::add(&cpu_data->_counter, increment);
ZTracer::tracer()->report_stat_counter(counter, increment, value);
}
void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) {
ZStatCounterData* const cpu_data = counter.get();
- Atomic::add(increment, &cpu_data->_counter);
+ Atomic::add(&cpu_data->_counter, increment);
}
//
--- a/src/hotspot/share/gc/z/z_globals.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/gc/z/z_globals.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -79,7 +79,7 @@
diagnostic(bool, ZVerifyObjects, false, \
"Verify objects") \
\
- diagnostic(bool, ZVerifyMarking, false, \
+ diagnostic(bool, ZVerifyMarking, trueInDebug, \
"Verify marking stacks") \
\
diagnostic(bool, ZVerifyForwarding, false, \
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -2163,7 +2163,7 @@
HeapWord* compare_to = *Universe::heap()->top_addr();
HeapWord* new_top = compare_to + obj_size;
if (new_top <= *Universe::heap()->end_addr()) {
- if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
+ if (Atomic::cmpxchg(Universe::heap()->top_addr(), compare_to, new_top) != compare_to) {
goto retry;
}
result = (oop) compare_to;
--- a/src/hotspot/share/interpreter/oopMapCache.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -448,11 +448,11 @@
}
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
- return OrderAccess::load_acquire(&(_array[i % _size]));
+ return Atomic::load_acquire(&(_array[i % _size]));
}
bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
- return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
+ return Atomic::cmpxchg(&_array[i % _size], old, entry) == old;
}
void OopMapCache::flush() {
@@ -564,7 +564,7 @@
do {
head = _old_entries;
entry->_next = head;
- success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
+ success = Atomic::cmpxchg(&_old_entries, head, entry) == head;
} while (!success);
if (log_is_enabled(Debug, interpreter, oopmap)) {
--- a/src/hotspot/share/jfr/jni/jfrGetAllEventClasses.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/jni/jfrGetAllEventClasses.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -33,12 +33,11 @@
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/stack.inline.hpp"
- // incremented during class unloading (safepoint) for each unloaded event class
+ // incremented during class unloading for each unloaded event class
static jlong unloaded_event_classes = 0;
jlong JfrEventClasses::unloaded_event_classes_count() {
@@ -46,8 +45,7 @@
}
void JfrEventClasses::increment_unloaded_event_class() {
- // incremented during class unloading (safepoint) for each unloaded event class
- assert(SafepointSynchronize::is_at_safepoint(), "invariant");
+ assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
++unloaded_event_classes;
}
--- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -210,6 +210,10 @@
JfrRecorder::start_recording();
JVM_END
+JVM_ENTRY_NO_ENV(jboolean, jfr_is_recording(JNIEnv * env, jobject jvm))
+ return JfrRecorder::is_recording() ? JNI_TRUE : JNI_FALSE;
+JVM_END
+
JVM_ENTRY_NO_ENV(void, jfr_end_recording(JNIEnv* env, jobject jvm))
if (!JfrRecorder::is_recording()) {
return;
@@ -217,6 +221,9 @@
JfrRecorder::stop_recording();
JVM_END
+JVM_ENTRY_NO_ENV(void, jfr_mark_chunk_final(JNIEnv * env, jobject jvm))
+ JfrRepository::mark_chunk_final();
+JVM_END
JVM_ENTRY_NO_ENV(jboolean, jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventTypeId, jlong timeStamp, jlong when))
JfrPeriodicEventSet::requestEvent((JfrEventId)eventTypeId);
--- a/src/hotspot/share/jfr/jni/jfrJniMethod.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/jni/jfrJniMethod.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -49,8 +49,12 @@
void JNICALL jfr_begin_recording(JNIEnv* env, jobject jvm);
+jboolean JNICALL jfr_is_recording(JNIEnv* env, jobject jvm);
+
void JNICALL jfr_end_recording(JNIEnv* env, jobject jvm);
+void JNICALL jfr_mark_chunk_final(JNIEnv* env, jobject jvm);
+
jboolean JNICALL jfr_emit_event(JNIEnv* env, jobject jvm, jlong eventTypeId, jlong timeStamp, jlong when);
jobject JNICALL jfr_get_all_event_classes(JNIEnv* env, jobject jvm);
--- a/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/jni/jfrJniMethodRegistration.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -36,7 +36,9 @@
if (jfr_clz != NULL) {
JNINativeMethod method[] = {
(char*)"beginRecording", (char*)"()V", (void*)jfr_begin_recording,
+ (char*)"isRecording", (char*)"()Z", (void*)jfr_is_recording,
(char*)"endRecording", (char*)"()V", (void*)jfr_end_recording,
+ (char*)"markChunkFinal", (char*)"()V", (void*)jfr_mark_chunk_final,
(char*)"counterTime", (char*)"()J", (void*)jfr_elapsed_counter,
(char*)"createJFR", (char*)"(Z)Z", (void*)jfr_create_jfr,
(char*)"destroyJFR", (char*)"()Z", (void*)jfr_destroy_jfr,
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/eventEmitter.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -128,10 +128,13 @@
const traceid object_id = edge_store->get_id(edge);
assert(object_id != 0, "invariant");
+ Tickspan object_age = Ticks(_start_time.value()) - sample->allocation_time();
+
EventOldObjectSample e(UNTIMED);
e.set_starttime(_start_time);
e.set_endtime(_end_time);
e.set_allocationTime(sample->allocation_time());
+ e.set_objectAge(object_age);
e.set_lastKnownHeapUsage(sample->heap_used_at_last_gc());
e.set_object(object_id);
e.set_arrayElements(array_size(edge->pointee()));
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -93,7 +93,7 @@
ObjectSampler* ObjectSampler::acquire() {
assert(is_created(), "invariant");
- while (Atomic::cmpxchg(1, &_lock, 0) == 1) {}
+ while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
return _instance;
}
--- a/src/hotspot/share/jfr/metadata/metadata.xml Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/metadata/metadata.xml Mon Nov 25 15:16:29 2019 +0000
@@ -579,6 +579,7 @@
<Event name="OldObjectSample" category="Java Virtual Machine, Profiling" label="Old Object Sample" description="A potential memory leak" stackTrace="true" thread="true"
startTime="false" cutoff="true">
<Field type="Ticks" name="allocationTime" label="Allocation Time" />
+ <Field type="Tickspan" name="objectAge" label="Object Age" />
<Field type="ulong" contentType="bytes" name="lastKnownHeapUsage" label="Last Known Heap Usage" />
<Field type="OldObject" name="object" label="Object" />
<Field type="int" name="arrayElements" label="Array Elements" description="If the object is an array, the number of elements, or -1 if it is not an array" />
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -46,7 +46,7 @@
do {
compare_value = *dest;
exchange_value = compare_value + 1;
- } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
+ } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -62,7 +62,7 @@
do {
const jbyte current = *dest;
const jbyte new_value = op(current, bits);
- if (Atomic::cmpxchg(new_value, dest, current) == current) {
+ if (Atomic::cmpxchg(dest, current, new_value) == current) {
return;
}
} while (true);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -27,7 +27,7 @@
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/allocation.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
#define USED_BIT 1
#define METHOD_USED_BIT (USED_BIT << 2)
@@ -91,16 +91,16 @@
}
static bool has_changed_tag_state() {
- if (OrderAccess::load_acquire(&_tag_state)) {
- OrderAccess::release_store(&_tag_state, false);
+ if (Atomic::load_acquire(&_tag_state)) {
+ Atomic::release_store(&_tag_state, false);
return true;
}
return false;
}
static void set_changed_tag_state() {
- if (!OrderAccess::load_acquire(&_tag_state)) {
- OrderAccess::release_store(&_tag_state, true);
+ if (!Atomic::load_acquire(&_tag_state)) {
+ Atomic::release_store(&_tag_state, true);
}
}
};
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunk.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -59,7 +59,8 @@
_last_update_nanos(0),
_last_checkpoint_offset(0),
_last_metadata_offset(0),
- _generation(1) {}
+ _generation(1),
+ _final(false) {}
JfrChunk::~JfrChunk() {
reset();
@@ -86,10 +87,20 @@
return JFR_VERSION_MINOR;
}
-u2 JfrChunk::capabilities() const {
+void JfrChunk::mark_final() {
+ _final = true;
+}
+
+u2 JfrChunk::flags() const {
// chunk capabilities, CompressedIntegers etc
- static bool compressed_integers = JfrOptionSet::compressed_integers();
- return compressed_integers;
+ u2 flags = 0;
+ if (JfrOptionSet::compressed_integers()) {
+ flags |= 1 << 0;
+ }
+ if (_final) {
+ flags |= 1 << 1;
+ }
+ return flags;
}
int64_t JfrChunk::cpu_frequency() const {
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunk.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunk.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -44,6 +44,7 @@
int64_t _last_checkpoint_offset;
int64_t _last_metadata_offset;
mutable u1 _generation;
+ bool _final;
JfrChunk();
~JfrChunk();
@@ -53,7 +54,9 @@
u2 major_version() const;
u2 minor_version() const;
int64_t cpu_frequency() const;
- u2 capabilities() const;
+ u2 flags() const;
+
+ void mark_final();
void update_start_ticks();
void update_start_nanos();
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -41,8 +41,8 @@
static const int64_t START_TICKS_OFFSET = DURATION_NANOS_OFFSET + SLOT_SIZE;
static const int64_t CPU_FREQUENCY_OFFSET = START_TICKS_OFFSET + SLOT_SIZE;
static const int64_t GENERATION_OFFSET = CPU_FREQUENCY_OFFSET + SLOT_SIZE;
-static const int64_t CAPABILITY_OFFSET = GENERATION_OFFSET + 2;
-static const int64_t HEADER_SIZE = CAPABILITY_OFFSET + 2;
+static const int64_t FLAG_OFFSET = GENERATION_OFFSET + 2;
+static const int64_t HEADER_SIZE = FLAG_OFFSET + 2;
static fio_fd open_chunk(const char* path) {
return path != NULL ? os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE) : invalid_fd;
@@ -117,8 +117,8 @@
_writer->flush();
}
- void write_capabilities() {
- _writer->be_write(_chunk->capabilities());
+ void write_flags() {
+ _writer->be_write(_chunk->flags());
}
void write_size_to_generation(int64_t size, bool finalize) {
@@ -135,7 +135,7 @@
assert(_chunk != NULL, "invariant");
DEBUG_ONLY(assert_writer_position(_writer, SIZE_OFFSET);)
write_size_to_generation(size, finalize);
- // no need to write capabilities
+ write_flags();
_writer->seek(size); // implicit flush
}
@@ -146,7 +146,7 @@
write_magic();
write_version();
write_size_to_generation(HEADER_SIZE, false);
- write_capabilities();
+ write_flags();
DEBUG_ONLY(assert_writer_position(_writer, HEADER_SIZE);)
_writer->flush();
}
@@ -201,7 +201,7 @@
head.write_time(false);
head.write_cpu_frequency();
head.write_next_generation();
- head.write_capabilities();
+ head.write_flags();
assert(current_offset() - header_content_pos == HEADER_SIZE, "invariant");
const u4 checkpoint_size = current_offset() - event_size_offset;
write_padded_at_offset<u4>(checkpoint_size, event_size_offset);
@@ -211,6 +211,11 @@
return sz_written;
}
+void JfrChunkWriter::mark_chunk_final() {
+ assert(_chunk != NULL, "invariant");
+ _chunk->mark_final();
+}
+
int64_t JfrChunkWriter::flush_chunk(bool flushpoint) {
assert(_chunk != NULL, "invariant");
const int64_t sz_written = write_chunk_header_checkpoint(flushpoint);
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -59,6 +59,7 @@
bool has_metadata() const;
void set_time_stamp();
+ void mark_chunk_final();
};
#endif // SHARE_JFR_RECORDER_REPOSITORY_JFRCHUNKWRITER_HPP
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -418,7 +418,7 @@
static volatile int jfr_shutdown_lock = 0;
static bool guard_reentrancy() {
- return Atomic::cmpxchg(1, &jfr_shutdown_lock, 0) == 0;
+ return Atomic::cmpxchg(&jfr_shutdown_lock, 0, 1) == 0;
}
class JavaThreadInVM : public StackObj {
--- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -115,6 +115,10 @@
chunkwriter().set_path(path);
}
+void JfrRepository::mark_chunk_final() {
+ chunkwriter().mark_chunk_final();
+}
+
jlong JfrRepository::current_chunk_start_nanos() {
return chunkwriter().current_chunk_start_nanos();
}
--- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -70,6 +70,7 @@
public:
static void set_path(jstring location, JavaThread* jt);
static void set_chunk_path(jstring path, JavaThread* jt);
+ static void mark_chunk_final();
static void flush(JavaThread* jt);
static jlong current_chunk_start_nanos();
};
--- a/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -87,7 +87,7 @@
const int current_msgs = Atomic::load(&_messages);
// OR the new message
const int exchange_value = current_msgs | new_messages;
- const int result = Atomic::cmpxchg(exchange_value, &_messages, current_msgs);
+ const int result = Atomic::cmpxchg(&_messages, current_msgs, exchange_value);
if (result == current_msgs) {
return;
}
@@ -139,7 +139,7 @@
int JfrPostBox::collect() {
// get pending and reset to 0
- const int messages = Atomic::xchg(0, &_messages);
+ const int messages = Atomic::xchg(&_messages, 0);
if (check_waiters(messages)) {
_has_waiters = true;
assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_read_serial is protected by JfrMsg_lock");
--- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -432,6 +432,7 @@
if (_chunkwriter.is_valid()) {
Thread* const t = Thread::current();
_storage.flush_regular_buffer(t->jfr_thread_local()->native_buffer(), t);
+ _chunkwriter.mark_chunk_final();
invoke_flush();
_chunkwriter.set_time_stamp();
_repository.close_chunk();
--- a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -104,7 +104,7 @@
const u1* JfrBuffer::concurrent_top() const {
do {
const u1* current_top = stable_top();
- if (Atomic::cmpxchg(MUTEX_CLAIM, &_top, current_top) == current_top) {
+ if (Atomic::cmpxchg(&_top, current_top, MUTEX_CLAIM) == current_top) {
return current_top;
}
} while (true);
@@ -128,13 +128,13 @@
const void* current_id;
do {
current_id = Atomic::load(&_identity);
- } while (current_id != NULL || Atomic::cmpxchg(id, &_identity, current_id) != current_id);
+ } while (current_id != NULL || Atomic::cmpxchg(&_identity, current_id, id) != current_id);
}
bool JfrBuffer::try_acquire(const void* id) {
assert(id != NULL, "invariant");
const void* const current_id = Atomic::load(&_identity);
- return current_id == NULL && Atomic::cmpxchg(id, &_identity, current_id) == current_id;
+ return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
}
void JfrBuffer::release() {
--- a/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -34,7 +34,7 @@
do {
compare_value = *dest;
exchange_value = compare_value + value;
- } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
+ } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}
@@ -45,7 +45,7 @@
compare_value = *dest;
assert(compare_value >= 1, "invariant");
exchange_value = compare_value - 1;
- } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
+ } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}
@@ -137,4 +137,3 @@
void JfrStorageControl::set_scavenge_threshold(size_t number_of_dead_buffers) {
_scavenge_threshold = number_of_dead_buffers;
}
-
--- a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -46,19 +46,19 @@
inline void set_generation(uint64_t value, uint64_t* const dest) {
assert(dest != NULL, "invariant");
- OrderAccess::release_store(dest, value);
+ Atomic::release_store(dest, value);
}
static void increment_store_generation() {
- const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation);
- const uint64_t current_stored = OrderAccess::load_acquire(&store_generation);
+ const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
+ const uint64_t current_stored = Atomic::load_acquire(&store_generation);
if (current_serialized == current_stored) {
set_generation(current_serialized + 1, &store_generation);
}
}
static bool increment_serialized_generation() {
- const uint64_t current_stored = OrderAccess::load_acquire(&store_generation);
- const uint64_t current_serialized = OrderAccess::load_acquire(&serialized_generation);
+ const uint64_t current_stored = Atomic::load_acquire(&store_generation);
+ const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
if (current_stored != current_serialized) {
set_generation(current_stored, &serialized_generation);
return true;
--- a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -41,7 +41,7 @@
do {
compare_value = *dest;
exchange_value = compare_value + value;
- } while (Atomic::cmpxchg(exchange_value, dest, compare_value) != compare_value);
+ } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
return exchange_value;
}
--- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -58,9 +58,9 @@
TableEntry* _entry;
TableEntry* get_entry() const {
- return (TableEntry*)OrderAccess::load_acquire(&_entry);
+ return (TableEntry*)Atomic::load_acquire(&_entry);
}
- void set_entry(TableEntry* entry) { OrderAccess::release_store(&_entry, entry);}
+ void set_entry(TableEntry* entry) { Atomic::release_store(&_entry, entry);}
TableEntry** entry_addr() { return &_entry; }
};
--- a/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -92,11 +92,11 @@
MultiThreadedRefCounter() : _refs(0) {}
void inc() const {
- Atomic::add(1, &_refs);
+ Atomic::add(&_refs, 1);
}
bool dec() const {
- return 0 == Atomic::add((-1), &_refs);
+ return 0 == Atomic::add(&_refs, (-1));
}
int current() const {
--- a/src/hotspot/share/jfr/utilities/jfrTryLock.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jfr/utilities/jfrTryLock.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -36,7 +36,7 @@
bool _has_lock;
public:
- JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(1, lock, 0) == 0) {}
+ JfrTryLock(volatile int* lock) : _lock(lock), _has_lock(Atomic::cmpxchg(lock, 0, 1) == 0) {}
~JfrTryLock() {
if (_has_lock) {
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1586,7 +1586,7 @@
jint res = main_vm.AttachCurrentThread((void**)&hotspotEnv, NULL);
_attached = res == JNI_OK;
static volatile int report_attach_error = 0;
- if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(1, &report_attach_error, 0) == 0) {
+ if (res != JNI_OK && report_attach_error == 0 && Atomic::cmpxchg(&report_attach_error, 0, 1) == 0) {
// Only report an attach error once
jio_printf("Warning: attaching current thread to VM failed with %d (future attach errors are suppressed)\n", res);
}
@@ -1599,7 +1599,7 @@
extern struct JavaVM_ main_vm;
jint res = main_vm.DetachCurrentThread();
static volatile int report_detach_error = 0;
- if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(1, &report_detach_error, 0) == 0) {
+ if (res != JNI_OK && report_detach_error == 0 && Atomic::cmpxchg(&report_detach_error, 0, 1) == 0) {
// Only report an attach error once
jio_printf("Warning: detaching current thread from VM failed with %d (future attach errors are suppressed)\n", res);
}
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -917,7 +917,7 @@
JavaThread* THREAD = JavaThread::current();
static volatile int report_error = 0;
- if (!report_error && Atomic::cmpxchg(1, &report_error, 0) == 0) {
+ if (!report_error && Atomic::cmpxchg(&report_error, 0, 1) == 0) {
// Only report an error once
tty->print_raw_cr(message);
if (JVMCIENV != NULL) {
@@ -1295,7 +1295,7 @@
static void fatal_exception_in_compile(JVMCIEnv* JVMCIENV, JavaThread* thread, const char* msg) {
// Only report a fatal JVMCI compilation exception once
static volatile int report_init_failure = 0;
- if (!report_init_failure && Atomic::cmpxchg(1, &report_init_failure, 0) == 0) {
+ if (!report_init_failure && Atomic::cmpxchg(&report_init_failure, 0, 1) == 0) {
tty->print_cr("%s:", msg);
JVMCIENV->describe_pending_exception(true);
}
--- a/src/hotspot/share/jvmci/metadataHandleBlock.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jvmci/metadataHandleBlock.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -141,7 +141,7 @@
// but can't be put on the free list yet. The
// HandleCleaner will set this to NULL and
// put it on the free list.
- jlong old_value = Atomic::cmpxchg((jlong) (ptr_tag), (jlong*)handle, (jlong) value);
+ jlong old_value = Atomic::cmpxchg((jlong*)handle, (jlong) value, (jlong) (ptr_tag));
if (old_value == (jlong) value) {
// Success
} else {
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -465,6 +465,13 @@
declare_constant(CodeInstaller::INLINE_CONTIGUOUS_ALLOCATION_SUPPORTED) \
declare_constant(CodeInstaller::INVOKE_INVALID) \
\
+ declare_constant(CollectedHeap::Serial) \
+ declare_constant(CollectedHeap::Parallel) \
+ declare_constant(CollectedHeap::G1) \
+ declare_constant(CollectedHeap::Epsilon) \
+ declare_constant(CollectedHeap::Z) \
+ declare_constant(CollectedHeap::Shenandoah) \
+ \
declare_constant(ConstantPool::CPCACHE_INDEX_TAG) \
declare_constant(ConstantPool::_has_dynamic_constant) \
\
--- a/src/hotspot/share/logging/logDecorations.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/logging/logDecorations.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -44,12 +44,12 @@
}
const char* LogDecorations::host_name() {
- const char* host_name = OrderAccess::load_acquire(&_host_name);
+ const char* host_name = Atomic::load_acquire(&_host_name);
if (host_name == NULL) {
char buffer[1024];
if (os::get_host_name(buffer, sizeof(buffer))) {
host_name = os::strdup_check_oom(buffer);
- const char* old_value = Atomic::cmpxchg(host_name, &_host_name, (const char*)NULL);
+ const char* old_value = Atomic::cmpxchg(&_host_name, (const char*)NULL, host_name);
if (old_value != NULL) {
os::free((void *) host_name);
host_name = old_value;
@@ -147,4 +147,3 @@
int written = jio_snprintf(pos, DecorationsBufferSize - (pos - _decorations_buffer), "%s", host_name());
ASSERT_AND_RETURN(written, pos)
}
-
--- a/src/hotspot/share/logging/logOutputList.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/logging/logOutputList.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -30,13 +30,13 @@
#include "utilities/globalDefinitions.hpp"
jint LogOutputList::increase_readers() {
- jint result = Atomic::add(1, &_active_readers);
+ jint result = Atomic::add(&_active_readers, 1);
assert(_active_readers > 0, "Ensure we have consistent state");
return result;
}
jint LogOutputList::decrease_readers() {
- jint result = Atomic::add(-1, &_active_readers);
+ jint result = Atomic::add(&_active_readers, -1);
assert(result >= 0, "Ensure we have consistent state");
return result;
}
--- a/src/hotspot/share/logging/logOutputList.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/logging/logOutputList.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -97,6 +97,20 @@
}
public:
+ Iterator(const Iterator &itr) : _current(itr._current), _list(itr._list){
+ itr._list->increase_readers();
+ }
+
+ Iterator& operator=(const Iterator& rhs) {
+ _current = rhs._current;
+ if (_list != rhs._list) {
+ rhs._list->increase_readers();
+ _list->decrease_readers();
+ _list = rhs._list;
+ }
+ return *this;
+ }
+
~Iterator() {
_list->decrease_readers();
}
--- a/src/hotspot/share/memory/allocation.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/memory/allocation.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -41,7 +41,7 @@
*dest += add_value;
#else
julong value = Atomic::load(dest);
- Atomic::store(value + add_value, dest);
+ Atomic::store(dest, value + add_value);
#endif
}
#endif
--- a/src/hotspot/share/memory/dynamicArchive.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/memory/dynamicArchive.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -494,6 +494,7 @@
size_t estimate_class_file_size();
address reserve_space_and_init_buffer_to_target_delta();
void init_header(address addr);
+ void release_header();
void make_trampolines();
void make_klasses_shareable();
void sort_methods(InstanceKlass* ik) const;
@@ -664,6 +665,7 @@
}
write_archive(serialized_data_start);
+ release_header();
assert(_num_dump_regions_used == _total_dump_regions, "must be");
verify_universe("After CDS dynamic dump");
@@ -755,6 +757,7 @@
init_first_dump_space(reserved_bottom);
FileMapInfo* mapinfo = new FileMapInfo(false);
+ assert(FileMapInfo::dynamic_info() == mapinfo, "must be");
_header = mapinfo->dynamic_header();
Thread* THREAD = Thread::current();
@@ -766,6 +769,19 @@
_header->populate(base_info, os::vm_allocation_granularity());
}
+void DynamicArchiveBuilder::release_header() {
+ // We temporarily allocated a dynamic FileMapInfo for dumping, which makes it appear we
+ // have mapped a dynamic archive, but we actually have not. We are in a safepoint now.
+ // Let's free it so that if class loading happens after we leave the safepoint, nothing
+ // bad will happen.
+ assert(SafepointSynchronize::is_at_safepoint(), "must be");
+ FileMapInfo *mapinfo = FileMapInfo::dynamic_info();
+ assert(mapinfo != NULL && _header == mapinfo->dynamic_header(), "must be");
+ delete mapinfo;
+ assert(!DynamicArchive::is_mapped(), "must be");
+ _header = NULL;
+}
+
size_t DynamicArchiveBuilder::estimate_trampoline_size() {
size_t total = 0;
size_t each_method_bytes =
--- a/src/hotspot/share/memory/filemap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/memory/filemap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1038,16 +1038,11 @@
}
int fd = os::open(_full_path, O_RDONLY | O_BINARY, 0);
if (fd < 0) {
- if (is_static()) {
- if (errno == ENOENT) {
- // Not locating the shared archive is ok.
- fail_continue("Specified shared archive not found (%s).", _full_path);
- } else {
- fail_continue("Failed to open shared archive file (%s).",
- os::strerror(errno));
- }
+ if (errno == ENOENT) {
+ fail_continue("Specified shared archive not found (%s).", _full_path);
} else {
- log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s", _full_path);
+ fail_continue("Failed to open shared archive file (%s).",
+ os::strerror(errno));
}
return false;
}
--- a/src/hotspot/share/memory/metaspace.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/memory/metaspace.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -128,7 +128,7 @@
}
size_t MetaspaceGC::capacity_until_GC() {
- size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
+ size_t value = Atomic::load_acquire(&_capacity_until_GC);
assert(value >= MetaspaceSize, "Not initialized properly?");
return value;
}
@@ -162,7 +162,7 @@
if (can_retry != NULL) {
*can_retry = true;
}
- size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);
+ size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value);
if (old_capacity_until_GC != prev_value) {
return false;
@@ -180,7 +180,7 @@
size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
assert_is_aligned(v, Metaspace::commit_alignment());
- return Atomic::sub(v, &_capacity_until_GC);
+ return Atomic::sub(&_capacity_until_GC, v);
}
void MetaspaceGC::initialize() {
@@ -394,7 +394,7 @@
}
static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
- Atomic::add(words, pstat);
+ Atomic::add(pstat, words);
}
static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
@@ -402,7 +402,7 @@
assert(size_now >= words, "About to decrement counter below zero "
"(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ".",
size_now, words);
- Atomic::sub(words, pstat);
+ Atomic::sub(pstat, words);
}
void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
--- a/src/hotspot/share/memory/universe.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/memory/universe.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -511,7 +511,7 @@
oop Universe::swap_reference_pending_list(oop list) {
assert_pll_locked(is_locked);
- return Atomic::xchg(list, &_reference_pending_list);
+ return Atomic::xchg(&_reference_pending_list, list);
}
#undef assert_pll_locked
@@ -580,7 +580,7 @@
int next;
if ((_preallocated_out_of_memory_error_avail_count > 0) &&
SystemDictionary::Throwable_klass()->is_initialized()) {
- next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
+ next = (int)Atomic::add(&_preallocated_out_of_memory_error_avail_count, -1);
assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
} else {
next = -1;
--- a/src/hotspot/share/oops/access.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/access.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -165,15 +165,15 @@
}
template <typename T>
- static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+ static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
- return AccessInternal::atomic_cmpxchg_at<decorators>(new_value, base, offset, compare_value);
+ return AccessInternal::atomic_cmpxchg_at<decorators>(base, offset, compare_value, new_value);
}
template <typename T>
- static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+ static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
verify_primitive_decorators<atomic_xchg_mo_decorators>();
- return AccessInternal::atomic_xchg_at<decorators>(new_value, base, offset);
+ return AccessInternal::atomic_xchg_at<decorators>(base, offset, new_value);
}
// Oop heap accesses
@@ -191,20 +191,20 @@
}
template <typename T>
- static inline T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+ static inline T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
verify_heap_oop_decorators<atomic_cmpxchg_mo_decorators>();
typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
OopType new_oop_value = new_value;
OopType compare_oop_value = compare_value;
- return AccessInternal::atomic_cmpxchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset, compare_oop_value);
+ return AccessInternal::atomic_cmpxchg_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, compare_oop_value, new_oop_value);
}
template <typename T>
- static inline T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+ static inline T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
verify_heap_oop_decorators<atomic_xchg_mo_decorators>();
typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
OopType new_oop_value = new_value;
- return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset);
+ return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, new_oop_value);
}
// Clone an object from src to dst
@@ -227,15 +227,15 @@
}
template <typename P, typename T>
- static inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+ static inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
- return AccessInternal::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+ return AccessInternal::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
}
template <typename P, typename T>
- static inline T atomic_xchg(T new_value, P* addr) {
+ static inline T atomic_xchg(P* addr, T new_value) {
verify_primitive_decorators<atomic_xchg_mo_decorators>();
- return AccessInternal::atomic_xchg<decorators>(new_value, addr);
+ return AccessInternal::atomic_xchg<decorators>(addr, new_value);
}
// Oop accesses
@@ -254,20 +254,20 @@
}
template <typename P, typename T>
- static inline T oop_atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+ static inline T oop_atomic_cmpxchg(P* addr, T compare_value, T new_value) {
verify_oop_decorators<atomic_cmpxchg_mo_decorators>();
typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
OopType new_oop_value = new_value;
OopType compare_oop_value = compare_value;
- return AccessInternal::atomic_cmpxchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr, compare_oop_value);
+ return AccessInternal::atomic_cmpxchg<decorators | INTERNAL_VALUE_IS_OOP>(addr, compare_oop_value, new_oop_value);
}
template <typename P, typename T>
- static inline T oop_atomic_xchg(T new_value, P* addr) {
+ static inline T oop_atomic_xchg(P* addr, T new_value) {
verify_oop_decorators<atomic_xchg_mo_decorators>();
typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
OopType new_oop_value = new_value;
- return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr);
+ return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(addr, new_oop_value);
}
static oop resolve(oop obj) {
--- a/src/hotspot/share/oops/access.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/access.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -90,16 +90,16 @@
template <class GCBarrierType, DecoratorSet decorators>
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
template <typename T>
- static T access_barrier(T new_value, void* addr) {
- return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
+ static T access_barrier(void* addr, T new_value) {
+ return GCBarrierType::atomic_xchg_in_heap(reinterpret_cast<T*>(addr), new_value);
}
- static oop oop_access_barrier(oop new_value, void* addr) {
+ static oop oop_access_barrier(void* addr, oop new_value) {
typedef typename HeapOopType<decorators>::type OopType;
if (HasDecorator<decorators, IN_HEAP>::value) {
- return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
+ return GCBarrierType::oop_atomic_xchg_in_heap(reinterpret_cast<OopType*>(addr), new_value);
} else {
- return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
+ return GCBarrierType::oop_atomic_xchg_not_in_heap(reinterpret_cast<OopType*>(addr), new_value);
}
}
};
@@ -107,16 +107,16 @@
template <class GCBarrierType, DecoratorSet decorators>
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG, decorators>: public AllStatic {
template <typename T>
- static T access_barrier(T new_value, void* addr, T compare_value) {
- return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast<T*>(addr), compare_value);
+ static T access_barrier(void* addr, T compare_value, T new_value) {
+ return GCBarrierType::atomic_cmpxchg_in_heap(reinterpret_cast<T*>(addr), compare_value, new_value);
}
- static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) {
+ static oop oop_access_barrier(void* addr, oop compare_value, oop new_value) {
typedef typename HeapOopType<decorators>::type OopType;
if (HasDecorator<decorators, IN_HEAP>::value) {
- return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
+ return GCBarrierType::oop_atomic_cmpxchg_in_heap(reinterpret_cast<OopType*>(addr), compare_value, new_value);
} else {
- return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
+ return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(reinterpret_cast<OopType*>(addr), compare_value, new_value);
}
}
};
@@ -171,24 +171,24 @@
template <class GCBarrierType, DecoratorSet decorators>
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
template <typename T>
- static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
- return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
+ static T access_barrier(oop base, ptrdiff_t offset, T new_value) {
+ return GCBarrierType::atomic_xchg_in_heap_at(base, offset, new_value);
}
- static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
- return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
+ static oop oop_access_barrier(oop base, ptrdiff_t offset, oop new_value) {
+ return GCBarrierType::oop_atomic_xchg_in_heap_at(base, offset, new_value);
}
};
template <class GCBarrierType, DecoratorSet decorators>
struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG_AT, decorators>: public AllStatic {
template <typename T>
- static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+ static T access_barrier(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+ return GCBarrierType::atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value);
}
- static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
- return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
+ static oop oop_access_barrier(oop base, ptrdiff_t offset, oop compare_value, oop new_value) {
+ return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(base, offset, compare_value, new_value);
}
};
@@ -309,31 +309,31 @@
}
template <DecoratorSet decorators, typename T>
- T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
+ T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(void* addr, T compare_value, T new_value) {
func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
_atomic_cmpxchg_func = function;
- return function(new_value, addr, compare_value);
+ return function(addr, compare_value, new_value);
}
template <DecoratorSet decorators, typename T>
- T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+ T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value) {
func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
_atomic_cmpxchg_at_func = function;
- return function(new_value, base, offset, compare_value);
+ return function(base, offset, compare_value, new_value);
}
template <DecoratorSet decorators, typename T>
- T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
+ T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(void* addr, T new_value) {
func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
_atomic_xchg_func = function;
- return function(new_value, addr);
+ return function(addr, new_value);
}
template <DecoratorSet decorators, typename T>
- T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
+ T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value) {
func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
_atomic_xchg_at_func = function;
- return function(new_value, base, offset);
+ return function(base, offset, new_value);
}
template <DecoratorSet decorators, typename T>
--- a/src/hotspot/share/oops/accessBackend.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/accessBackend.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -102,13 +102,13 @@
struct AccessFunctionTypes {
typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
- typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
- typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);
+ typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
+ typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
typedef T (*load_func_t)(void* addr);
typedef void (*store_func_t)(void* addr, T value);
- typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
- typedef T (*atomic_xchg_func_t)(T new_value, void* addr);
+ typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
+ typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
@@ -293,17 +293,17 @@
template <DecoratorSet ds, typename T>
static typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
- atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);
+ atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);
template <DecoratorSet ds, typename T>
static typename EnableIf<
HasDecorator<ds, MO_RELAXED>::value, T>::type
- atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);
+ atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);
template <DecoratorSet ds, typename T>
static typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
- atomic_xchg_internal(T new_value, void* addr);
+ atomic_xchg_internal(void* addr, T new_value);
// The following *_locked mechanisms serve the purpose of handling atomic operations
// that are larger than a machine can handle, and then possibly opt for using
@@ -312,26 +312,26 @@
template <DecoratorSet ds, typename T>
static inline typename EnableIf<
!AccessInternal::PossiblyLockedAccess<T>::value, T>::type
- atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
- return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
+ atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
+ return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
}
template <DecoratorSet ds, typename T>
static typename EnableIf<
AccessInternal::PossiblyLockedAccess<T>::value, T>::type
- atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);
+ atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value);
template <DecoratorSet ds, typename T>
static inline typename EnableIf<
!AccessInternal::PossiblyLockedAccess<T>::value, T>::type
- atomic_xchg_maybe_locked(T new_value, void* addr) {
- return atomic_xchg_internal<ds>(new_value, addr);
+ atomic_xchg_maybe_locked(void* addr, T new_value) {
+ return atomic_xchg_internal<ds>(addr, new_value);
}
template <DecoratorSet ds, typename T>
static typename EnableIf<
AccessInternal::PossiblyLockedAccess<T>::value, T>::type
- atomic_xchg_maybe_locked(T new_value, void* addr);
+ atomic_xchg_maybe_locked(void* addr, T new_value);
public:
template <typename T>
@@ -345,13 +345,13 @@
}
template <typename T>
- static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
- return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
+ static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
+ return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
}
template <typename T>
- static inline T atomic_xchg(T new_value, void* addr) {
- return atomic_xchg_maybe_locked<decorators>(new_value, addr);
+ static inline T atomic_xchg(void* addr, T new_value) {
+ return atomic_xchg_maybe_locked<decorators>(addr, new_value);
}
template <typename T>
@@ -370,14 +370,14 @@
static T oop_load_at(oop base, ptrdiff_t offset);
template <typename T>
- static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
+ static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
template <typename T>
- static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);
+ static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
template <typename T>
- static T oop_atomic_xchg(T new_value, void* addr);
+ static T oop_atomic_xchg(void* addr, T new_value);
template <typename T>
- static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);
+ static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);
template <typename T>
static void store_at(oop base, ptrdiff_t offset, T value) {
@@ -390,13 +390,13 @@
}
template <typename T>
- static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
+ static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+ return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}
template <typename T>
- static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
- return atomic_xchg(new_value, field_addr(base, offset));
+ static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+ return atomic_xchg(field_addr(base, offset), new_value);
}
template <typename T>
@@ -515,10 +515,10 @@
typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
static func_t _atomic_cmpxchg_func;
- static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);
+ static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);
- static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
- return _atomic_cmpxchg_func(new_value, addr, compare_value);
+ static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
+ return _atomic_cmpxchg_func(addr, compare_value, new_value);
}
};
@@ -527,10 +527,10 @@
typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
static func_t _atomic_cmpxchg_at_func;
- static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);
+ static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);
- static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
+ static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+ return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
}
};
@@ -539,10 +539,10 @@
typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
static func_t _atomic_xchg_func;
- static T atomic_xchg_init(T new_value, void* addr);
+ static T atomic_xchg_init(void* addr, T new_value);
- static inline T atomic_xchg(T new_value, void* addr) {
- return _atomic_xchg_func(new_value, addr);
+ static inline T atomic_xchg(void* addr, T new_value) {
+ return _atomic_xchg_func(addr, new_value);
}
};
@@ -551,10 +551,10 @@
typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
static func_t _atomic_xchg_at_func;
- static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);
+ static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
- static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
- return _atomic_xchg_at_func(new_value, base, offset);
+ static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+ return _atomic_xchg_at_func(base, offset, new_value);
}
};
@@ -782,112 +782,112 @@
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
- atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+ atomic_cmpxchg(void* addr, T compare_value, T new_value) {
typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
- return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
+ return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
} else {
- return Raw::atomic_cmpxchg(new_value, addr, compare_value);
+ return Raw::atomic_cmpxchg(addr, compare_value, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
- atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+ atomic_cmpxchg(void* addr, T compare_value, T new_value) {
if (UseCompressedOops) {
const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
- return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
} else {
const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
- return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
!HasDecorator<decorators, AS_RAW>::value, T>::type
- atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+ atomic_cmpxchg(void* addr, T compare_value, T new_value) {
if (is_hardwired_primitive<decorators>()) {
const DecoratorSet expanded_decorators = decorators | AS_RAW;
- return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
} else {
- return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
+ return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
HasDecorator<decorators, AS_RAW>::value, T>::type
- atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
+ atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+ return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
!HasDecorator<decorators, AS_RAW>::value, T>::type
- atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+ atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
if (is_hardwired_primitive<decorators>()) {
const DecoratorSet expanded_decorators = decorators | AS_RAW;
- return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
} else {
- return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
+ return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
- atomic_xchg(T new_value, void* addr) {
+ atomic_xchg(void* addr, T new_value) {
typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
- return Raw::oop_atomic_xchg(new_value, addr);
+ return Raw::oop_atomic_xchg(addr, new_value);
} else {
- return Raw::atomic_xchg(new_value, addr);
+ return Raw::atomic_xchg(addr, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
- atomic_xchg(T new_value, void* addr) {
+ atomic_xchg(void* addr, T new_value) {
if (UseCompressedOops) {
const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
} else {
const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
!HasDecorator<decorators, AS_RAW>::value, T>::type
- atomic_xchg(T new_value, void* addr) {
+ atomic_xchg(void* addr, T new_value) {
if (is_hardwired_primitive<decorators>()) {
const DecoratorSet expanded_decorators = decorators | AS_RAW;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
} else {
- return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
+ return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
}
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
HasDecorator<decorators, AS_RAW>::value, T>::type
- atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
- return atomic_xchg<decorators>(new_value, field_addr(base, offset));
+ atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+ return atomic_xchg<decorators>(field_addr(base, offset), new_value);
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
!HasDecorator<decorators, AS_RAW>::value, T>::type
- atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+ atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
if (is_hardwired_primitive<decorators>()) {
const DecoratorSet expanded_decorators = decorators | AS_RAW;
- return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
+ return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_value);
} else {
- return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
+ return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
}
}
@@ -1018,56 +1018,56 @@
}
template <DecoratorSet decorators, typename T>
- inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
- return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
+ inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
+ return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
}
template <DecoratorSet decorators>
- inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
+ inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
INTERNAL_RT_USE_COMPRESSED_OOPS;
- return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
template <DecoratorSet decorators>
- inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
+ inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
INTERNAL_RT_USE_COMPRESSED_OOPS;
- return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
template <DecoratorSet decorators>
- inline oop atomic_cmpxchg_reduce_types(oop new_value,
- HeapWord* addr,
- oop compare_value) {
+ inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
+ oop compare_value,
+ oop new_value) {
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
- return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
+ return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
}
template <DecoratorSet decorators, typename T>
- inline T atomic_xchg_reduce_types(T new_value, T* addr) {
+ inline T atomic_xchg_reduce_types(T* addr, T new_value) {
const DecoratorSet expanded_decorators = decorators;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
template <DecoratorSet decorators>
- inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
+ inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
INTERNAL_RT_USE_COMPRESSED_OOPS;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
template <DecoratorSet decorators>
- inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
+ inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
INTERNAL_RT_USE_COMPRESSED_OOPS;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
template <DecoratorSet decorators>
- inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
+ inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
- return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
+ return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
}
template <DecoratorSet decorators, typename T>
@@ -1191,7 +1191,7 @@
}
template <DecoratorSet decorators, typename P, typename T>
- inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
+ inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
verify_types<decorators, T>();
typedef typename Decay<P>::type DecayedP;
typedef typename Decay<T>::type DecayedT;
@@ -1200,13 +1200,13 @@
const DecoratorSet expanded_decorators = DecoratorFixup<
(!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
(MO_SEQ_CST | decorators) : decorators>::value;
- return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
- const_cast<DecayedP*>(addr),
- compare_decayed_value);
+ return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
+ compare_decayed_value,
+ new_decayed_value);
}
template <DecoratorSet decorators, typename T>
- inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
+ inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
verify_types<decorators, T>();
typedef typename Decay<T>::type DecayedT;
DecayedT new_decayed_value = new_value;
@@ -1219,24 +1219,24 @@
const DecoratorSet final_decorators = expanded_decorators |
(HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
- return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
- offset, compare_decayed_value);
+ return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
+ new_decayed_value);
}
template <DecoratorSet decorators, typename P, typename T>
- inline T atomic_xchg(T new_value, P* addr) {
+ inline T atomic_xchg(P* addr, T new_value) {
verify_types<decorators, T>();
typedef typename Decay<P>::type DecayedP;
typedef typename Decay<T>::type DecayedT;
DecayedT new_decayed_value = new_value;
// atomic_xchg is only available in SEQ_CST flavour.
const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
- return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
- const_cast<DecayedP*>(addr));
+ return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
+ new_decayed_value);
}
template <DecoratorSet decorators, typename T>
- inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
+ inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
verify_types<decorators, T>();
typedef typename Decay<T>::type DecayedT;
DecayedT new_decayed_value = new_value;
@@ -1244,7 +1244,7 @@
const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
(HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
- return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
+ return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
}
template <DecoratorSet decorators, typename T>
--- a/src/hotspot/share/oops/accessBackend.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -85,35 +85,35 @@
template <DecoratorSet decorators>
template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
+inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(void* addr, T compare_value, T new_value) {
typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
Encoded encoded_new = encode(new_value);
Encoded encoded_compare = encode(compare_value);
- Encoded encoded_result = atomic_cmpxchg(encoded_new,
- reinterpret_cast<Encoded*>(addr),
- encoded_compare);
+ Encoded encoded_result = atomic_cmpxchg(reinterpret_cast<Encoded*>(addr),
+ encoded_compare,
+ encoded_new);
return decode<T>(encoded_result);
}
template <DecoratorSet decorators>
template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
- return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
+inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
+ return oop_atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
}
template <DecoratorSet decorators>
template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
+inline T RawAccessBarrier<decorators>::oop_atomic_xchg(void* addr, T new_value) {
typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
Encoded encoded_new = encode(new_value);
- Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
+ Encoded encoded_result = atomic_xchg(reinterpret_cast<Encoded*>(addr), encoded_new);
return decode<T>(encoded_result);
}
template <DecoratorSet decorators>
template <typename T>
-inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
- return oop_atomic_xchg(new_value, field_addr(base, offset));
+inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
+ return oop_atomic_xchg(field_addr(base, offset), new_value);
}
template <DecoratorSet decorators>
@@ -134,7 +134,7 @@
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
OrderAccess::fence();
}
- return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
+ return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
template <DecoratorSet decorators>
@@ -142,7 +142,7 @@
inline typename EnableIf<
HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
- return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
+ return Atomic::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
template <DecoratorSet decorators>
@@ -158,7 +158,7 @@
inline typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
- OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
+ Atomic::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}
template <DecoratorSet decorators>
@@ -166,7 +166,7 @@
inline typename EnableIf<
HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
- OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
+ Atomic::release_store(reinterpret_cast<volatile T*>(addr), value);
}
template <DecoratorSet decorators>
@@ -174,17 +174,17 @@
inline typename EnableIf<
HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
- Atomic::store(value, reinterpret_cast<volatile T*>(addr));
+ Atomic::store(reinterpret_cast<volatile T*>(addr), value);
}
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_RELAXED>::value, T>::type
-RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
- return Atomic::cmpxchg(new_value,
- reinterpret_cast<volatile T*>(addr),
+RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
+ return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
compare_value,
+ new_value,
memory_order_relaxed);
}
@@ -192,10 +192,10 @@
template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
-RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
- return Atomic::cmpxchg(new_value,
- reinterpret_cast<volatile T*>(addr),
+RawAccessBarrier<decorators>::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) {
+ return Atomic::cmpxchg(reinterpret_cast<volatile T*>(addr),
compare_value,
+ new_value,
memory_order_conservative);
}
@@ -203,9 +203,9 @@
template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
-RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
- return Atomic::xchg(new_value,
- reinterpret_cast<volatile T*>(addr));
+RawAccessBarrier<decorators>::atomic_xchg_internal(void* addr, T new_value) {
+ return Atomic::xchg(reinterpret_cast<volatile T*>(addr),
+ new_value);
}
// For platforms that do not have native support for wide atomics,
@@ -216,9 +216,9 @@
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
+RawAccessBarrier<ds>::atomic_xchg_maybe_locked(void* addr, T new_value) {
if (!AccessInternal::wide_atomic_needs_locking()) {
- return atomic_xchg_internal<ds>(new_value, addr);
+ return atomic_xchg_internal<ds>(addr, new_value);
} else {
AccessInternal::AccessLocker access_lock;
volatile T* p = reinterpret_cast<volatile T*>(addr);
@@ -232,9 +232,9 @@
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
AccessInternal::PossiblyLockedAccess<T>::value, T>::type
-RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
+RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
if (!AccessInternal::wide_atomic_needs_locking()) {
- return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
+ return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
} else {
AccessInternal::AccessLocker access_lock;
volatile T* p = reinterpret_cast<volatile T*>(addr);
--- a/src/hotspot/share/oops/array.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/array.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -27,7 +27,7 @@
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
// Array for metadata allocation
@@ -122,8 +122,8 @@
T* adr_at(const int i) { assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &_data[i]; }
int find(const T& x) { return index_of(x); }
- T at_acquire(const int i) { return OrderAccess::load_acquire(adr_at(i)); }
- void release_at_put(int i, T x) { OrderAccess::release_store(adr_at(i), x); }
+ T at_acquire(const int i) { return Atomic::load_acquire(adr_at(i)); }
+ void release_at_put(int i, T x) { Atomic::release_store(adr_at(i), x); }
static int size(int length) {
size_t bytes = align_up(byte_sizeof(length), BytesPerWord);
--- a/src/hotspot/share/oops/arrayKlass.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/arrayKlass.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -29,11 +29,11 @@
#include "oops/arrayKlass.hpp"
inline Klass* ArrayKlass::higher_dimension_acquire() const {
- return OrderAccess::load_acquire(&_higher_dimension);
+ return Atomic::load_acquire(&_higher_dimension);
}
inline void ArrayKlass::release_set_higher_dimension(Klass* k) {
- OrderAccess::release_store(&_higher_dimension, k);
+ Atomic::release_store(&_higher_dimension, k);
}
#endif // SHARE_OOPS_ARRAYKLASS_INLINE_HPP
--- a/src/hotspot/share/oops/constantPool.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/constantPool.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -232,7 +232,7 @@
symbol_at_put(name_index, name);
name->increment_refcount();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
- OrderAccess::release_store(adr, k);
+ Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here.
@@ -249,7 +249,7 @@
CPKlassSlot kslot = klass_slot_at(class_index);
int resolved_klass_index = kslot.resolved_klass_index();
Klass** adr = resolved_klasses()->adr_at(resolved_klass_index);
- OrderAccess::release_store(adr, k);
+ Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* non-NULL, so we need hardware store ordering here.
@@ -525,7 +525,7 @@
trace_class_resolution(this_cp, k);
}
Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index);
- OrderAccess::release_store(adr, k);
+ Atomic::release_store(adr, k);
// The interpreter assumes when the tag is stored, the klass is resolved
// and the Klass* stored in _resolved_klasses is non-NULL, so we need
// hardware store ordering here.
@@ -808,8 +808,9 @@
// This doesn't deterministically get an error. So why do we save this?
// We save this because jvmti can add classes to the bootclass path after
// this error, so it needs to get the same error if the error is first.
- jbyte old_tag = Atomic::cmpxchg((jbyte)error_tag,
- (jbyte*)this_cp->tag_addr_at(which), (jbyte)tag.value());
+ jbyte old_tag = Atomic::cmpxchg((jbyte*)this_cp->tag_addr_at(which),
+ (jbyte)tag.value(),
+ (jbyte)error_tag);
if (old_tag != error_tag && old_tag != tag.value()) {
// MethodHandles and MethodType doesn't change to resolved version.
assert(this_cp->tag_at(which).is_klass(), "Wrong tag value");
--- a/src/hotspot/share/oops/constantPool.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/constantPool.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -33,7 +33,7 @@
assert(is_within_bounds(which), "index out of bounds");
assert(!tag_at(which).is_unresolved_klass() && !tag_at(which).is_unresolved_klass_in_error(), "Corrupted constant pool");
// Uses volatile because the klass slot changes without a lock.
- intptr_t adr = OrderAccess::load_acquire(obj_at_addr(which));
+ intptr_t adr = Atomic::load_acquire(obj_at_addr(which));
assert(adr != 0 || which == 0, "cp entry for klass should not be zero");
return CPSlot(adr);
}
@@ -46,7 +46,7 @@
assert(tag_at(kslot.name_index()).is_symbol(), "sanity");
Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index());
- return OrderAccess::load_acquire(adr);
+ return Atomic::load_acquire(adr);
}
inline bool ConstantPool::is_pseudo_string_at(int which) {
--- a/src/hotspot/share/oops/cpCache.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/cpCache.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -97,7 +97,7 @@
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
- OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
+ Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
@@ -107,17 +107,17 @@
assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
// Need to flush pending stores here before bytecode is written.
- OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
+ Atomic::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}
// Sets f1, ordering with previous writes.
void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) {
assert(f1 != NULL, "");
- OrderAccess::release_store(&_f1, f1);
+ Atomic::release_store(&_f1, f1);
}
void ConstantPoolCacheEntry::set_indy_resolution_failed() {
- OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
+ Atomic::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift));
}
// Note that concurrent update of both bytecodes can leave one of them
@@ -159,7 +159,7 @@
// sure that the final parameter size agrees with what was passed.
if (_flags == 0) {
intx newflags = (value & parameter_size_mask);
- Atomic::cmpxchg(newflags, &_flags, (intx)0);
+ Atomic::cmpxchg(&_flags, (intx)0, newflags);
}
guarantee(parameter_size() == value,
"size must not change: parameter_size=%d, value=%d", parameter_size(), value);
--- a/src/hotspot/share/oops/cpCache.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/cpCache.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -29,7 +29,7 @@
#include "oops/oopHandle.inline.hpp"
#include "runtime/orderAccess.hpp"
-inline int ConstantPoolCacheEntry::indices_ord() const { return OrderAccess::load_acquire(&_indices); }
+inline int ConstantPoolCacheEntry::indices_ord() const { return Atomic::load_acquire(&_indices); }
inline Bytecodes::Code ConstantPoolCacheEntry::bytecode_1() const {
return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask);
@@ -53,7 +53,7 @@
return (Method*)_f2;
}
-inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)OrderAccess::load_acquire(&_f1); }
+inline Metadata* ConstantPoolCacheEntry::f1_ord() const { return (Metadata *)Atomic::load_acquire(&_f1); }
inline Method* ConstantPoolCacheEntry::f1_as_method() const {
Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), "");
@@ -75,7 +75,7 @@
return (!is_f1_null()) && (_flags & (1 << has_local_signature_shift)) != 0;
}
-inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)OrderAccess::load_acquire(&_flags); }
+inline intx ConstantPoolCacheEntry::flags_ord() const { return (intx)Atomic::load_acquire(&_flags); }
inline bool ConstantPoolCacheEntry::indy_resolution_failed() const {
intx flags = flags_ord();
--- a/src/hotspot/share/oops/instanceKlass.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/instanceKlass.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1097,7 +1097,7 @@
return NULL;
} else {
// This load races with inserts, and therefore needs acquire.
- Klass* kls = OrderAccess::load_acquire(k);
+ Klass* kls = Atomic::load_acquire(k);
if (kls != NULL && !kls->is_loader_alive()) {
return NULL; // don't return unloaded class
} else {
@@ -1113,7 +1113,7 @@
Klass* volatile* addr = adr_implementor();
assert(addr != NULL, "null addr");
if (addr != NULL) {
- OrderAccess::release_store(addr, k);
+ Atomic::release_store(addr, k);
}
}
@@ -1370,14 +1370,14 @@
InterpreterOopMap* entry_for) {
// Lazily create the _oop_map_cache at first request
// Lock-free access requires load_acquire.
- OopMapCache* oop_map_cache = OrderAccess::load_acquire(&_oop_map_cache);
+ OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
if (oop_map_cache == NULL) {
MutexLocker x(OopMapCacheAlloc_lock);
// Check if _oop_map_cache was allocated while we were waiting for this lock
if ((oop_map_cache = _oop_map_cache) == NULL) {
oop_map_cache = new OopMapCache();
// Ensure _oop_map_cache is stable, since it is examined without a lock
- OrderAccess::release_store(&_oop_map_cache, oop_map_cache);
+ Atomic::release_store(&_oop_map_cache, oop_map_cache);
}
}
// _oop_map_cache is constant after init; lookup below does its own locking.
@@ -2114,7 +2114,7 @@
// The jmethodID cache can be read while unlocked so we have to
// make sure the new jmethodID is complete before installing it
// in the cache.
- OrderAccess::release_store(&jmeths[idnum+1], id);
+ Atomic::release_store(&jmeths[idnum+1], id);
} else {
*to_dealloc_id_p = new_id; // save new id for later delete
}
@@ -2196,11 +2196,11 @@
assert (ClassUnloading, "only called for ClassUnloading");
for (;;) {
// Use load_acquire due to competing with inserts
- Klass* impl = OrderAccess::load_acquire(adr_implementor());
+ Klass* impl = Atomic::load_acquire(adr_implementor());
if (impl != NULL && !impl->is_loader_alive()) {
// NULL this field, might be an unloaded klass or NULL
Klass* volatile* klass = adr_implementor();
- if (Atomic::cmpxchg((Klass*)NULL, klass, impl) == impl) {
+ if (Atomic::cmpxchg(klass, impl, (Klass*)NULL) == impl) {
// Successfully unlinking implementor.
if (log_is_enabled(Trace, class, unload)) {
ResourceMark rm;
--- a/src/hotspot/share/oops/instanceKlass.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/instanceKlass.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -35,19 +35,19 @@
#include "utilities/macros.hpp"
inline Klass* InstanceKlass::array_klasses_acquire() const {
- return OrderAccess::load_acquire(&_array_klasses);
+ return Atomic::load_acquire(&_array_klasses);
}
inline void InstanceKlass::release_set_array_klasses(Klass* k) {
- OrderAccess::release_store(&_array_klasses, k);
+ Atomic::release_store(&_array_klasses, k);
}
inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
- return OrderAccess::load_acquire(&_methods_jmethod_ids);
+ return Atomic::load_acquire(&_methods_jmethod_ids);
}
inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
- OrderAccess::release_store(&_methods_jmethod_ids, jmeths);
+ Atomic::release_store(&_methods_jmethod_ids, jmeths);
}
// The iteration over the oops in objects is a hot path in the GC code.
--- a/src/hotspot/share/oops/klass.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/klass.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -362,7 +362,7 @@
Klass* Klass::subklass(bool log) const {
// Need load_acquire on the _subklass, because it races with inserts that
// publishes freshly initialized data.
- for (Klass* chain = OrderAccess::load_acquire(&_subklass);
+ for (Klass* chain = Atomic::load_acquire(&_subklass);
chain != NULL;
// Do not need load_acquire on _next_sibling, because inserts never
// create _next_sibling edges to dead data.
@@ -402,7 +402,7 @@
void Klass::set_subklass(Klass* s) {
assert(s != this, "sanity check");
- OrderAccess::release_store(&_subklass, s);
+ Atomic::release_store(&_subklass, s);
}
void Klass::set_next_sibling(Klass* s) {
@@ -410,7 +410,7 @@
// Does not need release semantics. If used by cleanup, it will link to
// already safely published data, and if used by inserts, will be published
// safely using cmpxchg.
- Atomic::store(s, &_next_sibling);
+ Atomic::store(&_next_sibling, s);
}
void Klass::append_to_sibling_list() {
@@ -427,7 +427,7 @@
super->clean_subklass();
for (;;) {
- Klass* prev_first_subklass = OrderAccess::load_acquire(&_super->_subklass);
+ Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass);
if (prev_first_subklass != NULL) {
// set our sibling to be the superklass' previous first subklass
assert(prev_first_subklass->is_loader_alive(), "May not attach not alive klasses");
@@ -436,7 +436,7 @@
// Note that the prev_first_subklass is always alive, meaning no sibling_next links
// are ever created to not alive klasses. This is an important invariant of the lock-free
// cleaning protocol, that allows us to safely unlink dead klasses from the sibling list.
- if (Atomic::cmpxchg(this, &super->_subklass, prev_first_subklass) == prev_first_subklass) {
+ if (Atomic::cmpxchg(&super->_subklass, prev_first_subklass, this) == prev_first_subklass) {
return;
}
}
@@ -446,12 +446,12 @@
void Klass::clean_subklass() {
for (;;) {
// Need load_acquire, due to contending with concurrent inserts
- Klass* subklass = OrderAccess::load_acquire(&_subklass);
+ Klass* subklass = Atomic::load_acquire(&_subklass);
if (subklass == NULL || subklass->is_loader_alive()) {
return;
}
// Try to fix _subklass until it points at something not dead.
- Atomic::cmpxchg(subklass->next_sibling(), &_subklass, subklass);
+ Atomic::cmpxchg(&_subklass, subklass, subklass->next_sibling());
}
}
@@ -710,7 +710,7 @@
}
int Klass::atomic_incr_biased_lock_revocation_count() {
- return (int) Atomic::add(1, &_biased_lock_revocation_count);
+ return (int) Atomic::add(&_biased_lock_revocation_count, 1);
}
// Unless overridden, jvmti_class_status has no flags set.
--- a/src/hotspot/share/oops/method.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/method.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -569,7 +569,7 @@
bool Method::init_method_counters(MethodCounters* counters) {
// Try to install a pointer to MethodCounters, return true on success.
- return Atomic::replace_if_null(counters, &_method_counters);
+ return Atomic::replace_if_null(&_method_counters, counters);
}
int Method::extra_stack_words() {
@@ -1247,7 +1247,7 @@
}
address Method::from_compiled_entry_no_trampoline() const {
- CompiledMethod *code = OrderAccess::load_acquire(&_code);
+ CompiledMethod *code = Atomic::load_acquire(&_code);
if (code) {
return code->verified_entry_point();
} else {
@@ -1273,7 +1273,7 @@
// Not inline to avoid circular ref.
bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field.
- CompiledMethod *code = OrderAccess::load_acquire(&_code);
+ CompiledMethod *code = Atomic::load_acquire(&_code);
return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
}
--- a/src/hotspot/share/oops/method.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/method.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -29,23 +29,23 @@
#include "runtime/orderAccess.hpp"
inline address Method::from_compiled_entry() const {
- return OrderAccess::load_acquire(&_from_compiled_entry);
+ return Atomic::load_acquire(&_from_compiled_entry);
}
inline address Method::from_interpreted_entry() const {
- return OrderAccess::load_acquire(&_from_interpreted_entry);
+ return Atomic::load_acquire(&_from_interpreted_entry);
}
inline void Method::set_method_data(MethodData* data) {
// The store into method must be released. On platforms without
// total store order (TSO) the reference may become visible before
// the initialization of data otherwise.
- OrderAccess::release_store(&_method_data, data);
+ Atomic::release_store(&_method_data, data);
}
inline CompiledMethod* volatile Method::code() const {
assert( check_code(), "" );
- return OrderAccess::load_acquire(&_code);
+ return Atomic::load_acquire(&_code);
}
// Write (bci, line number) pair to stream
--- a/src/hotspot/share/oops/methodData.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/methodData.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -896,7 +896,7 @@
FailedSpeculation** cursor = failed_speculations_address;
do {
if (*cursor == NULL) {
- FailedSpeculation* old_fs = Atomic::cmpxchg(fs, cursor, (FailedSpeculation*) NULL);
+ FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs);
if (old_fs == NULL) {
// Successfully appended fs to end of the list
return true;
@@ -1415,7 +1415,7 @@
for (;; dp = next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
- // No need for "OrderAccess::load_acquire" ops,
+ // No need for "Atomic::load_acquire" ops,
// since the data structure is monotonic.
switch(dp->tag()) {
case DataLayout::no_tag:
@@ -1550,7 +1550,7 @@
DataLayout* end = args_data_limit();
for (;; dp = next_extra(dp)) {
assert(dp < end, "moved past end of extra data");
- // No need for "OrderAccess::load_acquire" ops,
+ // No need for "Atomic::load_acquire" ops,
// since the data structure is monotonic.
switch(dp->tag()) {
case DataLayout::no_tag:
--- a/src/hotspot/share/oops/methodData.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/methodData.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -2244,7 +2244,7 @@
_rtm_state = (int)rstate;
}
void atomic_set_rtm_state(RTMState rstate) {
- Atomic::store((int)rstate, &_rtm_state);
+ Atomic::store(&_rtm_state, (int)rstate);
}
static int rtm_state_offset_in_bytes() {
--- a/src/hotspot/share/oops/methodData.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/methodData.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -29,7 +29,7 @@
#include "runtime/orderAccess.hpp"
inline void DataLayout::release_set_cell_at(int index, intptr_t value) {
- OrderAccess::release_store(&_cells[index], value);
+ Atomic::release_store(&_cells[index], value);
}
inline void ProfileData::release_set_intptr_at(int index, intptr_t value) {
--- a/src/hotspot/share/oops/objArrayOop.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/objArrayOop.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -36,7 +36,7 @@
} else {
offs = objArrayOopDesc::obj_at_offset<oop>(index);
}
- return HeapAccess<IS_ARRAY>::oop_atomic_cmpxchg_at(exchange_value, as_oop(), offs, compare_value);
+ return HeapAccess<IS_ARRAY>::oop_atomic_cmpxchg_at(as_oop(), offs, compare_value, exchange_value);
}
Klass* objArrayOopDesc::element_klass() {
--- a/src/hotspot/share/oops/oop.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/oop.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -61,7 +61,7 @@
}
void oopDesc::set_mark_raw(markWord m) {
- Atomic::store(m, &_mark);
+ Atomic::store(&_mark, m);
}
void oopDesc::set_mark_raw(HeapWord* mem, markWord m) {
@@ -73,12 +73,12 @@
}
markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) {
- uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(new_mark.value(), as_oop(), mark_offset_in_bytes(), old_mark.value());
+ uintptr_t v = HeapAccess<>::atomic_cmpxchg_at(as_oop(), mark_offset_in_bytes(), old_mark.value(), new_mark.value());
return markWord(v);
}
markWord oopDesc::cas_set_mark_raw(markWord new_mark, markWord old_mark, atomic_memory_order order) {
- return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
+ return Atomic::cmpxchg(&_mark, old_mark, new_mark, order);
}
void oopDesc::init_mark() {
@@ -110,9 +110,9 @@
// Workaround for non-const load_acquire parameter.
const volatile narrowKlass* addr = &_metadata._compressed_klass;
volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
- return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
+ return CompressedKlassPointers::decode(Atomic::load_acquire(xaddr));
} else {
- return OrderAccess::load_acquire(&_metadata._klass);
+ return Atomic::load_acquire(&_metadata._klass);
}
}
@@ -156,10 +156,10 @@
void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
CHECK_SET_KLASS(klass);
if (UseCompressedClassPointers) {
- OrderAccess::release_store(compressed_klass_addr(mem),
- CompressedKlassPointers::encode_not_null(klass));
+ Atomic::release_store(compressed_klass_addr(mem),
+ CompressedKlassPointers::encode_not_null(klass));
} else {
- OrderAccess::release_store(klass_addr(mem), klass);
+ Atomic::release_store(klass_addr(mem), klass);
}
}
@@ -356,7 +356,7 @@
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
- return (oop) OrderAccess::load_acquire(&_mark).decode_pointer();
+ return (oop) Atomic::load_acquire(&_mark).decode_pointer();
}
// The following method needs to be MT safe.
--- a/src/hotspot/share/oops/symbol.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/oops/symbol.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -281,7 +281,7 @@
} else if (refc == 0) {
return false; // dead, can't revive.
} else {
- found = Atomic::cmpxchg(old_value + 1, &_length_and_refcount, old_value);
+ found = Atomic::cmpxchg(&_length_and_refcount, old_value, old_value + 1);
if (found == old_value) {
return true; // successfully updated.
}
@@ -324,7 +324,7 @@
#endif
return;
} else {
- found = Atomic::cmpxchg(old_value - 1, &_length_and_refcount, old_value);
+ found = Atomic::cmpxchg(&_length_and_refcount, old_value, old_value - 1);
if (found == old_value) {
return; // successfully updated.
}
@@ -348,7 +348,7 @@
return;
} else {
int len = extract_length(old_value);
- found = Atomic::cmpxchg(pack_length_and_refcount(len, PERM_REFCOUNT), &_length_and_refcount, old_value);
+ found = Atomic::cmpxchg(&_length_and_refcount, old_value, pack_length_and_refcount(len, PERM_REFCOUNT));
if (found == old_value) {
return; // successfully updated.
}
--- a/src/hotspot/share/opto/parse2.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/opto/parse2.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -605,7 +605,7 @@
return PROB_FAIR;
}
float p = taken_cnt / total_cnt;
- return MIN2(MAX2(p, PROB_MIN), PROB_MAX);
+ return clamp(p, PROB_MIN, PROB_MAX);
}
static float if_cnt(float cnt) {
--- a/src/hotspot/share/opto/runtime.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/opto/runtime.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1659,7 +1659,7 @@
c->set_next(NULL);
head = _named_counters;
c->set_next(head);
- } while (Atomic::cmpxchg(c, &_named_counters, head) != head);
+ } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
return c;
}
--- a/src/hotspot/share/prims/jni.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/jni.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -273,8 +273,8 @@
_name = elementName;
uintx count = 0;
- while (Atomic::cmpxchg(1, &JNIHistogram_lock, 0) != 0) {
- while (OrderAccess::load_acquire(&JNIHistogram_lock) != 0) {
+ while (Atomic::cmpxchg(&JNIHistogram_lock, 0, 1) != 0) {
+ while (Atomic::load_acquire(&JNIHistogram_lock) != 0) {
count +=1;
if ( (WarnOnStalledSpinLock > 0)
&& (count % WarnOnStalledSpinLock == 0)) {
@@ -3233,7 +3233,7 @@
return false;
}
- if (Atomic::cmpxchg(1, &directBufferSupportInitializeStarted, 0) == 0) {
+ if (Atomic::cmpxchg(&directBufferSupportInitializeStarted, 0, 1) == 0) {
if (!lookupDirectBufferClasses(env)) {
directBufferSupportInitializeFailed = 1;
return false;
@@ -3689,7 +3689,7 @@
intptr_t *a = (intptr_t *) jni_functions();
intptr_t *b = (intptr_t *) new_jni_NativeInterface;
for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) {
- Atomic::store(*b++, a++);
+ Atomic::store(a++, *b++);
}
}
@@ -3811,9 +3811,9 @@
#if defined(ZERO) && defined(ASSERT)
{
jint a = 0xcafebabe;
- jint b = Atomic::xchg((jint) 0xdeadbeef, &a);
+ jint b = Atomic::xchg(&a, (jint) 0xdeadbeef);
void *c = &a;
- void *d = Atomic::xchg(&b, &c);
+ void *d = Atomic::xchg(&c, &b);
assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
assert(c == &b && d == &a, "Atomic::xchg() works");
}
@@ -3829,10 +3829,10 @@
// We use Atomic::xchg rather than Atomic::add/dec since on some platforms
// the add/dec implementations are dependent on whether we are running
// on a multiprocessor Atomic::xchg does not have this problem.
- if (Atomic::xchg(1, &vm_created) == 1) {
+ if (Atomic::xchg(&vm_created, 1) == 1) {
return JNI_EEXIST; // already created, or create attempt in progress
}
- if (Atomic::xchg(0, &safe_to_recreate_vm) == 0) {
+ if (Atomic::xchg(&safe_to_recreate_vm, 0) == 0) {
return JNI_ERR; // someone tried and failed and retry not allowed.
}
@@ -3916,7 +3916,7 @@
*(JNIEnv**)penv = 0;
// reset vm_created last to avoid race condition. Use OrderAccess to
// control both compiler and architectural-based reordering.
- OrderAccess::release_store(&vm_created, 0);
+ Atomic::release_store(&vm_created, 0);
}
// Flush stdout and stderr before exit.
--- a/src/hotspot/share/prims/jvm.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/jvm.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -233,8 +233,8 @@
_name = elementName;
uintx count = 0;
- while (Atomic::cmpxchg(1, &JVMHistogram_lock, 0) != 0) {
- while (OrderAccess::load_acquire(&JVMHistogram_lock) != 0) {
+ while (Atomic::cmpxchg(&JVMHistogram_lock, 0, 1) != 0) {
+ while (Atomic::load_acquire(&JVMHistogram_lock) != 0) {
count +=1;
if ( (WarnOnStalledSpinLock > 0)
&& (count % WarnOnStalledSpinLock == 0)) {
--- a/src/hotspot/share/prims/jvmtiEnvBase.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -255,11 +255,11 @@
}
JvmtiTagMap* tag_map_acquire() {
- return OrderAccess::load_acquire(&_tag_map);
+ return Atomic::load_acquire(&_tag_map);
}
void release_set_tag_map(JvmtiTagMap* tag_map) {
- OrderAccess::release_store(&_tag_map, tag_map);
+ Atomic::release_store(&_tag_map, tag_map);
}
// return true if event is enabled globally or for any thread
--- a/src/hotspot/share/prims/jvmtiImpl.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -724,13 +724,17 @@
NULL_CHECK(_jvf, false);
Method* method_oop = _jvf->method();
- if (method_oop->is_native()) {
- if (getting_receiver() && !method_oop->is_static()) {
- return true;
- } else {
- _result = JVMTI_ERROR_OPAQUE_FRAME;
+ if (getting_receiver()) {
+ if (method_oop->is_static()) {
+ _result = JVMTI_ERROR_INVALID_SLOT;
return false;
}
+ return true;
+ }
+
+ if (method_oop->is_native()) {
+ _result = JVMTI_ERROR_OPAQUE_FRAME;
+ return false;
}
if (!check_slot_type_no_lvt(_jvf)) {
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -121,7 +121,7 @@
void JvmtiRawMonitor::simple_enter(Thread* self) {
for (;;) {
- if (Atomic::replace_if_null(self, &_owner)) {
+ if (Atomic::replace_if_null(&_owner, self)) {
return;
}
@@ -133,7 +133,7 @@
node._next = _entry_list;
_entry_list = &node;
OrderAccess::fence();
- if (_owner == NULL && Atomic::replace_if_null(self, &_owner)) {
+ if (_owner == NULL && Atomic::replace_if_null(&_owner, self)) {
_entry_list = node._next;
RawMonitor_lock->unlock();
return;
@@ -147,7 +147,7 @@
void JvmtiRawMonitor::simple_exit(Thread* self) {
guarantee(_owner == self, "invariant");
- OrderAccess::release_store(&_owner, (Thread*)NULL);
+ Atomic::release_store(&_owner, (Thread*)NULL);
OrderAccess::fence();
if (_entry_list == NULL) {
return;
@@ -322,10 +322,10 @@
jt->SR_lock()->lock_without_safepoint_check();
}
// guarded by SR_lock to avoid racing with new external suspend requests.
- contended = Atomic::cmpxchg(jt, &_owner, (Thread*)NULL);
+ contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, jt);
jt->SR_lock()->unlock();
} else {
- contended = Atomic::cmpxchg(self, &_owner, (Thread*)NULL);
+ contended = Atomic::cmpxchg(&_owner, (Thread*)NULL, self);
}
if (contended == self) {
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -327,7 +327,7 @@
}
void ResolvedMethodTable::inc_dead_counter(size_t ndead) {
- size_t total = Atomic::add(ndead, &_uncleaned_items_count);
+ size_t total = Atomic::add(&_uncleaned_items_count, ndead);
log_trace(membername, table)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
_uncleaned_items_count, ndead, total);
--- a/src/hotspot/share/prims/unsafe.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/prims/unsafe.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -903,7 +903,7 @@
oop e = JNIHandles::resolve(e_h);
oop p = JNIHandles::resolve(obj);
assert_field_offset_sane(p, offset);
- oop res = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+ oop res = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
return JNIHandles::make_local(env, res);
} UNSAFE_END
@@ -911,10 +911,10 @@
oop p = JNIHandles::resolve(obj);
if (p == NULL) {
volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
- return RawAccess<>::atomic_cmpxchg(x, addr, e);
+ return RawAccess<>::atomic_cmpxchg(addr, e, x);
} else {
assert_field_offset_sane(p, offset);
- return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+ return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
}
} UNSAFE_END
@@ -922,10 +922,10 @@
oop p = JNIHandles::resolve(obj);
if (p == NULL) {
volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
- return RawAccess<>::atomic_cmpxchg(x, addr, e);
+ return RawAccess<>::atomic_cmpxchg(addr, e, x);
} else {
assert_field_offset_sane(p, offset);
- return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+ return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
}
} UNSAFE_END
@@ -934,7 +934,7 @@
oop e = JNIHandles::resolve(e_h);
oop p = JNIHandles::resolve(obj);
assert_field_offset_sane(p, offset);
- oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e);
+ oop ret = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x);
return ret == e;
} UNSAFE_END
@@ -942,10 +942,10 @@
oop p = JNIHandles::resolve(obj);
if (p == NULL) {
volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
- return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
+ return RawAccess<>::atomic_cmpxchg(addr, e, x) == e;
} else {
assert_field_offset_sane(p, offset);
- return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
+ return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x) == e;
}
} UNSAFE_END
@@ -953,10 +953,10 @@
oop p = JNIHandles::resolve(obj);
if (p == NULL) {
volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
- return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
+ return RawAccess<>::atomic_cmpxchg(addr, e, x) == e;
} else {
assert_field_offset_sane(p, offset);
- return HeapAccess<>::atomic_cmpxchg_at(x, p, (ptrdiff_t)offset, e) == e;
+ return HeapAccess<>::atomic_cmpxchg_at(p, (ptrdiff_t)offset, e, x) == e;
}
} UNSAFE_END
--- a/src/hotspot/share/runtime/arguments.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/arguments.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -622,6 +622,7 @@
{ "GCLockerInvokesConcurrent", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
{ "BindGCTaskThreadsToCPUs", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
{ "UseGCTaskAffinity", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
+ { "GCTaskTimeStampEntries", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
// These entries will generate build errors. Their purpose is to test the macros.
--- a/src/hotspot/share/runtime/atomic.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/atomic.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -34,6 +34,7 @@
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
@@ -48,6 +49,12 @@
memory_order_conservative = 8
};
+enum ScopedFenceType {
+ X_ACQUIRE
+ , RELEASE_X
+ , RELEASE_X_FENCE
+};
+
class Atomic : AllStatic {
public:
// Atomic operations on int64 types are not available on all 32-bit
@@ -72,8 +79,14 @@
// The type T must be either a pointer type convertible to or equal
// to D, an integral/enum type equal to D, or a type equal to D that
// is primitive convertible using PrimitiveConversions.
- template<typename T, typename D>
- inline static void store(T store_value, volatile D* dest);
+ template<typename D, typename T>
+ inline static void store(volatile D* dest, T store_value);
+
+ template <typename D, typename T>
+ inline static void release_store(volatile D* dest, T store_value);
+
+ template <typename D, typename T>
+ inline static void release_store_fence(volatile D* dest, T store_value);
// Atomically load from a location
// The type T must be either a pointer type, an integral/enum type,
@@ -81,15 +94,18 @@
template<typename T>
inline static T load(const volatile T* dest);
+ template <typename T>
+ inline static T load_acquire(const volatile T* dest);
+
// Atomically add to a location. Returns updated value. add*() provide:
// <fence> add-value-to-dest <membar StoreLoad|StoreStore>
- template<typename I, typename D>
- inline static D add(I add_value, D volatile* dest,
+ template<typename D, typename I>
+ inline static D add(D volatile* dest, I add_value,
atomic_memory_order order = memory_order_conservative);
- template<typename I, typename D>
- inline static D sub(I sub_value, D volatile* dest,
+ template<typename D, typename I>
+ inline static D sub(D volatile* dest, I sub_value,
atomic_memory_order order = memory_order_conservative);
// Atomically increment location. inc() provide:
@@ -116,8 +132,8 @@
// The type T must be either a pointer type convertible to or equal
// to D, an integral/enum type equal to D, or a type equal to D that
// is primitive convertible using PrimitiveConversions.
- template<typename T, typename D>
- inline static D xchg(T exchange_value, volatile D* dest,
+ template<typename D, typename T>
+ inline static D xchg(volatile D* dest, T exchange_value,
atomic_memory_order order = memory_order_conservative);
// Performs atomic compare of *dest and compare_value, and exchanges
@@ -125,10 +141,10 @@
// value of *dest. cmpxchg*() provide:
// <fence> compare-and-exchange <membar StoreLoad|StoreStore>
- template<typename T, typename D, typename U>
- inline static D cmpxchg(T exchange_value,
- D volatile* dest,
+ template<typename D, typename U, typename T>
+ inline static D cmpxchg(D volatile* dest,
U compare_value,
+ T exchange_value,
atomic_memory_order order = memory_order_conservative);
// Performs atomic compare of *dest and NULL, and replaces *dest
@@ -136,8 +152,8 @@
// the comparison succeeded and the exchange occurred. This is
// often used as part of lazy initialization, as a lock-free
// alternative to the Double-Checked Locking Pattern.
- template<typename T, typename D>
- inline static bool replace_if_null(T* value, D* volatile* dest,
+ template<typename D, typename T>
+ inline static bool replace_if_null(D* volatile* dest, T* value,
atomic_memory_order order = memory_order_conservative);
private:
@@ -152,7 +168,7 @@
// Dispatch handler for store. Provides type-based validity
// checking and limited conversions around calls to the platform-
// specific implementation layer provided by PlatformOp.
- template<typename T, typename D, typename PlatformOp, typename Enable = void>
+ template<typename D, typename T, typename PlatformOp, typename Enable = void>
struct StoreImpl;
// Platform-specific implementation of store. Support for sizes
@@ -200,11 +216,15 @@
// requires more for e.g. 64 bit loads, a specialization is required
template<size_t byte_size> struct PlatformLoad;
+ // Give platforms a variation point to specialize.
+ template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
+ template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
+
private:
// Dispatch handler for add. Provides type-based validity checking
// and limited conversions around calls to the platform-specific
// implementation layer provided by PlatformAdd.
- template<typename I, typename D, typename Enable = void>
+ template<typename D, typename I, typename Enable = void>
struct AddImpl;
// Platform-specific implementation of add. Support for sizes of 4
@@ -219,7 +239,7 @@
// - platform_add is an object of type PlatformAdd<sizeof(D)>.
//
// Then
- // platform_add(add_value, dest)
+ // platform_add(dest, add_value)
// must be a valid expression, returning a result convertible to D.
//
// No definition is provided; all platforms must explicitly define
@@ -239,12 +259,12 @@
// otherwise, addend is add_value.
//
// FetchAndAdd requires the derived class to provide
- // fetch_and_add(addend, dest)
+ // fetch_and_add(dest, addend)
// atomically adding addend to the value of dest, and returning the
// old value.
//
// AddAndFetch requires the derived class to provide
- // add_and_fetch(addend, dest)
+ // add_and_fetch(dest, addend)
// atomically adding addend to the value of dest, and returning the
// new value.
//
@@ -266,14 +286,14 @@
// function. No scaling of add_value is performed when D is a pointer
// type, so this function can be used to implement the support function
// required by AddAndFetch.
- template<typename Type, typename Fn, typename I, typename D>
- static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+ template<typename Type, typename Fn, typename D, typename I>
+ static D add_using_helper(Fn fn, D volatile* dest, I add_value);
// Dispatch handler for cmpxchg. Provides type-based validity
// checking and limited conversions around calls to the
// platform-specific implementation layer provided by
// PlatformCmpxchg.
- template<typename T, typename D, typename U, typename Enable = void>
+ template<typename D, typename U, typename T, typename Enable = void>
struct CmpxchgImpl;
// Platform-specific implementation of cmpxchg. Support for sizes
@@ -286,11 +306,11 @@
// - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
//
// Then
- // platform_cmpxchg(exchange_value, dest, compare_value, order)
+ // platform_cmpxchg(dest, compare_value, exchange_value, order)
// must be a valid expression, returning a result convertible to T.
//
// A default definition is provided, which declares a function template
- // T operator()(T, T volatile*, T, atomic_memory_order) const
+ // T operator()(T volatile*, T, T, atomic_memory_order) const
//
// For each required size, a platform must either provide an
// appropriate definition of that function, or must entirely
@@ -306,9 +326,9 @@
// helper function.
template<typename Type, typename Fn, typename T>
static T cmpxchg_using_helper(Fn fn,
- T exchange_value,
T volatile* dest,
- T compare_value);
+ T compare_value,
+ T exchange_value);
// Support platforms that do not provide Read-Modify-Write
// byte-level atomic access. To use, derive PlatformCmpxchg<1> from
@@ -321,7 +341,7 @@
// checking and limited conversions around calls to the
// platform-specific implementation layer provided by
// PlatformXchg.
- template<typename T, typename D, typename Enable = void>
+ template<typename D, typename T, typename Enable = void>
struct XchgImpl;
// Platform-specific implementation of xchg. Support for sizes
@@ -333,11 +353,11 @@
// - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
//
// Then
- // platform_xchg(exchange_value, dest)
+ // platform_xchg(dest, exchange_value)
// must be a valid expression, returning a result convertible to T.
//
// A default definition is provided, which declares a function template
- // T operator()(T, T volatile*, T, atomic_memory_order) const
+ // T operator()(T volatile*, T, atomic_memory_order) const
//
// For each required size, a platform must either provide an
// appropriate definition of that function, or must entirely
@@ -353,8 +373,8 @@
// helper function.
template<typename Type, typename Fn, typename T>
static T xchg_using_helper(Fn fn,
- T exchange_value,
- T volatile* dest);
+ T volatile* dest,
+ T exchange_value);
};
template<typename From, typename To>
@@ -430,9 +450,9 @@
PlatformOp,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
- void operator()(T new_value, T volatile* dest) const {
+ void operator()(T volatile* dest, T new_value) const {
// Forward to the platform handler for the size of T.
- PlatformOp()(new_value, dest);
+ PlatformOp()(dest, new_value);
}
};
@@ -441,16 +461,16 @@
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
-template<typename T, typename D, typename PlatformOp>
+template<typename D, typename T, typename PlatformOp>
struct Atomic::StoreImpl<
- T*, D*,
+ D*, T*,
PlatformOp,
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
- void operator()(T* new_value, D* volatile* dest) const {
+ void operator()(D* volatile* dest, T* new_value) const {
// Allow derived to base conversion, and adding cv-qualifiers.
D* value = new_value;
- PlatformOp()(value, dest);
+ PlatformOp()(dest, value);
}
};
@@ -466,12 +486,12 @@
PlatformOp,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
- void operator()(T new_value, T volatile* dest) const {
+ void operator()(T volatile* dest, T new_value) const {
typedef PrimitiveConversions::Translate<T> Translator;
typedef typename Translator::Decayed Decayed;
STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
- PlatformOp()(Translator::decay(new_value),
- reinterpret_cast<Decayed volatile*>(dest));
+ PlatformOp()(reinterpret_cast<Decayed volatile*>(dest),
+ Translator::decay(new_value));
}
};
@@ -484,8 +504,8 @@
template<size_t byte_size>
struct Atomic::PlatformStore {
template<typename T>
- void operator()(T new_value,
- T volatile* dest) const {
+ void operator()(T volatile* dest,
+ T new_value) const {
STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
(void)const_cast<T&>(*dest = new_value);
}
@@ -497,21 +517,21 @@
template<typename Derived>
struct Atomic::FetchAndAdd {
- template<typename I, typename D>
- D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<typename Derived>
struct Atomic::AddAndFetch {
- template<typename I, typename D>
- D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
+ template<typename D, typename I>
+ D operator()(D volatile* dest, I add_value, atomic_memory_order order) const;
};
template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
- Atomic::add(I(1), dest, order);
+ Atomic::add(dest, I(1), order);
}
template<typename D>
@@ -520,11 +540,11 @@
typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
// Assumes two's complement integer representation.
#pragma warning(suppress: 4146)
- Atomic::add(I(-1), dest, order);
+ Atomic::add(dest, I(-1), order);
}
-template<typename I, typename D>
-inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
+template<typename D, typename I>
+inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) {
STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
STATIC_ASSERT(IsIntegral<I>::value);
// If D is a pointer type, use [u]intptr_t as the addend type,
@@ -537,7 +557,7 @@
AddendType addend = sub_value;
// Assumes two's complement integer representation.
#pragma warning(suppress: 4146) // In case AddendType is not signed.
- return Atomic::add(-addend, dest, order);
+ return Atomic::add(dest, -addend, order);
}
// Define the class before including platform file, which may specialize
@@ -548,9 +568,9 @@
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
template<typename T>
- T operator()(T exchange_value,
- T volatile* dest,
+ T operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const;
};
@@ -559,9 +579,9 @@
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
template<typename T>
- T operator()(T exchange_value,
- T volatile* dest,
+ T operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const;
};
@@ -573,11 +593,37 @@
template<size_t byte_size>
struct Atomic::PlatformXchg {
template<typename T>
- T operator()(T exchange_value,
- T volatile* dest,
+ T operator()(T volatile* dest,
+ T exchange_value,
atomic_memory_order order) const;
};
+template <ScopedFenceType T>
+class ScopedFenceGeneral: public StackObj {
+ public:
+ void prefix() {}
+ void postfix() {}
+};
+
+// The following methods can be specialized using simple template specialization
+// in the platform-specific files for optimization purposes. Otherwise the
+// generalized variant is used.
+
+template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
+template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
+
+template <ScopedFenceType T>
+class ScopedFence : public ScopedFenceGeneral<T> {
+ void *const _field;
+ public:
+ ScopedFence(void *const field) : _field(field) { prefix(); }
+ ~ScopedFence() { postfix(); }
+ void prefix() { ScopedFenceGeneral<T>::prefix(); }
+ void postfix() { ScopedFenceGeneral<T>::postfix(); }
+};
+
// platform specific in-line definitions - must come before shared definitions
#include OS_CPU_HEADER(atomic)
@@ -594,94 +640,127 @@
return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}
-template<typename T, typename D>
-inline void Atomic::store(T store_value, volatile D* dest) {
- StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
+template<size_t byte_size, ScopedFenceType type>
+struct Atomic::PlatformOrderedLoad {
+ template <typename T>
+ T operator()(const volatile T* p) const {
+ ScopedFence<type> f((void*)p);
+ return Atomic::load(p);
+ }
+};
+
+template <typename T>
+inline T Atomic::load_acquire(const volatile T* p) {
+ return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
+}
+
+template<typename D, typename T>
+inline void Atomic::store(volatile D* dest, T store_value) {
+ StoreImpl<D, T, PlatformStore<sizeof(D)> >()(dest, store_value);
}
-template<typename I, typename D>
-inline D Atomic::add(I add_value, D volatile* dest,
- atomic_memory_order order) {
- return AddImpl<I, D>()(add_value, dest, order);
+template<size_t byte_size, ScopedFenceType type>
+struct Atomic::PlatformOrderedStore {
+ template <typename T>
+ void operator()(volatile T* p, T v) const {
+ ScopedFence<type> f((void*)p);
+ Atomic::store(p, v);
+ }
+};
+
+template <typename D, typename T>
+inline void Atomic::release_store(volatile D* p, T v) {
+ StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(p, v);
}
-template<typename I, typename D>
+template <typename D, typename T>
+inline void Atomic::release_store_fence(volatile D* p, T v) {
+ StoreImpl<D, T, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(p, v);
+}
+
+template<typename D, typename I>
+inline D Atomic::add(D volatile* dest, I add_value,
+ atomic_memory_order order) {
+ return AddImpl<D, I>()(dest, add_value, order);
+}
+
+template<typename D, typename I>
struct Atomic::AddImpl<
- I, D,
+ D, I,
typename EnableIf<IsIntegral<I>::value &&
IsIntegral<D>::value &&
(sizeof(I) <= sizeof(D)) &&
(IsSigned<I>::value == IsSigned<D>::value)>::type>
{
- D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
+ D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
D addend = add_value;
- return PlatformAdd<sizeof(D)>()(addend, dest, order);
+ return PlatformAdd<sizeof(D)>()(dest, addend, order);
}
};
-template<typename I, typename P>
+template<typename P, typename I>
struct Atomic::AddImpl<
- I, P*,
+ P*, I,
typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
- P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
+ P* operator()(P* volatile* dest, I add_value, atomic_memory_order order) const {
STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
typedef typename Conditional<IsSigned<I>::value,
intptr_t,
uintptr_t>::type CI;
CI addend = add_value;
- return PlatformAdd<sizeof(P*)>()(addend, dest, order);
+ return PlatformAdd<sizeof(P*)>()(dest, addend, order);
}
};
template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::FetchAndAdd<Derived>::operator()(D volatile* dest, I add_value,
atomic_memory_order order) const {
I addend = add_value;
// If D is a pointer type P*, scale by sizeof(P).
if (IsPointer<D>::value) {
addend *= sizeof(typename RemovePointer<D>::type);
}
- D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
+ D old = static_cast<const Derived*>(this)->fetch_and_add(dest, addend, order);
return old + add_value;
}
template<typename Derived>
-template<typename I, typename D>
-inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
+template<typename D, typename I>
+inline D Atomic::AddAndFetch<Derived>::operator()(D volatile* dest, I add_value,
atomic_memory_order order) const {
// If D is a pointer type P*, scale by sizeof(P).
if (IsPointer<D>::value) {
add_value *= sizeof(typename RemovePointer<D>::type);
}
- return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
+ return static_cast<const Derived*>(this)->add_and_fetch(dest, add_value, order);
}
-template<typename Type, typename Fn, typename I, typename D>
-inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+template<typename Type, typename Fn, typename D, typename I>
+inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) {
return PrimitiveConversions::cast<D>(
fn(PrimitiveConversions::cast<Type>(add_value),
reinterpret_cast<Type volatile*>(dest)));
}
-template<typename T, typename D, typename U>
-inline D Atomic::cmpxchg(T exchange_value,
- D volatile* dest,
+template<typename D, typename U, typename T>
+inline D Atomic::cmpxchg(D volatile* dest,
U compare_value,
+ T exchange_value,
atomic_memory_order order) {
- return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
+ return CmpxchgImpl<D, U, T>()(dest, compare_value, exchange_value, order);
}
-template<typename T, typename D>
-inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
+template<typename D, typename T>
+inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
atomic_memory_order order) {
// Presently using a trivial implementation in terms of cmpxchg.
// Consider adding platform support, to permit the use of compiler
// intrinsics like gcc's __sync_bool_compare_and_swap.
D* expected_null = NULL;
- return expected_null == cmpxchg(value, dest, expected_null, order);
+ return expected_null == cmpxchg(dest, expected_null, value, order);
}
// Handle cmpxchg for integral and enum types.
@@ -692,12 +771,12 @@
T, T, T,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
- T operator()(T exchange_value, T volatile* dest, T compare_value,
+ T operator()(T volatile* dest, T compare_value, T exchange_value,
atomic_memory_order order) const {
// Forward to the platform handler for the size of T.
- return PlatformCmpxchg<sizeof(T)>()(exchange_value,
- dest,
+ return PlatformCmpxchg<sizeof(T)>()(dest,
compare_value,
+ exchange_value,
order);
}
};
@@ -711,21 +790,21 @@
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
-template<typename T, typename D, typename U>
+template<typename D, typename U, typename T>
struct Atomic::CmpxchgImpl<
- T*, D*, U*,
+ D*, U*, T*,
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
IsSame<typename RemoveCV<D>::type,
typename RemoveCV<U>::type>::value>::type>
{
- D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
+ D* operator()(D* volatile* dest, U* compare_value, T* exchange_value,
atomic_memory_order order) const {
// Allow derived to base conversion, and adding cv-qualifiers.
D* new_value = exchange_value;
// Don't care what the CV qualifiers for compare_value are,
// but we need to match D* when calling platform support.
D* old_value = const_cast<D*>(compare_value);
- return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
+ return PlatformCmpxchg<sizeof(D*)>()(dest, old_value, new_value, order);
}
};
@@ -741,24 +820,24 @@
T, T, T,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
- T operator()(T exchange_value, T volatile* dest, T compare_value,
+ T operator()(T volatile* dest, T compare_value, T exchange_value,
atomic_memory_order order) const {
typedef PrimitiveConversions::Translate<T> Translator;
typedef typename Translator::Decayed Decayed;
STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
return Translator::recover(
- cmpxchg(Translator::decay(exchange_value),
- reinterpret_cast<Decayed volatile*>(dest),
+ cmpxchg(reinterpret_cast<Decayed volatile*>(dest),
Translator::decay(compare_value),
+ Translator::decay(exchange_value),
order));
}
};
template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
- T exchange_value,
T volatile* dest,
- T compare_value) {
+ T compare_value,
+ T exchange_value) {
STATIC_ASSERT(sizeof(Type) == sizeof(T));
return PrimitiveConversions::cast<T>(
fn(PrimitiveConversions::cast<Type>(exchange_value),
@@ -767,9 +846,9 @@
}
template<typename T>
-inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
- T volatile* dest,
+inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest,
T compare_value,
+ T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
uint8_t canon_exchange_value = exchange_value;
@@ -792,7 +871,7 @@
// ... except for the one byte we want to update
reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
- uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
+ uint32_t res = cmpxchg(aligned_dest, cur, new_value, order);
if (res == cur) break; // success
// at least one byte in the int changed value, so update
@@ -812,9 +891,9 @@
T, T,
typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
- T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
+ T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
// Forward to the platform handler for the size of T.
- return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
+ return PlatformXchg<sizeof(T)>()(dest, exchange_value, order);
}
};
@@ -823,15 +902,15 @@
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
-template<typename T, typename D>
+template<typename D, typename T>
struct Atomic::XchgImpl<
- T*, D*,
+ D*, T*,
typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
- D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
+ D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const {
// Allow derived to base conversion, and adding cv-qualifiers.
D* new_value = exchange_value;
- return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
+ return PlatformXchg<sizeof(D*)>()(dest, new_value, order);
}
};
@@ -847,30 +926,31 @@
T, T,
typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
- T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
+ T operator()(T volatile* dest, T exchange_value, atomic_memory_order order) const {
typedef PrimitiveConversions::Translate<T> Translator;
typedef typename Translator::Decayed Decayed;
STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
return Translator::recover(
- xchg(Translator::decay(exchange_value),
- reinterpret_cast<Decayed volatile*>(dest),
+ xchg(reinterpret_cast<Decayed volatile*>(dest),
+ Translator::decay(exchange_value),
order));
}
};
template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
- T exchange_value,
- T volatile* dest) {
+ T volatile* dest,
+ T exchange_value) {
STATIC_ASSERT(sizeof(Type) == sizeof(T));
+ // Notice the swapped order of arguments. Change when/if stubs are rewritten.
return PrimitiveConversions::cast<T>(
fn(PrimitiveConversions::cast<Type>(exchange_value),
reinterpret_cast<Type volatile*>(dest)));
}
-template<typename T, typename D>
-inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
- return XchgImpl<T, D>()(exchange_value, dest, order);
+template<typename D, typename T>
+inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) {
+ return XchgImpl<D, T>()(dest, exchange_value, order);
}
#endif // SHARE_RUNTIME_ATOMIC_HPP
--- a/src/hotspot/share/runtime/basicLock.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/basicLock.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -40,7 +40,7 @@
}
void set_displaced_header(markWord header) {
- Atomic::store(header, &_displaced_header);
+ Atomic::store(&_displaced_header, header);
}
void print_on(outputStream* st) const;
--- a/src/hotspot/share/runtime/deoptimization.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/deoptimization.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -869,7 +869,7 @@
static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
if (_singleton == NULL) {
BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
- if (!Atomic::replace_if_null(s, &_singleton)) {
+ if (!Atomic::replace_if_null(&_singleton, s)) {
delete s;
}
}
@@ -923,7 +923,7 @@
static BooleanBoxCache* singleton(Thread* thread) {
if (_singleton == NULL) {
BooleanBoxCache* s = new BooleanBoxCache(thread);
- if (!Atomic::replace_if_null(s, &_singleton)) {
+ if (!Atomic::replace_if_null(&_singleton, s)) {
delete s;
}
}
--- a/src/hotspot/share/runtime/globals.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/globals.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -1408,7 +1408,7 @@
product(intx, AllocatePrefetchDistance, -1, \
"Distance to prefetch ahead of allocation pointer. " \
"-1: use system-specific value (automatically determined") \
- constraint(AllocatePrefetchDistanceConstraintFunc, AfterMemoryInit)\
+ constraint(AllocatePrefetchDistanceConstraintFunc,AfterMemoryInit)\
\
product(intx, AllocatePrefetchLines, 3, \
"Number of lines to prefetch ahead of array allocation pointer") \
--- a/src/hotspot/share/runtime/handshake.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/handshake.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -294,7 +294,7 @@
if (!_semaphore.trywait()) {
_semaphore.wait_with_safepoint_check(thread);
}
- HandshakeOperation* op = OrderAccess::load_acquire(&_operation);
+ HandshakeOperation* op = Atomic::load_acquire(&_operation);
if (op != NULL) {
HandleMark hm(thread);
CautiouslyPreserveExceptionMark pem(thread);
--- a/src/hotspot/share/runtime/init.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/init.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -195,7 +195,7 @@
static volatile bool _init_completed = false;
bool is_init_completed() {
- return OrderAccess::load_acquire(&_init_completed);
+ return Atomic::load_acquire(&_init_completed);
}
void wait_init_completed() {
@@ -208,6 +208,6 @@
void set_init_completed() {
assert(Universe::is_fully_initialized(), "Should have completed initialization");
MonitorLocker ml(InitCompleted_lock, Monitor::_no_safepoint_check_flag);
- OrderAccess::release_store(&_init_completed, true);
+ Atomic::release_store(&_init_completed, true);
ml.notify_all();
}
--- a/src/hotspot/share/runtime/interfaceSupport.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -87,8 +87,8 @@
_name = elementName;
uintx count = 0;
- while (Atomic::cmpxchg(1, &RuntimeHistogram_lock, 0) != 0) {
- while (OrderAccess::load_acquire(&RuntimeHistogram_lock) != 0) {
+ while (Atomic::cmpxchg(&RuntimeHistogram_lock, 0, 1) != 0) {
+ while (Atomic::load_acquire(&RuntimeHistogram_lock) != 0) {
count +=1;
if ( (WarnOnStalledSpinLock > 0)
&& (count % WarnOnStalledSpinLock == 0)) {
--- a/src/hotspot/share/runtime/objectMonitor.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/objectMonitor.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -245,7 +245,7 @@
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
Thread * const Self = THREAD;
- void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
+ void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
if (cur == NULL) {
assert(_recursions == 0, "invariant");
return;
@@ -403,7 +403,7 @@
int ObjectMonitor::TryLock(Thread * Self) {
void * own = _owner;
if (own != NULL) return 0;
- if (Atomic::replace_if_null(Self, &_owner)) {
+ if (Atomic::replace_if_null(&_owner, Self)) {
assert(_recursions == 0, "invariant");
return 1;
}
@@ -480,7 +480,7 @@
ObjectWaiter * nxt;
for (;;) {
node._next = nxt = _cxq;
- if (Atomic::cmpxchg(&node, &_cxq, nxt) == nxt) break;
+ if (Atomic::cmpxchg(&_cxq, nxt, &node) == nxt) break;
// Interference - the CAS failed because _cxq changed. Just retry.
// As an optional optimization we retry the lock.
@@ -518,7 +518,7 @@
if (nxt == NULL && _EntryList == NULL) {
// Try to assume the role of responsible thread for the monitor.
// CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
- Atomic::replace_if_null(Self, &_Responsible);
+ Atomic::replace_if_null(&_Responsible, Self);
}
// The lock might have been released while this thread was occupied queueing
@@ -773,7 +773,7 @@
ObjectWaiter * v = _cxq;
assert(v != NULL, "invariant");
- if (v != SelfNode || Atomic::cmpxchg(SelfNode->_next, &_cxq, v) != v) {
+ if (v != SelfNode || Atomic::cmpxchg(&_cxq, v, SelfNode->_next) != v) {
// The CAS above can fail from interference IFF a "RAT" arrived.
// In that case Self must be in the interior and can no longer be
// at the head of cxq.
@@ -916,8 +916,8 @@
// release semantics: prior loads and stores from within the critical section
// must not float (reorder) past the following store that drops the lock.
- OrderAccess::release_store(&_owner, (void*)NULL); // drop the lock
- OrderAccess::storeload(); // See if we need to wake a successor
+ Atomic::release_store(&_owner, (void*)NULL); // drop the lock
+ OrderAccess::storeload(); // See if we need to wake a successor
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
return;
}
@@ -959,7 +959,7 @@
// to reacquire the lock the responsibility for ensuring succession
// falls to the new owner.
//
- if (!Atomic::replace_if_null(THREAD, &_owner)) {
+ if (!Atomic::replace_if_null(&_owner, THREAD)) {
return;
}
@@ -995,7 +995,7 @@
// The following loop is tantamount to: w = swap(&cxq, NULL)
for (;;) {
assert(w != NULL, "Invariant");
- ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
+ ObjectWaiter * u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL);
if (u == w) break;
w = u;
}
@@ -1092,7 +1092,7 @@
Wakee = NULL;
// Drop the lock
- OrderAccess::release_store(&_owner, (void*)NULL);
+ Atomic::release_store(&_owner, (void*)NULL);
OrderAccess::fence(); // ST _owner vs LD in unpark()
DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
@@ -1459,7 +1459,7 @@
for (;;) {
ObjectWaiter * front = _cxq;
iterator->_next = front;
- if (Atomic::cmpxchg(iterator, &_cxq, front) == front) {
+ if (Atomic::cmpxchg(&_cxq, front, iterator) == front) {
break;
}
}
@@ -1680,7 +1680,7 @@
Thread * ox = (Thread *) _owner;
if (ox == NULL) {
- ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
+ ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
if (ox == NULL) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -44,7 +44,7 @@
}
inline void ObjectMonitor::set_header(markWord hdr) {
- Atomic::store(hdr, &_header);
+ Atomic::store(&_header, hdr);
}
inline jint ObjectMonitor::waiters() const {
@@ -63,7 +63,7 @@
assert(_object != NULL, "must be non-NULL");
assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));
- Atomic::store(markWord::zero(), &_header);
+ Atomic::store(&_header, markWord::zero());
_object = NULL;
}
--- a/src/hotspot/share/runtime/orderAccess.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/orderAccess.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -26,7 +26,6 @@
#define SHARE_RUNTIME_ORDERACCESS_HPP
#include "memory/allocation.hpp"
-#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"
// Memory Access Ordering Model
@@ -231,30 +230,7 @@
// order. If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
-enum ScopedFenceType {
- X_ACQUIRE
- , RELEASE_X
- , RELEASE_X_FENCE
-};
-
-template <ScopedFenceType T>
-class ScopedFenceGeneral: public StackObj {
- public:
- void prefix() {}
- void postfix() {}
-};
-
-template <ScopedFenceType T>
-class ScopedFence : public ScopedFenceGeneral<T> {
- void *const _field;
- public:
- ScopedFence(void *const field) : _field(field) { prefix(); }
- ~ScopedFence() { postfix(); }
- void prefix() { ScopedFenceGeneral<T>::prefix(); }
- void postfix() { ScopedFenceGeneral<T>::postfix(); }
-};
-
-class OrderAccess : private Atomic {
+class OrderAccess : public AllStatic {
public:
// barriers
static void loadload();
@@ -267,85 +243,13 @@
static void fence();
static void cross_modify_fence();
-
- template <typename T>
- static T load_acquire(const volatile T* p);
-
- template <typename T, typename D>
- static void release_store(volatile D* p, T v);
-
- template <typename T, typename D>
- static void release_store_fence(volatile D* p, T v);
-
- private:
+private:
// This is a helper that invokes the StubRoutines::fence_entry()
// routine if it exists, It should only be used by platforms that
// don't have another way to do the inline assembly.
static void StubRoutines_fence();
-
- // Give platforms a variation point to specialize.
- template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
- template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;
-
- template<typename FieldType, ScopedFenceType FenceType>
- static void ordered_store(volatile FieldType* p, FieldType v);
-
- template<typename FieldType, ScopedFenceType FenceType>
- static FieldType ordered_load(const volatile FieldType* p);
-};
-
-// The following methods can be specialized using simple template specialization
-// in the platform specific files for optimization purposes. Otherwise the
-// generalized variant is used.
-
-template<size_t byte_size, ScopedFenceType type>
-struct OrderAccess::PlatformOrderedStore {
- template <typename T>
- void operator()(T v, volatile T* p) const {
- ordered_store<T, type>(p, v);
- }
-};
-
-template<size_t byte_size, ScopedFenceType type>
-struct OrderAccess::PlatformOrderedLoad {
- template <typename T>
- T operator()(const volatile T* p) const {
- return ordered_load<T, type>(p);
- }
};
#include OS_CPU_HEADER(orderAccess)
-template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
-
-template <typename FieldType, ScopedFenceType FenceType>
-inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
- ScopedFence<FenceType> f((void*)p);
- Atomic::store(v, p);
-}
-
-template <typename FieldType, ScopedFenceType FenceType>
-inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
- ScopedFence<FenceType> f((void*)p);
- return Atomic::load(p);
-}
-
-template <typename T>
-inline T OrderAccess::load_acquire(const volatile T* p) {
- return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
-}
-
-template <typename T, typename D>
-inline void OrderAccess::release_store(volatile D* p, T v) {
- StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
-}
-
-template <typename T, typename D>
-inline void OrderAccess::release_store_fence(volatile D* p, T v) {
- StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
-}
#endif // SHARE_RUNTIME_ORDERACCESS_HPP
--- a/src/hotspot/share/runtime/os.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/os.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -668,7 +668,7 @@
if ((cur_malloc_words + words) > MallocMaxTestWords) {
return true;
}
- Atomic::add(words, &cur_malloc_words);
+ Atomic::add(&cur_malloc_words, words);
}
return false;
}
@@ -855,7 +855,7 @@
while (true) {
unsigned int seed = _rand_seed;
unsigned int rand = random_helper(seed);
- if (Atomic::cmpxchg(rand, &_rand_seed, seed) == seed) {
+ if (Atomic::cmpxchg(&_rand_seed, seed, rand) == seed) {
return static_cast<int>(rand);
}
}
@@ -1804,7 +1804,7 @@
os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
os::SuspendResume::State to)
{
- os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
+ os::SuspendResume::State result = Atomic::cmpxchg(&_state, from, to);
if (result == from) {
// success
return to;
--- a/src/hotspot/share/runtime/perfMemory.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/perfMemory.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -156,7 +156,7 @@
_prologue->overflow = 0;
_prologue->mod_time_stamp = 0;
- OrderAccess::release_store(&_initialized, 1);
+ Atomic::release_store(&_initialized, 1);
}
void PerfMemory::destroy() {
@@ -269,5 +269,5 @@
}
bool PerfMemory::is_initialized() {
- return OrderAccess::load_acquire(&_initialized) != 0;
+ return Atomic::load_acquire(&_initialized) != 0;
}
--- a/src/hotspot/share/runtime/safepoint.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/safepoint.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -328,7 +328,7 @@
assert((_safepoint_counter & 0x1) == 0, "must be even");
// The store to _safepoint_counter must happen after any stores in arming.
- OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
+ Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);
// We are synchronizing
OrderAccess::storestore(); // Ordered with _safepoint_counter
@@ -482,7 +482,7 @@
// Set the next dormant (even) safepoint id.
assert((_safepoint_counter & 0x1) == 1, "must be odd");
- OrderAccess::release_store(&_safepoint_counter, _safepoint_counter + 1);
+ Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1);
OrderAccess::fence(); // Keep the local state from floating up.
@@ -968,15 +968,15 @@
}
uint64_t ThreadSafepointState::get_safepoint_id() const {
- return OrderAccess::load_acquire(&_safepoint_id);
+ return Atomic::load_acquire(&_safepoint_id);
}
void ThreadSafepointState::reset_safepoint_id() {
- OrderAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
+ Atomic::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter);
}
void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) {
- OrderAccess::release_store(&_safepoint_id, safepoint_id);
+ Atomic::release_store(&_safepoint_id, safepoint_id);
}
void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) {
--- a/src/hotspot/share/runtime/synchronizer.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/synchronizer.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -240,7 +240,7 @@
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markWord::unused_mark());
- if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
+ if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
assert(m->_recursions == 0, "invariant");
return true;
}
@@ -694,35 +694,39 @@
// object should remain ineligible for biased locking
assert(!mark.has_bias_pattern(), "invariant");
- if (mark.is_neutral()) {
- hash = mark.hash(); // this is a normal header
- if (hash != 0) { // if it has hash, just return it
+ if (mark.is_neutral()) { // if this is a normal header
+ hash = mark.hash();
+ if (hash != 0) { // if it has a hash, just return it
return hash;
}
- hash = get_next_hash(self, obj); // allocate a new hash code
- temp = mark.copy_set_hash(hash); // merge the hash code into header
- // use (machine word version) atomic operation to install the hash
+ hash = get_next_hash(self, obj); // get a new hash
+ temp = mark.copy_set_hash(hash); // merge the hash into header
+ // try to install the hash
test = obj->cas_set_mark(temp, mark);
- if (test == mark) {
+ if (test == mark) { // if the hash was installed, return it
return hash;
}
- // If atomic operation failed, we must inflate the header
- // into heavy weight monitor. We could add more code here
- // for fast path, but it does not worth the complexity.
+ // Failed to install the hash. It could be that another thread
+ // installed the hash just before our attempt or inflation has
+ // occurred or... so we fall thru to inflate the monitor for
+ // stability and then install the hash.
} else if (mark.has_monitor()) {
monitor = mark.monitor();
temp = monitor->header();
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
hash = temp.hash();
- if (hash != 0) {
+ if (hash != 0) { // if it has a hash, just return it
return hash;
}
- // Skip to the following code to reduce code size
+ // Fall thru so we only have one place that installs the hash in
+ // the ObjectMonitor.
} else if (self->is_lock_owned((address)mark.locker())) {
- temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
+ // This is a stack lock owned by the calling thread so fetch the
+ // displaced markWord from the BasicLock on the stack.
+ temp = mark.displaced_mark_helper();
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
- hash = temp.hash(); // by current thread, check if the displaced
- if (hash != 0) { // header contains hash code
+ hash = temp.hash();
+ if (hash != 0) { // if it has a hash, just return it
return hash;
}
// WARNING:
@@ -735,29 +739,30 @@
// may not propagate to other threads correctly.
}
- // Inflate the monitor to set hash code
+ // Inflate the monitor to set the hash.
monitor = inflate(self, obj, inflate_cause_hash_code);
- // Load displaced header and check it has hash code
+ // Load ObjectMonitor's header/dmw field and see if it has a hash.
mark = monitor->header();
assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
hash = mark.hash();
- if (hash == 0) {
- hash = get_next_hash(self, obj);
- temp = mark.copy_set_hash(hash); // merge hash code into header
+ if (hash == 0) { // if it does not have a hash
+ hash = get_next_hash(self, obj); // get a new hash
+ temp = mark.copy_set_hash(hash); // merge the hash into header
assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
- uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
+ uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
test = markWord(v);
if (test != mark) {
- // The only non-deflation update to the ObjectMonitor's
- // header/dmw field is to merge in the hash code. If someone
- // adds a new usage of the header/dmw field, please update
- // this code.
+ // The attempt to update the ObjectMonitor's header/dmw field
+ // did not work. This can happen if another thread managed to
+ // merge in the hash just before our cmpxchg().
+ // If we add any new usages of the header/dmw field, this code
+ // will need to be updated.
hash = test.hash();
assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
- assert(hash != 0, "Trivial unexpected object/monitor header usage.");
+ assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
}
}
- // We finally get the hash
+ // We finally get the hash.
return hash;
}
@@ -884,7 +889,7 @@
// Visitors ...
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
- PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
+ PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
for (int i = _BLOCKSIZE - 1; i > 0; i--) {
@@ -993,7 +998,7 @@
// of active monitors passes the specified threshold.
// TODO: assert thread state is reasonable
- if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
+ if (ForceMonitorScavenge == 0 && Atomic::xchg(&ForceMonitorScavenge, 1) == 0) {
// Induce a 'null' safepoint to scavenge monitors
// Must VM_Operation instance be heap allocated as the op will be enqueue and posted
// to the VMthread and have a lifespan longer than that of this activation record.
@@ -1113,7 +1118,7 @@
temp[0]._next_om = g_block_list;
// There are lock-free uses of g_block_list so make sure that
// the previous stores happen before we update g_block_list.
- OrderAccess::release_store(&g_block_list, temp);
+ Atomic::release_store(&g_block_list, temp);
// Add the new string of ObjectMonitors to the global free list
temp[_BLOCKSIZE - 1]._next_om = g_free_list;
@@ -2164,7 +2169,7 @@
// the list of extant blocks without taking a lock.
int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
- PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
+ PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
while (block != NULL) {
assert(block->object() == CHAINMARKER, "must be a block header");
if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
--- a/src/hotspot/share/runtime/thread.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/thread.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -861,7 +861,7 @@
bool Thread::claim_par_threads_do(uintx claim_token) {
uintx token = _threads_do_token;
if (token != claim_token) {
- uintx res = Atomic::cmpxchg(claim_token, &_threads_do_token, token);
+ uintx res = Atomic::cmpxchg(&_threads_do_token, token, claim_token);
if (res == token) {
return true;
}
@@ -1269,7 +1269,7 @@
NonJavaThread::Iterator::Iterator() :
_protect_enter(_the_list._protect.enter()),
- _current(OrderAccess::load_acquire(&_the_list._head))
+ _current(Atomic::load_acquire(&_the_list._head))
{}
NonJavaThread::Iterator::~Iterator() {
@@ -1278,7 +1278,7 @@
void NonJavaThread::Iterator::step() {
assert(!end(), "precondition");
- _current = OrderAccess::load_acquire(&_current->_next);
+ _current = Atomic::load_acquire(&_current->_next);
}
NonJavaThread::NonJavaThread() : Thread(), _next(NULL) {
@@ -1291,8 +1291,8 @@
MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag);
// Initialize BarrierSet-related data before adding to list.
BarrierSet::barrier_set()->on_thread_attach(this);
- OrderAccess::release_store(&_next, _the_list._head);
- OrderAccess::release_store(&_the_list._head, this);
+ Atomic::release_store(&_next, _the_list._head);
+ Atomic::release_store(&_the_list._head, this);
}
void NonJavaThread::remove_from_the_list() {
@@ -4875,7 +4875,7 @@
typedef volatile int SpinLockT;
void Thread::SpinAcquire(volatile int * adr, const char * LockName) {
- if (Atomic::cmpxchg (1, adr, 0) == 0) {
+ if (Atomic::cmpxchg(adr, 0, 1) == 0) {
return; // normal fast-path return
}
@@ -4896,7 +4896,7 @@
SpinPause();
}
}
- if (Atomic::cmpxchg(1, adr, 0) == 0) return;
+ if (Atomic::cmpxchg(adr, 0, 1) == 0) return;
}
}
@@ -4968,9 +4968,9 @@
const intptr_t LOCKBIT = 1;
void Thread::muxAcquire(volatile intptr_t * Lock, const char * LockName) {
- intptr_t w = Atomic::cmpxchg(LOCKBIT, Lock, (intptr_t)0);
+ intptr_t w = Atomic::cmpxchg(Lock, (intptr_t)0, LOCKBIT);
if (w == 0) return;
- if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
+ if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
return;
}
@@ -4982,7 +4982,7 @@
// Optional spin phase: spin-then-park strategy
while (--its >= 0) {
w = *Lock;
- if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
+ if ((w & LOCKBIT) == 0 && Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
return;
}
}
@@ -4995,7 +4995,7 @@
for (;;) {
w = *Lock;
if ((w & LOCKBIT) == 0) {
- if (Atomic::cmpxchg(w|LOCKBIT, Lock, w) == w) {
+ if (Atomic::cmpxchg(Lock, w, w|LOCKBIT) == w) {
Self->OnList = 0; // hygiene - allows stronger asserts
return;
}
@@ -5003,7 +5003,7 @@
}
assert(w & LOCKBIT, "invariant");
Self->ListNext = (ParkEvent *) (w & ~LOCKBIT);
- if (Atomic::cmpxchg(intptr_t(Self)|LOCKBIT, Lock, w) == w) break;
+ if (Atomic::cmpxchg(Lock, w, intptr_t(Self)|LOCKBIT) == w) break;
}
while (Self->OnList != 0) {
@@ -5039,7 +5039,7 @@
// store (CAS) to the lock-word that releases the lock becomes globally visible.
void Thread::muxRelease(volatile intptr_t * Lock) {
for (;;) {
- const intptr_t w = Atomic::cmpxchg((intptr_t)0, Lock, LOCKBIT);
+ const intptr_t w = Atomic::cmpxchg(Lock, LOCKBIT, (intptr_t)0);
assert(w & LOCKBIT, "invariant");
if (w == LOCKBIT) return;
ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
@@ -5050,7 +5050,7 @@
// The following CAS() releases the lock and pops the head element.
// The CAS() also ratifies the previously fetched lock-word value.
- if (Atomic::cmpxchg(intptr_t(nxt), Lock, w) != w) {
+ if (Atomic::cmpxchg(Lock, w, intptr_t(nxt)) != w) {
continue;
}
List->OnList = 0;
--- a/src/hotspot/share/runtime/thread.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/thread.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -37,14 +37,14 @@
do {
flags = _suspend_flags;
}
- while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags);
+ while (Atomic::cmpxchg(&_suspend_flags, flags, (flags | f)) != flags);
}
inline void Thread::clear_suspend_flag(SuspendFlags f) {
uint32_t flags;
do {
flags = _suspend_flags;
}
- while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags);
+ while (Atomic::cmpxchg(&_suspend_flags, flags, (flags & ~f)) != flags);
}
inline void Thread::set_has_async_exception() {
@@ -67,7 +67,7 @@
}
inline jlong Thread::cooked_allocated_bytes() {
- jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
+ jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes);
if (UseTLAB) {
size_t used_bytes = tlab().used_bytes();
if (used_bytes <= ThreadLocalAllocBuffer::max_size_in_bytes()) {
@@ -83,15 +83,15 @@
}
inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) {
- return (ThreadsList*)Atomic::cmpxchg(exchange_value, &_threads_hazard_ptr, compare_value);
+ return (ThreadsList*)Atomic::cmpxchg(&_threads_hazard_ptr, compare_value, exchange_value);
}
inline ThreadsList* Thread::get_threads_hazard_ptr() {
- return (ThreadsList*)OrderAccess::load_acquire(&_threads_hazard_ptr);
+ return (ThreadsList*)Atomic::load_acquire(&_threads_hazard_ptr);
}
inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) {
- OrderAccess::release_store_fence(&_threads_hazard_ptr, new_list);
+ Atomic::release_store_fence(&_threads_hazard_ptr, new_list);
}
inline void JavaThread::set_ext_suspended() {
@@ -118,7 +118,7 @@
#if defined(PPC64) || defined (AARCH64)
// Use membars when accessing volatile _thread_state. See
// Threads::create_vm() for size checks.
- return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
+ return (JavaThreadState) Atomic::load_acquire((volatile jint*)&_thread_state);
#else
return _thread_state;
#endif
@@ -128,7 +128,7 @@
#if defined(PPC64) || defined (AARCH64)
// Use membars when accessing volatile _thread_state. See
// Threads::create_vm() for size checks.
- OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
+ Atomic::release_store((volatile jint*)&_thread_state, (jint)s);
#else
_thread_state = s;
#endif
@@ -200,7 +200,7 @@
// The release make sure this store is done after storing the handshake
// operation or global state
inline void JavaThread::set_polling_page_release(void* poll_value) {
- OrderAccess::release_store(polling_page_addr(), poll_value);
+ Atomic::release_store(polling_page_addr(), poll_value);
}
// Caller is responsible for using a memory barrier if needed.
@@ -211,14 +211,14 @@
// The aqcquire make sure reading of polling page is done before
// the reading the handshake operation or the global state
inline volatile void* JavaThread::get_polling_page() {
- return OrderAccess::load_acquire(polling_page_addr());
+ return Atomic::load_acquire(polling_page_addr());
}
inline bool JavaThread::is_exiting() const {
// Use load-acquire so that setting of _terminated by
// JavaThread::exit() is seen more quickly.
TerminatedTypes l_terminated = (TerminatedTypes)
- OrderAccess::load_acquire((volatile jint *) &_terminated);
+ Atomic::load_acquire((volatile jint *) &_terminated);
return l_terminated == _thread_exiting || check_is_terminated(l_terminated);
}
@@ -226,19 +226,19 @@
// Use load-acquire so that setting of _terminated by
// JavaThread::exit() is seen more quickly.
TerminatedTypes l_terminated = (TerminatedTypes)
- OrderAccess::load_acquire((volatile jint *) &_terminated);
+ Atomic::load_acquire((volatile jint *) &_terminated);
return check_is_terminated(l_terminated);
}
inline void JavaThread::set_terminated(TerminatedTypes t) {
// use release-store so the setting of _terminated is seen more quickly
- OrderAccess::release_store((volatile jint *) &_terminated, (jint) t);
+ Atomic::release_store((volatile jint *) &_terminated, (jint) t);
}
// special for Threads::remove() which is static:
inline void JavaThread::set_terminated_value() {
// use release-store so the setting of _terminated is seen more quickly
- OrderAccess::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
+ Atomic::release_store((volatile jint *) &_terminated, (jint) _thread_terminated);
}
// Allow tracking of class initialization monitor use
--- a/src/hotspot/share/runtime/threadHeapSampler.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/threadHeapSampler.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -139,9 +139,9 @@
}
int ThreadHeapSampler::get_sampling_interval() {
- return OrderAccess::load_acquire(&_sampling_interval);
+ return Atomic::load_acquire(&_sampling_interval);
}
void ThreadHeapSampler::set_sampling_interval(int sampling_interval) {
- OrderAccess::release_store(&_sampling_interval, sampling_interval);
+ Atomic::release_store(&_sampling_interval, sampling_interval);
}
--- a/src/hotspot/share/runtime/threadSMR.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/threadSMR.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -134,7 +134,7 @@
// 'inline' functions first so the definitions are before first use:
inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) {
- Atomic::add(add_value, &_deleted_thread_times);
+ Atomic::add(&_deleted_thread_times, add_value);
}
inline void ThreadsSMRSupport::inc_deleted_thread_cnt() {
@@ -156,7 +156,7 @@
// No need to update max value so we're done.
break;
}
- if (Atomic::cmpxchg(new_value, &_deleted_thread_time_max, cur_value) == cur_value) {
+ if (Atomic::cmpxchg(&_deleted_thread_time_max, cur_value, new_value) == cur_value) {
// Updated max value so we're done. Otherwise try it all again.
break;
}
@@ -170,7 +170,7 @@
}
inline ThreadsList* ThreadsSMRSupport::xchg_java_thread_list(ThreadsList* new_list) {
- return (ThreadsList*)Atomic::xchg(new_list, &_java_thread_list);
+ return (ThreadsList*)Atomic::xchg(&_java_thread_list, new_list);
}
// Hash table of pointers found by a scan. Used for collecting hazard
@@ -779,7 +779,7 @@
bool ThreadsSMRSupport::delete_notify() {
// Use load_acquire() in order to see any updates to _delete_notify
// earlier than when delete_lock is grabbed.
- return (OrderAccess::load_acquire(&_delete_notify) != 0);
+ return (Atomic::load_acquire(&_delete_notify) != 0);
}
// Safely free a ThreadsList after a Threads::add() or Threads::remove().
--- a/src/hotspot/share/runtime/threadSMR.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/threadSMR.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -56,7 +56,7 @@
// they are called by public inline update_tlh_stats() below:
inline void ThreadsSMRSupport::add_tlh_times(uint add_value) {
- Atomic::add(add_value, &_tlh_times);
+ Atomic::add(&_tlh_times, add_value);
}
inline void ThreadsSMRSupport::inc_tlh_cnt() {
@@ -70,7 +70,7 @@
// No need to update max value so we're done.
break;
}
- if (Atomic::cmpxchg(new_value, &_tlh_time_max, cur_value) == cur_value) {
+ if (Atomic::cmpxchg(&_tlh_time_max, cur_value, new_value) == cur_value) {
// Updated max value so we're done. Otherwise try it all again.
break;
}
@@ -78,7 +78,7 @@
}
inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() {
- return (ThreadsList*)OrderAccess::load_acquire(&_java_thread_list);
+ return (ThreadsList*)Atomic::load_acquire(&_java_thread_list);
}
inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) {
--- a/src/hotspot/share/runtime/vmThread.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/runtime/vmThread.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -203,16 +203,16 @@
}
bool VMOperationTimeoutTask::is_armed() {
- return OrderAccess::load_acquire(&_armed) != 0;
+ return Atomic::load_acquire(&_armed) != 0;
}
void VMOperationTimeoutTask::arm() {
_arm_time = os::javaTimeMillis();
- OrderAccess::release_store_fence(&_armed, 1);
+ Atomic::release_store_fence(&_armed, 1);
}
void VMOperationTimeoutTask::disarm() {
- OrderAccess::release_store_fence(&_armed, 0);
+ Atomic::release_store_fence(&_armed, 0);
}
//------------------------------------------------------------------------------------------------------------------
--- a/src/hotspot/share/services/attachListener.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/attachListener.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -86,7 +86,7 @@
public:
static void set_state(AttachListenerState new_state) {
- Atomic::store(new_state, &_state);
+ Atomic::store(&_state, new_state);
}
static AttachListenerState get_state() {
@@ -95,7 +95,7 @@
static AttachListenerState transit_state(AttachListenerState new_state,
AttachListenerState cmp_state) {
- return Atomic::cmpxchg(new_state, &_state, cmp_state);
+ return Atomic::cmpxchg(&_state, cmp_state, new_state);
}
static bool is_initialized() {
@@ -103,7 +103,7 @@
}
static void set_initialized() {
- Atomic::store(AL_INITIALIZED, &_state);
+ Atomic::store(&_state, AL_INITIALIZED);
}
// indicates if this VM supports attach-on-demand
--- a/src/hotspot/share/services/mallocSiteTable.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/mallocSiteTable.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -128,7 +128,7 @@
if (entry == NULL) return NULL;
// swap in the head
- if (Atomic::replace_if_null(entry, &_table[index])) {
+ if (Atomic::replace_if_null(&_table[index], entry)) {
return entry->data();
}
@@ -229,7 +229,7 @@
do {
val = *_lock;
target = _MAGIC_ + *_lock;
- } while (Atomic::cmpxchg(target, _lock, val) != val);
+ } while (Atomic::cmpxchg(_lock, val, target) != val);
// wait for all readers to exit
while (*_lock != _MAGIC_) {
@@ -243,5 +243,5 @@
}
bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
- return Atomic::replace_if_null(entry, &_next);
+ return Atomic::replace_if_null(&_next, entry);
}
--- a/src/hotspot/share/services/mallocSiteTable.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/mallocSiteTable.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -153,7 +153,7 @@
// Acquire shared lock.
// Return true if shared access is granted.
inline bool sharedLock() {
- jint res = Atomic::add(1, _lock);
+ jint res = Atomic::add(_lock, 1);
if (res < 0) {
Atomic::dec(_lock);
return false;
--- a/src/hotspot/share/services/mallocTracker.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/mallocTracker.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -55,7 +55,7 @@
inline void allocate(size_t sz) {
Atomic::inc(&_count);
if (sz > 0) {
- Atomic::add(sz, &_size);
+ Atomic::add(&_size, sz);
DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
}
DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
@@ -66,13 +66,13 @@
assert(_size >= sz, "deallocation > allocated");
Atomic::dec(&_count);
if (sz > 0) {
- Atomic::sub(sz, &_size);
+ Atomic::sub(&_size, sz);
}
}
inline void resize(long sz) {
if (sz != 0) {
- Atomic::add(size_t(sz), &_size);
+ Atomic::add(&_size, size_t(sz));
DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
}
}
--- a/src/hotspot/share/services/memTracker.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/memTracker.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -183,7 +183,7 @@
// printing the final report during normal VM exit, it should not print
// the final report again. In addition, it should be guarded from
// recursive calls in case NMT reporting itself crashes.
- if (Atomic::cmpxchg(true, &g_final_report_did_run, false) == false) {
+ if (Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) {
NMT_TrackingLevel level = tracking_level();
if (level >= NMT_summary) {
report(level == NMT_summary, output);
--- a/src/hotspot/share/services/memoryManager.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/memoryManager.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -65,7 +65,7 @@
instanceOop MemoryManager::get_memory_manager_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_mgr_obj points to or implies.
- instanceOop mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
+ instanceOop mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
if (mgr_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra manager instances will just be gc'ed.
@@ -118,7 +118,7 @@
//
// The lock has done an acquire, so the load can't float above it, but
// we need to do a load_acquire as above.
- mgr_obj = OrderAccess::load_acquire(&_memory_mgr_obj);
+ mgr_obj = Atomic::load_acquire(&_memory_mgr_obj);
if (mgr_obj != NULL) {
return mgr_obj;
}
@@ -130,7 +130,7 @@
// with creating the management object are visible before publishing
// its address. The unlock will publish the store to _memory_mgr_obj
// because it does a release first.
- OrderAccess::release_store(&_memory_mgr_obj, mgr_obj);
+ Atomic::release_store(&_memory_mgr_obj, mgr_obj);
}
}
--- a/src/hotspot/share/services/memoryPool.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/services/memoryPool.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -77,7 +77,7 @@
instanceOop MemoryPool::get_memory_pool_instance(TRAPS) {
// Must do an acquire so as to force ordering of subsequent
// loads from anything _memory_pool_obj points to or implies.
- instanceOop pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
+ instanceOop pool_obj = Atomic::load_acquire(&_memory_pool_obj);
if (pool_obj == NULL) {
// It's ok for more than one thread to execute the code up to the locked region.
// Extra pool instances will just be gc'ed.
@@ -118,7 +118,7 @@
//
// The lock has done an acquire, so the load can't float above it,
// but we need to do a load_acquire as above.
- pool_obj = OrderAccess::load_acquire(&_memory_pool_obj);
+ pool_obj = Atomic::load_acquire(&_memory_pool_obj);
if (pool_obj != NULL) {
return pool_obj;
}
@@ -130,7 +130,7 @@
// with creating the pool are visible before publishing its address.
// The unlock will publish the store to _memory_pool_obj because
// it does a release first.
- OrderAccess::release_store(&_memory_pool_obj, pool_obj);
+ Atomic::release_store(&_memory_pool_obj, pool_obj);
}
}
--- a/src/hotspot/share/utilities/accessFlags.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/accessFlags.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -33,7 +33,7 @@
do {
old_flags = _flags;
new_flags = old_flags | bits;
- f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+ f = Atomic::cmpxchg(&_flags, old_flags, new_flags);
} while(f != old_flags);
}
@@ -43,7 +43,7 @@
do {
old_flags = _flags;
new_flags = old_flags & ~bits;
- f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+ f = Atomic::cmpxchg(&_flags, old_flags, new_flags);
} while(f != old_flags);
}
--- a/src/hotspot/share/utilities/bitMap.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/bitMap.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -217,7 +217,7 @@
bm_word_t mr = inverted_bit_mask_for_range(beg, end);
bm_word_t nw = value ? (w | ~mr) : (w & mr);
while (true) {
- bm_word_t res = Atomic::cmpxchg(nw, pw, w);
+ bm_word_t res = Atomic::cmpxchg(pw, w, nw);
if (res == w) break;
w = res;
nw = value ? (w | ~mr) : (w & mr);
@@ -640,7 +640,7 @@
table[i] = num_set_bits(i);
}
- if (!Atomic::replace_if_null(table, &_pop_count_table)) {
+ if (!Atomic::replace_if_null(&_pop_count_table, table)) {
guarantee(_pop_count_table != NULL, "invariant");
FREE_C_HEAP_ARRAY(idx_t, table);
}
--- a/src/hotspot/share/utilities/bitMap.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/bitMap.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -48,7 +48,7 @@
memory_order == memory_order_acquire ||
memory_order == memory_order_conservative,
"unexpected memory ordering");
- return OrderAccess::load_acquire(addr);
+ return Atomic::load_acquire(addr);
}
}
@@ -72,7 +72,7 @@
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
+ const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order);
if (cur_val == old_val) {
return true; // Success.
}
@@ -91,7 +91,7 @@
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
+ const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order);
if (cur_val == old_val) {
return true; // Success.
}
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -58,7 +58,7 @@
ConcurrentHashTable<CONFIG, F>::
Node::next() const
{
- return OrderAccess::load_acquire(&_next);
+ return Atomic::load_acquire(&_next);
}
// Bucket
@@ -67,7 +67,7 @@
ConcurrentHashTable<CONFIG, F>::
Bucket::first_raw() const
{
- return OrderAccess::load_acquire(&_first);
+ return Atomic::load_acquire(&_first);
}
template <typename CONFIG, MEMFLAGS F>
@@ -79,7 +79,7 @@
// Due to this assert this methods is not static.
assert(is_locked(), "Must be locked.");
Node** tmp = (Node**)dst;
- OrderAccess::release_store(tmp, clear_set_state(node, *dst));
+ Atomic::release_store(tmp, clear_set_state(node, *dst));
}
template <typename CONFIG, MEMFLAGS F>
@@ -88,7 +88,7 @@
Bucket::first() const
{
// We strip the states bit before returning the ptr.
- return clear_state(OrderAccess::load_acquire(&_first));
+ return clear_state(Atomic::load_acquire(&_first));
}
template <typename CONFIG, MEMFLAGS F>
@@ -145,7 +145,7 @@
if (is_locked()) {
return false;
}
- if (Atomic::cmpxchg(node, &_first, expect) == expect) {
+ if (Atomic::cmpxchg(&_first, expect, node) == expect) {
return true;
}
return false;
@@ -160,7 +160,7 @@
}
// We will expect a clean first pointer.
Node* tmp = first();
- if (Atomic::cmpxchg(set_state(tmp, STATE_LOCK_BIT), &_first, tmp) == tmp) {
+ if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) {
return true;
}
return false;
@@ -173,7 +173,7 @@
assert(is_locked(), "Must be locked.");
assert(!have_redirect(),
"Unlocking a bucket after it has reached terminal state.");
- OrderAccess::release_store(&_first, clear_state(first()));
+ Atomic::release_store(&_first, clear_state(first()));
}
template <typename CONFIG, MEMFLAGS F>
@@ -181,7 +181,7 @@
Bucket::redirect()
{
assert(is_locked(), "Must be locked.");
- OrderAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
+ Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT));
}
// InternalTable
@@ -217,8 +217,8 @@
_cs_context(GlobalCounter::critical_section_begin(_thread))
{
// This version is published now.
- if (OrderAccess::load_acquire(&_cht->_invisible_epoch) != NULL) {
- OrderAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
+ if (Atomic::load_acquire(&_cht->_invisible_epoch) != NULL) {
+ Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)NULL);
}
}
@@ -289,13 +289,13 @@
assert(_resize_lock_owner == thread, "Re-size lock not held");
OrderAccess::fence(); // Prevent below load from floating up.
// If no reader saw this version we can skip write_synchronize.
- if (OrderAccess::load_acquire(&_invisible_epoch) == thread) {
+ if (Atomic::load_acquire(&_invisible_epoch) == thread) {
return;
}
assert(_invisible_epoch == NULL, "Two thread doing bulk operations");
// We set this/next version that we are synchronizing for to not published.
// A reader will zero this flag if it reads this/next version.
- OrderAccess::release_store(&_invisible_epoch, thread);
+ Atomic::release_store(&_invisible_epoch, thread);
GlobalCounter::write_synchronize();
}
@@ -374,7 +374,7 @@
ConcurrentHashTable<CONFIG, F>::
get_table() const
{
- return OrderAccess::load_acquire(&_table);
+ return Atomic::load_acquire(&_table);
}
template <typename CONFIG, MEMFLAGS F>
@@ -382,7 +382,7 @@
ConcurrentHashTable<CONFIG, F>::
get_new_table() const
{
- return OrderAccess::load_acquire(&_new_table);
+ return Atomic::load_acquire(&_new_table);
}
template <typename CONFIG, MEMFLAGS F>
@@ -392,7 +392,7 @@
{
InternalTable* old_table = _table;
// Publish the new table.
- OrderAccess::release_store(&_table, _new_table);
+ Atomic::release_store(&_table, _new_table);
// All must see this.
GlobalCounter::write_synchronize();
// _new_table not read any more.
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -53,7 +53,7 @@
// Returns true if you succeeded to claim the range start -> (stop-1).
bool claim(size_t* start, size_t* stop) {
- size_t claimed = Atomic::add((size_t)1, &_next_to_claim) - 1;
+ size_t claimed = Atomic::add(&_next_to_claim, (size_t)1) - 1;
if (claimed >= _stop_task) {
return false;
}
@@ -74,7 +74,7 @@
// Returns false if all ranges are claimed.
bool have_more_work() {
- return OrderAccess::load_acquire(&_next_to_claim) >= _stop_task;
+ return Atomic::load_acquire(&_next_to_claim) >= _stop_task;
}
void thread_owns_resize_lock(Thread* thread) {
--- a/src/hotspot/share/utilities/debug.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/debug.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -323,7 +323,7 @@
// same time. To avoid dumping the heap or executing the data collection
// commands multiple times we just do it once when the first threads reports
// the error.
- if (Atomic::cmpxchg(1, &out_of_memory_reported, 0) == 0) {
+ if (Atomic::cmpxchg(&out_of_memory_reported, 0, 1) == 0) {
// create heap dump before OnOutOfMemoryError commands are executed
if (HeapDumpOnOutOfMemoryError) {
tty->print_cr("java.lang.OutOfMemoryError: %s", message);
@@ -762,7 +762,7 @@
// Store Context away.
if (ucVoid) {
const intx my_tid = os::current_thread_id();
- if (Atomic::cmpxchg(my_tid, &g_asserting_thread, (intx)0) == 0) {
+ if (Atomic::cmpxchg(&g_asserting_thread, (intx)0, my_tid) == 0) {
store_context(ucVoid);
g_assertion_context = &g_stored_assertion_context;
}
@@ -772,4 +772,3 @@
return false;
}
#endif // CAN_SHOW_REGISTERS_ON_ASSERT
-
--- a/src/hotspot/share/utilities/globalCounter.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/globalCounter.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -41,7 +41,7 @@
SpinYield yield;
// Loops on this thread until it has exited the critical read section.
while(true) {
- uintx cnt = OrderAccess::load_acquire(thread->get_rcu_counter());
+ uintx cnt = Atomic::load_acquire(thread->get_rcu_counter());
// This checks if the thread's counter is active. And if so is the counter
// for a pre-existing reader (belongs to this grace period). A pre-existing
// reader will have a lower counter than the global counter version for this
@@ -59,7 +59,7 @@
void GlobalCounter::write_synchronize() {
assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section");
// Atomic::add must provide fence since we have storeload dependency.
- uintx gbl_cnt = Atomic::add(COUNTER_INCREMENT, &_global_counter._counter);
+ uintx gbl_cnt = Atomic::add(&_global_counter._counter, COUNTER_INCREMENT);
// Do all RCU threads.
CounterThreadCheck ctc(gbl_cnt);
--- a/src/hotspot/share/utilities/globalCounter.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/globalCounter.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -40,7 +40,7 @@
if ((new_cnt & COUNTER_ACTIVE) == 0) {
new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE;
}
- OrderAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
+ Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt);
return static_cast<CSContext>(old_cnt);
}
@@ -49,8 +49,8 @@
assert(thread == Thread::current(), "must be current thread");
assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
// Restore the counter value from before the associated begin.
- OrderAccess::release_store(thread->get_rcu_counter(),
- static_cast<uintx>(context));
+ Atomic::release_store(thread->get_rcu_counter(),
+ static_cast<uintx>(context));
}
class GlobalCounter::CriticalSection {
--- a/src/hotspot/share/utilities/globalDefinitions.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -949,6 +949,13 @@
template<class T> inline T ABS(T x) { return (x > 0) ? x : -x; }
+// Return the given value clamped to the range [min ... max]
+template<typename T>
+inline T clamp(T value, T min, T max) {
+ assert(min <= max, "must be");
+ return MIN2(MAX2(value, min), max);
+}
+
// true if x is a power of 2, false otherwise
inline bool is_power_of_2(intptr_t x) {
return ((x != NoBits) && (mask_bits(x, x - 1) == NoBits));
--- a/src/hotspot/share/utilities/hashtable.inline.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/hashtable.inline.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -88,7 +88,7 @@
// SystemDictionary are read without locks. The new entry must be
// complete before other threads can be allowed to see it
// via a store to _buckets[index].
- OrderAccess::release_store(&_entry, l);
+ Atomic::release_store(&_entry, l);
}
@@ -97,7 +97,7 @@
// SystemDictionary are read without locks. The new entry must be
// complete before other threads can be allowed to see it
// via a store to _buckets[index].
- return OrderAccess::load_acquire(&_entry);
+ return Atomic::load_acquire(&_entry);
}
--- a/src/hotspot/share/utilities/lockFreeStack.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/lockFreeStack.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -65,7 +65,7 @@
do {
old = cur;
set_next(*last, cur);
- cur = Atomic::cmpxchg(first, &_top, cur);
+ cur = Atomic::cmpxchg(&_top, cur, first);
} while (old != cur);
}
@@ -91,7 +91,7 @@
new_top = next(*result);
}
// CAS even on empty pop, for consistent membar bahavior.
- result = Atomic::cmpxchg(new_top, &_top, result);
+ result = Atomic::cmpxchg(&_top, result, new_top);
} while (result != old);
if (result != NULL) {
set_next(*result, NULL);
@@ -103,7 +103,7 @@
// list of elements. Acts as a full memory barrier.
// postcondition: empty()
T* pop_all() {
- return Atomic::xchg((T*)NULL, &_top);
+ return Atomic::xchg(&_top, (T*)NULL);
}
// Atomically adds value to the top of this stack. Acts as a full
@@ -170,7 +170,7 @@
// if value is in an instance of this specialization of LockFreeStack,
// there must be no concurrent push or pop operations on that stack.
static void set_next(T& value, T* new_next) {
- Atomic::store(new_next, next_ptr(value));
+ Atomic::store(next_ptr(value), new_next);
}
};
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -44,7 +44,7 @@
// synchronization have exited that critical section.
void SingleWriterSynchronizer::synchronize() {
// Side-effect in assert balanced by debug-only dec at end.
- assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
+ assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
// We don't know anything about the muxing between this invocation
// and invocations in other threads. We must start with the latest
// _enter polarity, else we could clobber the wrong _exit value on
@@ -64,7 +64,7 @@
do {
old = value;
*new_ptr = ++value;
- value = Atomic::cmpxchg(value, &_enter, old);
+ value = Atomic::cmpxchg(&_enter, old, value);
} while (old != value);
// Critical sections entered before we changed the polarity will use
// the old exit counter. Critical sections entered after the change
@@ -85,7 +85,7 @@
// to complete, e.g. for the value of old_ptr to catch up with old.
// Loop because there could be pending wakeups unrelated to this
// synchronize request.
- while (old != OrderAccess::load_acquire(old_ptr)) {
+ while (old != Atomic::load_acquire(old_ptr)) {
_wakeup.wait();
}
// (5) Drain any pending wakeups. A critical section exit may have
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -89,11 +89,11 @@
};
inline uint SingleWriterSynchronizer::enter() {
- return Atomic::add(2u, &_enter);
+ return Atomic::add(&_enter, 2u);
}
inline void SingleWriterSynchronizer::exit(uint enter_value) {
- uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]);
+ uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u);
// If this exit completes a synchronize request, wakeup possibly
// waiting synchronizer. Read of _waiting_for must follow the _exit
// update.
--- a/src/hotspot/share/utilities/ticks.hpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/ticks.hpp Mon Nov 25 15:16:29 2019 +0000
@@ -233,6 +233,7 @@
TimeInstant(jlong ticks) : Rep<TimeSource>(ticks) {}
friend class GranularTimer;
friend class ObjectSample;
+ friend class EventEmitter;
// GC unit tests
friend class TimePartitionPhasesIteratorTest;
friend class GCTimerTest;
--- a/src/hotspot/share/utilities/vmError.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/vmError.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -399,7 +399,7 @@
void VMError::record_reporting_start_time() {
const jlong now = get_current_timestamp();
- Atomic::store(now, &_reporting_start_time);
+ Atomic::store(&_reporting_start_time, now);
}
jlong VMError::get_reporting_start_time() {
@@ -408,7 +408,7 @@
void VMError::record_step_start_time() {
const jlong now = get_current_timestamp();
- Atomic::store(now, &_step_start_time);
+ Atomic::store(&_step_start_time, now);
}
jlong VMError::get_step_start_time() {
@@ -416,7 +416,7 @@
}
void VMError::clear_step_start_time() {
- return Atomic::store((jlong)0, &_step_start_time);
+ return Atomic::store(&_step_start_time, (jlong)0);
}
void VMError::report(outputStream* st, bool _verbose) {
@@ -1365,7 +1365,7 @@
}
intptr_t mytid = os::current_thread_id();
if (_first_error_tid == -1 &&
- Atomic::cmpxchg(mytid, &_first_error_tid, (intptr_t)-1) == -1) {
+ Atomic::cmpxchg(&_first_error_tid, (intptr_t)-1, mytid) == -1) {
// Initialize time stamps to use the same base.
out.time_stamp().update_to(1);
--- a/src/hotspot/share/utilities/waitBarrier_generic.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/src/hotspot/share/utilities/waitBarrier_generic.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -48,7 +48,7 @@
assert(w > 0, "Bad counting");
// We need an exact count which never goes below zero,
// otherwise the semaphore may be signalled too many times.
- if (Atomic::cmpxchg(w - 1, &_waiters, w) == w) {
+ if (Atomic::cmpxchg(&_waiters, w, w - 1) == w) {
_sem_barrier.signal();
return w - 1;
}
@@ -82,13 +82,13 @@
OrderAccess::fence();
return;
}
- Atomic::add(1, &_barrier_threads);
+ Atomic::add(&_barrier_threads, 1);
if (barrier_tag != 0 && barrier_tag == _barrier_tag) {
- Atomic::add(1, &_waiters);
+ Atomic::add(&_waiters, 1);
_sem_barrier.wait();
// We help out with posting, but we need to do so before we decrement the
// _barrier_threads otherwise we might wake threads up in next wait.
GenericWaitBarrier::wake_if_needed();
}
- Atomic::add(-1, &_barrier_threads);
+ Atomic::add(&_barrier_threads, -1);
}
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandles.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandles.java Mon Nov 25 15:16:29 2019 +0000
@@ -4286,8 +4286,8 @@
* {@link #dropArguments(MethodHandle,int,Class...) dropArguments}{@code (target, pos, valueTypes.toArray(new Class[0]))}
* </pre></blockquote>
* @param target the method handle to invoke after the arguments are dropped
+ * @param pos position of first argument to drop (zero for the leftmost)
* @param valueTypes the type(s) of the argument(s) to drop
- * @param pos position of first argument to drop (zero for the leftmost)
* @return a method handle which drops arguments of the given types,
* before calling the original method handle
* @throws NullPointerException if the target is null,
@@ -4366,8 +4366,8 @@
* {@link #dropArguments(MethodHandle,int,List) dropArguments}{@code (target, pos, Arrays.asList(valueTypes))}
* </pre></blockquote>
* @param target the method handle to invoke after the arguments are dropped
+ * @param pos position of first argument to drop (zero for the leftmost)
* @param valueTypes the type(s) of the argument(s) to drop
- * @param pos position of first argument to drop (zero for the leftmost)
* @return a method handle which drops arguments of the given types,
* before calling the original method handle
* @throws NullPointerException if the target is null,
--- a/src/java.base/share/classes/java/net/AbstractPlainDatagramSocketImpl.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/java/net/AbstractPlainDatagramSocketImpl.java Mon Nov 25 15:16:29 2019 +0000
@@ -482,7 +482,9 @@
throw new IllegalArgumentException("Invalid TTL/hop value: " + value);
setTimeToLive((Integer)value);
} else if (name == StandardSocketOptions.IP_MULTICAST_LOOP) {
- setOption(SocketOptions.IP_MULTICAST_LOOP, value);
+ boolean enable = (boolean) value;
+ // Legacy setOption expects true to mean 'disabled'
+ setOption(SocketOptions.IP_MULTICAST_LOOP, !enable);
} else if (extendedOptions.isOptionSupported(name)) {
extendedOptions.setOption(fd, name, value);
} else {
@@ -517,7 +519,9 @@
} else if (name == StandardSocketOptions.IP_MULTICAST_TTL) {
return (T) ((Integer) getTimeToLive());
} else if (name == StandardSocketOptions.IP_MULTICAST_LOOP) {
- return (T) getOption(SocketOptions.IP_MULTICAST_LOOP);
+ boolean disabled = (boolean) getOption(SocketOptions.IP_MULTICAST_LOOP);
+ // Legacy getOption returns true when disabled
+ return (T) Boolean.valueOf(!disabled);
} else if (extendedOptions.isOptionSupported(name)) {
return (T) extendedOptions.getOption(fd, name);
} else {
--- a/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/java/nio/channels/DatagramChannel.java Mon Nov 25 15:16:29 2019 +0000
@@ -403,6 +403,9 @@
* or {@code null} if this channel is in non-blocking mode
* and no datagram was immediately available
*
+ * @throws IllegalArgumentException
+ * If the buffer is read-only
+ *
* @throws ClosedChannelException
* If this channel is closed
*
--- a/src/java.base/share/classes/java/nio/channels/FileChannel.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/java/nio/channels/FileChannel.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -719,7 +719,7 @@
* size
*
* @throws IllegalArgumentException
- * If the position is negative
+ * If the position is negative or the buffer is read-only
*
* @throws NonReadableChannelException
* If this channel was not opened for reading
--- a/src/java.base/share/classes/java/nio/channels/ReadableByteChannel.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/java/nio/channels/ReadableByteChannel.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,9 @@
* @return The number of bytes read, possibly zero, or {@code -1} if the
* channel has reached end-of-stream
*
+ * @throws IllegalArgumentException
+ * If the buffer is read-only
+ *
* @throws NonReadableChannelException
* If this channel was not opened for reading
*
--- a/src/java.base/share/classes/java/nio/channels/ScatteringByteChannel.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/java/nio/channels/ScatteringByteChannel.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,6 +101,9 @@
* If the preconditions on the {@code offset} and {@code length}
* parameters do not hold
*
+ * @throws IllegalArgumentException
+ * If any of the buffers is read-only
+ *
* @throws NonReadableChannelException
* If this channel was not opened for reading
*
@@ -138,6 +141,9 @@
* @return The number of bytes read, possibly zero,
* or {@code -1} if the channel has reached end-of-stream
*
+ * @throws IllegalArgumentException
+ * If any of the buffers is read-only
+ *
* @throws NonReadableChannelException
* If this channel was not opened for reading
*
--- a/src/java.base/share/classes/sun/security/ssl/NamedGroup.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/sun/security/ssl/NamedGroup.java Mon Nov 25 15:16:29 2019 +0000
@@ -250,8 +250,19 @@
this.supportedProtocols = supportedProtocols;
this.keAlgParamSpec = keAlgParamSpec;
+ // Check if it is a supported named group.
AlgorithmParameters algParams = null;
boolean mediator = (keAlgParamSpec != null);
+
+ // HACK CODE
+ //
+ // An EC provider, for example the SunEC provider, may support
+ // AlgorithmParameters but not KeyPairGenerator or KeyAgreement.
+ if (mediator && (namedGroupSpec == NamedGroupSpec.NAMED_GROUP_ECDHE)) {
+ mediator = JsseJce.isEcAvailable();
+ }
+
+ // Check the specific algorithm parameters.
if (mediator) {
try {
algParams =
--- a/src/java.base/share/classes/sun/security/ssl/SignatureScheme.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/classes/sun/security/ssl/SignatureScheme.java Mon Nov 25 15:16:29 2019 +0000
@@ -274,17 +274,28 @@
Arrays.asList(handshakeSupportedProtocols);
boolean mediator = true;
- if (signAlgParams != null) {
- mediator = signAlgParams.isAvailable;
- } else {
- try {
- Signature.getInstance(algorithm);
- } catch (Exception e) {
- mediator = false;
- if (SSLLogger.isOn && SSLLogger.isOn("ssl,handshake")) {
- SSLLogger.warning(
- "Signature algorithm, " + algorithm +
- ", is not supported by the underlying providers");
+ // HACK CODE
+ //
+ // An EC provider, for example the SunEC provider, may support
+ // AlgorithmParameters but not KeyPairGenerator or Signature.
+ if ("EC".equals(keyAlgorithm)) {
+ mediator = JsseJce.isEcAvailable();
+ }
+
+ // Check the specific algorithm and parameters.
+ if (mediator) {
+ if (signAlgParams != null) {
+ mediator = signAlgParams.isAvailable;
+ } else {
+ try {
+ Signature.getInstance(algorithm);
+ } catch (Exception e) {
+ mediator = false;
+ if (SSLLogger.isOn && SSLLogger.isOn("ssl,handshake")) {
+ SSLLogger.warning(
+ "Signature algorithm, " + algorithm +
+ ", is not supported by the underlying providers");
+ }
}
}
}
--- a/src/java.base/share/native/libjli/jli_util.h Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/native/libjli/jli_util.h Mon Nov 25 15:16:29 2019 +0000
@@ -80,7 +80,6 @@
#define JLI_StrSpn(p1, p2) strspn((p1), (p2))
#define JLI_StrCSpn(p1, p2) strcspn((p1), (p2))
#define JLI_StrPBrk(p1, p2) strpbrk((p1), (p2))
-#define JLI_StrTok(p1, p2) strtok((p1), (p2))
/* On Windows lseek() is in io.h rather than the location dictated by POSIX. */
#ifdef _WIN32
--- a/src/java.base/share/native/libnet/net_util.c Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/native/libnet/net_util.c Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -115,12 +115,6 @@
* get_ methods that return +ve int return -1 on error
* get_ methods that return objects return NULL on error.
*/
-jobject getInet6Address_scopeifname(JNIEnv *env, jobject iaObj) {
- jobject holder = (*env)->GetObjectField(env, iaObj, ia6_holder6ID);
- CHECK_NULL_RETURN(holder, NULL);
- return (*env)->GetObjectField(env, holder, ia6_scopeifnameID);
-}
-
jboolean setInet6Address_scopeifname(JNIEnv *env, jobject iaObj, jobject scopeifname) {
jobject holder = (*env)->GetObjectField(env, iaObj, ia6_holder6ID);
CHECK_NULL_RETURN(holder, JNI_FALSE);
@@ -128,12 +122,6 @@
return JNI_TRUE;
}
-jboolean getInet6Address_scopeid_set(JNIEnv *env, jobject iaObj) {
- jobject holder = (*env)->GetObjectField(env, iaObj, ia6_holder6ID);
- CHECK_NULL_RETURN(holder, JNI_FALSE);
- return (*env)->GetBooleanField(env, holder, ia6_scopeidsetID);
-}
-
unsigned int getInet6Address_scopeid(JNIEnv *env, jobject iaObj) {
jobject holder = (*env)->GetObjectField(env, iaObj, ia6_holder6ID);
CHECK_NULL_RETURN(holder, 0);
@@ -208,12 +196,6 @@
return (*env)->GetIntField(env, holder, iac_familyID);
}
-jobject getInetAddress_hostName(JNIEnv *env, jobject iaObj) {
- jobject holder = (*env)->GetObjectField(env, iaObj, ia_holderID);
- CHECK_NULL_THROW_NPE_RETURN(env, holder, "InetAddress holder is null", NULL);
- return (*env)->GetObjectField(env, holder, iac_hostNameID);
-}
-
JNIEXPORT jobject JNICALL
NET_SockaddrToInetAddress(JNIEnv *env, SOCKETADDRESS *sa, int *port) {
jobject iaObj;
--- a/src/java.base/share/native/libnet/net_util.h Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/share/native/libnet/net_util.h Mon Nov 25 15:16:29 2019 +0000
@@ -64,9 +64,7 @@
* get_ methods that return int/boolean, return -1 on error
* get_ methods that return objects return NULL on error.
*/
-extern jobject getInet6Address_scopeifname(JNIEnv *env, jobject ia6Obj);
extern jboolean setInet6Address_scopeifname(JNIEnv *env, jobject ia6Obj, jobject scopeifname);
-extern jboolean getInet6Address_scopeid_set(JNIEnv *env, jobject ia6Obj);
extern unsigned int getInet6Address_scopeid(JNIEnv *env, jobject ia6Obj);
extern jboolean setInet6Address_scopeid(JNIEnv *env, jobject ia6Obj, int scopeid);
extern jboolean getInet6Address_ipaddress(JNIEnv *env, jobject ia6Obj, char *dest);
@@ -77,7 +75,6 @@
extern void setInetAddress_hostName(JNIEnv *env, jobject iaObj, jobject h);
extern int getInetAddress_addr(JNIEnv *env, jobject iaObj);
extern int getInetAddress_family(JNIEnv *env, jobject iaObj);
-extern jobject getInetAddress_hostName(JNIEnv *env, jobject iaObj);
extern jclass ia4_class;
extern jmethodID ia4_ctrID;
--- a/src/java.base/unix/native/libjli/java_md_solinux.c Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/unix/native/libjli/java_md_solinux.c Mon Nov 25 15:16:29 2019 +0000
@@ -188,6 +188,7 @@
char serverPattern[] = "lib/server";
char *envpath;
char *path;
+ char *save_ptr = NULL;
jboolean clientPatternFound;
jboolean serverPatternFound;
@@ -207,7 +208,7 @@
* we have a suspicious path component, check if it contains a libjvm.so
*/
envpath = JLI_StringDup(env);
- for (path = JLI_StrTok(envpath, ":"); path != NULL; path = JLI_StrTok(NULL, ":")) {
+ for (path = strtok_r(envpath, ":", &save_ptr); path != NULL; path = strtok_r(NULL, ":", &save_ptr)) {
if (clientPatternFound && JLI_StrStr(path, clientPattern) != NULL) {
if (JvmExists(path)) {
JLI_MemFree(envpath);
--- a/src/java.base/unix/native/libnet/DefaultProxySelector.c Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/unix/native/libnet/DefaultProxySelector.c Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -74,7 +74,6 @@
gconf_client_get_string_func* my_get_string_func = NULL;
gconf_client_get_int_func* my_get_int_func = NULL;
gconf_client_get_bool_func* my_get_bool_func = NULL;
-gconf_init_func* my_gconf_init_func = NULL;
g_type_init_func* my_g_type_init_func = NULL;
--- a/src/java.base/unix/native/libnet/net_util_md.c Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/unix/native/libnet/net_util_md.c Mon Nov 25 15:16:29 2019 +0000
@@ -73,50 +73,6 @@
#define UDP_EXCLBIND 0x0101
#endif
-void setDefaultScopeID(JNIEnv *env, struct sockaddr *him)
-{
-#ifdef MACOSX
- static jclass ni_class = NULL;
- static jfieldID ni_defaultIndexID;
- if (ni_class == NULL) {
- jclass c = (*env)->FindClass(env, "java/net/NetworkInterface");
- CHECK_NULL(c);
- c = (*env)->NewGlobalRef(env, c);
- CHECK_NULL(c);
- ni_defaultIndexID = (*env)->GetStaticFieldID(env, c, "defaultIndex", "I");
- CHECK_NULL(ni_defaultIndexID);
- ni_class = c;
- }
- int defaultIndex;
- struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)him;
- if (sin6->sin6_family == AF_INET6 && (sin6->sin6_scope_id == 0) &&
- (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) ||
- IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))) {
- defaultIndex = (*env)->GetStaticIntField(env, ni_class,
- ni_defaultIndexID);
- sin6->sin6_scope_id = defaultIndex;
- }
-#endif
-}
-
-int getDefaultScopeID(JNIEnv *env) {
- int defaultIndex = 0;
- static jclass ni_class = NULL;
- static jfieldID ni_defaultIndexID;
- if (ni_class == NULL) {
- jclass c = (*env)->FindClass(env, "java/net/NetworkInterface");
- CHECK_NULL_RETURN(c, 0);
- c = (*env)->NewGlobalRef(env, c);
- CHECK_NULL_RETURN(c, 0);
- ni_defaultIndexID = (*env)->GetStaticFieldID(env, c, "defaultIndex", "I");
- CHECK_NULL_RETURN(ni_defaultIndexID, 0);
- ni_class = c;
- }
- defaultIndex = (*env)->GetStaticIntField(env, ni_class,
- ni_defaultIndexID);
- return defaultIndex;
-}
-
#define RESTARTABLE(_cmd, _result) do { \
do { \
_result = _cmd; \
@@ -217,26 +173,6 @@
}
#endif
-#ifdef __linux__
-static int vinit = 0;
-static int kernelV24 = 0;
-static int vinit24 = 0;
-
-int kernelIsV24 () {
- if (!vinit24) {
- struct utsname sysinfo;
- if (uname(&sysinfo) == 0) {
- sysinfo.release[3] = '\0';
- if (strcmp(sysinfo.release, "2.4") == 0) {
- kernelV24 = JNI_TRUE;
- }
- }
- vinit24 = 1;
- }
- return kernelV24;
-}
-#endif
-
void
NET_ThrowByNameWithLastError(JNIEnv *env, const char *name,
const char *defaultDetail) {
--- a/src/java.base/unix/native/libnet/net_util_md.h Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.base/unix/native/libnet/net_util_md.h Mon Nov 25 15:16:29 2019 +0000
@@ -101,10 +101,6 @@
const char *defaultDetail);
void NET_SetTrafficClass(SOCKETADDRESS *sa, int trafficClass);
-#ifdef __linux__
-int kernelIsV24();
-#endif
-
#ifdef __solaris__
int net_getParam(char *driver, char *param);
#endif
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/Init.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/Init.java Mon Nov 25 15:16:29 2019 +0000
@@ -30,8 +30,6 @@
import java.util.ArrayList;
import java.util.List;
-import javax.xml.parsers.DocumentBuilder;
-
import com.sun.org.apache.xml.internal.security.algorithms.JCEMapper;
import com.sun.org.apache.xml.internal.security.algorithms.SignatureAlgorithm;
import com.sun.org.apache.xml.internal.security.c14n.Canonicalizer;
@@ -170,8 +168,7 @@
private static void fileInit(InputStream is) {
try {
/* read library configuration file */
- DocumentBuilder db = XMLUtils.createDocumentBuilder(false);
- Document doc = db.parse(is);
+ Document doc = XMLUtils.read(is, false);
Node config = doc.getFirstChild();
for (; config != null; config = config.getNextSibling()) {
if ("Configuration".equals(config.getLocalName())) {
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/c14n/Canonicalizer.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/c14n/Canonicalizer.java Mon Nov 25 15:16:29 2019 +0000
@@ -30,8 +30,6 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
-import javax.xml.parsers.DocumentBuilder;
-
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer11_OmitComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer11_WithComments;
import com.sun.org.apache.xml.internal.security.c14n.implementations.Canonicalizer20010315ExclOmitComments;
@@ -261,17 +259,7 @@
try (InputStream bais = new ByteArrayInputStream(inputBytes)) {
InputSource in = new InputSource(bais);
- // needs to validate for ID attribute normalization
- DocumentBuilder db = XMLUtils.createDocumentBuilder(true, secureValidation);
-
/*
- * for some of the test vectors from the specification,
- * there has to be a validating parser for ID attributes, default
- * attribute values, NMTOKENS, etc.
- * Unfortunately, the test vectors do use different DTDs or
- * even no DTD. So Xerces 1.3.1 fires many warnings about using
- * ErrorHandlers.
- *
* Text from the spec:
*
* The input octet stream MUST contain a well-formed XML document,
@@ -285,9 +273,7 @@
* though the document type declaration is not retained in the
* canonical form.
*/
- db.setErrorHandler(new com.sun.org.apache.xml.internal.security.utils.IgnoreAllErrorHandler());
-
- document = db.parse(in);
+ document = XMLUtils.read(in, secureValidation);
}
return this.canonicalizeSubtree(document);
}
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/c14n/CanonicalizerSpi.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/c14n/CanonicalizerSpi.java Mon Nov 25 15:16:29 2019 +0000
@@ -26,8 +26,6 @@
import java.io.OutputStream;
import java.util.Set;
-import javax.xml.parsers.DocumentBuilder;
-
import com.sun.org.apache.xml.internal.security.utils.XMLUtils;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
@@ -63,9 +61,7 @@
try (java.io.InputStream bais = new ByteArrayInputStream(inputBytes)) {
InputSource in = new InputSource(bais);
- DocumentBuilder db = XMLUtils.createDocumentBuilder(false, secureValidation);
-
- document = db.parse(in);
+ document = XMLUtils.read(in, secureValidation);
}
return this.engineCanonicalizeSubTree(document);
}
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/keys/keyresolver/KeyResolverSpi.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/keys/keyresolver/KeyResolverSpi.java Mon Nov 25 15:16:29 2019 +0000
@@ -31,7 +31,6 @@
import java.util.HashMap;
import javax.crypto.SecretKey;
-import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import com.sun.org.apache.xml.internal.security.keys.storage.StorageResolver;
@@ -276,10 +275,8 @@
* @throws KeyResolverException if something goes wrong
*/
protected static Element getDocFromBytes(byte[] bytes, boolean secureValidation) throws KeyResolverException {
- DocumentBuilder db = null;
try (InputStream is = new ByteArrayInputStream(bytes)) {
- db = XMLUtils.createDocumentBuilder(false, secureValidation);
- Document doc = db.parse(is);
+ Document doc = XMLUtils.read(is, secureValidation);
return doc.getDocumentElement();
} catch (SAXException ex) {
throw new KeyResolverException(ex);
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/signature/SignedInfo.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/signature/SignedInfo.java Mon Nov 25 15:16:29 2019 +0000
@@ -215,10 +215,8 @@
c14nizer.setSecureValidation(secureValidation);
byte[] c14nizedBytes = c14nizer.canonicalizeSubtree(element);
- javax.xml.parsers.DocumentBuilder db =
- XMLUtils.createDocumentBuilder(false, secureValidation);
try (InputStream is = new ByteArrayInputStream(c14nizedBytes)) {
- Document newdoc = db.parse(is);
+ Document newdoc = XMLUtils.read(is, secureValidation);
Node imported = element.getOwnerDocument().importNode(
newdoc.getDocumentElement(), true);
element.getParentNode().replaceChild(imported, element);
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/signature/XMLSignatureInput.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/signature/XMLSignatureInput.java Mon Nov 25 15:16:29 2019 +0000
@@ -33,7 +33,6 @@
import java.util.List;
import java.util.Set;
-import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import com.sun.org.apache.xml.internal.security.c14n.CanonicalizationException;
@@ -574,12 +573,9 @@
void convertToNodes() throws CanonicalizationException,
ParserConfigurationException, IOException, SAXException {
- DocumentBuilder db = XMLUtils.createDocumentBuilder(false, secureValidation);
// select all nodes, also the comments.
try {
- db.setErrorHandler(new com.sun.org.apache.xml.internal.security.utils.IgnoreAllErrorHandler());
-
- Document doc = db.parse(this.getOctetStream());
+ Document doc = XMLUtils.read(this.getOctetStream(), secureValidation);
this.subNode = doc;
} catch (SAXException ex) {
byte[] result = null;
@@ -593,7 +589,7 @@
result = baos.toByteArray();
}
try (InputStream is = new ByteArrayInputStream(result)) {
- Document document = db.parse(is);
+ Document document = XMLUtils.read(is, secureValidation);
this.subNode = document.getDocumentElement().getFirstChild().getFirstChild();
}
} finally {
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/transforms/implementations/TransformBase64Decode.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/transforms/implementations/TransformBase64Decode.java Mon Nov 25 15:16:29 2019 +0000
@@ -147,7 +147,7 @@
//Exceptional case there is current not text case testing this(Before it was a
//a common case).
Document doc =
- XMLUtils.createDocumentBuilder(false, secureValidation).parse(input.getOctetStream());
+ XMLUtils.read(input.getOctetStream(), secureValidation);
Element rootNode = doc.getDocumentElement();
StringBuilder sb = new StringBuilder();
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/utils/WeakObjectPool.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/utils/WeakObjectPool.java Mon Nov 25 15:16:29 2019 +0000
@@ -39,7 +39,10 @@
*
* Internally, the pool is stored in a java.util.concurrent.LinkedBlockingDeque
* instance.
+ *
+ * @deprecated This class is no longer in use in Santuario 2.1.4
*/
+@Deprecated
public abstract class WeakObjectPool<T, E extends Throwable> {
private static final Integer MARKER_VALUE = Integer.MAX_VALUE;//once here rather than auto-box it?
--- a/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/utils/XMLUtils.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/com/sun/org/apache/xml/internal/security/utils/XMLUtils.java Mon Nov 25 15:16:29 2019 +0000
@@ -23,16 +23,22 @@
package com.sun.org.apache.xml.internal.security.utils;
import java.io.IOException;
+import java.io.InputStream;
import java.io.OutputStream;
import java.math.BigInteger;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Base64;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
+import java.util.Queue;
import java.util.Set;
+import java.util.WeakHashMap;
+import java.util.concurrent.ArrayBlockingQueue;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
@@ -48,6 +54,8 @@
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.w3c.dom.Text;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
/**
* DOM and XML accessibility and comfort functions.
@@ -58,6 +66,9 @@
private static boolean ignoreLineBreaks =
AccessController.doPrivileged(
(PrivilegedAction<Boolean>) () -> Boolean.getBoolean("com.sun.org.apache.xml.internal.security.ignoreLineBreaks"));
+ private static int parserPoolSize =
+ AccessController.doPrivileged(
+ (PrivilegedAction<Integer>) () -> Integer.getInteger("com.sun.org.apache.xml.internal.security.parser.pool-size", 20));
private static volatile String dsPrefix = "ds";
private static volatile String ds11Prefix = "dsig11";
@@ -67,6 +78,11 @@
private static final com.sun.org.slf4j.internal.Logger LOG =
com.sun.org.slf4j.internal.LoggerFactory.getLogger(XMLUtils.class);
+ private static final Map<ClassLoader, Queue<DocumentBuilder>> DOCUMENT_BUILDERS =
+ Collections.synchronizedMap(new WeakHashMap<ClassLoader, Queue<DocumentBuilder>>());
+
+ private static final Map<ClassLoader, Queue<DocumentBuilder>> DOCUMENT_BUILDERS_DISALLOW_DOCTYPE =
+ Collections.synchronizedMap(new WeakHashMap<ClassLoader, Queue<DocumentBuilder>>());
/**
* Constructor XMLUtils
@@ -147,7 +163,7 @@
if (rootNode == exclude) {
return;
}
- switch (rootNode.getNodeType()) {
+ switch (rootNode.getNodeType()) { //NOPMD
case Node.ELEMENT_NODE:
result.add(rootNode);
Element el = (Element)rootNode;
@@ -172,20 +188,19 @@
}
getSetRec(r, result, exclude, com);
}
- return;
+ break;
case Node.COMMENT_NODE:
if (com) {
result.add(rootNode);
}
- return;
+ break;
case Node.DOCUMENT_TYPE_NODE:
- return;
+ break;
default:
result.add(rootNode);
}
}
-
/**
* Outputs a DOM tree to an {@link OutputStream}.
*
@@ -960,18 +975,105 @@
return true;
}
- public static DocumentBuilder createDocumentBuilder(boolean validating)
- throws ParserConfigurationException {
+ public static Document newDocument() throws ParserConfigurationException {
+ ClassLoader loader = getContextClassLoader();
+ if (loader == null) {
+ loader = getClassLoader(XMLUtils.class);
+ }
+ // If the ClassLoader is null then just create a DocumentBuilder and use it
+ if (loader == null) {
+ DocumentBuilder documentBuilder = buildDocumentBuilder(true);
+ return documentBuilder.newDocument();
+ }
+
+ Queue<DocumentBuilder> queue = getDocumentBuilderQueue(true, loader);
+ DocumentBuilder documentBuilder = getDocumentBuilder(true, queue);
+ Document doc = documentBuilder.newDocument();
+ repoolDocumentBuilder(documentBuilder, queue);
+ return doc;
+ }
+
+ public static Document read(InputStream inputStream) throws ParserConfigurationException, SAXException, IOException {
+ return read(inputStream, true);
+ }
+
+ public static Document read(InputStream inputStream, boolean disAllowDocTypeDeclarations) throws ParserConfigurationException, SAXException, IOException {
+ ClassLoader loader = getContextClassLoader();
+ if (loader == null) {
+ loader = getClassLoader(XMLUtils.class);
+ }
+ // If the ClassLoader is null then just create a DocumentBuilder and use it
+ if (loader == null) {
+ DocumentBuilder documentBuilder = buildDocumentBuilder(disAllowDocTypeDeclarations);
+ return documentBuilder.parse(inputStream);
+ }
+
+ Queue<DocumentBuilder> queue = getDocumentBuilderQueue(disAllowDocTypeDeclarations, loader);
+ DocumentBuilder documentBuilder = getDocumentBuilder(disAllowDocTypeDeclarations, queue);
+ Document doc = documentBuilder.parse(inputStream);
+ repoolDocumentBuilder(documentBuilder, queue);
+ return doc;
+ }
+
+ public static Document read(String uri, boolean disAllowDocTypeDeclarations)
+ throws ParserConfigurationException, SAXException, IOException {
+ ClassLoader loader = getContextClassLoader();
+ if (loader == null) {
+ loader = getClassLoader(XMLUtils.class);
+ }
+ // If the ClassLoader is null then just create a DocumentBuilder and use it
+ if (loader == null) {
+ DocumentBuilder documentBuilder = buildDocumentBuilder(disAllowDocTypeDeclarations);
+ return documentBuilder.parse(uri);
+ }
+
+ Queue<DocumentBuilder> queue = getDocumentBuilderQueue(disAllowDocTypeDeclarations, loader);
+ DocumentBuilder documentBuilder = getDocumentBuilder(disAllowDocTypeDeclarations, queue);
+ Document doc = documentBuilder.parse(uri);
+ repoolDocumentBuilder(documentBuilder, queue);
+ return doc;
+ }
+
+ public static Document read(InputSource inputSource) throws ParserConfigurationException, SAXException, IOException {
+ return read(inputSource, true);
+ }
+
+ public static Document read(InputSource inputSource, boolean disAllowDocTypeDeclarations)
+ throws ParserConfigurationException, SAXException, IOException {
+ ClassLoader loader = getContextClassLoader();
+ if (loader == null) {
+ loader = getClassLoader(XMLUtils.class);
+ }
+ // If the ClassLoader is null then just create a DocumentBuilder and use it
+ if (loader == null) {
+ DocumentBuilder documentBuilder = buildDocumentBuilder(disAllowDocTypeDeclarations);
+ return documentBuilder.parse(inputSource);
+ }
+
+ Queue<DocumentBuilder> queue = getDocumentBuilderQueue(disAllowDocTypeDeclarations, loader);
+ DocumentBuilder documentBuilder = getDocumentBuilder(disAllowDocTypeDeclarations, queue);
+ Document doc = documentBuilder.parse(inputSource);
+ repoolDocumentBuilder(documentBuilder, queue);
+ return doc;
+ }
+
+ /**
+ * @deprecated Use XMLUtils.read instead to directly read a document.
+ */
+ @Deprecated
+ public static DocumentBuilder createDocumentBuilder(boolean validating) throws ParserConfigurationException {
return createDocumentBuilder(validating, true);
}
- // The current implementation does not throw a ParserConfigurationException.
- // Kept here in case we create the DocumentBuilder inline again.
+ /**
+ * @deprecated Use XMLUtils.read instead to directly read a document.
+ */
+ @Deprecated
public static DocumentBuilder createDocumentBuilder(
boolean validating, boolean disAllowDocTypeDeclarations
) throws ParserConfigurationException {
DocumentBuilderFactory dfactory = DocumentBuilderFactory.newInstance();
- dfactory.setFeature(javax.xml.XMLConstants.FEATURE_SECURE_PROCESSING, true);
+ dfactory.setFeature(javax.xml.XMLConstants.FEATURE_SECURE_PROCESSING, Boolean.TRUE);
if (disAllowDocTypeDeclarations) {
dfactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
}
@@ -981,6 +1083,14 @@
}
/**
+ * @deprecated This method has no effect in Santuario 2.1.4
+ */
+ @Deprecated
+ public static boolean repoolDocumentBuilder(DocumentBuilder db) {
+ return true;
+ }
+
+ /**
* Returns a byte-array representation of a {@code {@link BigInteger}}.
* No sign-bit is output.
*
@@ -1024,4 +1134,64 @@
return resizedBytes;
}
+
+ private static Queue<DocumentBuilder> getDocumentBuilderQueue(boolean disAllowDocTypeDeclarations, ClassLoader loader) throws ParserConfigurationException {
+ Map<ClassLoader, Queue<DocumentBuilder>> docBuilderCache =
+ disAllowDocTypeDeclarations ? DOCUMENT_BUILDERS_DISALLOW_DOCTYPE : DOCUMENT_BUILDERS;
+ Queue<DocumentBuilder> queue = docBuilderCache.get(loader);
+ if (queue == null) {
+ queue = new ArrayBlockingQueue<>(parserPoolSize);
+ docBuilderCache.put(loader, queue);
+ }
+
+ return queue;
+ }
+
+ private static DocumentBuilder getDocumentBuilder(boolean disAllowDocTypeDeclarations, Queue<DocumentBuilder> queue) throws ParserConfigurationException {
+ DocumentBuilder db = queue.poll();
+ if (db == null) {
+ db = buildDocumentBuilder(disAllowDocTypeDeclarations);
+ }
+ return db;
+ }
+
+ private static DocumentBuilder buildDocumentBuilder(boolean disAllowDocTypeDeclarations) throws ParserConfigurationException {
+ DocumentBuilderFactory f = DocumentBuilderFactory.newInstance();
+ f.setNamespaceAware(true);
+ f.setFeature(javax.xml.XMLConstants.FEATURE_SECURE_PROCESSING, true);
+ f.setFeature("http://apache.org/xml/features/disallow-doctype-decl", disAllowDocTypeDeclarations);
+ return f.newDocumentBuilder();
+ }
+
+ private static void repoolDocumentBuilder(DocumentBuilder db, Queue<DocumentBuilder> queue) {
+ if (queue != null) {
+ db.reset();
+ queue.offer(db);
+ }
+ }
+
+ private static ClassLoader getContextClassLoader() {
+ final SecurityManager sm = System.getSecurityManager();
+ if (sm != null) {
+ return AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {
+ public ClassLoader run() {
+ return Thread.currentThread().getContextClassLoader();
+ }
+ });
+ }
+ return Thread.currentThread().getContextClassLoader();
+ }
+
+ private static ClassLoader getClassLoader(final Class<?> clazz) {
+ final SecurityManager sm = System.getSecurityManager();
+ if (sm != null) {
+ return AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {
+ public ClassLoader run() {
+ return clazz.getClassLoader();
+ }
+ });
+ }
+ return clazz.getClassLoader();
+ }
+
}
--- a/src/java.xml.crypto/share/classes/org/jcp/xml/dsig/internal/dom/DOMRetrievalMethod.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/org/jcp/xml/dsig/internal/dom/DOMRetrievalMethod.java Mon Nov 25 15:16:29 2019 +0000
@@ -31,7 +31,7 @@
* ===========================================================================
*/
/*
- * $Id: DOMRetrievalMethod.java 1854026 2019-02-21 09:30:01Z coheigea $
+ * $Id: DOMRetrievalMethod.java 1862080 2019-06-25 16:50:17Z coheigea $
*/
package org.jcp.xml.dsig.internal.dom;
@@ -57,7 +57,6 @@
import javax.xml.crypto.dsig.Transform;
import javax.xml.crypto.dsig.XMLSignature;
import javax.xml.crypto.dsig.keyinfo.RetrievalMethod;
-import javax.xml.parsers.DocumentBuilder;
import com.sun.org.apache.xml.internal.security.utils.XMLUtils;
import org.w3c.dom.Attr;
@@ -275,12 +274,10 @@
public XMLStructure dereferenceAsXMLStructure(XMLCryptoContext context)
throws URIReferenceException
{
- DocumentBuilder db = null;
boolean secVal = Utils.secureValidation(context);
ApacheData data = (ApacheData)dereference(context);
try (InputStream is = new ByteArrayInputStream(data.getXMLSignatureInput().getBytes())) {
- db = XMLUtils.createDocumentBuilder(false, secVal);
- Document doc = db.parse(is);
+ Document doc = XMLUtils.read(is, secVal);
Element kiElem = doc.getDocumentElement();
if (kiElem.getLocalName().equals("X509Data")
&& XMLSignature.XMLNS.equals(kiElem.getNamespaceURI())) {
--- a/src/java.xml.crypto/share/classes/org/jcp/xml/dsig/internal/dom/XMLDSigRI.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/classes/org/jcp/xml/dsig/internal/dom/XMLDSigRI.java Mon Nov 25 15:16:29 2019 +0000
@@ -135,7 +135,7 @@
public XMLDSigRI() {
// This is the JDK XMLDSig provider, synced from
- // Apache Santuario XML Security for Java, version 2.1.3
+ // Apache Santuario XML Security for Java, version 2.1.4
super("XMLDSig", VER, INFO);
final Provider p = this;
--- a/src/java.xml.crypto/share/legal/santuario.md Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml.crypto/share/legal/santuario.md Mon Nov 25 15:16:29 2019 +0000
@@ -1,4 +1,4 @@
-## Apache Santuario v2.1.3
+## Apache Santuario v2.1.4
### Apache Santuario Notice
<pre>
--- a/src/java.xml/share/classes/com/sun/org/apache/xalan/internal/xsltc/compiler/XPathParser.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml/share/classes/com/sun/org/apache/xalan/internal/xsltc/compiler/XPathParser.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,844 +36,891 @@
import java.util.Stack;
/**
- * CUP v0.10j generated parser.
- * This class was generated by CUP v0.10j on Thu Oct 06 10:09:26 PDT 2005.
+ * CUP v0.11b generated parser.
+ * This class was generated by CUP v0.11b on Nov 12, 2019.
+ *
+ * @LastModified: Nov 2019
*/
public class XPathParser extends lr_parser {
- /** Default constructor. */
- public XPathParser() {super();}
-
- /** Constructor which sets the default scanner. */
- public XPathParser(Scanner s) {super(s);}
+ /**
+ * Default constructor.
+ */
+ public XPathParser() {
+ super();
+ }
- /** Production table. */
- protected static final short _production_table[][] =
- unpackFromStrings(new String[] {
- "\000\215\000\002\002\004\000\002\003\004\000\002\003" +
- "\004\000\002\036\003\000\002\036\005\000\002\037\003" +
- "\000\002\037\004\000\002\037\003\000\002\037\005\000" +
- "\002\037\005\000\002\037\004\000\002\037\003\000\002" +
- "\035\006\000\002\035\010\000\002\040\006\000\002\041" +
- "\003\000\002\041\005\000\002\041\005\000\002\042\003" +
- "\000\002\042\004\000\002\042\003\000\002\042\004\000" +
- "\002\042\004\000\002\042\005\000\002\042\004\000\002" +
- "\042\005\000\002\043\003\000\002\043\003\000\002\043" +
- "\003\000\002\043\003\000\002\043\003\000\002\044\003" +
- "\000\002\044\003\000\002\054\003\000\002\054\004\000" +
- "\002\054\004\000\002\045\003\000\002\045\004\000\002" +
- "\007\005\000\002\004\003\000\002\012\003\000\002\012" +
- "\005\000\002\013\003\000\002\013\005\000\002\014\003" +
- "\000\002\014\005\000\002\014\005\000\002\015\003\000" +
- "\002\015\005\000\002\015\005\000\002\015\005\000\002" +
- "\015\005\000\002\016\003\000\002\016\005\000\002\016" +
- "\005\000\002\017\003\000\002\017\005\000\002\017\005" +
- "\000\002\017\005\000\002\020\003\000\002\020\004\000" +
- "\002\024\003\000\002\024\005\000\002\025\003\000\002" +
- "\025\003\000\002\025\005\000\002\025\005\000\002\006" +
- "\003\000\002\006\003\000\002\027\003\000\002\027\005" +
- "\000\002\027\003\000\002\031\003\000\002\031\004\000" +
- "\002\031\003\000\002\030\005\000\002\032\004\000\002" +
- "\011\003\000\002\011\004\000\002\011\005\000\002\011" +
- "\004\000\002\011\003\000\002\053\004\000\002\053\003" +
- "\000\002\052\003\000\002\052\003\000\002\052\003\000" +
- "\002\052\003\000\002\052\003\000\002\052\003\000\002" +
- "\052\003\000\002\052\003\000\002\052\003\000\002\052" +
- "\003\000\002\052\003\000\002\052\003\000\002\052\003" +
- "\000\002\026\003\000\002\026\003\000\002\010\003\000" +
- "\002\010\004\000\002\023\003\000\002\023\005\000\002" +
- "\023\003\000\002\023\003\000\002\023\003\000\002\023" +
- "\003\000\002\021\004\000\002\022\005\000\002\022\006" +
- "\000\002\046\003\000\002\046\005\000\002\050\003\000" +
- "\002\051\003\000\002\005\003\000\002\033\003\000\002" +
- "\033\003\000\002\033\003\000\002\033\003\000\002\033" +
- "\006\000\002\033\003\000\002\034\003\000\002\034\003" +
- "\000\002\047\003\000\002\047\003\000\002\047\003\000" +
- "\002\047\003\000\002\047\003\000\002\047\003\000\002" +
- "\047\003\000\002\047\003\000\002\047\003\000\002\047" +
- "\003\000\002\047\003\000\002\047\003\000\002\047\003" +
- "\000\002\047\003\000\002\047\003\000\002\047\003\000" +
- "\002\047\003\000\002\047\003" });
-
- /** Access to production table. */
- public short[][] production_table() {return _production_table;}
+ /**
+ * Constructor which sets the default scanner.
+ */
+ public XPathParser(Scanner s) {
+ super(s);
+ }
- /** Parse-action table. */
- protected static final short[][] _action_table =
- unpackFromStrings(new String[] {
- "\000\307\000\006\066\006\067\005\001\002\000\004\002" +
- "\311\001\002\000\110\004\061\005\111\011\071\013\103" +
- "\015\134\016\066\017\106\021\070\031\075\032\031\033" +
- "\040\034\057\035\034\036\113\037\141\040\142\041\145" +
- "\044\063\045\062\046\065\047\115\050\123\051\136\052" +
- "\077\053\143\054\131\055\125\056\116\057\104\060\140" +
- "\061\067\062\060\063\127\064\132\065\072\001\002\000" +
- "\070\004\013\013\027\016\020\021\023\032\031\033\040" +
- "\035\034\036\033\037\046\040\047\041\051\044\015\045" +
- "\014\046\016\047\036\050\037\051\044\052\025\053\050" +
- "\054\043\055\041\056\035\057\030\060\045\061\021\062" +
- "\012\063\042\001\002\000\006\002\ufffe\010\306\001\002" +
- "\000\004\002\000\001\002\000\014\002\uffe7\004\uffe7\006" +
- "\uffe7\010\uffe7\021\uffe7\001\002\000\054\002\uff7a\004\uff7a" +
- "\006\uff7a\007\uff7a\010\uff7a\012\uff7a\013\uff7a\014\uff7a\021" +
- "\uff7a\022\uff7a\023\uff7a\024\uff7a\025\uff7a\026\uff7a\027\uff7a" +
- "\030\uff7a\031\uff7a\032\uff7a\033\uff7a\042\uff7a\043\uff7a\001" +
- "\002\000\070\002\ufffc\010\ufffc\013\027\016\020\032\031" +
- "\033\040\035\034\036\113\037\141\040\047\041\051\044" +
- "\015\045\014\046\016\047\036\050\037\051\044\052\025" +
- "\053\050\054\043\055\041\056\035\057\030\060\045\061" +
- "\021\062\012\063\042\001\002\000\014\002\uffe3\004\uffe3" +
- "\006\uffe3\010\uffe3\021\uffe3\001\002\000\014\002\uffe4\004" +
- "\uffe4\006\uffe4\010\uffe4\021\uffe4\001\002\000\004\011\302" +
- "\001\002\000\012\002\ufffa\004\277\010\ufffa\021\276\001" +
- "\002\000\062\013\uffe0\032\uffe0\033\uffe0\035\uffe0\036\uffe0" +
- "\037\uffe0\040\uffe0\041\uffe0\044\uffe0\045\uffe0\046\uffe0\047" +
- "\uffe0\050\uffe0\051\uffe0\052\uffe0\053\uffe0\054\uffe0\055\uffe0" +
- "\056\uffe0\057\uffe0\060\uffe0\061\uffe0\062\uffe0\063\uffe0\001" +
- "\002\000\054\002\uff7b\004\uff7b\006\uff7b\007\uff7b\010\uff7b" +
- "\012\uff7b\013\uff7b\014\uff7b\021\uff7b\022\uff7b\023\uff7b\024" +
- "\uff7b\025\uff7b\026\uff7b\027\uff7b\030\uff7b\031\uff7b\032\uff7b" +
- "\033\uff7b\042\uff7b\043\uff7b\001\002\000\062\013\027\032" +
- "\031\033\040\035\034\036\113\037\141\040\047\041\051" +
- "\044\015\045\014\046\016\047\036\050\037\051\044\052" +
- "\170\053\166\054\043\055\041\056\035\057\030\060\045" +
- "\061\021\062\012\063\042\001\002\000\064\013\027\016" +
- "\020\032\031\033\040\035\034\036\113\037\141\040\047" +
- "\041\051\044\015\045\014\046\016\047\036\050\037\051" +
- "\044\052\025\053\050\054\043\055\041\056\035\057\030" +
- "\060\045\061\021\062\012\063\042\001\002\000\012\002" +
- "\ufff2\004\266\010\ufff2\021\265\001\002\000\016\002\uff7f" +
- "\004\uff7f\006\uff7f\010\uff7f\020\264\021\uff7f\001\002\000" +
- "\006\002\ufff6\010\ufff6\001\002\000\014\002\uffe2\004\uffe2" +
- "\006\uffe2\010\uffe2\021\uffe2\001\002\000\054\002\uff7d\004" +
- "\uff7d\006\uff7d\007\uff7d\010\uff7d\012\uff7d\013\uff7d\014\uff7d" +
- "\021\uff7d\022\uff7d\023\uff7d\024\uff7d\025\uff7d\026\uff7d\027" +
- "\uff7d\030\uff7d\031\uff7d\032\uff7d\033\uff7d\042\uff7d\043\uff7d" +
- "\001\002\000\056\002\uff85\004\uff85\006\uff85\007\uff85\010" +
- "\uff85\011\uff85\012\uff85\013\uff85\014\uff85\021\uff85\022\uff85" +
- "\023\uff85\024\uff85\025\uff85\026\uff85\027\uff85\030\uff85\031" +
- "\uff85\032\uff85\033\uff85\042\uff85\043\uff85\001\002\000\014" +
- "\002\uffed\004\uffed\006\055\010\uffed\021\uffed\001\002\000" +
- "\016\002\uff75\004\uff75\006\uff75\010\uff75\011\260\021\uff75" +
- "\001\002\000\056\002\uff86\004\uff86\006\uff86\007\uff86\010" +
- "\uff86\011\uff86\012\uff86\013\uff86\014\uff86\021\uff86\022\uff86" +
- "\023\uff86\024\uff86\025\uff86\026\uff86\027\uff86\030\uff86\031" +
- "\uff86\032\uff86\033\uff86\042\uff86\043\uff86\001\002\000\054" +
- "\002\uff7e\004\uff7e\006\uff7e\007\uff7e\010\uff7e\012\uff7e\013" +
- "\uff7e\014\uff7e\021\uff7e\022\uff7e\023\uff7e\024\uff7e\025\uff7e" +
- "\026\uff7e\027\uff7e\030\uff7e\031\uff7e\032\uff7e\033\uff7e\042" +
- "\uff7e\043\uff7e\001\002\000\054\002\uff77\004\uff77\006\uff77" +
- "\007\uff77\010\uff77\012\uff77\013\uff77\014\uff77\021\uff77\022" +
- "\uff77\023\uff77\024\uff77\025\uff77\026\uff77\027\uff77\030\uff77" +
- "\031\uff77\032\uff77\033\uff77\042\uff77\043\uff77\001\002\000" +
- "\054\002\uff76\004\uff76\006\uff76\007\uff76\010\uff76\012\uff76" +
- "\013\uff76\014\uff76\021\uff76\022\uff76\023\uff76\024\uff76\025" +
- "\uff76\026\uff76\027\uff76\030\uff76\031\uff76\032\uff76\033\uff76" +
- "\042\uff76\043\uff76\001\002\000\056\002\uff84\004\uff84\006" +
- "\uff84\007\uff84\010\uff84\011\uff84\012\uff84\013\uff84\014\uff84" +
- "\021\uff84\022\uff84\023\uff84\024\uff84\025\uff84\026\uff84\027" +
- "\uff84\030\uff84\031\uff84\032\uff84\033\uff84\042\uff84\043\uff84" +
- "\001\002\000\054\002\uff81\004\uff81\006\uff81\007\uff81\010" +
- "\uff81\012\uff81\013\uff81\014\uff81\021\uff81\022\uff81\023\uff81" +
- "\024\uff81\025\uff81\026\uff81\027\uff81\030\uff81\031\uff81\032" +
- "\uff81\033\uff81\042\uff81\043\uff81\001\002\000\054\002\uff78" +
- "\004\uff78\006\uff78\007\uff78\010\uff78\012\uff78\013\uff78\014" +
- "\uff78\021\uff78\022\uff78\023\uff78\024\uff78\025\uff78\026\uff78" +
- "\027\uff78\030\uff78\031\uff78\032\uff78\033\uff78\042\uff78\043" +
- "\uff78\001\002\000\054\002\uff82\004\uff82\006\uff82\007\uff82" +
- "\010\uff82\012\uff82\013\uff82\014\uff82\021\uff82\022\uff82\023" +
- "\uff82\024\uff82\025\uff82\026\uff82\027\uff82\030\uff82\031\uff82" +
- "\032\uff82\033\uff82\042\uff82\043\uff82\001\002\000\054\002" +
- "\uff79\004\uff79\006\uff79\007\uff79\010\uff79\012\uff79\013\uff79" +
- "\014\uff79\021\uff79\022\uff79\023\uff79\024\uff79\025\uff79\026" +
- "\uff79\027\uff79\030\uff79\031\uff79\032\uff79\033\uff79\042\uff79" +
- "\043\uff79\001\002\000\054\002\uff7c\004\uff7c\006\uff7c\007" +
- "\uff7c\010\uff7c\012\uff7c\013\uff7c\014\uff7c\021\uff7c\022\uff7c" +
- "\023\uff7c\024\uff7c\025\uff7c\026\uff7c\027\uff7c\030\uff7c\031" +
- "\uff7c\032\uff7c\033\uff7c\042\uff7c\043\uff7c\001\002\000\016" +
- "\002\uff83\004\uff83\006\uff83\010\uff83\011\253\021\uff83\001" +
- "\002\000\014\002\uffe5\004\uffe5\006\uffe5\010\uffe5\021\uffe5" +
- "\001\002\000\016\002\uff80\004\uff80\006\uff80\010\uff80\020" +
- "\252\021\uff80\001\002\000\014\002\uffe6\004\uffe6\006\uffe6" +
- "\010\uffe6\021\uffe6\001\002\000\014\002\uffe1\004\uffe1\006" +
- "\uffe1\010\uffe1\021\uffe1\001\002\000\014\002\uffef\004\uffef" +
- "\006\055\010\uffef\021\uffef\001\002\000\054\002\uffdd\004" +
- "\uffdd\006\055\007\uffdd\010\uffdd\012\uffdd\013\uffdd\014\uffdd" +
- "\021\uffdd\022\uffdd\023\uffdd\024\uffdd\025\uffdd\026\uffdd\027" +
- "\uffdd\030\uffdd\031\uffdd\032\uffdd\033\uffdd\042\uffdd\043\uffdd" +
- "\001\002\000\110\004\061\005\111\011\071\013\103\015" +
- "\134\016\066\017\106\021\070\031\075\032\031\033\040" +
- "\034\057\035\034\036\113\037\141\040\142\041\145\044" +
- "\063\045\062\046\065\047\115\050\123\051\136\052\077" +
- "\053\143\054\131\055\125\056\116\057\104\060\140\061" +
- "\067\062\060\063\127\064\132\065\072\001\002\000\012" +
- "\002\uffee\004\uffee\010\uffee\021\uffee\001\002\000\054\002" +
- "\uff9a\004\uff9a\006\uff9a\007\uff9a\010\uff9a\012\uff9a\013\uff9a" +
- "\014\uff9a\021\uff9a\022\uff9a\023\uff9a\024\uff9a\025\uff9a\026" +
- "\uff9a\027\uff9a\030\uff9a\031\uff9a\032\uff9a\033\uff9a\042\uff9a" +
- "\043\uff9a\001\002\000\060\002\uff7a\004\uff7a\006\uff7a\007" +
- "\uff7a\010\uff7a\011\uff7a\012\uff7a\013\uff7a\014\uff7a\020\uffa5" +
- "\021\uff7a\022\uff7a\023\uff7a\024\uff7a\025\uff7a\026\uff7a\027" +
- "\uff7a\030\uff7a\031\uff7a\032\uff7a\033\uff7a\042\uff7a\043\uff7a" +
- "\001\002\000\126\002\uffb9\005\111\007\uffb9\010\uffb9\012" +
- "\uffb9\013\103\014\uffb9\016\066\017\106\022\uffb9\023\uffb9" +
- "\024\uffb9\025\uffb9\026\uffb9\027\uffb9\030\uffb9\031\uffb9\032" +
- "\031\033\040\035\034\036\113\037\141\040\142\041\145" +
- "\042\uffb9\043\uffb9\044\063\045\062\046\065\047\115\050" +
- "\123\051\136\052\077\053\143\054\131\055\125\056\116" +
- "\057\104\060\140\061\067\062\060\063\127\001\002\000" +
- "\054\002\uff89\004\uff89\006\uff89\007\uff89\010\uff89\012\uff89" +
- "\013\uff89\014\uff89\021\uff89\022\uff89\023\uff89\024\uff89\025" +
- "\uff89\026\uff89\027\uff89\030\uff89\031\uff89\032\uff89\033\uff89" +
- "\042\uff89\043\uff89\001\002\000\054\002\uff8b\004\uff8b\006" +
- "\uff8b\007\uff8b\010\uff8b\012\uff8b\013\uff8b\014\uff8b\021\uff8b" +
- "\022\uff8b\023\uff8b\024\uff8b\025\uff8b\026\uff8b\027\uff8b\030" +
- "\uff8b\031\uff8b\032\uff8b\033\uff8b\042\uff8b\043\uff8b\001\002" +
- "\000\032\002\uffd5\007\uffd5\012\uffd5\014\uffd5\022\uffd5\023" +
- "\uffd5\024\221\025\222\026\223\027\224\042\uffd5\043\uffd5" +
- "\001\002\000\004\011\245\001\002\000\062\013\uffae\032" +
- "\uffae\033\uffae\035\uffae\036\uffae\037\uffae\040\uffae\041\uffae" +
- "\044\uffae\045\uffae\046\uffae\047\uffae\050\uffae\051\uffae\052" +
- "\uffae\053\uffae\054\uffae\055\uffae\056\uffae\057\uffae\060\uffae" +
- "\061\uffae\062\uffae\063\uffae\001\002\000\060\002\uff7b\004" +
- "\uff7b\006\uff7b\007\uff7b\010\uff7b\011\uff7b\012\uff7b\013\uff7b" +
- "\014\uff7b\020\uffa6\021\uff7b\022\uff7b\023\uff7b\024\uff7b\025" +
- "\uff7b\026\uff7b\027\uff7b\030\uff7b\031\uff7b\032\uff7b\033\uff7b" +
- "\042\uff7b\043\uff7b\001\002\000\070\005\111\013\103\016" +
- "\066\017\106\032\031\033\040\035\034\036\113\037\141" +
- "\040\142\041\145\044\063\045\062\046\065\047\115\050" +
- "\123\051\136\052\077\053\143\054\131\055\125\056\116" +
- "\057\104\060\140\061\067\062\060\063\127\001\002\000" +
- "\110\004\061\005\111\011\071\013\103\015\134\016\066" +
- "\017\106\021\070\031\075\032\031\033\040\034\057\035" +
- "\034\036\113\037\141\040\142\041\145\044\063\045\062" +
- "\046\065\047\115\050\123\051\136\052\077\053\143\054" +
- "\131\055\125\056\116\057\104\060\140\061\067\062\060" +
- "\063\127\064\132\065\072\001\002\000\054\002\uff99\004" +
- "\uff99\006\uff99\007\uff99\010\uff99\012\uff99\013\uff99\014\uff99" +
- "\021\uff99\022\uff99\023\uff99\024\uff99\025\uff99\026\uff99\027" +
- "\uff99\030\uff99\031\uff99\032\uff99\033\uff99\042\uff99\043\uff99" +
- "\001\002\000\046\002\uffb7\007\uffb7\010\uffb7\012\uffb7\013" +
- "\uffb7\014\uffb7\022\uffb7\023\uffb7\024\uffb7\025\uffb7\026\uffb7" +
- "\027\uffb7\030\uffb7\031\uffb7\032\uffb7\033\uffb7\042\uffb7\043" +
- "\uffb7\001\002\000\054\002\uff97\004\uff97\006\uff97\007\uff97" +
- "\010\uff97\012\uff97\013\uff97\014\uff97\021\uff97\022\uff97\023" +
- "\uff97\024\uff97\025\uff97\026\uff97\027\uff97\030\uff97\031\uff97" +
- "\032\uff97\033\uff97\042\uff97\043\uff97\001\002\000\110\004" +
- "\061\005\111\011\071\013\103\015\134\016\066\017\106" +
- "\021\070\031\075\032\031\033\040\034\057\035\034\036" +
- "\113\037\141\040\142\041\145\044\063\045\062\046\065" +
- "\047\115\050\123\051\136\052\077\053\143\054\131\055" +
- "\125\056\116\057\104\060\140\061\067\062\060\063\127" +
- "\064\132\065\072\001\002\000\016\002\uffd9\007\uffd9\012" +
- "\uffd9\014\uffd9\042\uffd9\043\234\001\002\000\060\002\uff7f" +
- "\004\uff7f\006\uff7f\007\uff7f\010\uff7f\011\uff7f\012\uff7f\013" +
- "\uff7f\014\uff7f\020\uffaa\021\uff7f\022\uff7f\023\uff7f\024\uff7f" +
- "\025\uff7f\026\uff7f\027\uff7f\030\uff7f\031\uff7f\032\uff7f\033" +
- "\uff7f\042\uff7f\043\uff7f\001\002\000\062\013\103\032\031" +
- "\033\040\035\034\036\113\037\141\040\142\041\145\044" +
- "\063\045\062\046\065\047\036\050\037\051\044\052\170" +
- "\053\166\054\043\055\041\056\035\057\030\060\045\061" +
- "\021\062\012\063\042\001\002\000\004\020\236\001\002" +
- "\000\014\002\uffda\007\uffda\012\uffda\014\uffda\042\232\001" +
- "\002\000\054\002\uff88\004\uff88\006\uff88\007\uff88\010\uff88" +
- "\012\uff88\013\uff88\014\uff88\021\uff88\022\uff88\023\uff88\024" +
- "\uff88\025\uff88\026\uff88\027\uff88\030\uff88\031\uff88\032\uff88" +
- "\033\uff88\042\uff88\043\uff88\001\002\000\060\002\uff7d\004" +
- "\uff7d\006\uff7d\007\uff7d\010\uff7d\011\uff7d\012\uff7d\013\uff7d" +
- "\014\uff7d\020\uffa8\021\uff7d\022\uff7d\023\uff7d\024\uff7d\025" +
- "\uff7d\026\uff7d\027\uff7d\030\uff7d\031\uff7d\032\uff7d\033\uff7d" +
- "\042\uff7d\043\uff7d\001\002\000\022\002\uffd7\007\uffd7\012" +
- "\uffd7\014\uffd7\022\216\023\217\042\uffd7\043\uffd7\001\002" +
- "\000\052\002\uff9f\004\uff9f\007\uff9f\010\uff9f\012\uff9f\013" +
- "\uff9f\014\uff9f\021\uff9f\022\uff9f\023\uff9f\024\uff9f\025\uff9f" +
- "\026\uff9f\027\uff9f\030\uff9f\031\uff9f\032\uff9f\033\uff9f\042" +
- "\uff9f\043\uff9f\001\002\000\054\002\uffb4\004\uffb4\006\055" +
- "\007\uffb4\010\uffb4\012\uffb4\013\uffb4\014\uffb4\021\uffb4\022" +
- "\uffb4\023\uffb4\024\uffb4\025\uffb4\026\uffb4\027\uffb4\030\uffb4" +
- "\031\uffb4\032\uffb4\033\uffb4\042\uffb4\043\uffb4\001\002\000" +
- "\046\002\uffbd\007\uffbd\010\uffbd\012\uffbd\013\uffbd\014\uffbd" +
- "\022\uffbd\023\uffbd\024\uffbd\025\uffbd\026\uffbd\027\uffbd\030" +
- "\uffbd\031\uffbd\032\uffbd\033\uffbd\042\uffbd\043\uffbd\001\002" +
- "\000\052\002\uffa0\004\uffa0\007\uffa0\010\uffa0\012\uffa0\013" +
- "\uffa0\014\uffa0\021\uffa0\022\uffa0\023\uffa0\024\uffa0\025\uffa0" +
- "\026\uffa0\027\uffa0\030\uffa0\031\uffa0\032\uffa0\033\uffa0\042" +
- "\uffa0\043\uffa0\001\002\000\036\002\uffd2\007\uffd2\012\uffd2" +
- "\014\uffd2\022\uffd2\023\uffd2\024\uffd2\025\uffd2\026\uffd2\027" +
- "\uffd2\030\211\031\212\042\uffd2\043\uffd2\001\002\000\056" +
- "\002\uff75\004\uff75\006\uff75\007\uff75\010\uff75\011\uff75\012" +
- "\uff75\013\uff75\014\uff75\021\uff75\022\uff75\023\uff75\024\uff75" +
- "\025\uff75\026\uff75\027\uff75\030\uff75\031\uff75\032\uff75\033" +
- "\uff75\042\uff75\043\uff75\001\002\000\044\002\uffca\007\uffca" +
- "\012\uffca\013\uffca\014\uffca\022\uffca\023\uffca\024\uffca\025" +
- "\uffca\026\uffca\027\uffca\030\uffca\031\uffca\032\uffca\033\uffca" +
- "\042\uffca\043\uffca\001\002\000\060\002\uff77\004\uff77\006" +
- "\uff77\007\uff77\010\uff77\011\uff77\012\uff77\013\uff77\014\uff77" +
- "\020\uffa2\021\uff77\022\uff77\023\uff77\024\uff77\025\uff77\026" +
- "\uff77\027\uff77\030\uff77\031\uff77\032\uff77\033\uff77\042\uff77" +
- "\043\uff77\001\002\000\060\002\uff7e\004\uff7e\006\uff7e\007" +
- "\uff7e\010\uff7e\011\uff7e\012\uff7e\013\uff7e\014\uff7e\020\uffa9" +
- "\021\uff7e\022\uff7e\023\uff7e\024\uff7e\025\uff7e\026\uff7e\027" +
- "\uff7e\030\uff7e\031\uff7e\032\uff7e\033\uff7e\042\uff7e\043\uff7e" +
- "\001\002\000\004\011\201\001\002\000\052\002\uffbc\004" +
- "\uffbc\007\uffbc\010\uffbc\012\uffbc\013\uffbc\014\uffbc\021\uffbc" +
- "\022\uffbc\023\uffbc\024\uffbc\025\uffbc\026\uffbc\027\uffbc\030" +
- "\uffbc\031\uffbc\032\uffbc\033\uffbc\042\uffbc\043\uffbc\001\002" +
- "\000\046\002\uffc2\007\uffc2\010\uffc2\012\uffc2\013\uffc2\014" +
- "\uffc2\022\uffc2\023\uffc2\024\uffc2\025\uffc2\026\uffc2\027\uffc2" +
- "\030\uffc2\031\uffc2\032\uffc2\033\uffc2\042\uffc2\043\uffc2\001" +
- "\002\000\054\002\uff9e\004\uff9e\006\055\007\uff9e\010\uff9e" +
- "\012\uff9e\013\uff9e\014\uff9e\021\uff9e\022\uff9e\023\uff9e\024" +
- "\uff9e\025\uff9e\026\uff9e\027\uff9e\030\uff9e\031\uff9e\032\uff9e" +
- "\033\uff9e\042\uff9e\043\uff9e\001\002\000\060\002\uff76\004" +
- "\uff76\006\uff76\007\uff76\010\uff76\011\uff76\012\uff76\013\uff76" +
- "\014\uff76\020\uffa1\021\uff76\022\uff76\023\uff76\024\uff76\025" +
- "\uff76\026\uff76\027\uff76\030\uff76\031\uff76\032\uff76\033\uff76" +
- "\042\uff76\043\uff76\001\002\000\046\002\uffc4\007\uffc4\010" +
- "\176\012\uffc4\013\uffc4\014\uffc4\022\uffc4\023\uffc4\024\uffc4" +
- "\025\uffc4\026\uffc4\027\uffc4\030\uffc4\031\uffc4\032\uffc4\033" +
- "\uffc4\042\uffc4\043\uffc4\001\002\000\060\002\uff81\004\uff81" +
- "\006\uff81\007\uff81\010\uff81\011\uff81\012\uff81\013\uff81\014" +
- "\uff81\020\uffac\021\uff81\022\uff81\023\uff81\024\uff81\025\uff81" +
- "\026\uff81\027\uff81\030\uff81\031\uff81\032\uff81\033\uff81\042" +
- "\uff81\043\uff81\001\002\000\054\002\uff9c\004\uff9c\006\uff9c" +
- "\007\uff9c\010\uff9c\012\uff9c\013\uff9c\014\uff9c\021\uff9c\022" +
- "\uff9c\023\uff9c\024\uff9c\025\uff9c\026\uff9c\027\uff9c\030\uff9c" +
- "\031\uff9c\032\uff9c\033\uff9c\042\uff9c\043\uff9c\001\002\000" +
- "\060\002\uff78\004\uff78\006\uff78\007\uff78\010\uff78\011\uff78" +
- "\012\uff78\013\uff78\014\uff78\020\uffa3\021\uff78\022\uff78\023" +
- "\uff78\024\uff78\025\uff78\026\uff78\027\uff78\030\uff78\031\uff78" +
- "\032\uff78\033\uff78\042\uff78\043\uff78\001\002\000\052\002" +
- "\uffc1\004\173\007\uffc1\010\uffc1\012\uffc1\013\uffc1\014\uffc1" +
- "\021\172\022\uffc1\023\uffc1\024\uffc1\025\uffc1\026\uffc1\027" +
- "\uffc1\030\uffc1\031\uffc1\032\uffc1\033\uffc1\042\uffc1\043\uffc1" +
- "\001\002\000\060\002\uff82\004\uff82\006\uff82\007\uff82\010" +
- "\uff82\011\uff82\012\uff82\013\uff82\014\uff82\020\uffad\021\uff82" +
- "\022\uff82\023\uff82\024\uff82\025\uff82\026\uff82\027\uff82\030" +
- "\uff82\031\uff82\032\uff82\033\uff82\042\uff82\043\uff82\001\002" +
- "\000\054\002\uff98\004\uff98\006\uff98\007\uff98\010\uff98\012" +
- "\uff98\013\uff98\014\uff98\021\uff98\022\uff98\023\uff98\024\uff98" +
- "\025\uff98\026\uff98\027\uff98\030\uff98\031\uff98\032\uff98\033" +
- "\uff98\042\uff98\043\uff98\001\002\000\004\007\171\001\002" +
- "\000\046\032\031\033\040\035\034\036\113\037\141\047" +
- "\036\050\037\051\044\052\170\053\166\054\043\055\041" +
- "\056\035\057\030\060\045\061\021\062\012\063\042\001" +
- "\002\000\052\002\uffba\004\uffba\007\uffba\010\uffba\012\uffba" +
- "\013\uffba\014\uffba\021\uffba\022\uffba\023\uffba\024\uffba\025" +
- "\uffba\026\uffba\027\uffba\030\uffba\031\uffba\032\uffba\033\uffba" +
- "\042\uffba\043\uffba\001\002\000\060\002\uff79\004\uff79\006" +
- "\uff79\007\uff79\010\uff79\011\uff79\012\uff79\013\uff79\014\uff79" +
- "\020\uffa4\021\uff79\022\uff79\023\uff79\024\uff79\025\uff79\026" +
- "\uff79\027\uff79\030\uff79\031\uff79\032\uff79\033\uff79\042\uff79" +
- "\043\uff79\001\002\000\052\002\uffb0\004\uffb0\007\uffb0\010" +
- "\uffb0\012\uffb0\013\uffb0\014\uffb0\021\uffb0\022\uffb0\023\uffb0" +
- "\024\uffb0\025\uffb0\026\uffb0\027\uffb0\030\uffb0\031\uffb0\032" +
- "\uffb0\033\uffb0\042\uffb0\043\uffb0\001\002\000\060\002\uff7c" +
- "\004\uff7c\006\uff7c\007\uff7c\010\uff7c\011\uff7c\012\uff7c\013" +
- "\uff7c\014\uff7c\020\uffa7\021\uff7c\022\uff7c\023\uff7c\024\uff7c" +
- "\025\uff7c\026\uff7c\027\uff7c\030\uff7c\031\uff7c\032\uff7c\033" +
- "\uff7c\042\uff7c\043\uff7c\001\002\000\056\002\uff83\004\uff83" +
- "\006\uff83\007\uff83\010\uff83\011\uff83\012\uff83\013\uff83\014" +
- "\uff83\021\uff83\022\uff83\023\uff83\024\uff83\025\uff83\026\uff83" +
- "\027\uff83\030\uff83\031\uff83\032\uff83\033\uff83\042\uff83\043" +
- "\uff83\001\002\000\054\002\uff8c\004\uff8c\006\uff8c\007\uff8c" +
- "\010\uff8c\012\uff8c\013\uff8c\014\uff8c\021\uff8c\022\uff8c\023" +
- "\uff8c\024\uff8c\025\uff8c\026\uff8c\027\uff8c\030\uff8c\031\uff8c" +
- "\032\uff8c\033\uff8c\042\uff8c\043\uff8c\001\002\000\060\002" +
- "\uff80\004\uff80\006\uff80\007\uff80\010\uff80\011\uff80\012\uff80" +
- "\013\uff80\014\uff80\020\uffab\021\uff80\022\uff80\023\uff80\024" +
- "\uff80\025\uff80\026\uff80\027\uff80\030\uff80\031\uff80\032\uff80" +
- "\033\uff80\042\uff80\043\uff80\001\002\000\044\002\uffc6\007" +
- "\uffc6\012\uffc6\013\uffc6\014\uffc6\022\uffc6\023\uffc6\024\uffc6" +
- "\025\uffc6\026\uffc6\027\uffc6\030\uffc6\031\uffc6\032\uffc6\033" +
- "\uffc6\042\uffc6\043\uffc6\001\002\000\054\002\uff8d\004\uff8d" +
- "\006\uff8d\007\uff8d\010\uff8d\012\uff8d\013\uff8d\014\uff8d\021" +
- "\uff8d\022\uff8d\023\uff8d\024\uff8d\025\uff8d\026\uff8d\027\uff8d" +
- "\030\uff8d\031\uff8d\032\uff8d\033\uff8d\042\uff8d\043\uff8d\001" +
- "\002\000\044\002\uffcd\007\uffcd\012\uffcd\013\160\014\uffcd" +
- "\022\uffcd\023\uffcd\024\uffcd\025\uffcd\026\uffcd\027\uffcd\030" +
- "\uffcd\031\uffcd\032\161\033\157\042\uffcd\043\uffcd\001\002" +
- "\000\052\002\uffbe\004\153\007\uffbe\010\uffbe\012\uffbe\013" +
- "\uffbe\014\uffbe\021\152\022\uffbe\023\uffbe\024\uffbe\025\uffbe" +
- "\026\uffbe\027\uffbe\030\uffbe\031\uffbe\032\uffbe\033\uffbe\042" +
- "\uffbe\043\uffbe\001\002\000\054\002\uff8e\004\uff8e\006\uff8e" +
- "\007\uff8e\010\uff8e\012\uff8e\013\uff8e\014\uff8e\021\uff8e\022" +
- "\uff8e\023\uff8e\024\uff8e\025\uff8e\026\uff8e\027\uff8e\030\uff8e" +
- "\031\uff8e\032\uff8e\033\uff8e\042\uff8e\043\uff8e\001\002\000" +
- "\056\002\uff87\004\uff87\006\uff87\007\uff87\010\uff87\011\uff91" +
- "\012\uff87\013\uff87\014\uff87\021\uff87\022\uff87\023\uff87\024" +
- "\uff87\025\uff87\026\uff87\027\uff87\030\uff87\031\uff87\032\uff87" +
- "\033\uff87\042\uff87\043\uff87\001\002\000\070\005\111\013" +
- "\103\016\066\017\106\032\031\033\040\035\034\036\113" +
- "\037\141\040\142\041\145\044\063\045\062\046\065\047" +
- "\115\050\123\051\136\052\077\053\143\054\131\055\125" +
- "\056\116\057\104\060\140\061\067\062\060\063\127\001" +
- "\002\000\070\005\111\013\103\016\066\017\106\032\031" +
- "\033\040\035\034\036\113\037\141\040\142\041\145\044" +
- "\063\045\062\046\065\047\115\050\123\051\136\052\077" +
- "\053\143\054\131\055\125\056\116\057\104\060\140\061" +
- "\067\062\060\063\127\001\002\000\054\002\uff87\004\uff87" +
- "\006\uff87\007\uff87\010\uff87\012\uff87\013\uff87\014\uff87\021" +
- "\uff87\022\uff87\023\uff87\024\uff87\025\uff87\026\uff87\027\uff87" +
- "\030\uff87\031\uff87\032\uff87\033\uff87\042\uff87\043\uff87\001" +
- "\002\000\052\002\uffbb\004\uffbb\007\uffbb\010\uffbb\012\uffbb" +
- "\013\uffbb\014\uffbb\021\uffbb\022\uffbb\023\uffbb\024\uffbb\025" +
- "\uffbb\026\uffbb\027\uffbb\030\uffbb\031\uffbb\032\uffbb\033\uffbb" +
- "\042\uffbb\043\uffbb\001\002\000\052\002\uffb6\004\uffb6\007" +
- "\uffb6\010\uffb6\012\uffb6\013\uffb6\014\uffb6\021\uffb6\022\uffb6" +
- "\023\uffb6\024\uffb6\025\uffb6\026\uffb6\027\uffb6\030\uffb6\031" +
- "\uffb6\032\uffb6\033\uffb6\042\uffb6\043\uffb6\001\002\000\110" +
- "\004\061\005\111\011\071\013\103\015\134\016\066\017" +
- "\106\021\070\031\075\032\031\033\040\034\057\035\034" +
- "\036\113\037\141\040\142\041\145\044\063\045\062\046" +
- "\065\047\115\050\123\051\136\052\077\053\143\054\131" +
- "\055\125\056\116\057\104\060\140\061\067\062\060\063" +
- "\127\064\132\065\072\001\002\000\110\004\061\005\111" +
- "\011\071\013\103\015\134\016\066\017\106\021\070\031" +
- "\075\032\031\033\040\034\057\035\034\036\113\037\141" +
- "\040\142\041\145\044\063\045\062\046\065\047\115\050" +
- "\123\051\136\052\077\053\143\054\131\055\125\056\116" +
- "\057\104\060\140\061\067\062\060\063\127\064\132\065" +
- "\072\001\002\000\110\004\061\005\111\011\071\013\103" +
- "\015\134\016\066\017\106\021\070\031\075\032\031\033" +
- "\040\034\057\035\034\036\113\037\141\040\142\041\145" +
- "\044\063\045\062\046\065\047\115\050\123\051\136\052" +
- "\077\053\143\054\131\055\125\056\116\057\104\060\140" +
- "\061\067\062\060\063\127\064\132\065\072\001\002\000" +
- "\044\002\uffc8\007\uffc8\012\uffc8\013\uffc8\014\uffc8\022\uffc8" +
- "\023\uffc8\024\uffc8\025\uffc8\026\uffc8\027\uffc8\030\uffc8\031" +
- "\uffc8\032\uffc8\033\uffc8\042\uffc8\043\uffc8\001\002\000\044" +
- "\002\uffc9\007\uffc9\012\uffc9\013\uffc9\014\uffc9\022\uffc9\023" +
- "\uffc9\024\uffc9\025\uffc9\026\uffc9\027\uffc9\030\uffc9\031\uffc9" +
- "\032\uffc9\033\uffc9\042\uffc9\043\uffc9\001\002\000\044\002" +
- "\uffc7\007\uffc7\012\uffc7\013\uffc7\014\uffc7\022\uffc7\023\uffc7" +
- "\024\uffc7\025\uffc7\026\uffc7\027\uffc7\030\uffc7\031\uffc7\032" +
- "\uffc7\033\uffc7\042\uffc7\043\uffc7\001\002\000\054\002\uff90" +
- "\004\uff90\006\uff90\007\uff90\010\uff90\012\uff90\013\uff90\014" +
- "\uff90\021\uff90\022\uff90\023\uff90\024\uff90\025\uff90\026\uff90" +
- "\027\uff90\030\uff90\031\uff90\032\uff90\033\uff90\042\uff90\043" +
- "\uff90\001\002\000\054\002\uff80\004\uff80\006\uff80\007\uff80" +
- "\010\uff80\012\uff80\013\uff80\014\uff80\021\uff80\022\uff80\023" +
- "\uff80\024\uff80\025\uff80\026\uff80\027\uff80\030\uff80\031\uff80" +
- "\032\uff80\033\uff80\042\uff80\043\uff80\001\002\000\054\002" +
- "\uff96\004\uff96\006\uff96\007\uff96\010\uff96\012\uff96\013\uff96" +
- "\014\uff96\021\uff96\022\uff96\023\uff96\024\uff96\025\uff96\026" +
- "\uff96\027\uff96\030\uff96\031\uff96\032\uff96\033\uff96\042\uff96" +
- "\043\uff96\001\002\000\054\002\uff7f\004\uff7f\006\uff7f\007" +
- "\uff7f\010\uff7f\012\uff7f\013\uff7f\014\uff7f\021\uff7f\022\uff7f" +
- "\023\uff7f\024\uff7f\025\uff7f\026\uff7f\027\uff7f\030\uff7f\031" +
- "\uff7f\032\uff7f\033\uff7f\042\uff7f\043\uff7f\001\002\000\054" +
- "\002\uffdb\004\uffdb\006\uffdb\007\uffdb\010\uffdb\012\uffdb\013" +
- "\uffdb\014\uffdb\021\uffdb\022\uffdb\023\uffdb\024\uffdb\025\uffdb" +
- "\026\uffdb\027\uffdb\030\uffdb\031\uffdb\032\uffdb\033\uffdb\042" +
- "\uffdb\043\uffdb\001\002\000\070\005\111\013\103\016\066" +
- "\017\106\032\031\033\040\035\034\036\113\037\141\040" +
- "\142\041\145\044\063\045\062\046\065\047\115\050\123" +
- "\051\136\052\077\053\143\054\131\055\125\056\116\057" +
- "\104\060\140\061\067\062\060\063\127\001\002\000\070" +
- "\005\111\013\103\016\066\017\106\032\031\033\040\035" +
- "\034\036\113\037\141\040\142\041\145\044\063\045\062" +
- "\046\065\047\115\050\123\051\136\052\077\053\143\054" +
- "\131\055\125\056\116\057\104\060\140\061\067\062\060" +
- "\063\127\001\002\000\052\002\uffc0\004\153\007\uffc0\010" +
- "\uffc0\012\uffc0\013\uffc0\014\uffc0\021\152\022\uffc0\023\uffc0" +
- "\024\uffc0\025\uffc0\026\uffc0\027\uffc0\030\uffc0\031\uffc0\032" +
- "\uffc0\033\uffc0\042\uffc0\043\uffc0\001\002\000\052\002\uffbf" +
- "\004\153\007\uffbf\010\uffbf\012\uffbf\013\uffbf\014\uffbf\021" +
- "\152\022\uffbf\023\uffbf\024\uffbf\025\uffbf\026\uffbf\027\uffbf" +
- "\030\uffbf\031\uffbf\032\uffbf\033\uffbf\042\uffbf\043\uffbf\001" +
- "\002\000\106\004\061\005\111\011\071\013\103\015\134" +
- "\016\066\017\106\021\070\032\031\033\040\034\057\035" +
- "\034\036\113\037\141\040\142\041\145\044\063\045\062" +
- "\046\065\047\115\050\123\051\136\052\077\053\143\054" +
- "\131\055\125\056\116\057\104\060\140\061\067\062\060" +
- "\063\127\064\132\065\072\001\002\000\044\002\uffc3\007" +
- "\uffc3\012\uffc3\013\uffc3\014\uffc3\022\uffc3\023\uffc3\024\uffc3" +
- "\025\uffc3\026\uffc3\027\uffc3\030\uffc3\031\uffc3\032\uffc3\033" +
- "\uffc3\042\uffc3\043\uffc3\001\002\000\052\002\uff9d\004\uff9d" +
- "\007\uff9d\010\uff9d\012\uff9d\013\uff9d\014\uff9d\021\uff9d\022" +
- "\uff9d\023\uff9d\024\uff9d\025\uff9d\026\uff9d\027\uff9d\030\uff9d" +
- "\031\uff9d\032\uff9d\033\uff9d\042\uff9d\043\uff9d\001\002\000" +
- "\112\004\061\005\111\011\071\012\202\013\103\015\134" +
- "\016\066\017\106\021\070\031\075\032\031\033\040\034" +
- "\057\035\034\036\113\037\141\040\142\041\145\044\063" +
- "\045\062\046\065\047\115\050\123\051\136\052\077\053" +
- "\143\054\131\055\125\056\116\057\104\060\140\061\067" +
- "\062\060\063\127\064\132\065\072\001\002\000\054\002" +
- "\uff95\004\uff95\006\uff95\007\uff95\010\uff95\012\uff95\013\uff95" +
- "\014\uff95\021\uff95\022\uff95\023\uff95\024\uff95\025\uff95\026" +
- "\uff95\027\uff95\030\uff95\031\uff95\032\uff95\033\uff95\042\uff95" +
- "\043\uff95\001\002\000\006\012\uff93\014\207\001\002\000" +
- "\006\012\uff8f\014\uff8f\001\002\000\004\012\206\001\002" +
- "\000\054\002\uff94\004\uff94\006\uff94\007\uff94\010\uff94\012" +
- "\uff94\013\uff94\014\uff94\021\uff94\022\uff94\023\uff94\024\uff94" +
- "\025\uff94\026\uff94\027\uff94\030\uff94\031\uff94\032\uff94\033" +
- "\uff94\042\uff94\043\uff94\001\002\000\110\004\061\005\111" +
- "\011\071\013\103\015\134\016\066\017\106\021\070\031" +
- "\075\032\031\033\040\034\057\035\034\036\113\037\141" +
- "\040\142\041\145\044\063\045\062\046\065\047\115\050" +
- "\123\051\136\052\077\053\143\054\131\055\125\056\116" +
- "\057\104\060\140\061\067\062\060\063\127\064\132\065" +
- "\072\001\002\000\004\012\uff92\001\002\000\110\004\061" +
- "\005\111\011\071\013\103\015\134\016\066\017\106\021" +
- "\070\031\075\032\031\033\040\034\057\035\034\036\113" +
- "\037\141\040\142\041\145\044\063\045\062\046\065\047" +
- "\115\050\123\051\136\052\077\053\143\054\131\055\125" +
- "\056\116\057\104\060\140\061\067\062\060\063\127\064" +
- "\132\065\072\001\002\000\110\004\061\005\111\011\071" +
- "\013\103\015\134\016\066\017\106\021\070\031\075\032" +
- "\031\033\040\034\057\035\034\036\113\037\141\040\142" +
- "\041\145\044\063\045\062\046\065\047\115\050\123\051" +
- "\136\052\077\053\143\054\131\055\125\056\116\057\104" +
- "\060\140\061\067\062\060\063\127\064\132\065\072\001" +
- "\002\000\044\002\uffcb\007\uffcb\012\uffcb\013\160\014\uffcb" +
- "\022\uffcb\023\uffcb\024\uffcb\025\uffcb\026\uffcb\027\uffcb\030" +
- "\uffcb\031\uffcb\032\161\033\157\042\uffcb\043\uffcb\001\002" +
- "\000\044\002\uffcc\007\uffcc\012\uffcc\013\160\014\uffcc\022" +
- "\uffcc\023\uffcc\024\uffcc\025\uffcc\026\uffcc\027\uffcc\030\uffcc" +
- "\031\uffcc\032\161\033\157\042\uffcc\043\uffcc\001\002\000" +
- "\052\002\uffb3\004\uffb3\007\uffb3\010\uffb3\012\uffb3\013\uffb3" +
- "\014\uffb3\021\uffb3\022\uffb3\023\uffb3\024\uffb3\025\uffb3\026" +
- "\uffb3\027\uffb3\030\uffb3\031\uffb3\032\uffb3\033\uffb3\042\uffb3" +
- "\043\uffb3\001\002\000\110\004\061\005\111\011\071\013" +
- "\103\015\134\016\066\017\106\021\070\031\075\032\031" +
- "\033\040\034\057\035\034\036\113\037\141\040\142\041" +
- "\145\044\063\045\062\046\065\047\115\050\123\051\136" +
- "\052\077\053\143\054\131\055\125\056\116\057\104\060" +
- "\140\061\067\062\060\063\127\064\132\065\072\001\002" +
- "\000\110\004\061\005\111\011\071\013\103\015\134\016" +
- "\066\017\106\021\070\031\075\032\031\033\040\034\057" +
- "\035\034\036\113\037\141\040\142\041\145\044\063\045" +
- "\062\046\065\047\115\050\123\051\136\052\077\053\143" +
- "\054\131\055\125\056\116\057\104\060\140\061\067\062" +
- "\060\063\127\064\132\065\072\001\002\000\032\002\uffd3" +
- "\007\uffd3\012\uffd3\014\uffd3\022\uffd3\023\uffd3\024\221\025" +
- "\222\026\223\027\224\042\uffd3\043\uffd3\001\002\000\110" +
- "\004\061\005\111\011\071\013\103\015\134\016\066\017" +
- "\106\021\070\031\075\032\031\033\040\034\057\035\034" +
- "\036\113\037\141\040\142\041\145\044\063\045\062\046" +
- "\065\047\115\050\123\051\136\052\077\053\143\054\131" +
- "\055\125\056\116\057\104\060\140\061\067\062\060\063" +
- "\127\064\132\065\072\001\002\000\110\004\061\005\111" +
- "\011\071\013\103\015\134\016\066\017\106\021\070\031" +
- "\075\032\031\033\040\034\057\035\034\036\113\037\141" +
- "\040\142\041\145\044\063\045\062\046\065\047\115\050" +
- "\123\051\136\052\077\053\143\054\131\055\125\056\116" +
- "\057\104\060\140\061\067\062\060\063\127\064\132\065" +
- "\072\001\002\000\110\004\061\005\111\011\071\013\103" +
- "\015\134\016\066\017\106\021\070\031\075\032\031\033" +
- "\040\034\057\035\034\036\113\037\141\040\142\041\145" +
- "\044\063\045\062\046\065\047\115\050\123\051\136\052" +
- "\077\053\143\054\131\055\125\056\116\057\104\060\140" +
- "\061\067\062\060\063\127\064\132\065\072\001\002\000" +
- "\110\004\061\005\111\011\071\013\103\015\134\016\066" +
- "\017\106\021\070\031\075\032\031\033\040\034\057\035" +
- "\034\036\113\037\141\040\142\041\145\044\063\045\062" +
- "\046\065\047\115\050\123\051\136\052\077\053\143\054" +
- "\131\055\125\056\116\057\104\060\140\061\067\062\060" +
- "\063\127\064\132\065\072\001\002\000\036\002\uffce\007" +
- "\uffce\012\uffce\014\uffce\022\uffce\023\uffce\024\uffce\025\uffce" +
- "\026\uffce\027\uffce\030\211\031\212\042\uffce\043\uffce\001" +
- "\002\000\036\002\uffcf\007\uffcf\012\uffcf\014\uffcf\022\uffcf" +
- "\023\uffcf\024\uffcf\025\uffcf\026\uffcf\027\uffcf\030\211\031" +
- "\212\042\uffcf\043\uffcf\001\002\000\036\002\uffd0\007\uffd0" +
- "\012\uffd0\014\uffd0\022\uffd0\023\uffd0\024\uffd0\025\uffd0\026" +
- "\uffd0\027\uffd0\030\211\031\212\042\uffd0\043\uffd0\001\002" +
- "\000\036\002\uffd1\007\uffd1\012\uffd1\014\uffd1\022\uffd1\023" +
- "\uffd1\024\uffd1\025\uffd1\026\uffd1\027\uffd1\030\211\031\212" +
- "\042\uffd1\043\uffd1\001\002\000\032\002\uffd4\007\uffd4\012" +
- "\uffd4\014\uffd4\022\uffd4\023\uffd4\024\221\025\222\026\223" +
- "\027\224\042\uffd4\043\uffd4\001\002\000\110\004\061\005" +
- "\111\011\071\013\103\015\134\016\066\017\106\021\070" +
- "\031\075\032\031\033\040\034\057\035\034\036\113\037" +
- "\141\040\142\041\145\044\063\045\062\046\065\047\115" +
- "\050\123\051\136\052\077\053\143\054\131\055\125\056" +
- "\116\057\104\060\140\061\067\062\060\063\127\064\132" +
- "\065\072\001\002\000\016\002\uffd8\007\uffd8\012\uffd8\014" +
- "\uffd8\042\uffd8\043\234\001\002\000\110\004\061\005\111" +
- "\011\071\013\103\015\134\016\066\017\106\021\070\031" +
- "\075\032\031\033\040\034\057\035\034\036\113\037\141" +
- "\040\142\041\145\044\063\045\062\046\065\047\115\050" +
- "\123\051\136\052\077\053\143\054\131\055\125\056\116" +
- "\057\104\060\140\061\067\062\060\063\127\064\132\065" +
- "\072\001\002\000\022\002\uffd6\007\uffd6\012\uffd6\014\uffd6" +
- "\022\216\023\217\042\uffd6\043\uffd6\001\002\000\062\013" +
- "\uffaf\032\uffaf\033\uffaf\035\uffaf\036\uffaf\037\uffaf\040\uffaf" +
- "\041\uffaf\044\uffaf\045\uffaf\046\uffaf\047\uffaf\050\uffaf\051" +
- "\uffaf\052\uffaf\053\uffaf\054\uffaf\055\uffaf\056\uffaf\057\uffaf" +
- "\060\uffaf\061\uffaf\062\uffaf\063\uffaf\001\002\000\054\002" +
- "\uffb1\004\uffb1\006\055\007\uffb1\010\uffb1\012\uffb1\013\uffb1" +
- "\014\uffb1\021\uffb1\022\uffb1\023\uffb1\024\uffb1\025\uffb1\026" +
- "\uffb1\027\uffb1\030\uffb1\031\uffb1\032\uffb1\033\uffb1\042\uffb1" +
- "\043\uffb1\001\002\000\052\002\uffb2\004\uffb2\007\uffb2\010" +
- "\uffb2\012\uffb2\013\uffb2\014\uffb2\021\uffb2\022\uffb2\023\uffb2" +
- "\024\uffb2\025\uffb2\026\uffb2\027\uffb2\030\uffb2\031\uffb2\032" +
- "\uffb2\033\uffb2\042\uffb2\043\uffb2\001\002\000\044\002\uffc5" +
- "\007\uffc5\012\uffc5\013\uffc5\014\uffc5\022\uffc5\023\uffc5\024" +
- "\uffc5\025\uffc5\026\uffc5\027\uffc5\030\uffc5\031\uffc5\032\uffc5" +
- "\033\uffc5\042\uffc5\043\uffc5\001\002\000\004\012\243\001" +
- "\002\000\054\002\uff9b\004\uff9b\006\uff9b\007\uff9b\010\uff9b" +
- "\012\uff9b\013\uff9b\014\uff9b\021\uff9b\022\uff9b\023\uff9b\024" +
- "\uff9b\025\uff9b\026\uff9b\027\uff9b\030\uff9b\031\uff9b\032\uff9b" +
- "\033\uff9b\042\uff9b\043\uff9b\001\002\000\052\002\uffb5\004" +
- "\153\007\uffb5\010\uffb5\012\uffb5\013\uffb5\014\uffb5\021\152" +
- "\022\uffb5\023\uffb5\024\uffb5\025\uffb5\026\uffb5\027\uffb5\030" +
- "\uffb5\031\uffb5\032\uffb5\033\uffb5\042\uffb5\043\uffb5\001\002" +
- "\000\004\034\246\001\002\000\004\012\247\001\002\000" +
- "\054\002\uff8a\004\uff8a\006\uff8a\007\uff8a\010\uff8a\012\uff8a" +
- "\013\uff8a\014\uff8a\021\uff8a\022\uff8a\023\uff8a\024\uff8a\025" +
- "\uff8a\026\uff8a\027\uff8a\030\uff8a\031\uff8a\032\uff8a\033\uff8a" +
- "\042\uff8a\043\uff8a\001\002\000\052\002\uffb8\004\153\007" +
- "\uffb8\010\uffb8\012\uffb8\013\uffb8\014\uffb8\021\152\022\uffb8" +
- "\023\uffb8\024\uffb8\025\uffb8\026\uffb8\027\uffb8\030\uffb8\031" +
- "\uffb8\032\uffb8\033\uffb8\042\uffb8\043\uffb8\001\002\000\052" +
- "\002\uffdc\004\uffdc\007\uffdc\010\uffdc\012\uffdc\013\uffdc\014" +
- "\uffdc\021\uffdc\022\uffdc\023\uffdc\024\uffdc\025\uffdc\026\uffdc" +
- "\027\uffdc\030\uffdc\031\uffdc\032\uffdc\033\uffdc\042\uffdc\043" +
- "\uffdc\001\002\000\062\013\uffde\032\uffde\033\uffde\035\uffde" +
- "\036\uffde\037\uffde\040\uffde\041\uffde\044\uffde\045\uffde\046" +
- "\uffde\047\uffde\050\uffde\051\uffde\052\uffde\053\uffde\054\uffde" +
- "\055\uffde\056\uffde\057\uffde\060\uffde\061\uffde\062\uffde\063" +
- "\uffde\001\002\000\004\034\254\001\002\000\004\014\255" +
- "\001\002\000\004\034\256\001\002\000\004\012\257\001" +
- "\002\000\012\002\ufff4\004\ufff4\010\ufff4\021\ufff4\001\002" +
- "\000\004\034\261\001\002\000\004\012\262\001\002\000" +
- "\012\002\ufff5\004\ufff5\010\ufff5\021\ufff5\001\002\000\012" +
- "\002\uffec\004\uffec\010\uffec\021\uffec\001\002\000\062\013" +
- "\uffdf\032\uffdf\033\uffdf\035\uffdf\036\uffdf\037\uffdf\040\uffdf" +
- "\041\uffdf\044\uffdf\045\uffdf\046\uffdf\047\uffdf\050\uffdf\051" +
- "\uffdf\052\uffdf\053\uffdf\054\uffdf\055\uffdf\056\uffdf\057\uffdf" +
- "\060\uffdf\061\uffdf\062\uffdf\063\uffdf\001\002\000\064\013" +
- "\027\016\020\032\031\033\040\035\034\036\113\037\141" +
- "\040\047\041\051\044\015\045\014\046\016\047\036\050" +
- "\037\051\044\052\025\053\050\054\043\055\041\056\035" +
- "\057\030\060\045\061\021\062\012\063\042\001\002\000" +
- "\064\013\027\016\020\032\031\033\040\035\034\036\113" +
- "\037\141\040\047\041\051\044\015\045\014\046\016\047" +
- "\036\050\037\051\044\052\025\053\050\054\043\055\041" +
- "\056\035\057\030\060\045\061\021\062\012\063\042\001" +
- "\002\000\006\002\ufff1\010\ufff1\001\002\000\006\002\ufff0" +
- "\010\ufff0\001\002\000\006\002\ufff7\010\ufff7\001\002\000" +
- "\014\002\uffe9\004\uffe9\006\055\010\uffe9\021\uffe9\001\002" +
- "\000\014\002\uffeb\004\uffeb\006\055\010\uffeb\021\uffeb\001" +
- "\002\000\012\002\uffea\004\uffea\010\uffea\021\uffea\001\002" +
- "\000\012\002\uffe8\004\uffe8\010\uffe8\021\uffe8\001\002\000" +
- "\064\013\027\016\020\032\031\033\040\035\034\036\113" +
- "\037\141\040\047\041\051\044\015\045\014\046\016\047" +
- "\036\050\037\051\044\052\025\053\050\054\043\055\041" +
- "\056\035\057\030\060\045\061\021\062\012\063\042\001" +
- "\002\000\064\013\027\016\020\032\031\033\040\035\034" +
- "\036\113\037\141\040\047\041\051\044\015\045\014\046" +
- "\016\047\036\050\037\051\044\052\025\053\050\054\043" +
- "\055\041\056\035\057\030\060\045\061\021\062\012\063" +
- "\042\001\002\000\006\002\ufff9\010\ufff9\001\002\000\006" +
- "\002\ufff8\010\ufff8\001\002\000\004\034\303\001\002\000" +
- "\004\012\304\001\002\000\014\002\ufff3\004\ufff3\006\ufff3" +
- "\010\ufff3\021\ufff3\001\002\000\006\002\ufffb\010\ufffb\001" +
- "\002\000\070\004\013\013\027\016\020\021\023\032\031" +
- "\033\040\035\034\036\033\037\046\040\047\041\051\044" +
- "\015\045\014\046\016\047\036\050\037\051\044\052\025" +
- "\053\050\054\043\055\041\056\035\057\030\060\045\061" +
- "\021\062\012\063\042\001\002\000\004\002\ufffd\001\002" +
- "\000\004\002\uffff\001\002\000\004\002\001\001\002" });
+ /**
+ * Production table.
+ */
+ protected static final short _production_table[][]
+ = unpackFromStrings(new String[]{
+ "\000\215\000\002\002\004\000\002\003\004\000\002\003"
+ + "\004\000\002\036\003\000\002\036\005\000\002\037\003"
+ + "\000\002\037\004\000\002\037\003\000\002\037\005\000"
+ + "\002\037\005\000\002\037\004\000\002\037\003\000\002"
+ + "\035\006\000\002\035\010\000\002\040\006\000\002\041"
+ + "\003\000\002\041\005\000\002\041\005\000\002\042\003"
+ + "\000\002\042\004\000\002\042\003\000\002\042\004\000"
+ + "\002\042\004\000\002\042\005\000\002\042\004\000\002"
+ + "\042\005\000\002\043\003\000\002\043\003\000\002\043"
+ + "\003\000\002\043\003\000\002\043\003\000\002\044\003"
+ + "\000\002\044\003\000\002\054\003\000\002\054\004\000"
+ + "\002\054\004\000\002\045\003\000\002\045\004\000\002"
+ + "\007\005\000\002\004\003\000\002\012\003\000\002\012"
+ + "\005\000\002\013\003\000\002\013\005\000\002\014\003"
+ + "\000\002\014\005\000\002\014\005\000\002\015\003\000"
+ + "\002\015\005\000\002\015\005\000\002\015\005\000\002"
+ + "\015\005\000\002\016\003\000\002\016\005\000\002\016"
+ + "\005\000\002\017\003\000\002\017\005\000\002\017\005"
+ + "\000\002\017\005\000\002\020\003\000\002\020\004\000"
+ + "\002\024\003\000\002\024\005\000\002\025\003\000\002"
+ + "\025\003\000\002\025\005\000\002\025\005\000\002\006"
+ + "\003\000\002\006\003\000\002\027\003\000\002\027\005"
+ + "\000\002\027\003\000\002\031\003\000\002\031\004\000"
+ + "\002\031\003\000\002\030\005\000\002\032\004\000\002"
+ + "\011\003\000\002\011\004\000\002\011\005\000\002\011"
+ + "\004\000\002\011\003\000\002\053\004\000\002\053\003"
+ + "\000\002\052\003\000\002\052\003\000\002\052\003\000"
+ + "\002\052\003\000\002\052\003\000\002\052\003\000\002"
+ + "\052\003\000\002\052\003\000\002\052\003\000\002\052"
+ + "\003\000\002\052\003\000\002\052\003\000\002\052\003"
+ + "\000\002\026\003\000\002\026\003\000\002\010\003\000"
+ + "\002\010\004\000\002\023\003\000\002\023\005\000\002"
+ + "\023\003\000\002\023\003\000\002\023\003\000\002\023"
+ + "\003\000\002\021\004\000\002\022\005\000\002\022\006"
+ + "\000\002\046\003\000\002\046\005\000\002\050\003\000"
+ + "\002\051\003\000\002\005\003\000\002\033\003\000\002"
+ + "\033\003\000\002\033\003\000\002\033\003\000\002\033"
+ + "\006\000\002\033\003\000\002\034\003\000\002\034\003"
+ + "\000\002\047\003\000\002\047\003\000\002\047\003\000"
+ + "\002\047\003\000\002\047\003\000\002\047\003\000\002"
+ + "\047\003\000\002\047\003\000\002\047\003\000\002\047"
+ + "\003\000\002\047\003\000\002\047\003\000\002\047\003"
+ + "\000\002\047\003\000\002\047\003\000\002\047\003\000"
+ + "\002\047\003\000\002\047\003"});
- /** Access to parse-action table. */
- public short[][] action_table() {return _action_table;}
-
- /** <code>reduce_goto</code> table. */
- protected static final short[][] _reduce_table =
- unpackFromStrings(new String[] {
- "\000\307\000\004\003\003\001\001\000\002\001\001\000" +
- "\070\004\307\006\120\010\127\011\117\012\101\013\075" +
- "\014\104\015\063\016\111\017\145\020\113\021\125\022" +
- "\073\023\121\024\143\025\123\026\136\027\146\030\134" +
- "\031\107\032\072\033\106\034\147\047\150\050\116\052" +
- "\100\053\077\001\001\000\026\035\016\036\007\037\006" +
- "\040\031\041\025\042\023\043\052\044\010\047\051\054" +
- "\021\001\001\000\002\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\020\040\031\041\304\042" +
- "\023\043\052\044\010\047\051\054\021\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\012\040\271" +
- "\043\272\044\010\047\051\001\001\000\020\040\031\041" +
- "\270\042\023\043\052\044\010\047\051\054\021\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\002\001\001\000\002\001\001\000\002\001\001\000\006" +
- "\007\053\045\262\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\002\001\001\000\002\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\006\007\053\045\055\001\001" +
- "\000\006\007\053\045\250\001\001\000\070\004\132\006" +
- "\120\010\127\011\117\012\101\013\075\014\104\015\063" +
- "\016\111\017\145\020\113\021\125\022\073\023\121\024" +
- "\143\025\123\026\136\027\146\030\134\031\107\032\072" +
- "\033\106\034\147\047\150\050\116\052\100\053\077\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\024\011\117\026\136\027\247\030\134\033\106\034" +
- "\147\047\153\052\100\053\077\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\002\001\001\000\002\001\001\000\024\011\117\026\136" +
- "\027\243\030\134\033\106\034\147\047\153\052\100\053" +
- "\077\001\001\000\070\004\241\006\120\010\127\011\117" +
- "\012\101\013\075\014\104\015\063\016\111\017\145\020" +
- "\113\021\125\022\073\023\121\024\143\025\123\026\136" +
- "\027\146\030\134\031\107\032\072\033\106\034\147\047" +
- "\150\050\116\052\100\053\077\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\052\006\120\010" +
- "\127\011\117\020\240\021\125\022\073\023\121\024\143" +
- "\025\123\026\136\027\146\030\134\031\107\032\072\033" +
- "\106\034\147\047\150\050\116\052\100\053\077\001\001" +
- "\000\002\001\001\000\002\001\001\000\010\033\236\034" +
- "\147\047\153\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\002\001\001\000\006\007\053\045\214\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\006\007\053\045\177\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\006\047\164\051\166\001\001\000" +
- "\002\001\001\000\002\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\020\011\155\026\136\033\106\034\147\047\153\052\100" +
- "\053\077\001\001\000\020\011\154\026\136\033\106\034" +
- "\147\047\153\052\100\053\077\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\052\006\120\010" +
- "\127\011\117\020\163\021\125\022\073\023\121\024\143" +
- "\025\123\026\136\027\146\030\134\031\107\032\072\033" +
- "\106\034\147\047\150\050\116\052\100\053\077\001\001" +
- "\000\052\006\120\010\127\011\117\020\162\021\125\022" +
- "\073\023\121\024\143\025\123\026\136\027\146\030\134" +
- "\031\107\032\072\033\106\034\147\047\150\050\116\052" +
- "\100\053\077\001\001\000\052\006\120\010\127\011\117" +
- "\020\161\021\125\022\073\023\121\024\143\025\123\026" +
- "\136\027\146\030\134\031\107\032\072\033\106\034\147" +
- "\047\150\050\116\052\100\053\077\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\002\001\001\000\024\011\117\026\136\027\174\030\134" +
- "\033\106\034\147\047\153\052\100\053\077\001\001\000" +
- "\024\011\117\026\136\027\173\030\134\033\106\034\147" +
- "\047\153\052\100\053\077\001\001\000\002\001\001\000" +
- "\002\001\001\000\050\006\120\010\127\011\117\021\125" +
- "\022\073\023\121\024\176\025\123\026\136\027\146\030" +
- "\134\031\107\032\072\033\106\034\147\047\150\050\116" +
- "\052\100\053\077\001\001\000\002\001\001\000\002\001" +
- "\001\000\074\004\203\005\202\006\120\010\127\011\117" +
- "\012\101\013\075\014\104\015\063\016\111\017\145\020" +
- "\113\021\125\022\073\023\121\024\143\025\123\026\136" +
- "\027\146\030\134\031\107\032\072\033\106\034\147\046" +
- "\204\047\150\050\116\052\100\053\077\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\074\004\203\005\202\006\120" +
- "\010\127\011\117\012\101\013\075\014\104\015\063\016" +
- "\111\017\145\020\113\021\125\022\073\023\121\024\143" +
- "\025\123\026\136\027\146\030\134\031\107\032\072\033" +
- "\106\034\147\046\207\047\150\050\116\052\100\053\077" +
- "\001\001\000\002\001\001\000\054\006\120\010\127\011" +
- "\117\017\213\020\113\021\125\022\073\023\121\024\143" +
- "\025\123\026\136\027\146\030\134\031\107\032\072\033" +
- "\106\034\147\047\150\050\116\052\100\053\077\001\001" +
- "\000\054\006\120\010\127\011\117\017\212\020\113\021" +
- "\125\022\073\023\121\024\143\025\123\026\136\027\146" +
- "\030\134\031\107\032\072\033\106\034\147\047\150\050" +
- "\116\052\100\053\077\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\060\006\120\010\127\011" +
- "\117\015\230\016\111\017\145\020\113\021\125\022\073" +
- "\023\121\024\143\025\123\026\136\027\146\030\134\031" +
- "\107\032\072\033\106\034\147\047\150\050\116\052\100" +
- "\053\077\001\001\000\060\006\120\010\127\011\117\015" +
- "\217\016\111\017\145\020\113\021\125\022\073\023\121" +
- "\024\143\025\123\026\136\027\146\030\134\031\107\032" +
- "\072\033\106\034\147\047\150\050\116\052\100\053\077" +
- "\001\001\000\002\001\001\000\056\006\120\010\127\011" +
- "\117\016\227\017\145\020\113\021\125\022\073\023\121" +
- "\024\143\025\123\026\136\027\146\030\134\031\107\032" +
- "\072\033\106\034\147\047\150\050\116\052\100\053\077" +
- "\001\001\000\056\006\120\010\127\011\117\016\226\017" +
- "\145\020\113\021\125\022\073\023\121\024\143\025\123" +
- "\026\136\027\146\030\134\031\107\032\072\033\106\034" +
- "\147\047\150\050\116\052\100\053\077\001\001\000\056" +
- "\006\120\010\127\011\117\016\225\017\145\020\113\021" +
- "\125\022\073\023\121\024\143\025\123\026\136\027\146" +
- "\030\134\031\107\032\072\033\106\034\147\047\150\050" +
- "\116\052\100\053\077\001\001\000\056\006\120\010\127" +
- "\011\117\016\224\017\145\020\113\021\125\022\073\023" +
- "\121\024\143\025\123\026\136\027\146\030\134\031\107" +
- "\032\072\033\106\034\147\047\150\050\116\052\100\053" +
- "\077\001\001\000\002\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\064\006" +
- "\120\010\127\011\117\013\232\014\104\015\063\016\111" +
- "\017\145\020\113\021\125\022\073\023\121\024\143\025" +
- "\123\026\136\027\146\030\134\031\107\032\072\033\106" +
- "\034\147\047\150\050\116\052\100\053\077\001\001\000" +
- "\002\001\001\000\062\006\120\010\127\011\117\014\234" +
- "\015\063\016\111\017\145\020\113\021\125\022\073\023" +
- "\121\024\143\025\123\026\136\027\146\030\134\031\107" +
- "\032\072\033\106\034\147\047\150\050\116\052\100\053" +
- "\077\001\001\000\002\001\001\000\002\001\001\000\006" +
- "\007\053\045\237\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\002\001\001\000\002\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\002\001\001\000\002\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\002\001\001\000" +
- "\020\040\031\041\267\042\023\043\052\044\010\047\051" +
- "\054\021\001\001\000\020\040\031\041\266\042\023\043" +
- "\052\044\010\047\051\054\021\001\001\000\002\001\001" +
- "\000\002\001\001\000\002\001\001\000\006\007\053\045" +
- "\274\001\001\000\006\007\053\045\273\001\001\000\002" +
- "\001\001\000\002\001\001\000\020\040\031\041\300\042" +
- "\023\043\052\044\010\047\051\054\021\001\001\000\020" +
- "\040\031\041\277\042\023\043\052\044\010\047\051\054" +
- "\021\001\001\000\002\001\001\000\002\001\001\000\002" +
- "\001\001\000\002\001\001\000\002\001\001\000\002\001" +
- "\001\000\026\035\016\036\306\037\006\040\031\041\025" +
- "\042\023\043\052\044\010\047\051\054\021\001\001\000" +
- "\002\001\001\000\002\001\001\000\002\001\001" });
-
- /** Access to <code>reduce_goto</code> table. */
- public short[][] reduce_table() {return _reduce_table;}
-
- /** Instance of action encapsulation class. */
- protected parser_actions action_obj;
-
- /** Action encapsulation object initializer. */
- protected void init_actions()
- {
- action_obj = new parser_actions(this);
+ /**
+ * Access to production table.
+ */
+ public short[][] production_table() {
+ return _production_table;
}
- /** Invoke a user supplied parse action. */
- public Symbol do_action(
- int act_num,
- lr_parser parser,
- Stack<Symbol> stack,
- int top)
- throws java.lang.Exception
- {
- /* call code in generated class */
- return action_obj.parser_do_action(act_num, parser, stack, top);
- }
+ /**
+ * Parse-action table.
+ */
+ protected static final short[][] _action_table
+ = unpackFromStrings(new String[]{
+ "\000\307\000\006\066\006\067\005\001\002\000\004\002"
+ + "\311\001\002\000\110\004\061\005\111\011\071\013\103"
+ + "\015\134\016\066\017\106\021\070\031\075\032\031\033"
+ + "\040\034\057\035\034\036\113\037\141\040\142\041\145"
+ + "\044\063\045\062\046\065\047\115\050\123\051\136\052"
+ + "\077\053\143\054\131\055\125\056\116\057\104\060\140"
+ + "\061\067\062\060\063\127\064\132\065\072\001\002\000"
+ + "\070\004\013\013\027\016\020\021\023\032\031\033\040"
+ + "\035\034\036\033\037\046\040\047\041\051\044\015\045"
+ + "\014\046\016\047\036\050\037\051\044\052\025\053\050"
+ + "\054\043\055\041\056\035\057\030\060\045\061\021\062"
+ + "\012\063\042\001\002\000\006\002\ufffe\010\306\001\002"
+ + "\000\004\002\000\001\002\000\014\002\uffe7\004\uffe7\006"
+ + "\uffe7\010\uffe7\021\uffe7\001\002\000\054\002\uff7a\004\uff7a"
+ + "\006\uff7a\007\uff7a\010\uff7a\012\uff7a\013\uff7a\014\uff7a\021"
+ + "\uff7a\022\uff7a\023\uff7a\024\uff7a\025\uff7a\026\uff7a\027\uff7a"
+ + "\030\uff7a\031\uff7a\032\uff7a\033\uff7a\042\uff7a\043\uff7a\001"
+ + "\002\000\070\002\ufffc\010\ufffc\013\027\016\020\032\031"
+ + "\033\040\035\034\036\113\037\141\040\047\041\051\044"
+ + "\015\045\014\046\016\047\036\050\037\051\044\052\025"
+ + "\053\050\054\043\055\041\056\035\057\030\060\045\061"
+ + "\021\062\012\063\042\001\002\000\014\002\uffe3\004\uffe3"
+ + "\006\uffe3\010\uffe3\021\uffe3\001\002\000\014\002\uffe4\004"
+ + "\uffe4\006\uffe4\010\uffe4\021\uffe4\001\002\000\004\011\302"
+ + "\001\002\000\012\002\ufffa\004\277\010\ufffa\021\276\001"
+ + "\002\000\062\013\uffe0\032\uffe0\033\uffe0\035\uffe0\036\uffe0"
+ + "\037\uffe0\040\uffe0\041\uffe0\044\uffe0\045\uffe0\046\uffe0\047"
+ + "\uffe0\050\uffe0\051\uffe0\052\uffe0\053\uffe0\054\uffe0\055\uffe0"
+ + "\056\uffe0\057\uffe0\060\uffe0\061\uffe0\062\uffe0\063\uffe0\001"
+ + "\002\000\054\002\uff7b\004\uff7b\006\uff7b\007\uff7b\010\uff7b"
+ + "\012\uff7b\013\uff7b\014\uff7b\021\uff7b\022\uff7b\023\uff7b\024"
+ + "\uff7b\025\uff7b\026\uff7b\027\uff7b\030\uff7b\031\uff7b\032\uff7b"
+ + "\033\uff7b\042\uff7b\043\uff7b\001\002\000\062\013\027\032"
+ + "\031\033\040\035\034\036\113\037\141\040\047\041\051"
+ + "\044\015\045\014\046\016\047\036\050\037\051\044\052"
+ + "\170\053\166\054\043\055\041\056\035\057\030\060\045"
+ + "\061\021\062\012\063\042\001\002\000\064\013\027\016"
+ + "\020\032\031\033\040\035\034\036\113\037\141\040\047"
+ + "\041\051\044\015\045\014\046\016\047\036\050\037\051"
+ + "\044\052\025\053\050\054\043\055\041\056\035\057\030"
+ + "\060\045\061\021\062\012\063\042\001\002\000\012\002"
+ + "\ufff2\004\266\010\ufff2\021\265\001\002\000\016\002\uff7f"
+ + "\004\uff7f\006\uff7f\010\uff7f\020\264\021\uff7f\001\002\000"
+ + "\006\002\ufff6\010\ufff6\001\002\000\014\002\uffe2\004\uffe2"
+ + "\006\uffe2\010\uffe2\021\uffe2\001\002\000\054\002\uff7d\004"
+ + "\uff7d\006\uff7d\007\uff7d\010\uff7d\012\uff7d\013\uff7d\014\uff7d"
+ + "\021\uff7d\022\uff7d\023\uff7d\024\uff7d\025\uff7d\026\uff7d\027"
+ + "\uff7d\030\uff7d\031\uff7d\032\uff7d\033\uff7d\042\uff7d\043\uff7d"
+ + "\001\002\000\056\002\uff85\004\uff85\006\uff85\007\uff85\010"
+ + "\uff85\011\uff85\012\uff85\013\uff85\014\uff85\021\uff85\022\uff85"
+ + "\023\uff85\024\uff85\025\uff85\026\uff85\027\uff85\030\uff85\031"
+ + "\uff85\032\uff85\033\uff85\042\uff85\043\uff85\001\002\000\014"
+ + "\002\uffed\004\uffed\006\055\010\uffed\021\uffed\001\002\000"
+ + "\016\002\uff75\004\uff75\006\uff75\010\uff75\011\260\021\uff75"
+ + "\001\002\000\056\002\uff86\004\uff86\006\uff86\007\uff86\010"
+ + "\uff86\011\uff86\012\uff86\013\uff86\014\uff86\021\uff86\022\uff86"
+ + "\023\uff86\024\uff86\025\uff86\026\uff86\027\uff86\030\uff86\031"
+ + "\uff86\032\uff86\033\uff86\042\uff86\043\uff86\001\002\000\054"
+ + "\002\uff7e\004\uff7e\006\uff7e\007\uff7e\010\uff7e\012\uff7e\013"
+ + "\uff7e\014\uff7e\021\uff7e\022\uff7e\023\uff7e\024\uff7e\025\uff7e"
+ + "\026\uff7e\027\uff7e\030\uff7e\031\uff7e\032\uff7e\033\uff7e\042"
+ + "\uff7e\043\uff7e\001\002\000\054\002\uff77\004\uff77\006\uff77"
+ + "\007\uff77\010\uff77\012\uff77\013\uff77\014\uff77\021\uff77\022"
+ + "\uff77\023\uff77\024\uff77\025\uff77\026\uff77\027\uff77\030\uff77"
+ + "\031\uff77\032\uff77\033\uff77\042\uff77\043\uff77\001\002\000"
+ + "\054\002\uff76\004\uff76\006\uff76\007\uff76\010\uff76\012\uff76"
+ + "\013\uff76\014\uff76\021\uff76\022\uff76\023\uff76\024\uff76\025"
+ + "\uff76\026\uff76\027\uff76\030\uff76\031\uff76\032\uff76\033\uff76"
+ + "\042\uff76\043\uff76\001\002\000\056\002\uff84\004\uff84\006"
+ + "\uff84\007\uff84\010\uff84\011\uff84\012\uff84\013\uff84\014\uff84"
+ + "\021\uff84\022\uff84\023\uff84\024\uff84\025\uff84\026\uff84\027"
+ + "\uff84\030\uff84\031\uff84\032\uff84\033\uff84\042\uff84\043\uff84"
+ + "\001\002\000\054\002\uff81\004\uff81\006\uff81\007\uff81\010"
+ + "\uff81\012\uff81\013\uff81\014\uff81\021\uff81\022\uff81\023\uff81"
+ + "\024\uff81\025\uff81\026\uff81\027\uff81\030\uff81\031\uff81\032"
+ + "\uff81\033\uff81\042\uff81\043\uff81\001\002\000\054\002\uff78"
+ + "\004\uff78\006\uff78\007\uff78\010\uff78\012\uff78\013\uff78\014"
+ + "\uff78\021\uff78\022\uff78\023\uff78\024\uff78\025\uff78\026\uff78"
+ + "\027\uff78\030\uff78\031\uff78\032\uff78\033\uff78\042\uff78\043"
+ + "\uff78\001\002\000\054\002\uff82\004\uff82\006\uff82\007\uff82"
+ + "\010\uff82\012\uff82\013\uff82\014\uff82\021\uff82\022\uff82\023"
+ + "\uff82\024\uff82\025\uff82\026\uff82\027\uff82\030\uff82\031\uff82"
+ + "\032\uff82\033\uff82\042\uff82\043\uff82\001\002\000\054\002"
+ + "\uff79\004\uff79\006\uff79\007\uff79\010\uff79\012\uff79\013\uff79"
+ + "\014\uff79\021\uff79\022\uff79\023\uff79\024\uff79\025\uff79\026"
+ + "\uff79\027\uff79\030\uff79\031\uff79\032\uff79\033\uff79\042\uff79"
+ + "\043\uff79\001\002\000\054\002\uff7c\004\uff7c\006\uff7c\007"
+ + "\uff7c\010\uff7c\012\uff7c\013\uff7c\014\uff7c\021\uff7c\022\uff7c"
+ + "\023\uff7c\024\uff7c\025\uff7c\026\uff7c\027\uff7c\030\uff7c\031"
+ + "\uff7c\032\uff7c\033\uff7c\042\uff7c\043\uff7c\001\002\000\016"
+ + "\002\uff83\004\uff83\006\uff83\010\uff83\011\253\021\uff83\001"
+ + "\002\000\014\002\uffe5\004\uffe5\006\uffe5\010\uffe5\021\uffe5"
+ + "\001\002\000\016\002\uff80\004\uff80\006\uff80\010\uff80\020"
+ + "\252\021\uff80\001\002\000\014\002\uffe6\004\uffe6\006\uffe6"
+ + "\010\uffe6\021\uffe6\001\002\000\014\002\uffe1\004\uffe1\006"
+ + "\uffe1\010\uffe1\021\uffe1\001\002\000\014\002\uffef\004\uffef"
+ + "\006\055\010\uffef\021\uffef\001\002\000\054\002\uffdd\004"
+ + "\uffdd\006\055\007\uffdd\010\uffdd\012\uffdd\013\uffdd\014\uffdd"
+ + "\021\uffdd\022\uffdd\023\uffdd\024\uffdd\025\uffdd\026\uffdd\027"
+ + "\uffdd\030\uffdd\031\uffdd\032\uffdd\033\uffdd\042\uffdd\043\uffdd"
+ + "\001\002\000\110\004\061\005\111\011\071\013\103\015"
+ + "\134\016\066\017\106\021\070\031\075\032\031\033\040"
+ + "\034\057\035\034\036\113\037\141\040\142\041\145\044"
+ + "\063\045\062\046\065\047\115\050\123\051\136\052\077"
+ + "\053\143\054\131\055\125\056\116\057\104\060\140\061"
+ + "\067\062\060\063\127\064\132\065\072\001\002\000\012"
+ + "\002\uffee\004\uffee\010\uffee\021\uffee\001\002\000\054\002"
+ + "\uff9a\004\uff9a\006\uff9a\007\uff9a\010\uff9a\012\uff9a\013\uff9a"
+ + "\014\uff9a\021\uff9a\022\uff9a\023\uff9a\024\uff9a\025\uff9a\026"
+ + "\uff9a\027\uff9a\030\uff9a\031\uff9a\032\uff9a\033\uff9a\042\uff9a"
+ + "\043\uff9a\001\002\000\060\002\uff7a\004\uff7a\006\uff7a\007"
+ + "\uff7a\010\uff7a\011\uff7a\012\uff7a\013\uff7a\014\uff7a\020\uffa5"
+ + "\021\uff7a\022\uff7a\023\uff7a\024\uff7a\025\uff7a\026\uff7a\027"
+ + "\uff7a\030\uff7a\031\uff7a\032\uff7a\033\uff7a\042\uff7a\043\uff7a"
+ + "\001\002\000\126\002\uffb9\005\111\007\uffb9\010\uffb9\012"
+ + "\uffb9\013\103\014\uffb9\016\066\017\106\022\uffb9\023\uffb9"
+ + "\024\uffb9\025\uffb9\026\uffb9\027\uffb9\030\uffb9\031\uffb9\032"
+ + "\031\033\040\035\034\036\113\037\141\040\142\041\145"
+ + "\042\uffb9\043\uffb9\044\063\045\062\046\065\047\115\050"
+ + "\123\051\136\052\077\053\143\054\131\055\125\056\116"
+ + "\057\104\060\140\061\067\062\060\063\127\001\002\000"
+ + "\054\002\uff89\004\uff89\006\uff89\007\uff89\010\uff89\012\uff89"
+ + "\013\uff89\014\uff89\021\uff89\022\uff89\023\uff89\024\uff89\025"
+ + "\uff89\026\uff89\027\uff89\030\uff89\031\uff89\032\uff89\033\uff89"
+ + "\042\uff89\043\uff89\001\002\000\054\002\uff8b\004\uff8b\006"
+ + "\uff8b\007\uff8b\010\uff8b\012\uff8b\013\uff8b\014\uff8b\021\uff8b"
+ + "\022\uff8b\023\uff8b\024\uff8b\025\uff8b\026\uff8b\027\uff8b\030"
+ + "\uff8b\031\uff8b\032\uff8b\033\uff8b\042\uff8b\043\uff8b\001\002"
+ + "\000\032\002\uffd5\007\uffd5\012\uffd5\014\uffd5\022\uffd5\023"
+ + "\uffd5\024\221\025\222\026\223\027\224\042\uffd5\043\uffd5"
+ + "\001\002\000\004\011\245\001\002\000\062\013\uffae\032"
+ + "\uffae\033\uffae\035\uffae\036\uffae\037\uffae\040\uffae\041\uffae"
+ + "\044\uffae\045\uffae\046\uffae\047\uffae\050\uffae\051\uffae\052"
+ + "\uffae\053\uffae\054\uffae\055\uffae\056\uffae\057\uffae\060\uffae"
+ + "\061\uffae\062\uffae\063\uffae\001\002\000\060\002\uff7b\004"
+ + "\uff7b\006\uff7b\007\uff7b\010\uff7b\011\uff7b\012\uff7b\013\uff7b"
+ + "\014\uff7b\020\uffa6\021\uff7b\022\uff7b\023\uff7b\024\uff7b\025"
+ + "\uff7b\026\uff7b\027\uff7b\030\uff7b\031\uff7b\032\uff7b\033\uff7b"
+ + "\042\uff7b\043\uff7b\001\002\000\070\005\111\013\103\016"
+ + "\066\017\106\032\031\033\040\035\034\036\113\037\141"
+ + "\040\142\041\145\044\063\045\062\046\065\047\115\050"
+ + "\123\051\136\052\077\053\143\054\131\055\125\056\116"
+ + "\057\104\060\140\061\067\062\060\063\127\001\002\000"
+ + "\110\004\061\005\111\011\071\013\103\015\134\016\066"
+ + "\017\106\021\070\031\075\032\031\033\040\034\057\035"
+ + "\034\036\113\037\141\040\142\041\145\044\063\045\062"
+ + "\046\065\047\115\050\123\051\136\052\077\053\143\054"
+ + "\131\055\125\056\116\057\104\060\140\061\067\062\060"
+ + "\063\127\064\132\065\072\001\002\000\054\002\uff99\004"
+ + "\uff99\006\uff99\007\uff99\010\uff99\012\uff99\013\uff99\014\uff99"
+ + "\021\uff99\022\uff99\023\uff99\024\uff99\025\uff99\026\uff99\027"
+ + "\uff99\030\uff99\031\uff99\032\uff99\033\uff99\042\uff99\043\uff99"
+ + "\001\002\000\046\002\uffb7\007\uffb7\010\uffb7\012\uffb7\013"
+ + "\uffb7\014\uffb7\022\uffb7\023\uffb7\024\uffb7\025\uffb7\026\uffb7"
+ + "\027\uffb7\030\uffb7\031\uffb7\032\uffb7\033\uffb7\042\uffb7\043"
+ + "\uffb7\001\002\000\054\002\uff97\004\uff97\006\uff97\007\uff97"
+ + "\010\uff97\012\uff97\013\uff97\014\uff97\021\uff97\022\uff97\023"
+ + "\uff97\024\uff97\025\uff97\026\uff97\027\uff97\030\uff97\031\uff97"
+ + "\032\uff97\033\uff97\042\uff97\043\uff97\001\002\000\110\004"
+ + "\061\005\111\011\071\013\103\015\134\016\066\017\106"
+ + "\021\070\031\075\032\031\033\040\034\057\035\034\036"
+ + "\113\037\141\040\142\041\145\044\063\045\062\046\065"
+ + "\047\115\050\123\051\136\052\077\053\143\054\131\055"
+ + "\125\056\116\057\104\060\140\061\067\062\060\063\127"
+ + "\064\132\065\072\001\002\000\016\002\uffd9\007\uffd9\012"
+ + "\uffd9\014\uffd9\042\uffd9\043\234\001\002\000\060\002\uff7f"
+ + "\004\uff7f\006\uff7f\007\uff7f\010\uff7f\011\uff7f\012\uff7f\013"
+ + "\uff7f\014\uff7f\020\uffaa\021\uff7f\022\uff7f\023\uff7f\024\uff7f"
+ + "\025\uff7f\026\uff7f\027\uff7f\030\uff7f\031\uff7f\032\uff7f\033"
+ + "\uff7f\042\uff7f\043\uff7f\001\002\000\062\013\103\032\031"
+ + "\033\040\035\034\036\113\037\141\040\142\041\145\044"
+ + "\063\045\062\046\065\047\036\050\037\051\044\052\170"
+ + "\053\166\054\043\055\041\056\035\057\030\060\045\061"
+ + "\021\062\012\063\042\001\002\000\004\020\236\001\002"
+ + "\000\014\002\uffda\007\uffda\012\uffda\014\uffda\042\232\001"
+ + "\002\000\054\002\uff88\004\uff88\006\uff88\007\uff88\010\uff88"
+ + "\012\uff88\013\uff88\014\uff88\021\uff88\022\uff88\023\uff88\024"
+ + "\uff88\025\uff88\026\uff88\027\uff88\030\uff88\031\uff88\032\uff88"
+ + "\033\uff88\042\uff88\043\uff88\001\002\000\060\002\uff7d\004"
+ + "\uff7d\006\uff7d\007\uff7d\010\uff7d\011\uff7d\012\uff7d\013\uff7d"
+ + "\014\uff7d\020\uffa8\021\uff7d\022\uff7d\023\uff7d\024\uff7d\025"
+ + "\uff7d\026\uff7d\027\uff7d\030\uff7d\031\uff7d\032\uff7d\033\uff7d"
+ + "\042\uff7d\043\uff7d\001\002\000\022\002\uffd7\007\uffd7\012"
+ + "\uffd7\014\uffd7\022\216\023\217\042\uffd7\043\uffd7\001\002"
+ + "\000\052\002\uff9f\004\uff9f\007\uff9f\010\uff9f\012\uff9f\013"
+ + "\uff9f\014\uff9f\021\uff9f\022\uff9f\023\uff9f\024\uff9f\025\uff9f"
+ + "\026\uff9f\027\uff9f\030\uff9f\031\uff9f\032\uff9f\033\uff9f\042"
+ + "\uff9f\043\uff9f\001\002\000\054\002\uffb4\004\uffb4\006\055"
+ + "\007\uffb4\010\uffb4\012\uffb4\013\uffb4\014\uffb4\021\uffb4\022"
+ + "\uffb4\023\uffb4\024\uffb4\025\uffb4\026\uffb4\027\uffb4\030\uffb4"
+ + "\031\uffb4\032\uffb4\033\uffb4\042\uffb4\043\uffb4\001\002\000"
+ + "\046\002\uffbd\007\uffbd\010\uffbd\012\uffbd\013\uffbd\014\uffbd"
+ + "\022\uffbd\023\uffbd\024\uffbd\025\uffbd\026\uffbd\027\uffbd\030"
+ + "\uffbd\031\uffbd\032\uffbd\033\uffbd\042\uffbd\043\uffbd\001\002"
+ + "\000\052\002\uffa0\004\uffa0\007\uffa0\010\uffa0\012\uffa0\013"
+ + "\uffa0\014\uffa0\021\uffa0\022\uffa0\023\uffa0\024\uffa0\025\uffa0"
+ + "\026\uffa0\027\uffa0\030\uffa0\031\uffa0\032\uffa0\033\uffa0\042"
+ + "\uffa0\043\uffa0\001\002\000\036\002\uffd2\007\uffd2\012\uffd2"
+ + "\014\uffd2\022\uffd2\023\uffd2\024\uffd2\025\uffd2\026\uffd2\027"
+ + "\uffd2\030\211\031\212\042\uffd2\043\uffd2\001\002\000\056"
+ + "\002\uff75\004\uff75\006\uff75\007\uff75\010\uff75\011\uff75\012"
+ + "\uff75\013\uff75\014\uff75\021\uff75\022\uff75\023\uff75\024\uff75"
+ + "\025\uff75\026\uff75\027\uff75\030\uff75\031\uff75\032\uff75\033"
+ + "\uff75\042\uff75\043\uff75\001\002\000\044\002\uffca\007\uffca"
+ + "\012\uffca\013\uffca\014\uffca\022\uffca\023\uffca\024\uffca\025"
+ + "\uffca\026\uffca\027\uffca\030\uffca\031\uffca\032\uffca\033\uffca"
+ + "\042\uffca\043\uffca\001\002\000\060\002\uff77\004\uff77\006"
+ + "\uff77\007\uff77\010\uff77\011\uff77\012\uff77\013\uff77\014\uff77"
+ + "\020\uffa2\021\uff77\022\uff77\023\uff77\024\uff77\025\uff77\026"
+ + "\uff77\027\uff77\030\uff77\031\uff77\032\uff77\033\uff77\042\uff77"
+ + "\043\uff77\001\002\000\060\002\uff7e\004\uff7e\006\uff7e\007"
+ + "\uff7e\010\uff7e\011\uff7e\012\uff7e\013\uff7e\014\uff7e\020\uffa9"
+ + "\021\uff7e\022\uff7e\023\uff7e\024\uff7e\025\uff7e\026\uff7e\027"
+ + "\uff7e\030\uff7e\031\uff7e\032\uff7e\033\uff7e\042\uff7e\043\uff7e"
+ + "\001\002\000\004\011\201\001\002\000\052\002\uffbc\004"
+ + "\uffbc\007\uffbc\010\uffbc\012\uffbc\013\uffbc\014\uffbc\021\uffbc"
+ + "\022\uffbc\023\uffbc\024\uffbc\025\uffbc\026\uffbc\027\uffbc\030"
+ + "\uffbc\031\uffbc\032\uffbc\033\uffbc\042\uffbc\043\uffbc\001\002"
+ + "\000\046\002\uffc2\007\uffc2\010\uffc2\012\uffc2\013\uffc2\014"
+ + "\uffc2\022\uffc2\023\uffc2\024\uffc2\025\uffc2\026\uffc2\027\uffc2"
+ + "\030\uffc2\031\uffc2\032\uffc2\033\uffc2\042\uffc2\043\uffc2\001"
+ + "\002\000\054\002\uff9e\004\uff9e\006\055\007\uff9e\010\uff9e"
+ + "\012\uff9e\013\uff9e\014\uff9e\021\uff9e\022\uff9e\023\uff9e\024"
+ + "\uff9e\025\uff9e\026\uff9e\027\uff9e\030\uff9e\031\uff9e\032\uff9e"
+ + "\033\uff9e\042\uff9e\043\uff9e\001\002\000\060\002\uff76\004"
+ + "\uff76\006\uff76\007\uff76\010\uff76\011\uff76\012\uff76\013\uff76"
+ + "\014\uff76\020\uffa1\021\uff76\022\uff76\023\uff76\024\uff76\025"
+ + "\uff76\026\uff76\027\uff76\030\uff76\031\uff76\032\uff76\033\uff76"
+ + "\042\uff76\043\uff76\001\002\000\046\002\uffc4\007\uffc4\010"
+ + "\176\012\uffc4\013\uffc4\014\uffc4\022\uffc4\023\uffc4\024\uffc4"
+ + "\025\uffc4\026\uffc4\027\uffc4\030\uffc4\031\uffc4\032\uffc4\033"
+ + "\uffc4\042\uffc4\043\uffc4\001\002\000\060\002\uff81\004\uff81"
+ + "\006\uff81\007\uff81\010\uff81\011\uff81\012\uff81\013\uff81\014"
+ + "\uff81\020\uffac\021\uff81\022\uff81\023\uff81\024\uff81\025\uff81"
+ + "\026\uff81\027\uff81\030\uff81\031\uff81\032\uff81\033\uff81\042"
+ + "\uff81\043\uff81\001\002\000\054\002\uff9c\004\uff9c\006\uff9c"
+ + "\007\uff9c\010\uff9c\012\uff9c\013\uff9c\014\uff9c\021\uff9c\022"
+ + "\uff9c\023\uff9c\024\uff9c\025\uff9c\026\uff9c\027\uff9c\030\uff9c"
+ + "\031\uff9c\032\uff9c\033\uff9c\042\uff9c\043\uff9c\001\002\000"
+ + "\060\002\uff78\004\uff78\006\uff78\007\uff78\010\uff78\011\uff78"
+ + "\012\uff78\013\uff78\014\uff78\020\uffa3\021\uff78\022\uff78\023"
+ + "\uff78\024\uff78\025\uff78\026\uff78\027\uff78\030\uff78\031\uff78"
+ + "\032\uff78\033\uff78\042\uff78\043\uff78\001\002\000\052\002"
+ + "\uffc1\004\173\007\uffc1\010\uffc1\012\uffc1\013\uffc1\014\uffc1"
+ + "\021\172\022\uffc1\023\uffc1\024\uffc1\025\uffc1\026\uffc1\027"
+ + "\uffc1\030\uffc1\031\uffc1\032\uffc1\033\uffc1\042\uffc1\043\uffc1"
+ + "\001\002\000\060\002\uff82\004\uff82\006\uff82\007\uff82\010"
+ + "\uff82\011\uff82\012\uff82\013\uff82\014\uff82\020\uffad\021\uff82"
+ + "\022\uff82\023\uff82\024\uff82\025\uff82\026\uff82\027\uff82\030"
+ + "\uff82\031\uff82\032\uff82\033\uff82\042\uff82\043\uff82\001\002"
+ + "\000\054\002\uff98\004\uff98\006\uff98\007\uff98\010\uff98\012"
+ + "\uff98\013\uff98\014\uff98\021\uff98\022\uff98\023\uff98\024\uff98"
+ + "\025\uff98\026\uff98\027\uff98\030\uff98\031\uff98\032\uff98\033"
+ + "\uff98\042\uff98\043\uff98\001\002\000\004\007\171\001\002"
+ + "\000\046\032\031\033\040\035\034\036\113\037\141\047"
+ + "\036\050\037\051\044\052\170\053\166\054\043\055\041"
+ + "\056\035\057\030\060\045\061\021\062\012\063\042\001"
+ + "\002\000\052\002\uffba\004\uffba\007\uffba\010\uffba\012\uffba"
+ + "\013\uffba\014\uffba\021\uffba\022\uffba\023\uffba\024\uffba\025"
+ + "\uffba\026\uffba\027\uffba\030\uffba\031\uffba\032\uffba\033\uffba"
+ + "\042\uffba\043\uffba\001\002\000\060\002\uff79\004\uff79\006"
+ + "\uff79\007\uff79\010\uff79\011\uff79\012\uff79\013\uff79\014\uff79"
+ + "\020\uffa4\021\uff79\022\uff79\023\uff79\024\uff79\025\uff79\026"
+ + "\uff79\027\uff79\030\uff79\031\uff79\032\uff79\033\uff79\042\uff79"
+ + "\043\uff79\001\002\000\052\002\uffb0\004\uffb0\007\uffb0\010"
+ + "\uffb0\012\uffb0\013\uffb0\014\uffb0\021\uffb0\022\uffb0\023\uffb0"
+ + "\024\uffb0\025\uffb0\026\uffb0\027\uffb0\030\uffb0\031\uffb0\032"
+ + "\uffb0\033\uffb0\042\uffb0\043\uffb0\001\002\000\060\002\uff7c"
+ + "\004\uff7c\006\uff7c\007\uff7c\010\uff7c\011\uff7c\012\uff7c\013"
+ + "\uff7c\014\uff7c\020\uffa7\021\uff7c\022\uff7c\023\uff7c\024\uff7c"
+ + "\025\uff7c\026\uff7c\027\uff7c\030\uff7c\031\uff7c\032\uff7c\033"
+ + "\uff7c\042\uff7c\043\uff7c\001\002\000\056\002\uff83\004\uff83"
+ + "\006\uff83\007\uff83\010\uff83\011\uff83\012\uff83\013\uff83\014"
+ + "\uff83\021\uff83\022\uff83\023\uff83\024\uff83\025\uff83\026\uff83"
+ + "\027\uff83\030\uff83\031\uff83\032\uff83\033\uff83\042\uff83\043"
+ + "\uff83\001\002\000\054\002\uff8c\004\uff8c\006\uff8c\007\uff8c"
+ + "\010\uff8c\012\uff8c\013\uff8c\014\uff8c\021\uff8c\022\uff8c\023"
+ + "\uff8c\024\uff8c\025\uff8c\026\uff8c\027\uff8c\030\uff8c\031\uff8c"
+ + "\032\uff8c\033\uff8c\042\uff8c\043\uff8c\001\002\000\060\002"
+ + "\uff80\004\uff80\006\uff80\007\uff80\010\uff80\011\uff80\012\uff80"
+ + "\013\uff80\014\uff80\020\uffab\021\uff80\022\uff80\023\uff80\024"
+ + "\uff80\025\uff80\026\uff80\027\uff80\030\uff80\031\uff80\032\uff80"
+ + "\033\uff80\042\uff80\043\uff80\001\002\000\044\002\uffc6\007"
+ + "\uffc6\012\uffc6\013\uffc6\014\uffc6\022\uffc6\023\uffc6\024\uffc6"
+ + "\025\uffc6\026\uffc6\027\uffc6\030\uffc6\031\uffc6\032\uffc6\033"
+ + "\uffc6\042\uffc6\043\uffc6\001\002\000\054\002\uff8d\004\uff8d"
+ + "\006\uff8d\007\uff8d\010\uff8d\012\uff8d\013\uff8d\014\uff8d\021"
+ + "\uff8d\022\uff8d\023\uff8d\024\uff8d\025\uff8d\026\uff8d\027\uff8d"
+ + "\030\uff8d\031\uff8d\032\uff8d\033\uff8d\042\uff8d\043\uff8d\001"
+ + "\002\000\044\002\uffcd\007\uffcd\012\uffcd\013\160\014\uffcd"
+ + "\022\uffcd\023\uffcd\024\uffcd\025\uffcd\026\uffcd\027\uffcd\030"
+ + "\uffcd\031\uffcd\032\161\033\157\042\uffcd\043\uffcd\001\002"
+ + "\000\052\002\uffbe\004\153\007\uffbe\010\uffbe\012\uffbe\013"
+ + "\uffbe\014\uffbe\021\152\022\uffbe\023\uffbe\024\uffbe\025\uffbe"
+ + "\026\uffbe\027\uffbe\030\uffbe\031\uffbe\032\uffbe\033\uffbe\042"
+ + "\uffbe\043\uffbe\001\002\000\054\002\uff8e\004\uff8e\006\uff8e"
+ + "\007\uff8e\010\uff8e\012\uff8e\013\uff8e\014\uff8e\021\uff8e\022"
+ + "\uff8e\023\uff8e\024\uff8e\025\uff8e\026\uff8e\027\uff8e\030\uff8e"
+ + "\031\uff8e\032\uff8e\033\uff8e\042\uff8e\043\uff8e\001\002\000"
+ + "\056\002\uff87\004\uff87\006\uff87\007\uff87\010\uff87\011\uff91"
+ + "\012\uff87\013\uff87\014\uff87\021\uff87\022\uff87\023\uff87\024"
+ + "\uff87\025\uff87\026\uff87\027\uff87\030\uff87\031\uff87\032\uff87"
+ + "\033\uff87\042\uff87\043\uff87\001\002\000\070\005\111\013"
+ + "\103\016\066\017\106\032\031\033\040\035\034\036\113"
+ + "\037\141\040\142\041\145\044\063\045\062\046\065\047"
+ + "\115\050\123\051\136\052\077\053\143\054\131\055\125"
+ + "\056\116\057\104\060\140\061\067\062\060\063\127\001"
+ + "\002\000\070\005\111\013\103\016\066\017\106\032\031"
+ + "\033\040\035\034\036\113\037\141\040\142\041\145\044"
+ + "\063\045\062\046\065\047\115\050\123\051\136\052\077"
+ + "\053\143\054\131\055\125\056\116\057\104\060\140\061"
+ + "\067\062\060\063\127\001\002\000\054\002\uff87\004\uff87"
+ + "\006\uff87\007\uff87\010\uff87\012\uff87\013\uff87\014\uff87\021"
+ + "\uff87\022\uff87\023\uff87\024\uff87\025\uff87\026\uff87\027\uff87"
+ + "\030\uff87\031\uff87\032\uff87\033\uff87\042\uff87\043\uff87\001"
+ + "\002\000\052\002\uffbb\004\uffbb\007\uffbb\010\uffbb\012\uffbb"
+ + "\013\uffbb\014\uffbb\021\uffbb\022\uffbb\023\uffbb\024\uffbb\025"
+ + "\uffbb\026\uffbb\027\uffbb\030\uffbb\031\uffbb\032\uffbb\033\uffbb"
+ + "\042\uffbb\043\uffbb\001\002\000\052\002\uffb6\004\uffb6\007"
+ + "\uffb6\010\uffb6\012\uffb6\013\uffb6\014\uffb6\021\uffb6\022\uffb6"
+ + "\023\uffb6\024\uffb6\025\uffb6\026\uffb6\027\uffb6\030\uffb6\031"
+ + "\uffb6\032\uffb6\033\uffb6\042\uffb6\043\uffb6\001\002\000\110"
+ + "\004\061\005\111\011\071\013\103\015\134\016\066\017"
+ + "\106\021\070\031\075\032\031\033\040\034\057\035\034"
+ + "\036\113\037\141\040\142\041\145\044\063\045\062\046"
+ + "\065\047\115\050\123\051\136\052\077\053\143\054\131"
+ + "\055\125\056\116\057\104\060\140\061\067\062\060\063"
+ + "\127\064\132\065\072\001\002\000\110\004\061\005\111"
+ + "\011\071\013\103\015\134\016\066\017\106\021\070\031"
+ + "\075\032\031\033\040\034\057\035\034\036\113\037\141"
+ + "\040\142\041\145\044\063\045\062\046\065\047\115\050"
+ + "\123\051\136\052\077\053\143\054\131\055\125\056\116"
+ + "\057\104\060\140\061\067\062\060\063\127\064\132\065"
+ + "\072\001\002\000\110\004\061\005\111\011\071\013\103"
+ + "\015\134\016\066\017\106\021\070\031\075\032\031\033"
+ + "\040\034\057\035\034\036\113\037\141\040\142\041\145"
+ + "\044\063\045\062\046\065\047\115\050\123\051\136\052"
+ + "\077\053\143\054\131\055\125\056\116\057\104\060\140"
+ + "\061\067\062\060\063\127\064\132\065\072\001\002\000"
+ + "\044\002\uffc8\007\uffc8\012\uffc8\013\uffc8\014\uffc8\022\uffc8"
+ + "\023\uffc8\024\uffc8\025\uffc8\026\uffc8\027\uffc8\030\uffc8\031"
+ + "\uffc8\032\uffc8\033\uffc8\042\uffc8\043\uffc8\001\002\000\044"
+ + "\002\uffc9\007\uffc9\012\uffc9\013\uffc9\014\uffc9\022\uffc9\023"
+ + "\uffc9\024\uffc9\025\uffc9\026\uffc9\027\uffc9\030\uffc9\031\uffc9"
+ + "\032\uffc9\033\uffc9\042\uffc9\043\uffc9\001\002\000\044\002"
+ + "\uffc7\007\uffc7\012\uffc7\013\uffc7\014\uffc7\022\uffc7\023\uffc7"
+ + "\024\uffc7\025\uffc7\026\uffc7\027\uffc7\030\uffc7\031\uffc7\032"
+ + "\uffc7\033\uffc7\042\uffc7\043\uffc7\001\002\000\054\002\uff90"
+ + "\004\uff90\006\uff90\007\uff90\010\uff90\012\uff90\013\uff90\014"
+ + "\uff90\021\uff90\022\uff90\023\uff90\024\uff90\025\uff90\026\uff90"
+ + "\027\uff90\030\uff90\031\uff90\032\uff90\033\uff90\042\uff90\043"
+ + "\uff90\001\002\000\054\002\uff80\004\uff80\006\uff80\007\uff80"
+ + "\010\uff80\012\uff80\013\uff80\014\uff80\021\uff80\022\uff80\023"
+ + "\uff80\024\uff80\025\uff80\026\uff80\027\uff80\030\uff80\031\uff80"
+ + "\032\uff80\033\uff80\042\uff80\043\uff80\001\002\000\054\002"
+ + "\uff96\004\uff96\006\uff96\007\uff96\010\uff96\012\uff96\013\uff96"
+ + "\014\uff96\021\uff96\022\uff96\023\uff96\024\uff96\025\uff96\026"
+ + "\uff96\027\uff96\030\uff96\031\uff96\032\uff96\033\uff96\042\uff96"
+ + "\043\uff96\001\002\000\054\002\uff7f\004\uff7f\006\uff7f\007"
+ + "\uff7f\010\uff7f\012\uff7f\013\uff7f\014\uff7f\021\uff7f\022\uff7f"
+ + "\023\uff7f\024\uff7f\025\uff7f\026\uff7f\027\uff7f\030\uff7f\031"
+ + "\uff7f\032\uff7f\033\uff7f\042\uff7f\043\uff7f\001\002\000\054"
+ + "\002\uffdb\004\uffdb\006\uffdb\007\uffdb\010\uffdb\012\uffdb\013"
+ + "\uffdb\014\uffdb\021\uffdb\022\uffdb\023\uffdb\024\uffdb\025\uffdb"
+ + "\026\uffdb\027\uffdb\030\uffdb\031\uffdb\032\uffdb\033\uffdb\042"
+ + "\uffdb\043\uffdb\001\002\000\070\005\111\013\103\016\066"
+ + "\017\106\032\031\033\040\035\034\036\113\037\141\040"
+ + "\142\041\145\044\063\045\062\046\065\047\115\050\123"
+ + "\051\136\052\077\053\143\054\131\055\125\056\116\057"
+ + "\104\060\140\061\067\062\060\063\127\001\002\000\070"
+ + "\005\111\013\103\016\066\017\106\032\031\033\040\035"
+ + "\034\036\113\037\141\040\142\041\145\044\063\045\062"
+ + "\046\065\047\115\050\123\051\136\052\077\053\143\054"
+ + "\131\055\125\056\116\057\104\060\140\061\067\062\060"
+ + "\063\127\001\002\000\052\002\uffc0\004\153\007\uffc0\010"
+ + "\uffc0\012\uffc0\013\uffc0\014\uffc0\021\152\022\uffc0\023\uffc0"
+ + "\024\uffc0\025\uffc0\026\uffc0\027\uffc0\030\uffc0\031\uffc0\032"
+ + "\uffc0\033\uffc0\042\uffc0\043\uffc0\001\002\000\052\002\uffbf"
+ + "\004\153\007\uffbf\010\uffbf\012\uffbf\013\uffbf\014\uffbf\021"
+ + "\152\022\uffbf\023\uffbf\024\uffbf\025\uffbf\026\uffbf\027\uffbf"
+ + "\030\uffbf\031\uffbf\032\uffbf\033\uffbf\042\uffbf\043\uffbf\001"
+ + "\002\000\106\004\061\005\111\011\071\013\103\015\134"
+ + "\016\066\017\106\021\070\032\031\033\040\034\057\035"
+ + "\034\036\113\037\141\040\142\041\145\044\063\045\062"
+ + "\046\065\047\115\050\123\051\136\052\077\053\143\054"
+ + "\131\055\125\056\116\057\104\060\140\061\067\062\060"
+ + "\063\127\064\132\065\072\001\002\000\044\002\uffc3\007"
+ + "\uffc3\012\uffc3\013\uffc3\014\uffc3\022\uffc3\023\uffc3\024\uffc3"
+ + "\025\uffc3\026\uffc3\027\uffc3\030\uffc3\031\uffc3\032\uffc3\033"
+ + "\uffc3\042\uffc3\043\uffc3\001\002\000\052\002\uff9d\004\uff9d"
+ + "\007\uff9d\010\uff9d\012\uff9d\013\uff9d\014\uff9d\021\uff9d\022"
+ + "\uff9d\023\uff9d\024\uff9d\025\uff9d\026\uff9d\027\uff9d\030\uff9d"
+ + "\031\uff9d\032\uff9d\033\uff9d\042\uff9d\043\uff9d\001\002\000"
+ + "\112\004\061\005\111\011\071\012\202\013\103\015\134"
+ + "\016\066\017\106\021\070\031\075\032\031\033\040\034"
+ + "\057\035\034\036\113\037\141\040\142\041\145\044\063"
+ + "\045\062\046\065\047\115\050\123\051\136\052\077\053"
+ + "\143\054\131\055\125\056\116\057\104\060\140\061\067"
+ + "\062\060\063\127\064\132\065\072\001\002\000\054\002"
+ + "\uff95\004\uff95\006\uff95\007\uff95\010\uff95\012\uff95\013\uff95"
+ + "\014\uff95\021\uff95\022\uff95\023\uff95\024\uff95\025\uff95\026"
+ + "\uff95\027\uff95\030\uff95\031\uff95\032\uff95\033\uff95\042\uff95"
+ + "\043\uff95\001\002\000\006\012\uff93\014\207\001\002\000"
+ + "\006\012\uff8f\014\uff8f\001\002\000\004\012\206\001\002"
+ + "\000\054\002\uff94\004\uff94\006\uff94\007\uff94\010\uff94\012"
+ + "\uff94\013\uff94\014\uff94\021\uff94\022\uff94\023\uff94\024\uff94"
+ + "\025\uff94\026\uff94\027\uff94\030\uff94\031\uff94\032\uff94\033"
+ + "\uff94\042\uff94\043\uff94\001\002\000\110\004\061\005\111"
+ + "\011\071\013\103\015\134\016\066\017\106\021\070\031"
+ + "\075\032\031\033\040\034\057\035\034\036\113\037\141"
+ + "\040\142\041\145\044\063\045\062\046\065\047\115\050"
+ + "\123\051\136\052\077\053\143\054\131\055\125\056\116"
+ + "\057\104\060\140\061\067\062\060\063\127\064\132\065"
+ + "\072\001\002\000\004\012\uff92\001\002\000\110\004\061"
+ + "\005\111\011\071\013\103\015\134\016\066\017\106\021"
+ + "\070\031\075\032\031\033\040\034\057\035\034\036\113"
+ + "\037\141\040\142\041\145\044\063\045\062\046\065\047"
+ + "\115\050\123\051\136\052\077\053\143\054\131\055\125"
+ + "\056\116\057\104\060\140\061\067\062\060\063\127\064"
+ + "\132\065\072\001\002\000\110\004\061\005\111\011\071"
+ + "\013\103\015\134\016\066\017\106\021\070\031\075\032"
+ + "\031\033\040\034\057\035\034\036\113\037\141\040\142"
+ + "\041\145\044\063\045\062\046\065\047\115\050\123\051"
+ + "\136\052\077\053\143\054\131\055\125\056\116\057\104"
+ + "\060\140\061\067\062\060\063\127\064\132\065\072\001"
+ + "\002\000\044\002\uffcb\007\uffcb\012\uffcb\013\160\014\uffcb"
+ + "\022\uffcb\023\uffcb\024\uffcb\025\uffcb\026\uffcb\027\uffcb\030"
+ + "\uffcb\031\uffcb\032\161\033\157\042\uffcb\043\uffcb\001\002"
+ + "\000\044\002\uffcc\007\uffcc\012\uffcc\013\160\014\uffcc\022"
+ + "\uffcc\023\uffcc\024\uffcc\025\uffcc\026\uffcc\027\uffcc\030\uffcc"
+ + "\031\uffcc\032\161\033\157\042\uffcc\043\uffcc\001\002\000"
+ + "\052\002\uffb3\004\uffb3\007\uffb3\010\uffb3\012\uffb3\013\uffb3"
+ + "\014\uffb3\021\uffb3\022\uffb3\023\uffb3\024\uffb3\025\uffb3\026"
+ + "\uffb3\027\uffb3\030\uffb3\031\uffb3\032\uffb3\033\uffb3\042\uffb3"
+ + "\043\uffb3\001\002\000\110\004\061\005\111\011\071\013"
+ + "\103\015\134\016\066\017\106\021\070\031\075\032\031"
+ + "\033\040\034\057\035\034\036\113\037\141\040\142\041"
+ + "\145\044\063\045\062\046\065\047\115\050\123\051\136"
+ + "\052\077\053\143\054\131\055\125\056\116\057\104\060"
+ + "\140\061\067\062\060\063\127\064\132\065\072\001\002"
+ + "\000\110\004\061\005\111\011\071\013\103\015\134\016"
+ + "\066\017\106\021\070\031\075\032\031\033\040\034\057"
+ + "\035\034\036\113\037\141\040\142\041\145\044\063\045"
+ + "\062\046\065\047\115\050\123\051\136\052\077\053\143"
+ + "\054\131\055\125\056\116\057\104\060\140\061\067\062"
+ + "\060\063\127\064\132\065\072\001\002\000\032\002\uffd3"
+ + "\007\uffd3\012\uffd3\014\uffd3\022\uffd3\023\uffd3\024\221\025"
+ + "\222\026\223\027\224\042\uffd3\043\uffd3\001\002\000\110"
+ + "\004\061\005\111\011\071\013\103\015\134\016\066\017"
+ + "\106\021\070\031\075\032\031\033\040\034\057\035\034"
+ + "\036\113\037\141\040\142\041\145\044\063\045\062\046"
+ + "\065\047\115\050\123\051\136\052\077\053\143\054\131"
+ + "\055\125\056\116\057\104\060\140\061\067\062\060\063"
+ + "\127\064\132\065\072\001\002\000\110\004\061\005\111"
+ + "\011\071\013\103\015\134\016\066\017\106\021\070\031"
+ + "\075\032\031\033\040\034\057\035\034\036\113\037\141"
+ + "\040\142\041\145\044\063\045\062\046\065\047\115\050"
+ + "\123\051\136\052\077\053\143\054\131\055\125\056\116"
+ + "\057\104\060\140\061\067\062\060\063\127\064\132\065"
+ + "\072\001\002\000\110\004\061\005\111\011\071\013\103"
+ + "\015\134\016\066\017\106\021\070\031\075\032\031\033"
+ + "\040\034\057\035\034\036\113\037\141\040\142\041\145"
+ + "\044\063\045\062\046\065\047\115\050\123\051\136\052"
+ + "\077\053\143\054\131\055\125\056\116\057\104\060\140"
+ + "\061\067\062\060\063\127\064\132\065\072\001\002\000"
+ + "\110\004\061\005\111\011\071\013\103\015\134\016\066"
+ + "\017\106\021\070\031\075\032\031\033\040\034\057\035"
+ + "\034\036\113\037\141\040\142\041\145\044\063\045\062"
+ + "\046\065\047\115\050\123\051\136\052\077\053\143\054"
+ + "\131\055\125\056\116\057\104\060\140\061\067\062\060"
+ + "\063\127\064\132\065\072\001\002\000\036\002\uffce\007"
+ + "\uffce\012\uffce\014\uffce\022\uffce\023\uffce\024\uffce\025\uffce"
+ + "\026\uffce\027\uffce\030\211\031\212\042\uffce\043\uffce\001"
+ + "\002\000\036\002\uffcf\007\uffcf\012\uffcf\014\uffcf\022\uffcf"
+ + "\023\uffcf\024\uffcf\025\uffcf\026\uffcf\027\uffcf\030\211\031"
+ + "\212\042\uffcf\043\uffcf\001\002\000\036\002\uffd0\007\uffd0"
+ + "\012\uffd0\014\uffd0\022\uffd0\023\uffd0\024\uffd0\025\uffd0\026"
+ + "\uffd0\027\uffd0\030\211\031\212\042\uffd0\043\uffd0\001\002"
+ + "\000\036\002\uffd1\007\uffd1\012\uffd1\014\uffd1\022\uffd1\023"
+ + "\uffd1\024\uffd1\025\uffd1\026\uffd1\027\uffd1\030\211\031\212"
+ + "\042\uffd1\043\uffd1\001\002\000\032\002\uffd4\007\uffd4\012"
+ + "\uffd4\014\uffd4\022\uffd4\023\uffd4\024\221\025\222\026\223"
+ + "\027\224\042\uffd4\043\uffd4\001\002\000\110\004\061\005"
+ + "\111\011\071\013\103\015\134\016\066\017\106\021\070"
+ + "\031\075\032\031\033\040\034\057\035\034\036\113\037"
+ + "\141\040\142\041\145\044\063\045\062\046\065\047\115"
+ + "\050\123\051\136\052\077\053\143\054\131\055\125\056"
+ + "\116\057\104\060\140\061\067\062\060\063\127\064\132"
+ + "\065\072\001\002\000\016\002\uffd8\007\uffd8\012\uffd8\014"
+ + "\uffd8\042\uffd8\043\234\001\002\000\110\004\061\005\111"
+ + "\011\071\013\103\015\134\016\066\017\106\021\070\031"
+ + "\075\032\031\033\040\034\057\035\034\036\113\037\141"
+ + "\040\142\041\145\044\063\045\062\046\065\047\115\050"
+ + "\123\051\136\052\077\053\143\054\131\055\125\056\116"
+ + "\057\104\060\140\061\067\062\060\063\127\064\132\065"
+ + "\072\001\002\000\022\002\uffd6\007\uffd6\012\uffd6\014\uffd6"
+ + "\022\216\023\217\042\uffd6\043\uffd6\001\002\000\062\013"
+ + "\uffaf\032\uffaf\033\uffaf\035\uffaf\036\uffaf\037\uffaf\040\uffaf"
+ + "\041\uffaf\044\uffaf\045\uffaf\046\uffaf\047\uffaf\050\uffaf\051"
+ + "\uffaf\052\uffaf\053\uffaf\054\uffaf\055\uffaf\056\uffaf\057\uffaf"
+ + "\060\uffaf\061\uffaf\062\uffaf\063\uffaf\001\002\000\054\002"
+ + "\uffb1\004\uffb1\006\055\007\uffb1\010\uffb1\012\uffb1\013\uffb1"
+ + "\014\uffb1\021\uffb1\022\uffb1\023\uffb1\024\uffb1\025\uffb1\026"
+ + "\uffb1\027\uffb1\030\uffb1\031\uffb1\032\uffb1\033\uffb1\042\uffb1"
+ + "\043\uffb1\001\002\000\052\002\uffb2\004\uffb2\007\uffb2\010"
+ + "\uffb2\012\uffb2\013\uffb2\014\uffb2\021\uffb2\022\uffb2\023\uffb2"
+ + "\024\uffb2\025\uffb2\026\uffb2\027\uffb2\030\uffb2\031\uffb2\032"
+ + "\uffb2\033\uffb2\042\uffb2\043\uffb2\001\002\000\044\002\uffc5"
+ + "\007\uffc5\012\uffc5\013\uffc5\014\uffc5\022\uffc5\023\uffc5\024"
+ + "\uffc5\025\uffc5\026\uffc5\027\uffc5\030\uffc5\031\uffc5\032\uffc5"
+ + "\033\uffc5\042\uffc5\043\uffc5\001\002\000\004\012\243\001"
+ + "\002\000\054\002\uff9b\004\uff9b\006\uff9b\007\uff9b\010\uff9b"
+ + "\012\uff9b\013\uff9b\014\uff9b\021\uff9b\022\uff9b\023\uff9b\024"
+ + "\uff9b\025\uff9b\026\uff9b\027\uff9b\030\uff9b\031\uff9b\032\uff9b"
+ + "\033\uff9b\042\uff9b\043\uff9b\001\002\000\052\002\uffb5\004"
+ + "\153\007\uffb5\010\uffb5\012\uffb5\013\uffb5\014\uffb5\021\152"
+ + "\022\uffb5\023\uffb5\024\uffb5\025\uffb5\026\uffb5\027\uffb5\030"
+ + "\uffb5\031\uffb5\032\uffb5\033\uffb5\042\uffb5\043\uffb5\001\002"
+ + "\000\004\034\246\001\002\000\004\012\247\001\002\000"
+ + "\054\002\uff8a\004\uff8a\006\uff8a\007\uff8a\010\uff8a\012\uff8a"
+ + "\013\uff8a\014\uff8a\021\uff8a\022\uff8a\023\uff8a\024\uff8a\025"
+ + "\uff8a\026\uff8a\027\uff8a\030\uff8a\031\uff8a\032\uff8a\033\uff8a"
+ + "\042\uff8a\043\uff8a\001\002\000\052\002\uffb8\004\153\007"
+ + "\uffb8\010\uffb8\012\uffb8\013\uffb8\014\uffb8\021\152\022\uffb8"
+ + "\023\uffb8\024\uffb8\025\uffb8\026\uffb8\027\uffb8\030\uffb8\031"
+ + "\uffb8\032\uffb8\033\uffb8\042\uffb8\043\uffb8\001\002\000\052"
+ + "\002\uffdc\004\uffdc\007\uffdc\010\uffdc\012\uffdc\013\uffdc\014"
+ + "\uffdc\021\uffdc\022\uffdc\023\uffdc\024\uffdc\025\uffdc\026\uffdc"
+ + "\027\uffdc\030\uffdc\031\uffdc\032\uffdc\033\uffdc\042\uffdc\043"
+ + "\uffdc\001\002\000\062\013\uffde\032\uffde\033\uffde\035\uffde"
+ + "\036\uffde\037\uffde\040\uffde\041\uffde\044\uffde\045\uffde\046"
+ + "\uffde\047\uffde\050\uffde\051\uffde\052\uffde\053\uffde\054\uffde"
+ + "\055\uffde\056\uffde\057\uffde\060\uffde\061\uffde\062\uffde\063"
+ + "\uffde\001\002\000\004\034\254\001\002\000\004\014\255"
+ + "\001\002\000\004\034\256\001\002\000\004\012\257\001"
+ + "\002\000\012\002\ufff4\004\ufff4\010\ufff4\021\ufff4\001\002"
+ + "\000\004\034\261\001\002\000\004\012\262\001\002\000"
+ + "\012\002\ufff5\004\ufff5\010\ufff5\021\ufff5\001\002\000\012"
+ + "\002\uffec\004\uffec\010\uffec\021\uffec\001\002\000\062\013"
+ + "\uffdf\032\uffdf\033\uffdf\035\uffdf\036\uffdf\037\uffdf\040\uffdf"
+ + "\041\uffdf\044\uffdf\045\uffdf\046\uffdf\047\uffdf\050\uffdf\051"
+ + "\uffdf\052\uffdf\053\uffdf\054\uffdf\055\uffdf\056\uffdf\057\uffdf"
+ + "\060\uffdf\061\uffdf\062\uffdf\063\uffdf\001\002\000\064\013"
+ + "\027\016\020\032\031\033\040\035\034\036\113\037\141"
+ + "\040\047\041\051\044\015\045\014\046\016\047\036\050"
+ + "\037\051\044\052\025\053\050\054\043\055\041\056\035"
+ + "\057\030\060\045\061\021\062\012\063\042\001\002\000"
+ + "\064\013\027\016\020\032\031\033\040\035\034\036\113"
+ + "\037\141\040\047\041\051\044\015\045\014\046\016\047"
+ + "\036\050\037\051\044\052\025\053\050\054\043\055\041"
+ + "\056\035\057\030\060\045\061\021\062\012\063\042\001"
+ + "\002\000\006\002\ufff1\010\ufff1\001\002\000\006\002\ufff0"
+ + "\010\ufff0\001\002\000\006\002\ufff7\010\ufff7\001\002\000"
+ + "\014\002\uffe9\004\uffe9\006\055\010\uffe9\021\uffe9\001\002"
+ + "\000\014\002\uffeb\004\uffeb\006\055\010\uffeb\021\uffeb\001"
+ + "\002\000\012\002\uffea\004\uffea\010\uffea\021\uffea\001\002"
+ + "\000\012\002\uffe8\004\uffe8\010\uffe8\021\uffe8\001\002\000"
+ + "\064\013\027\016\020\032\031\033\040\035\034\036\113"
+ + "\037\141\040\047\041\051\044\015\045\014\046\016\047"
+ + "\036\050\037\051\044\052\025\053\050\054\043\055\041"
+ + "\056\035\057\030\060\045\061\021\062\012\063\042\001"
+ + "\002\000\064\013\027\016\020\032\031\033\040\035\034"
+ + "\036\113\037\141\040\047\041\051\044\015\045\014\046"
+ + "\016\047\036\050\037\051\044\052\025\053\050\054\043"
+ + "\055\041\056\035\057\030\060\045\061\021\062\012\063"
+ + "\042\001\002\000\006\002\ufff9\010\ufff9\001\002\000\006"
+ + "\002\ufff8\010\ufff8\001\002\000\004\034\303\001\002\000"
+ + "\004\012\304\001\002\000\014\002\ufff3\004\ufff3\006\ufff3"
+ + "\010\ufff3\021\ufff3\001\002\000\006\002\ufffb\010\ufffb\001"
+ + "\002\000\070\004\013\013\027\016\020\021\023\032\031"
+ + "\033\040\035\034\036\033\037\046\040\047\041\051\044"
+ + "\015\045\014\046\016\047\036\050\037\051\044\052\025"
+ + "\053\050\054\043\055\041\056\035\057\030\060\045\061"
+ + "\021\062\012\063\042\001\002\000\004\002\ufffd\001\002"
+ + "\000\004\002\uffff\001\002\000\004\002\001\001\002"});
- /** Indicates start state. */
- public int start_state() {return 0;}
- /** Indicates start production. */
- public int start_production() {return 0;}
+ /**
+ * Access to parse-action table.
+ */
+ public short[][] action_table() {
+ return _action_table;
+ }
- /** <code>EOF</code> Symbol index. */
- public int EOF_sym() {return 0;}
+ /**
+ * <code>reduce_goto</code> table.
+ */
+ protected static final short[][] _reduce_table
+ = unpackFromStrings(new String[]{
+ "\000\307\000\004\003\003\001\001\000\002\001\001\000"
+ + "\070\004\307\006\120\010\127\011\117\012\101\013\075"
+ + "\014\104\015\063\016\111\017\145\020\113\021\125\022"
+ + "\073\023\121\024\143\025\123\026\136\027\146\030\134"
+ + "\031\107\032\072\033\106\034\147\047\150\050\116\052"
+ + "\100\053\077\001\001\000\026\035\016\036\007\037\006"
+ + "\040\031\041\025\042\023\043\052\044\010\047\051\054"
+ + "\021\001\001\000\002\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\020\040\031\041\304\042"
+ + "\023\043\052\044\010\047\051\054\021\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\012\040\271"
+ + "\043\272\044\010\047\051\001\001\000\020\040\031\041"
+ + "\270\042\023\043\052\044\010\047\051\054\021\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\002\001\001\000\002\001\001\000\002\001\001\000\006"
+ + "\007\053\045\262\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\002\001\001\000\002\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\006\007\053\045\055\001\001"
+ + "\000\006\007\053\045\250\001\001\000\070\004\132\006"
+ + "\120\010\127\011\117\012\101\013\075\014\104\015\063"
+ + "\016\111\017\145\020\113\021\125\022\073\023\121\024"
+ + "\143\025\123\026\136\027\146\030\134\031\107\032\072"
+ + "\033\106\034\147\047\150\050\116\052\100\053\077\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\024\011\117\026\136\027\247\030\134\033\106\034"
+ + "\147\047\153\052\100\053\077\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\002\001\001\000\002\001\001\000\024\011\117\026\136"
+ + "\027\243\030\134\033\106\034\147\047\153\052\100\053"
+ + "\077\001\001\000\070\004\241\006\120\010\127\011\117"
+ + "\012\101\013\075\014\104\015\063\016\111\017\145\020"
+ + "\113\021\125\022\073\023\121\024\143\025\123\026\136"
+ + "\027\146\030\134\031\107\032\072\033\106\034\147\047"
+ + "\150\050\116\052\100\053\077\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\052\006\120\010"
+ + "\127\011\117\020\240\021\125\022\073\023\121\024\143"
+ + "\025\123\026\136\027\146\030\134\031\107\032\072\033"
+ + "\106\034\147\047\150\050\116\052\100\053\077\001\001"
+ + "\000\002\001\001\000\002\001\001\000\010\033\236\034"
+ + "\147\047\153\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\002\001\001\000\006\007\053\045\214\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\006\007\053\045\177\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\006\047\164\051\166\001\001\000"
+ + "\002\001\001\000\002\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\020\011\155\026\136\033\106\034\147\047\153\052\100"
+ + "\053\077\001\001\000\020\011\154\026\136\033\106\034"
+ + "\147\047\153\052\100\053\077\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\052\006\120\010"
+ + "\127\011\117\020\163\021\125\022\073\023\121\024\143"
+ + "\025\123\026\136\027\146\030\134\031\107\032\072\033"
+ + "\106\034\147\047\150\050\116\052\100\053\077\001\001"
+ + "\000\052\006\120\010\127\011\117\020\162\021\125\022"
+ + "\073\023\121\024\143\025\123\026\136\027\146\030\134"
+ + "\031\107\032\072\033\106\034\147\047\150\050\116\052"
+ + "\100\053\077\001\001\000\052\006\120\010\127\011\117"
+ + "\020\161\021\125\022\073\023\121\024\143\025\123\026"
+ + "\136\027\146\030\134\031\107\032\072\033\106\034\147"
+ + "\047\150\050\116\052\100\053\077\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\002\001\001\000\024\011\117\026\136\027\174\030\134"
+ + "\033\106\034\147\047\153\052\100\053\077\001\001\000"
+ + "\024\011\117\026\136\027\173\030\134\033\106\034\147"
+ + "\047\153\052\100\053\077\001\001\000\002\001\001\000"
+ + "\002\001\001\000\050\006\120\010\127\011\117\021\125"
+ + "\022\073\023\121\024\176\025\123\026\136\027\146\030"
+ + "\134\031\107\032\072\033\106\034\147\047\150\050\116"
+ + "\052\100\053\077\001\001\000\002\001\001\000\002\001"
+ + "\001\000\074\004\203\005\202\006\120\010\127\011\117"
+ + "\012\101\013\075\014\104\015\063\016\111\017\145\020"
+ + "\113\021\125\022\073\023\121\024\143\025\123\026\136"
+ + "\027\146\030\134\031\107\032\072\033\106\034\147\046"
+ + "\204\047\150\050\116\052\100\053\077\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\074\004\203\005\202\006\120"
+ + "\010\127\011\117\012\101\013\075\014\104\015\063\016"
+ + "\111\017\145\020\113\021\125\022\073\023\121\024\143"
+ + "\025\123\026\136\027\146\030\134\031\107\032\072\033"
+ + "\106\034\147\046\207\047\150\050\116\052\100\053\077"
+ + "\001\001\000\002\001\001\000\054\006\120\010\127\011"
+ + "\117\017\213\020\113\021\125\022\073\023\121\024\143"
+ + "\025\123\026\136\027\146\030\134\031\107\032\072\033"
+ + "\106\034\147\047\150\050\116\052\100\053\077\001\001"
+ + "\000\054\006\120\010\127\011\117\017\212\020\113\021"
+ + "\125\022\073\023\121\024\143\025\123\026\136\027\146"
+ + "\030\134\031\107\032\072\033\106\034\147\047\150\050"
+ + "\116\052\100\053\077\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\060\006\120\010\127\011"
+ + "\117\015\230\016\111\017\145\020\113\021\125\022\073"
+ + "\023\121\024\143\025\123\026\136\027\146\030\134\031"
+ + "\107\032\072\033\106\034\147\047\150\050\116\052\100"
+ + "\053\077\001\001\000\060\006\120\010\127\011\117\015"
+ + "\217\016\111\017\145\020\113\021\125\022\073\023\121"
+ + "\024\143\025\123\026\136\027\146\030\134\031\107\032"
+ + "\072\033\106\034\147\047\150\050\116\052\100\053\077"
+ + "\001\001\000\002\001\001\000\056\006\120\010\127\011"
+ + "\117\016\227\017\145\020\113\021\125\022\073\023\121"
+ + "\024\143\025\123\026\136\027\146\030\134\031\107\032"
+ + "\072\033\106\034\147\047\150\050\116\052\100\053\077"
+ + "\001\001\000\056\006\120\010\127\011\117\016\226\017"
+ + "\145\020\113\021\125\022\073\023\121\024\143\025\123"
+ + "\026\136\027\146\030\134\031\107\032\072\033\106\034"
+ + "\147\047\150\050\116\052\100\053\077\001\001\000\056"
+ + "\006\120\010\127\011\117\016\225\017\145\020\113\021"
+ + "\125\022\073\023\121\024\143\025\123\026\136\027\146"
+ + "\030\134\031\107\032\072\033\106\034\147\047\150\050"
+ + "\116\052\100\053\077\001\001\000\056\006\120\010\127"
+ + "\011\117\016\224\017\145\020\113\021\125\022\073\023"
+ + "\121\024\143\025\123\026\136\027\146\030\134\031\107"
+ + "\032\072\033\106\034\147\047\150\050\116\052\100\053"
+ + "\077\001\001\000\002\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\064\006"
+ + "\120\010\127\011\117\013\232\014\104\015\063\016\111"
+ + "\017\145\020\113\021\125\022\073\023\121\024\143\025"
+ + "\123\026\136\027\146\030\134\031\107\032\072\033\106"
+ + "\034\147\047\150\050\116\052\100\053\077\001\001\000"
+ + "\002\001\001\000\062\006\120\010\127\011\117\014\234"
+ + "\015\063\016\111\017\145\020\113\021\125\022\073\023"
+ + "\121\024\143\025\123\026\136\027\146\030\134\031\107"
+ + "\032\072\033\106\034\147\047\150\050\116\052\100\053"
+ + "\077\001\001\000\002\001\001\000\002\001\001\000\006"
+ + "\007\053\045\237\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\002\001\001\000\002\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\002\001\001\000\002\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\002\001\001\000"
+ + "\020\040\031\041\267\042\023\043\052\044\010\047\051"
+ + "\054\021\001\001\000\020\040\031\041\266\042\023\043"
+ + "\052\044\010\047\051\054\021\001\001\000\002\001\001"
+ + "\000\002\001\001\000\002\001\001\000\006\007\053\045"
+ + "\274\001\001\000\006\007\053\045\273\001\001\000\002"
+ + "\001\001\000\002\001\001\000\020\040\031\041\300\042"
+ + "\023\043\052\044\010\047\051\054\021\001\001\000\020"
+ + "\040\031\041\277\042\023\043\052\044\010\047\051\054"
+ + "\021\001\001\000\002\001\001\000\002\001\001\000\002"
+ + "\001\001\000\002\001\001\000\002\001\001\000\002\001"
+ + "\001\000\026\035\016\036\306\037\006\040\031\041\025"
+ + "\042\023\043\052\044\010\047\051\054\021\001\001\000"
+ + "\002\001\001\000\002\001\001\000\002\001\001"});
- /** <code>error</code> Symbol index. */
- public int error_sym() {return 1;}
+ /**
+ * Access to <code>reduce_goto</code> table.
+ */
+ public short[][] reduce_table() {
+ return _reduce_table;
+ }
+
+ /**
+ * Instance of action encapsulation class.
+ */
+ protected parser_actions action_obj;
+
+ /**
+ * Action encapsulation object initializer.
+ */
+ protected void init_actions() {
+ action_obj = new parser_actions(this);
+ }
+ /**
+ * Invoke a user supplied parse action.
+ */
+ public Symbol do_action(
+ int act_num,
+ lr_parser parser,
+ Stack<Symbol> stack,
+ int top)
+ throws java.lang.Exception {
+ /* call code in generated class */
+ return action_obj.parser_do_action(act_num, parser, stack, top);
+ }
+ /**
+ * Indicates start state.
+ */
+ public int start_state() {
+ return 0;
+ }
+
+ /**
+ * Indicates start production.
+ */
+ public int start_production() {
+ return 0;
+ }
+
+ /**
+ * <code>EOF</code> Symbol index.
+ */
+ public int EOF_sym() {
+ return 0;
+ }
+
+ /**
+ * <code>error</code> Symbol index.
+ */
+ public int error_sym() {
+ return 1;
+ }
/**
* Used by function calls with no args.
@@ -889,7 +936,7 @@
* Reference to the Parser class.
*/
private Parser _parser;
- private XSLTC _xsltc;
+ private XSLTC _xsltc;
/**
* String representation of the expression being parsed.
@@ -917,7 +964,7 @@
}
public QName getQNameIgnoreDefaultNs(String name) {
- return _parser.getQNameIgnoreDefaultNs(name);
+ return _parser.getQNameIgnoreDefaultNs(name);
}
public QName getQName(String namespace, String prefix, String localname) {
@@ -925,55 +972,51 @@
}
public void setMultiDocument(boolean flag) {
- _xsltc.setMultiDocument(flag);
+ _xsltc.setMultiDocument(flag);
}
public void setCallsNodeset(boolean flag) {
- _xsltc.setCallsNodeset(flag);
+ _xsltc.setCallsNodeset(flag);
}
public void setHasIdCall(boolean flag) {
- _xsltc.setHasIdCall(flag);
+ _xsltc.setHasIdCall(flag);
}
-
/**
* This method is similar to findNodeType(int, Object) except that it
* creates a StepPattern instead of just returning a node type. It also
* differs in the way it handles "{uri}:*" and "{uri}:@*". The last two
* patterns are expanded as "*[namespace-uri() = 'uri']" and
* "@*[namespace-uri() = 'uri']", respectively. This expansion considerably
- * simplifies the grouping of patterns in the Mode class. For this
- * expansion to be correct, the priority of the pattern/template must be
- * set to -0.25 (when no other predicates are present).
+ * simplifies the grouping of patterns in the Mode class. For this expansion
+ * to be correct, the priority of the pattern/template must be set to -0.25
+ * (when no other predicates are present).
*/
public StepPattern createStepPattern(int axis, Object test, List<Predicate> predicates) {
int nodeType;
if (test == null) { // "*"
- nodeType = (axis == Axis.ATTRIBUTE) ? NodeTest.ATTRIBUTE :
- (axis == Axis.NAMESPACE) ? -1 : NodeTest.ELEMENT;
+ nodeType = (axis == Axis.ATTRIBUTE) ? NodeTest.ATTRIBUTE
+ : (axis == Axis.NAMESPACE) ? -1 : NodeTest.ELEMENT;
return new StepPattern(axis, nodeType, predicates);
- }
- else if (test instanceof Integer) {
- nodeType = ((Integer) test).intValue();
+ } else if (test instanceof Integer) {
+ nodeType = ((Integer) test);
return new StepPattern(axis, nodeType, predicates);
- }
- else {
- QName name = (QName)test;
+ } else {
+ QName name = (QName) test;
boolean setPriority = false;
if (axis == Axis.NAMESPACE) {
nodeType = (name.toString().equals("*")) ? -1
- : _xsltc.registerNamespacePrefix(name);;
- }
- else {
+ : _xsltc.registerNamespacePrefix(name);;
+ } else {
final String uri = name.getNamespace();
final String local = name.getLocalPart();
- final QName namespace_uri =
- _parser.getQNameIgnoreDefaultNs("namespace-uri");
+ final QName namespace_uri
+ = _parser.getQNameIgnoreDefaultNs("namespace-uri");
// Expand {uri}:* to *[namespace-uri() = 'uri'] - same for @*
if (uri != null && (local.equals("*") || local.equals("@*"))) {
@@ -985,22 +1028,20 @@
setPriority = (predicates.size() == 0);
predicates.add(
- new Predicate(
- new EqualityExpr(Operators.EQ,
- new NamespaceUriCall(namespace_uri),
- new LiteralExpr(uri))));
+ new Predicate(
+ new EqualityExpr(Operators.EQ,
+ new NamespaceUriCall(namespace_uri),
+ new LiteralExpr(uri))));
}
if (local.equals("*")) {
nodeType = (axis == Axis.ATTRIBUTE) ? NodeTest.ATTRIBUTE
- : NodeTest.ELEMENT;
- }
- else if (local.equals("@*")) {
+ : NodeTest.ELEMENT;
+ } else if (local.equals("@*")) {
nodeType = NodeTest.ATTRIBUTE;
- }
- else {
+ } else {
nodeType = (axis == Axis.ATTRIBUTE) ? _xsltc.registerAttribute(name)
- : _xsltc.registerElement(name);
+ : _xsltc.registerElement(name);
}
}
@@ -1017,19 +1058,17 @@
public int findNodeType(int axis, Object test) {
if (test == null) { // *
- return (axis == Axis.ATTRIBUTE) ?
- NodeTest.ATTRIBUTE :
- (axis == Axis.NAMESPACE) ? -1 : NodeTest.ELEMENT;
- }
- else if (test instanceof Integer) {
- return ((Integer)test).intValue();
- }
- else {
- QName name = (QName)test;
+ return (axis == Axis.ATTRIBUTE)
+ ? NodeTest.ATTRIBUTE
+ : (axis == Axis.NAMESPACE) ? -1 : NodeTest.ELEMENT;
+ } else if (test instanceof Integer) {
+ return ((Integer) test);
+ } else {
+ QName name = (QName) test;
if (axis == Axis.NAMESPACE) {
return (name.toString().equals("*")) ? -1
- : _xsltc.registerNamespacePrefix(name);
+ : _xsltc.registerNamespacePrefix(name);
}
if (name.getNamespace() == null) {
@@ -1037,27 +1076,25 @@
if (local.equals("*")) {
return (axis == Axis.ATTRIBUTE) ? NodeTest.ATTRIBUTE
- : NodeTest.ELEMENT;
- }
- else if (local.equals("@*")) {
+ : NodeTest.ELEMENT;
+ } else if (local.equals("@*")) {
return NodeTest.ATTRIBUTE;
}
}
return (axis == Axis.ATTRIBUTE) ? _xsltc.registerAttribute(name)
- : _xsltc.registerElement(name);
+ : _xsltc.registerElement(name);
}
}
/**
- * Parse the expression passed to the current scanner. If this
- * expression contains references to local variables and it will be
- * compiled in an external module (not in the main class) request
- * the current template to create a new variable stack frame.
+ * Parse the expression passed to the current scanner. If this expression
+ * contains references to local variables and it will be compiled in an
+ * external module (not in the main class) request the current template to
+ * create a new variable stack frame.
*
+ * @param expression the expression to be parsed
* @param lineNumber Line where the current expression is defined.
- * @param external Set to <tt>true</tt> if this expression is
- * compiled in a separate module.
*
*/
public Symbol parse(String expression, int lineNumber) throws Exception {
@@ -1065,10 +1102,9 @@
_expression = expression;
_lineNumber = lineNumber;
return super.parse();
- }
- catch (IllegalCharException e) {
+ } catch (IllegalCharException e) {
ErrorMsg err = new ErrorMsg(ErrorMsg.ILLEGAL_CHAR_ERR,
- lineNumber, e.getMessage());
+ lineNumber, e.getMessage());
_parser.reportError(Constants.FATAL, err);
}
return null;
@@ -1082,10 +1118,11 @@
final SyntaxTreeNode lookupName(QName name) {
// Is it a local var or param ?
final SyntaxTreeNode result = _parser.lookupVariable(name);
- if (result != null)
- return(result);
- else
- return(_symbolTable.lookupName(name));
+ if (result != null) {
+ return (result);
+ } else {
+ return (_symbolTable.lookupName(name));
+ }
}
public final void addError(ErrorMsg error) {
@@ -1094,7 +1131,7 @@
public void report_error(String message, Object info) {
final ErrorMsg err = new ErrorMsg(ErrorMsg.SYNTAX_ERR, _lineNumber,
- _expression);
+ _expression);
_parser.reportError(Constants.FATAL, err);
}
@@ -1105,2073 +1142,1661 @@
public RelativeLocationPath insertStep(Step step, RelativeLocationPath rlp) {
if (rlp instanceof Step) {
return new ParentLocationPath(step, (Step) rlp);
- }
- else if (rlp instanceof ParentLocationPath) {
+ } else if (rlp instanceof ParentLocationPath) {
final ParentLocationPath plp = (ParentLocationPath) rlp;
final RelativeLocationPath newrlp = insertStep(step, plp.getPath());
return new ParentLocationPath(newrlp, plp.getStep());
- }
- else {
+ } else {
addError(new ErrorMsg(ErrorMsg.INTERNAL_ERR, "XPathParser.insertStep"));
return rlp;
}
}
/**
- * Returns true if the axis applies to elements only. The axes
- * child, attribute, namespace, descendant result in non-empty
- * nodesets only if the context node is of type element.
+ * Returns true if the axis applies to elements only. The axes child,
+ * attribute, namespace, descendant result in non-empty nodesets only if the
+ * context node is of type element.
*/
public boolean isElementAxis(int axis) {
- return (axis == Axis.CHILD || axis == Axis.ATTRIBUTE ||
- axis == Axis.NAMESPACE || axis == Axis.DESCENDANT);
+ return (axis == Axis.CHILD || axis == Axis.ATTRIBUTE
+ || axis == Axis.NAMESPACE || axis == Axis.DESCENDANT);
}
}
-/** Cup generated class to encapsulate user supplied action code.*/
+/**
+ * Cup generated class to encapsulate user supplied action code.
+ */
class parser_actions {
- private final XPathParser parser;
+
+ private final XPathParser parser;
- /** Constructor */
- parser_actions(XPathParser parser) {
- this.parser = parser;
- }
+ /**
+ * Constructor
+ */
+ parser_actions(XPathParser parser) {
+ this.parser = parser;
+ }
+
+ /**
+ * Method with the actual generated action code.
+ */
+ public final Symbol parser_do_action(
+ int parser_act_num,
+ lr_parser parser_parser,
+ Stack<Symbol> parser_stack,
+ int parser_top)
+ throws java.lang.Exception {
+ /* Symbol object for return from actions */
+ Symbol parser_result;
- /** Method with the actual generated action code. */
- public final Symbol parser_do_action(
- int parser_act_num,
- lr_parser parser_parser,
- Stack<Symbol> parser_stack,
- int parser_top)
- throws java.lang.Exception
- {
- /* Symbol object for return from actions */
- Symbol parser_result;
+ /* select the action based on the action number */
+ switch (parser_act_num) {
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 0: // $START ::= TopLevel EOF
+ {
+ SyntaxTreeNode start_val = (SyntaxTreeNode) (parser_stack.get(parser_top - 1)).value;
+ parser_result = new Symbol(0, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, start_val);
+ }
+ /* ACCEPT */
+ parser_parser.done_parsing();
+ return parser_result;
- /* select the action based on the action number */
- switch (parser_act_num)
- {
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 140: // QName ::= ID
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 1: // TopLevel ::= PATTERN Pattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("id");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Pattern pattern = (Pattern) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(1, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, pattern);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 139: // QName ::= SELF
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 2: // TopLevel ::= EXPRESSION Expr
+ {
+ Expression expr = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(1, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, expr);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 3: // Pattern ::= LocationPathPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("self");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Pattern lpp = (Pattern) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(28, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, lpp);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 138: // QName ::= PRECEDINGSIBLING
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 4: // Pattern ::= LocationPathPattern VBAR Pattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("preceding-sibling");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Pattern lpp = (Pattern) (parser_stack.get(parser_top - 2)).value;
+ Pattern p = (Pattern) (parser_stack.get(parser_top - 0)).value;
+ Pattern result = new AlternativePattern(lpp, p);
+ parser_result = new Symbol(28, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 137: // QName ::= PRECEDING
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 5: // LocationPathPattern ::= SLASH
+ {
+ Pattern result = new AbsolutePathPattern(null);
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 6: // LocationPathPattern ::= SLASH RelativePathPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("preceding");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ Pattern result = new AbsolutePathPattern(rpp);
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 136: // QName ::= PARENT
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 7: // LocationPathPattern ::= IdKeyPattern
+ {
+ IdKeyPattern ikp = (IdKeyPattern) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ikp);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 8: // LocationPathPattern ::= IdKeyPattern SLASH RelativePathPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("parent");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ IdKeyPattern ikp = (IdKeyPattern) (parser_stack.get(parser_top - 2)).value;
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ Pattern result = new ParentPattern(ikp, rpp);
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 135: // QName ::= NAMESPACE
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 9: // LocationPathPattern ::= IdKeyPattern DSLASH RelativePathPattern
+ {
+ IdKeyPattern ikp = (IdKeyPattern) (parser_stack.get(parser_top - 2)).value;
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ Pattern result = new AncestorPattern(ikp, rpp);
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 10: // LocationPathPattern ::= DSLASH RelativePathPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("namespace");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ Pattern result = new AncestorPattern(rpp);
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 134: // QName ::= FOLLOWINGSIBLING
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 11: // LocationPathPattern ::= RelativePathPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("following-sibling");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(29, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, rpp);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 133: // QName ::= FOLLOWING
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 12: // IdKeyPattern ::= ID LPAREN Literal RPAREN
+ {
+ String l = (String) (parser_stack.get(parser_top - 1)).value;
+ IdKeyPattern result = new IdPattern(l);
+ parser.setHasIdCall(true);
+ parser_result = new Symbol(27, (parser_stack.get(parser_top - 3)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 13: // IdKeyPattern ::= KEY LPAREN Literal COMMA Literal RPAREN
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("following");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ String l1 = (String) (parser_stack.get(parser_top - 3)).value;
+ String l2 = (String) (parser_stack.get(parser_top - 1)).value;
+ IdKeyPattern result = new KeyPattern(l1, l2);
+ parser_result = new Symbol(27, (parser_stack.get(parser_top - 5)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 132: // QName ::= DESCENDANTORSELF
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 14: // ProcessingInstructionPattern ::= PIPARAM LPAREN Literal RPAREN
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("decendant-or-self");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ String l = (String) (parser_stack.get(parser_top - 1)).value;
+ StepPattern result = new ProcessingInstructionPattern(l);
+ parser_result = new Symbol(30, (parser_stack.get(parser_top - 3)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 15: // RelativePathPattern ::= StepPattern
+ {
+ StepPattern sp = (StepPattern) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(31, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, sp);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 131: // QName ::= DESCENDANT
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 16: // RelativePathPattern ::= StepPattern SLASH RelativePathPattern
+ {
+ StepPattern sp = (StepPattern) (parser_stack.get(parser_top - 2)).value;
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ RelativePathPattern result = new ParentPattern(sp, rpp);
+ parser_result = new Symbol(31, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 17: // RelativePathPattern ::= StepPattern DSLASH RelativePathPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("decendant");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ StepPattern sp = (StepPattern) (parser_stack.get(parser_top - 2)).value;
+ RelativePathPattern rpp = (RelativePathPattern) (parser_stack.get(parser_top - 0)).value;
+ RelativePathPattern result = new AncestorPattern(sp, rpp);
+ parser_result = new Symbol(31, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 130: // QName ::= CHILD
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 18: // StepPattern ::= NodeTestPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("child");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object nt = parser_stack.get(parser_top - 0).value;
+ StepPattern result = parser.createStepPattern(Axis.CHILD, nt, null);
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 129: // QName ::= ATTRIBUTE
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 19: // StepPattern ::= NodeTestPattern Predicates
+ {
+ Object nt = parser_stack.get(parser_top - 1).value;
+ @SuppressWarnings("unchecked")
+ List<Predicate> pp = (ArrayList<Predicate>) (parser_stack.get(parser_top - 0)).value;
+ StepPattern result = parser.createStepPattern(Axis.CHILD, nt, pp);
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 20: // StepPattern ::= ProcessingInstructionPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("attribute");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ StepPattern pip = (StepPattern) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, pip);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 128: // QName ::= ANCESTORORSELF
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 21: // StepPattern ::= ProcessingInstructionPattern Predicates
+ {
+ StepPattern pip = (StepPattern) (parser_stack.get(parser_top - 1)).value;
+ @SuppressWarnings("unchecked")
+ List<Predicate> pp = (ArrayList<Predicate>) (parser_stack.get(parser_top - 0)).value;
+ StepPattern result = (ProcessingInstructionPattern) pip.setPredicates(pp);
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 22: // StepPattern ::= ChildOrAttributeAxisSpecifier NodeTestPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("ancestor-or-self");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Integer axis = (Integer) (parser_stack.get(parser_top - 1)).value;
+ Object nt = parser_stack.get(parser_top - 0).value;
+ StepPattern result = parser.createStepPattern(axis, nt, null);
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 127: // QName ::= ANCESTOR
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 23: // StepPattern ::= ChildOrAttributeAxisSpecifier NodeTestPattern Predicates
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("child");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Integer axis = (Integer) (parser_stack.get(parser_top - 2)).value;
+ Object nt = parser_stack.get(parser_top - 1).value;
+ @SuppressWarnings("unchecked")
+ List<Predicate> pp = (ArrayList<Predicate>) (parser_stack.get(parser_top - 0)).value;
+ StepPattern result = parser.createStepPattern(axis, nt, pp);
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 126: // QName ::= KEY
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 24: // StepPattern ::= ChildOrAttributeAxisSpecifier ProcessingInstructionPattern
+ {
+ StepPattern pip = (StepPattern) (parser_stack.get(parser_top - 0)).value;
+ StepPattern result = pip; // TODO: report error if axis is attribute
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 25: // StepPattern ::= ChildOrAttributeAxisSpecifier ProcessingInstructionPattern Predicates
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("key");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ StepPattern pip = (StepPattern) (parser_stack.get(parser_top - 1)).value;
+ @SuppressWarnings("unchecked")
+ List<Predicate> pp = (ArrayList<Predicate>) (parser_stack.get(parser_top - 0)).value;
+ // TODO: report error if axis is attribute
+ StepPattern result = (ProcessingInstructionPattern) pip.setPredicates(pp);
+ parser_result = new Symbol(32, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 125: // QName ::= MOD
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 26: // NodeTestPattern ::= NameTestPattern
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("mod");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object nt = parser_stack.get(parser_top - 0).value;
+ parser_result = new Symbol(33, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, nt);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 124: // QName ::= DIV
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 27: // NodeTestPattern ::= NODE
+ {
+ Object result = NodeTest.ANODE;
+ parser_result = new Symbol(33, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 28: // NodeTestPattern ::= TEXT
{
- QName RESULT = null;
- RESULT = parser.getQNameIgnoreDefaultNs("div");
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = NodeTest.TEXT;
+ parser_result = new Symbol(33, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 123: // QName ::= QNAME
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 29: // NodeTestPattern ::= COMMENT
+ {
+ Object result = NodeTest.COMMENT;
+ parser_result = new Symbol(33, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 30: // NodeTestPattern ::= PI
{
- QName RESULT = null;
- int qnameleft = (parser_stack.get(parser_top-0)).left;
- int qnameright = (parser_stack.get(parser_top-0)).right;
- String qname = (String)(parser_stack.get(parser_top-0)).value;
- RESULT = parser.getQNameIgnoreDefaultNs(qname);
- parser_result = new Symbol(37/*QName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = NodeTest.PI;
+ parser_result = new Symbol(33, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 31: // NameTestPattern ::= STAR
+ {
+ Object result = null;
+ parser_result = new Symbol(34, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 122: // NameTest ::= QName
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 32: // NameTestPattern ::= QName
{
- Object RESULT = null;
- int qnleft = (parser_stack.get(parser_top-0)).left;
- int qnright = (parser_stack.get(parser_top-0)).right;
- QName qn = (QName)(parser_stack.get(parser_top-0)).value;
- RESULT = qn;
- parser_result = new Symbol(26/*NameTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName qn = (QName) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(34, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, qn);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 33: // ChildOrAttributeAxisSpecifier ::= ATSIGN
+ {
+ Integer result = Axis.ATTRIBUTE;
+ parser_result = new Symbol(42, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 121: // NameTest ::= STAR
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 34: // ChildOrAttributeAxisSpecifier ::= CHILD DCOLON
{
- Object RESULT = null;
- RESULT = null;
- parser_result = new Symbol(26/*NameTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Integer result = Axis.CHILD;
+ parser_result = new Symbol(42, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 35: // ChildOrAttributeAxisSpecifier ::= ATTRIBUTE DCOLON
+ {
+ Integer result = Axis.ATTRIBUTE;
+ parser_result = new Symbol(42, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 120: // NodeTest ::= PI
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 36: // Predicates ::= Predicate
{
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.PI);
- parser_result = new Symbol(25/*NodeTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression p = (Expression) (parser_stack.get(parser_top - 0)).value;
+ List<Expression> temp = new ArrayList<>();
+ temp.add(p);
+ parser_result = new Symbol(35, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, temp);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 119: // NodeTest ::= PIPARAM LPAREN Literal RPAREN
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 37: // Predicates ::= Predicate Predicates
+ {
+ Expression p = (Expression) (parser_stack.get(parser_top - 1)).value;
+ @SuppressWarnings("unchecked")
+ List<Expression> pp = (ArrayList<Expression>) (parser_stack.get(parser_top - 0)).value;
+ pp.add(0, p);
+ parser_result = new Symbol(35, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, pp);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 38: // Predicate ::= LBRACK Expr RBRACK
{
- Object RESULT = null;
- int lleft = (parser_stack.get(parser_top-1)).left;
- int lright = (parser_stack.get(parser_top-1)).right;
- String l = (String)(parser_stack.get(parser_top-1)).value;
+ Expression e = (Expression) (parser_stack.get(parser_top - 1)).value;
+ Expression result = new Predicate(e);
+ parser_result = new Symbol(5, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- QName name = parser.getQNameIgnoreDefaultNs("name");
- Expression exp = new EqualityExpr(Operators.EQ,
- new NameCall(name),
- new LiteralExpr(l));
- List<Predicate> predicates = new ArrayList<>();
- predicates.add(new Predicate(exp));
- RESULT = new Step(Axis.CHILD, NodeTest.PI, predicates);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 39: // Expr ::= OrExpr
+ {
+ Expression ex = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(2, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ex);
+ }
+ return parser_result;
- parser_result = new Symbol(25/*NodeTest*/, (parser_stack.get(parser_top-3)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 40: // OrExpr ::= AndExpr
+ {
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(8, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ae);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 118: // NodeTest ::= COMMENT
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 41: // OrExpr ::= OrExpr OR AndExpr
{
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.COMMENT);
- parser_result = new Symbol(25/*NodeTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression oe = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new LogicalExpr(LogicalExpr.OR, oe, ae);
+ parser_result = new Symbol(8, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 42: // AndExpr ::= EqualityExpr
+ {
+ Expression e = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(9, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, e);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 117: // NodeTest ::= TEXT
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 43: // AndExpr ::= AndExpr AND EqualityExpr
{
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.TEXT);
- parser_result = new Symbol(25/*NodeTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression ae = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ee = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new LogicalExpr(LogicalExpr.AND, ae, ee);
+ parser_result = new Symbol(9, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 116: // NodeTest ::= NODE
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 44: // EqualityExpr ::= RelationalExpr
+ {
+ Expression re = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(10, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, re);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 45: // EqualityExpr ::= EqualityExpr EQ RelationalExpr
{
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.ANODE);
- parser_result = new Symbol(25/*NodeTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression ee = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression re = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new EqualityExpr(Operators.EQ, ee, re);
+ parser_result = new Symbol(10, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 115: // NodeTest ::= NameTest
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 46: // EqualityExpr ::= EqualityExpr NE RelationalExpr
{
- Object RESULT = null;
- int ntleft = (parser_stack.get(parser_top-0)).left;
- int ntright = (parser_stack.get(parser_top-0)).right;
- Object nt = parser_stack.get(parser_top-0).value;
- RESULT = nt;
- parser_result = new Symbol(25/*NodeTest*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression ee = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression re = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new EqualityExpr(Operators.NE, ee, re);
+ parser_result = new Symbol(10, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 47: // RelationalExpr ::= AdditiveExpr
+ {
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(11, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ae);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 48: // RelationalExpr ::= RelationalExpr LT AdditiveExpr
+ {
+ Expression re = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new RelationalExpr(Operators.LT, re, ae);
+ parser_result = new Symbol(11, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 114: // Argument ::= Expr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 49: // RelationalExpr ::= RelationalExpr GT AdditiveExpr
{
- Expression RESULT = null;
- int exleft = (parser_stack.get(parser_top-0)).left;
- int exright = (parser_stack.get(parser_top-0)).right;
- Expression ex = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = ex;
- parser_result = new Symbol(3/*Argument*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression re = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new RelationalExpr(Operators.GT, re, ae);
+ parser_result = new Symbol(11, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 50: // RelationalExpr ::= RelationalExpr LE AdditiveExpr
+ {
+ Expression re = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new RelationalExpr(Operators.LE, re, ae);
+ parser_result = new Symbol(11, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 113: // VariableName ::= QName
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 51: // RelationalExpr ::= RelationalExpr GE AdditiveExpr
{
- QName RESULT = null;
- int vnameleft = (parser_stack.get(parser_top-0)).left;
- int vnameright = (parser_stack.get(parser_top-0)).right;
- QName vname = (QName)(parser_stack.get(parser_top-0)).value;
+ Expression re = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ae = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new RelationalExpr(Operators.GE, re, ae);
+ parser_result = new Symbol(11, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- RESULT = vname;
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 52: // AdditiveExpr ::= MultiplicativeExpr
+ {
+ Expression me = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(12, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, me);
+ }
+ return parser_result;
- parser_result = new Symbol(39/*VariableName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 53: // AdditiveExpr ::= AdditiveExpr PLUS MultiplicativeExpr
+ {
+ Expression ae = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression me = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new BinOpExpr(BinOpExpr.PLUS, ae, me);
+ parser_result = new Symbol(12, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 112: // FunctionName ::= QName
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 54: // AdditiveExpr ::= AdditiveExpr MINUS MultiplicativeExpr
+ {
+ Expression ae = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression me = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new BinOpExpr(BinOpExpr.MINUS, ae, me);
+ parser_result = new Symbol(12, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 55: // MultiplicativeExpr ::= UnaryExpr
{
- QName RESULT = null;
- int fnameleft = (parser_stack.get(parser_top-0)).left;
- int fnameright = (parser_stack.get(parser_top-0)).right;
- QName fname = (QName)(parser_stack.get(parser_top-0)).value;
-
- RESULT = fname;
+ Expression ue = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(13, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ue);
+ }
+ return parser_result;
- parser_result = new Symbol(38/*FunctionName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 56: // MultiplicativeExpr ::= MultiplicativeExpr STAR UnaryExpr
+ {
+ Expression me = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ue = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new BinOpExpr(BinOpExpr.TIMES, me, ue);
+ parser_result = new Symbol(13, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 111: // NonemptyArgumentList ::= Argument COMMA NonemptyArgumentList
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 57: // MultiplicativeExpr ::= MultiplicativeExpr DIV UnaryExpr
+ {
+ Expression me = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ue = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new BinOpExpr(BinOpExpr.DIV, me, ue);
+ parser_result = new Symbol(13, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 58: // MultiplicativeExpr ::= MultiplicativeExpr MOD UnaryExpr
{
- List<Expression> RESULT = null;
- int argleft = (parser_stack.get(parser_top-2)).left;
- int argright = (parser_stack.get(parser_top-2)).right;
- Expression arg = (Expression)(parser_stack.get(parser_top-2)).value;
- int arglleft = (parser_stack.get(parser_top-0)).left;
- int arglright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Expression> argl = (ArrayList<Expression>)(parser_stack.get(parser_top-0)).value;
- argl.add(0, arg);
- RESULT = argl;
- parser_result = new Symbol(36/*NonemptyArgumentList*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression me = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression ue = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new BinOpExpr(BinOpExpr.MOD, me, ue);
+ parser_result = new Symbol(13, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 110: // NonemptyArgumentList ::= Argument
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 59: // UnaryExpr ::= UnionExpr
+ {
+ Expression ue = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(14, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ue);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 60: // UnaryExpr ::= MINUS UnaryExpr
{
- List<Expression> RESULT = null;
- int argleft = (parser_stack.get(parser_top-0)).left;
- int argright = (parser_stack.get(parser_top-0)).right;
- Expression arg = (Expression)(parser_stack.get(parser_top-0)).value;
+ Expression ue = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new UnaryOpExpr(ue);
+ parser_result = new Symbol(14, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- List<Expression> temp = new ArrayList<>();
- temp.add(arg);
- RESULT = temp;
-
- parser_result = new Symbol(36/*NonemptyArgumentList*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 61: // UnionExpr ::= PathExpr
+ {
+ Expression pe = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(18, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, pe);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 109: // FunctionCall ::= FunctionName LPAREN NonemptyArgumentList RPAREN
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 62: // UnionExpr ::= PathExpr VBAR UnionExpr
{
- Expression RESULT = null;
- int fnameleft = (parser_stack.get(parser_top-3)).left;
- int fnameright = (parser_stack.get(parser_top-3)).right;
- QName fname = (QName)(parser_stack.get(parser_top-3)).value;
- int arglleft = (parser_stack.get(parser_top-1)).left;
- int arglright = (parser_stack.get(parser_top-1)).right;
- @SuppressWarnings("unchecked")
- List<Expression> argl = (ArrayList<Expression>)(parser_stack.get(parser_top-1)).value;
+ Expression pe = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression rest = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new UnionPathExpr(pe, rest);
+ parser_result = new Symbol(18, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 63: // PathExpr ::= LocationPath
+ {
+ Expression lp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(19, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, lp);
+ }
+ return parser_result;
- if (fname == parser.getQNameIgnoreDefaultNs("concat")) {
- RESULT = new ConcatCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("number")) {
- RESULT = new NumberCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("document")) {
- parser.setMultiDocument(true);
- RESULT = new DocumentCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("string")) {
- RESULT = new StringCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("boolean")) {
- RESULT = new BooleanCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("name")) {
- RESULT = new NameCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("generate-id")) {
- RESULT = new GenerateIdCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("not")) {
- RESULT = new NotCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("format-number")) {
- RESULT = new FormatNumberCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("unparsed-entity-uri")) {
- RESULT = new UnparsedEntityUriCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("key")) {
- RESULT = new KeyCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("id")) {
- RESULT = new KeyCall(fname, argl);
- parser.setHasIdCall(true);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("ceiling")) {
- RESULT = new CeilingCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("round")) {
- RESULT = new RoundCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("floor")) {
- RESULT = new FloorCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("contains")) {
- RESULT = new ContainsCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("string-length")) {
- RESULT = new StringLengthCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("starts-with")) {
- RESULT = new StartsWithCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("function-available")) {
- RESULT = new FunctionAvailableCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("element-available")) {
- RESULT = new ElementAvailableCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("local-name")) {
- RESULT = new LocalNameCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("lang")) {
- RESULT = new LangCall(fname, argl);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("namespace-uri")) {
- RESULT = new NamespaceUriCall(fname, argl);
- }
- else if (fname == parser.getQName(Constants.TRANSLET_URI, "xsltc", "cast")) {
- RESULT = new CastCall(fname, argl);
- }
- // Special case for extension function nodeset()
- else if (fname.getLocalPart().equals("nodeset") || fname.getLocalPart().equals("node-set")) {
- parser.setCallsNodeset(true); // implies MultiDOM
- RESULT = new FunctionCall(fname, argl);
- }
- else {
- RESULT = new FunctionCall(fname, argl);
- }
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 64: // PathExpr ::= FilterExpr
+ {
+ Expression fexp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(19, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, fexp);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 65: // PathExpr ::= FilterExpr SLASH RelativeLocationPath
+ {
+ Expression fexp = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new FilterParentPath(fexp, rlp);
+ parser_result = new Symbol(19, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 66: // PathExpr ::= FilterExpr DSLASH RelativeLocationPath
+ {
+ Expression fexp = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ //
+ // Expand '//' into '/descendant-or-self::node()/' or
+ // into /descendant-or-self::*/
+ //
+ int nodeType = DOM.NO_TYPE;
+ if (rlp instanceof Step
+ && parser.isElementAxis(((Step) rlp).getAxis())) {
+ nodeType = DTM.ELEMENT_NODE;
+ }
+ final Step step = new Step(Axis.DESCENDANTORSELF, nodeType, null);
+ FilterParentPath fpp = new FilterParentPath(fexp, step);
+ fpp = new FilterParentPath(fpp, rlp);
+ if (fexp instanceof KeyCall == false) {
+ fpp.setDescendantAxis();
+ }
+ parser_result = new Symbol(19, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, fpp);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 67: // LocationPath ::= RelativeLocationPath
+ {
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(4, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, rlp);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 68: // LocationPath ::= AbsoluteLocationPath
+ {
+ Expression alp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(4, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, alp);
+ }
+ return parser_result;
- parser_result = new Symbol(16/*FunctionCall*/, (parser_stack.get(parser_top-3)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 69: // RelativeLocationPath ::= Step
+ {
+ Expression step = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(21, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, step);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 70: // RelativeLocationPath ::= RelativeLocationPath SLASH Step
+ {
+ Expression result = null;
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression step = (Expression) (parser_stack.get(parser_top - 0)).value;
+ if (rlp instanceof Step && ((Step) rlp).isAbbreviatedDot()) {
+ result = step; // Remove './' from the middle
+ } else if (((Step) step).isAbbreviatedDot()) {
+ result = rlp; // Remove '/.' from the end
+ } else {
+ result
+ = new ParentLocationPath((RelativeLocationPath) rlp, step);
+ }
+ parser_result = new Symbol(21, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 71: // RelativeLocationPath ::= AbbreviatedRelativeLocationPath
+ {
+ Expression arlp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(21, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, arlp);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 72: // AbsoluteLocationPath ::= SLASH
+ {
+ Expression result = new AbsoluteLocationPath();
+ parser_result = new Symbol(23, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 73: // AbsoluteLocationPath ::= SLASH RelativeLocationPath
+ {
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new AbsoluteLocationPath(rlp);
+ parser_result = new Symbol(23, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 74: // AbsoluteLocationPath ::= AbbreviatedAbsoluteLocationPath
+ {
+ Expression aalp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(23, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, aalp);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 108: // FunctionCall ::= FunctionName LPAREN RPAREN
- {
- Expression RESULT = null;
- int fnameleft = (parser_stack.get(parser_top-2)).left;
- int fnameright = (parser_stack.get(parser_top-2)).right;
- QName fname = (QName)(parser_stack.get(parser_top-2)).value;
-
+ return parser_result;
- if (fname == parser.getQNameIgnoreDefaultNs("current")) {
- RESULT = new CurrentCall(fname);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("number")) {
- RESULT = new NumberCall(fname, XPathParser.EmptyArgs);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("string")) {
- RESULT = new StringCall(fname, XPathParser.EmptyArgs);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("concat")) {
- RESULT = new ConcatCall(fname, XPathParser.EmptyArgs);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("true")) {
- RESULT = new BooleanExpr(true);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("false")) {
- RESULT = new BooleanExpr(false);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("name")) {
- RESULT = new NameCall(fname);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("generate-id")) {
- RESULT = new GenerateIdCall(fname, XPathParser.EmptyArgs);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("string-length")) {
- RESULT = new StringLengthCall(fname, XPathParser.EmptyArgs);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("position")) {
- RESULT = new PositionCall(fname);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("last")) {
- RESULT = new LastCall(fname);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("local-name")) {
- RESULT = new LocalNameCall(fname);
- }
- else if (fname == parser.getQNameIgnoreDefaultNs("namespace-uri")) {
- RESULT = new NamespaceUriCall(fname);
- }
- else {
- RESULT = new FunctionCall(fname, XPathParser.EmptyArgs);
- }
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 75: // AbbreviatedRelativeLocationPath ::= RelativeLocationPath DSLASH Step
+ {
+ Expression result = null;
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 2)).value;
+ Expression step = (Expression) (parser_stack.get(parser_top - 0)).value;
+ final Step right = (Step) step;
+ final int axis = right.getAxis();
+ final int type = right.getNodeType();
+ final List<Predicate> predicates = right.getPredicates();
+ if ((axis == Axis.CHILD) && (type != NodeTest.ATTRIBUTE)) {
+ // Compress './/child:E' into 'descendant::E' - if possible
+ if (predicates == null) {
+ right.setAxis(Axis.DESCENDANT);
+ if (rlp instanceof Step && ((Step) rlp).isAbbreviatedDot()) {
+ result = right;
+ } else {
+ // Expand 'rlp//child::E' into 'rlp/descendant::E'
+ RelativeLocationPath left = (RelativeLocationPath) rlp;
+ result = new ParentLocationPath(left, right);
+ }
+ } else // Expand './/step' -> 'descendant-or-self::*/step'
+ if (rlp instanceof Step && ((Step) rlp).isAbbreviatedDot()) {
+ Step left = new Step(Axis.DESCENDANTORSELF,
+ DTM.ELEMENT_NODE, null);
+ result = new ParentLocationPath(left, right);
+ } else {
+ // Expand 'rlp//step' -> 'rlp/descendant-or-self::*/step'
+ RelativeLocationPath left = (RelativeLocationPath) rlp;
+ Step mid = new Step(Axis.DESCENDANTORSELF,
+ DTM.ELEMENT_NODE, null);
+ ParentLocationPath ppl = new ParentLocationPath(mid, right);
+ result = new ParentLocationPath(left, ppl);
+ }
+ } else if ((axis == Axis.ATTRIBUTE) || (type == NodeTest.ATTRIBUTE)) {
+ // Expand 'rlp//step' -> 'rlp/descendant-or-self::*/step'
+ RelativeLocationPath left = (RelativeLocationPath) rlp;
+ Step middle = new Step(Axis.DESCENDANTORSELF,
+ DTM.ELEMENT_NODE, null);
+ ParentLocationPath ppl = new ParentLocationPath(middle, right);
+ result = new ParentLocationPath(left, ppl);
+ } else {
+ // Expand 'rlp//step' -> 'rlp/descendant-or-self::node()/step'
+ RelativeLocationPath left = (RelativeLocationPath) rlp;
+ Step middle = new Step(Axis.DESCENDANTORSELF,
+ DOM.NO_TYPE, null);
+ ParentLocationPath ppl = new ParentLocationPath(middle, right);
+ result = new ParentLocationPath(left, ppl);
+ }
+ parser_result = new Symbol(22, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- parser_result = new Symbol(16/*FunctionCall*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 76: // AbbreviatedAbsoluteLocationPath ::= DSLASH RelativeLocationPath
+ {
+ Expression rlp = (Expression) (parser_stack.get(parser_top - 0)).value;
+ //
+ // Expand '//' into '/descendant-or-self::node()/' or
+ // into /descendant-or-self::*/
+ //
+ int nodeType = DOM.NO_TYPE;
+ if (rlp instanceof Step
+ && parser.isElementAxis(((Step) rlp).getAxis())) {
+ nodeType = DTM.ELEMENT_NODE;
+ }
+ final Step step = new Step(Axis.DESCENDANTORSELF, nodeType, null);
+ Expression result = new AbsoluteLocationPath(parser.insertStep(step,
+ (RelativeLocationPath) rlp));
+ parser_result = new Symbol(24, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 107: // VariableReference ::= DOLLAR VariableName
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 77: // Step ::= NodeTest
{
- Expression RESULT = null;
- int varNameleft = (parser_stack.get(parser_top-0)).left;
- int varNameright = (parser_stack.get(parser_top-0)).right;
- QName varName = (QName)(parser_stack.get(parser_top-0)).value;
+ Expression result = null;
+ Object ntest = parser_stack.get(parser_top - 0).value;
+ if (ntest instanceof Step) {
+ result = (Step) ntest;
+ } else {
+ result = new Step(Axis.CHILD,
+ parser.findNodeType(Axis.CHILD, ntest),
+ null);
+ }
+ parser_result = new Symbol(7, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- // An empty qname prefix for a variable or parameter reference
- // should map to the null namespace and not the default URI.
- SyntaxTreeNode node = parser.lookupName(varName);
-
- if (node != null) {
- if (node instanceof Variable) {
- RESULT = new VariableRef((Variable)node);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 78: // Step ::= NodeTest Predicates
+ {
+ Expression result = null;
+ Object ntest = parser_stack.get(parser_top - 1).value;
+ @SuppressWarnings("unchecked")
+ List<Predicate> pp = (ArrayList<Predicate>) (parser_stack.get(parser_top - 0)).value;
+ if (ntest instanceof Step) {
+ Step step = (Step) ntest;
+ step.addPredicates(pp);
+ result = (Step) ntest;
+ } else {
+ result = new Step(Axis.CHILD,
+ parser.findNodeType(Axis.CHILD, ntest), pp);
}
- else if (node instanceof Param) {
- RESULT = new ParameterRef((Param)node);
- }
- else {
- RESULT = new UnresolvedRef(varName);
- }
+ parser_result = new Symbol(7, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 79: // Step ::= AxisSpecifier NodeTest Predicates
+ {
+ Integer axis = (Integer) (parser_stack.get(parser_top - 2)).value;
+ Object ntest = parser_stack.get(parser_top - 1).value;
+ @SuppressWarnings("unchecked")
+ List<Predicate> pp = (ArrayList<Predicate>) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new Step(axis, parser.findNodeType(axis, ntest), pp);
+ parser_result = new Symbol(7, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- if (node == null) {
- RESULT = new UnresolvedRef(varName);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 80: // Step ::= AxisSpecifier NodeTest
+ {
+ Integer axis = (Integer) (parser_stack.get(parser_top - 1)).value;
+ Object ntest = parser_stack.get(parser_top - 0).value;
+ Expression result = new Step(axis, parser.findNodeType(axis, ntest), null);
+ parser_result = new Symbol(7, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
+ return parser_result;
- parser_result = new Symbol(15/*VariableReference*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 81: // Step ::= AbbreviatedStep
+ {
+ Expression abbrev = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(7, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, abbrev);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 106: // PrimaryExpr ::= FunctionCall
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 82: // AxisSpecifier ::= AxisName DCOLON
+ {
+ Integer an = (Integer) (parser_stack.get(parser_top - 1)).value;
+ parser_result = new Symbol(41, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, an);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 83: // AxisSpecifier ::= ATSIGN
{
- Expression RESULT = null;
- int fcleft = (parser_stack.get(parser_top-0)).left;
- int fcright = (parser_stack.get(parser_top-0)).right;
- Expression fc = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = fc;
- parser_result = new Symbol(17/*PrimaryExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Integer result = Axis.ATTRIBUTE;
+ parser_result = new Symbol(41, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 84: // AxisName ::= ANCESTOR
+ {
+ Integer result = Axis.ANCESTOR;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 85: // AxisName ::= ANCESTORORSELF
+ {
+ Integer result = Axis.ANCESTORORSELF;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 86: // AxisName ::= ATTRIBUTE
+ {
+ Integer result = Axis.ATTRIBUTE;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 105: // PrimaryExpr ::= REAL
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 87: // AxisName ::= CHILD
+ {
+ Integer result = Axis.CHILD;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 88: // AxisName ::= DESCENDANT
{
- Expression RESULT = null;
- int numleft = (parser_stack.get(parser_top-0)).left;
- int numright = (parser_stack.get(parser_top-0)).right;
- Double num = (Double)(parser_stack.get(parser_top-0)).value;
- RESULT = new RealExpr(num.doubleValue());
- parser_result = new Symbol(17/*PrimaryExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Integer result = Axis.DESCENDANT;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 89: // AxisName ::= DESCENDANTORSELF
+ {
+ Integer result = Axis.DESCENDANTORSELF;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 90: // AxisName ::= FOLLOWING
+ {
+ Integer result = Axis.FOLLOWING;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 91: // AxisName ::= FOLLOWINGSIBLING
+ {
+ Integer result = Axis.FOLLOWINGSIBLING;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 104: // PrimaryExpr ::= INT
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 92: // AxisName ::= NAMESPACE
+ {
+ Integer result = Axis.NAMESPACE;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 93: // AxisName ::= PARENT
{
- Expression RESULT = null;
- int numleft = (parser_stack.get(parser_top-0)).left;
- int numright = (parser_stack.get(parser_top-0)).right;
- Long num = (Long)(parser_stack.get(parser_top-0)).value;
+ Integer result = Axis.PARENT;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 94: // AxisName ::= PRECEDING
+ {
+ Integer result = Axis.PRECEDING;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 95: // AxisName ::= PRECEDINGSIBLING
+ {
+ Integer result = Axis.PRECEDINGSIBLING;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 96: // AxisName ::= SELF
+ {
+ Integer result = Axis.SELF;
+ parser_result = new Symbol(40, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- long value = num.longValue();
- if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) {
- RESULT = new RealExpr(value);
- }
- else {
- if (num.doubleValue() == -0)
- RESULT = new RealExpr(num.doubleValue());
- else if (num.intValue() == 0)
- RESULT = new IntExpr(num.intValue());
- else if (num.doubleValue() == 0.0)
- RESULT = new RealExpr(num.doubleValue());
- else
- RESULT = new IntExpr(num.intValue());
- }
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 97: // AbbreviatedStep ::= DOT
+ {
+ Expression result = new Step(Axis.SELF, NodeTest.ANODE, null);
+ parser_result = new Symbol(20, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 98: // AbbreviatedStep ::= DDOT
+ {
+ Expression result = new Step(Axis.PARENT, NodeTest.ANODE, null);
+ parser_result = new Symbol(20, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 99: // FilterExpr ::= PrimaryExpr
+ {
+ Expression primary = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(6, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, primary);
+ }
+ return parser_result;
- parser_result = new Symbol(17/*PrimaryExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 100: // FilterExpr ::= PrimaryExpr Predicates
+ {
+ Expression primary = (Expression) (parser_stack.get(parser_top - 1)).value;
+ @SuppressWarnings("unchecked")
+ List<Expression> pp = (ArrayList<Expression>) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new FilterExpr(primary, pp);
+ parser_result = new Symbol(6, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 103: // PrimaryExpr ::= Literal
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 101: // PrimaryExpr ::= VariableReference
{
- Expression RESULT = null;
- int stringleft = (parser_stack.get(parser_top-0)).left;
- int stringright = (parser_stack.get(parser_top-0)).right;
- String string = (String)(parser_stack.get(parser_top-0)).value;
+ Expression vr = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(17, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, vr);
+ }
+ return parser_result;
- /*
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 102: // PrimaryExpr ::= LPAREN Expr RPAREN
+ {
+ Expression ex = (Expression) (parser_stack.get(parser_top - 1)).value;
+ parser_result = new Symbol(17, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, ex);
+ }
+ return parser_result;
+
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 103: // PrimaryExpr ::= Literal
+ {
+ String string = (String) (parser_stack.get(parser_top - 0)).value;
+ /*
* If the string appears to have the syntax of a QName, store
* namespace info in the literal expression. This is used for
* element-available and function-available functions, among
* others. Also, the default namespace must be ignored.
- */
- String namespace = null;
- final int index = string.lastIndexOf(':');
-
- if (index > 0) {
- final String prefix = string.substring(0, index);
- namespace = parser._symbolTable.lookupNamespace(prefix);
- }
- RESULT = (namespace == null) ? new LiteralExpr(string)
- : new LiteralExpr(string, namespace);
-
- parser_result = new Symbol(17/*PrimaryExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ */
+ String namespace = null;
+ final int index = string.lastIndexOf(':');
+ if (index > 0) {
+ final String prefix = string.substring(0, index);
+ namespace = parser._symbolTable.lookupNamespace(prefix);
+ }
+ Expression result = (namespace == null) ? new LiteralExpr(string)
+ : new LiteralExpr(string, namespace);
+ parser_result = new Symbol(17, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 102: // PrimaryExpr ::= LPAREN Expr RPAREN
- {
- Expression RESULT = null;
- int exleft = (parser_stack.get(parser_top-1)).left;
- int exright = (parser_stack.get(parser_top-1)).right;
- Expression ex = (Expression)(parser_stack.get(parser_top-1)).value;
- RESULT = ex;
- parser_result = new Symbol(17/*PrimaryExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 101: // PrimaryExpr ::= VariableReference
- {
- Expression RESULT = null;
- int vrleft = (parser_stack.get(parser_top-0)).left;
- int vrright = (parser_stack.get(parser_top-0)).right;
- Expression vr = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = vr;
- parser_result = new Symbol(17/*PrimaryExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 100: // FilterExpr ::= PrimaryExpr Predicates
- {
- Expression RESULT = null;
- int primaryleft = (parser_stack.get(parser_top-1)).left;
- int primaryright = (parser_stack.get(parser_top-1)).right;
- Expression primary = (Expression)(parser_stack.get(parser_top-1)).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Expression> pp = (ArrayList<Expression>)(parser_stack.get(parser_top-0)).value;
- RESULT = new FilterExpr(primary, pp);
- parser_result = new Symbol(6/*FilterExpr*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 99: // FilterExpr ::= PrimaryExpr
- {
- Expression RESULT = null;
- int primaryleft = (parser_stack.get(parser_top-0)).left;
- int primaryright = (parser_stack.get(parser_top-0)).right;
- Expression primary = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = primary;
- parser_result = new Symbol(6/*FilterExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 98: // AbbreviatedStep ::= DDOT
- {
- Expression RESULT = null;
- RESULT = new Step(Axis.PARENT, NodeTest.ANODE, null);
- parser_result = new Symbol(20/*AbbreviatedStep*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 97: // AbbreviatedStep ::= DOT
- {
- Expression RESULT = null;
- RESULT = new Step(Axis.SELF, NodeTest.ANODE, null);
- parser_result = new Symbol(20/*AbbreviatedStep*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 96: // AxisName ::= SELF
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 104: // PrimaryExpr ::= INT
{
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.SELF);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 95: // AxisName ::= PRECEDINGSIBLING
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.PRECEDINGSIBLING);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 94: // AxisName ::= PRECEDING
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.PRECEDING);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression result = null;
+ Long num = (Long) (parser_stack.get(parser_top - 0)).value;
+ if (num < Integer.MIN_VALUE || num > Integer.MAX_VALUE) {
+ result = new RealExpr(num);
+ } else if (num.doubleValue() == -0) {
+ result = new RealExpr(num.doubleValue());
+ } else if (num.intValue() == 0) {
+ result = new IntExpr(num.intValue());
+ } else if (num.doubleValue() == 0.0) {
+ result = new RealExpr(num.doubleValue());
+ } else {
+ result = new IntExpr(num.intValue());
+ }
+ parser_result = new Symbol(17, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 93: // AxisName ::= PARENT
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.PARENT);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 92: // AxisName ::= NAMESPACE
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.NAMESPACE);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 91: // AxisName ::= FOLLOWINGSIBLING
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 105: // PrimaryExpr ::= REAL
{
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.FOLLOWINGSIBLING);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 90: // AxisName ::= FOLLOWING
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.FOLLOWING);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 89: // AxisName ::= DESCENDANTORSELF
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.DESCENDANTORSELF);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Double num = (Double) (parser_stack.get(parser_top - 0)).value;
+ Expression result = new RealExpr(num);
+ parser_result = new Symbol(17, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 88: // AxisName ::= DESCENDANT
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.DESCENDANT);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 87: // AxisName ::= CHILD
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 106: // PrimaryExpr ::= FunctionCall
{
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.CHILD);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression fc = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(17, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, fc);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 86: // AxisName ::= ATTRIBUTE
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 107: // VariableReference ::= DOLLAR VariableName
{
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.ATTRIBUTE);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 85: // AxisName ::= ANCESTORORSELF
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.ANCESTORORSELF);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression result = null;
+ QName varName = (QName) (parser_stack.get(parser_top - 0)).value;
+ // An empty qname prefix for a variable or parameter reference
+ // should map to the null namespace and not the default URI.
+ SyntaxTreeNode node = parser.lookupName(varName);
+ if (node != null) {
+ if (node instanceof Variable) {
+ result = new VariableRef((Variable) node);
+ } else if (node instanceof Param) {
+ result = new ParameterRef((Param) node);
+ } else {
+ result = new UnresolvedRef(varName);
+ }
+ }
+ if (node == null) {
+ result = new UnresolvedRef(varName);
+ }
+ parser_result = new Symbol(15, (parser_stack.get(parser_top - 1)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 84: // AxisName ::= ANCESTOR
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.ANCESTOR);
- parser_result = new Symbol(40/*AxisName*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 83: // AxisSpecifier ::= ATSIGN
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.ATTRIBUTE);
- parser_result = new Symbol(41/*AxisSpecifier*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 82: // AxisSpecifier ::= AxisName DCOLON
- {
- Integer RESULT = null;
- int anleft = (parser_stack.get(parser_top-1)).left;
- int anright = (parser_stack.get(parser_top-1)).right;
- Integer an = (Integer)(parser_stack.get(parser_top-1)).value;
- RESULT = an;
- parser_result = new Symbol(41/*AxisSpecifier*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 81: // Step ::= AbbreviatedStep
- {
- Expression RESULT = null;
- int abbrevleft = (parser_stack.get(parser_top-0)).left;
- int abbrevright = (parser_stack.get(parser_top-0)).right;
- Expression abbrev = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = abbrev;
- parser_result = new Symbol(7/*Step*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 80: // Step ::= AxisSpecifier NodeTest
- {
- Expression RESULT = null;
- int axisleft = (parser_stack.get(parser_top-1)).left;
- int axisright = (parser_stack.get(parser_top-1)).right;
- Integer axis = (Integer)(parser_stack.get(parser_top-1)).value;
- int ntestleft = (parser_stack.get(parser_top-0)).left;
- int ntestright = (parser_stack.get(parser_top-0)).right;
- Object ntest = parser_stack.get(parser_top-0).value;
- RESULT = new Step(axis.intValue(),
- parser.findNodeType(axis.intValue(), ntest),
- null);
-
- parser_result = new Symbol(7/*Step*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 79: // Step ::= AxisSpecifier NodeTest Predicates
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 108: // FunctionCall ::= FunctionName LPAREN RPAREN
{
- Expression RESULT = null;
- int axisleft = (parser_stack.get(parser_top-2)).left;
- int axisright = (parser_stack.get(parser_top-2)).right;
- Integer axis = (Integer)(parser_stack.get(parser_top-2)).value;
- int ntestleft = (parser_stack.get(parser_top-1)).left;
- int ntestright = (parser_stack.get(parser_top-1)).right;
- Object ntest = parser_stack.get(parser_top-1).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Predicate> pp = (ArrayList<Predicate>)(parser_stack.get(parser_top-0)).value;
- RESULT = new Step(axis.intValue(),
- parser.findNodeType(axis.intValue(), ntest),
- pp);
-
- parser_result = new Symbol(7/*Step*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 78: // Step ::= NodeTest Predicates
- {
- Expression RESULT = null;
- int ntestleft = (parser_stack.get(parser_top-1)).left;
- int ntestright = (parser_stack.get(parser_top-1)).right;
- Object ntest = parser_stack.get(parser_top-1).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Predicate> pp = (ArrayList<Predicate>)(parser_stack.get(parser_top-0)).value;
-
- if (ntest instanceof Step) {
- Step step = (Step)ntest;
- step.addPredicates(pp);
- RESULT = (Step)ntest;
- }
- else {
- RESULT = new Step(Axis.CHILD,
- parser.findNodeType(Axis.CHILD, ntest), pp);
- }
-
- parser_result = new Symbol(7/*Step*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression result = null;
+ QName fname = (QName) (parser_stack.get(parser_top - 2)).value;
+ if (fname == parser.getQNameIgnoreDefaultNs("current")) {
+ result = new CurrentCall(fname);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("number")) {
+ result = new NumberCall(fname, XPathParser.EmptyArgs);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("string")) {
+ result = new StringCall(fname, XPathParser.EmptyArgs);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("concat")) {
+ result = new ConcatCall(fname, XPathParser.EmptyArgs);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("true")) {
+ result = new BooleanExpr(true);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("false")) {
+ result = new BooleanExpr(false);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("name")) {
+ result = new NameCall(fname);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("generate-id")) {
+ result = new GenerateIdCall(fname, XPathParser.EmptyArgs);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("string-length")) {
+ result = new StringLengthCall(fname, XPathParser.EmptyArgs);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("position")) {
+ result = new PositionCall(fname);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("last")) {
+ result = new LastCall(fname);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("local-name")) {
+ result = new LocalNameCall(fname);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("namespace-uri")) {
+ result = new NamespaceUriCall(fname);
+ } else {
+ result = new FunctionCall(fname, XPathParser.EmptyArgs);
+ }
+ parser_result = new Symbol(16, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 77: // Step ::= NodeTest
- {
- Expression RESULT = null;
- int ntestleft = (parser_stack.get(parser_top-0)).left;
- int ntestright = (parser_stack.get(parser_top-0)).right;
- Object ntest = parser_stack.get(parser_top-0).value;
-
- if (ntest instanceof Step) {
- RESULT = (Step)ntest;
- }
- else {
- RESULT = new Step(Axis.CHILD,
- parser.findNodeType(Axis.CHILD, ntest),
- null);
- }
-
- parser_result = new Symbol(7/*Step*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 76: // AbbreviatedAbsoluteLocationPath ::= DSLASH RelativeLocationPath
- {
- Expression RESULT = null;
- int rlpleft = (parser_stack.get(parser_top-0)).left;
- int rlpright = (parser_stack.get(parser_top-0)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-0)).value;
-
- //
- // Expand '//' into '/descendant-or-self::node()/' or
- // into /descendant-or-self::*/
- //
- int nodeType = DOM.NO_TYPE;
- if (rlp instanceof Step &&
- parser.isElementAxis(((Step) rlp).getAxis()))
- {
- nodeType = DTM.ELEMENT_NODE;
- }
- final Step step = new Step(Axis.DESCENDANTORSELF, nodeType, null);
- RESULT = new AbsoluteLocationPath(parser.insertStep(step,
- (RelativeLocationPath) rlp));
-
- parser_result = new Symbol(24/*AbbreviatedAbsoluteLocationPath*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 75: // AbbreviatedRelativeLocationPath ::= RelativeLocationPath DSLASH Step
- {
- Expression RESULT = null;
- int rlpleft = (parser_stack.get(parser_top-2)).left;
- int rlpright = (parser_stack.get(parser_top-2)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-2)).value;
- int stepleft = (parser_stack.get(parser_top-0)).left;
- int stepright = (parser_stack.get(parser_top-0)).right;
- Expression step = (Expression)(parser_stack.get(parser_top-0)).value;
+ return parser_result;
- final Step right = (Step)step;
- final int axis = right.getAxis();
- final int type = right.getNodeType();
- final List<Predicate> predicates = right.getPredicates();
-
- if ((axis == Axis.CHILD) && (type != NodeTest.ATTRIBUTE)) {
- // Compress './/child:E' into 'descendant::E' - if possible
- if (predicates == null) {
- right.setAxis(Axis.DESCENDANT);
- if (rlp instanceof Step && ((Step)rlp).isAbbreviatedDot()) {
- RESULT = right;
- }
- else {
- // Expand 'rlp//child::E' into 'rlp/descendant::E'
- RelativeLocationPath left = (RelativeLocationPath)rlp;
- RESULT = new ParentLocationPath(left, right);
- }
- }
- else {
- // Expand './/step' -> 'descendant-or-self::*/step'
- if (rlp instanceof Step && ((Step)rlp).isAbbreviatedDot()) {
- Step left = new Step(Axis.DESCENDANTORSELF,
- DTM.ELEMENT_NODE, null);
- RESULT = new ParentLocationPath(left, right);
- }
- else {
- // Expand 'rlp//step' -> 'rlp/descendant-or-self::*/step'
- RelativeLocationPath left = (RelativeLocationPath)rlp;
- Step mid = new Step(Axis.DESCENDANTORSELF,
- DTM.ELEMENT_NODE, null);
- ParentLocationPath ppl = new ParentLocationPath(mid, right);
- RESULT = new ParentLocationPath(left, ppl);
- }
- }
- }
- else if ((axis == Axis.ATTRIBUTE) || (type == NodeTest.ATTRIBUTE)) {
- // Expand 'rlp//step' -> 'rlp/descendant-or-self::*/step'
- RelativeLocationPath left = (RelativeLocationPath)rlp;
- Step middle = new Step(Axis.DESCENDANTORSELF,
- DTM.ELEMENT_NODE, null);
- ParentLocationPath ppl = new ParentLocationPath(middle, right);
- RESULT = new ParentLocationPath(left, ppl);
- }
- else {
- // Expand 'rlp//step' -> 'rlp/descendant-or-self::node()/step'
- RelativeLocationPath left = (RelativeLocationPath)rlp;
- Step middle = new Step(Axis.DESCENDANTORSELF,
- DOM.NO_TYPE, null);
- ParentLocationPath ppl = new ParentLocationPath(middle, right);
- RESULT = new ParentLocationPath(left, ppl);
- }
-
- parser_result = new Symbol(22/*AbbreviatedRelativeLocationPath*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 74: // AbsoluteLocationPath ::= AbbreviatedAbsoluteLocationPath
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 109: // FunctionCall ::= FunctionName LPAREN NonemptyArgumentList RPAREN
{
- Expression RESULT = null;
- int aalpleft = (parser_stack.get(parser_top-0)).left;
- int aalpright = (parser_stack.get(parser_top-0)).right;
- Expression aalp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = aalp;
- parser_result = new Symbol(23/*AbsoluteLocationPath*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 73: // AbsoluteLocationPath ::= SLASH RelativeLocationPath
- {
- Expression RESULT = null;
- int rlpleft = (parser_stack.get(parser_top-0)).left;
- int rlpright = (parser_stack.get(parser_top-0)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new AbsoluteLocationPath(rlp);
- parser_result = new Symbol(23/*AbsoluteLocationPath*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 72: // AbsoluteLocationPath ::= SLASH
- {
- Expression RESULT = null;
- RESULT = new AbsoluteLocationPath();
- parser_result = new Symbol(23/*AbsoluteLocationPath*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression result = null;
+ QName fname = (QName) (parser_stack.get(parser_top - 3)).value;
+ @SuppressWarnings("unchecked")
+ List<Expression> argl = (ArrayList<Expression>) (parser_stack.get(parser_top - 1)).value;
+ if (fname == parser.getQNameIgnoreDefaultNs("concat")) {
+ result = new ConcatCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("number")) {
+ result = new NumberCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("document")) {
+ parser.setMultiDocument(true);
+ result = new DocumentCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("string")) {
+ result = new StringCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("boolean")) {
+ result = new BooleanCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("name")) {
+ result = new NameCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("generate-id")) {
+ result = new GenerateIdCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("not")) {
+ result = new NotCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("format-number")) {
+ result = new FormatNumberCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("unparsed-entity-uri")) {
+ result = new UnparsedEntityUriCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("key")) {
+ result = new KeyCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("id")) {
+ result = new KeyCall(fname, argl);
+ parser.setHasIdCall(true);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("ceiling")) {
+ result = new CeilingCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("round")) {
+ result = new RoundCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("floor")) {
+ result = new FloorCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("contains")) {
+ result = new ContainsCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("string-length")) {
+ result = new StringLengthCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("starts-with")) {
+ result = new StartsWithCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("function-available")) {
+ result = new FunctionAvailableCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("element-available")) {
+ result = new ElementAvailableCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("local-name")) {
+ result = new LocalNameCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("lang")) {
+ result = new LangCall(fname, argl);
+ } else if (fname == parser.getQNameIgnoreDefaultNs("namespace-uri")) {
+ result = new NamespaceUriCall(fname, argl);
+ } else if (fname == parser.getQName(Constants.TRANSLET_URI, "xsltc", "cast")) {
+ result = new CastCall(fname, argl);
+ } // Special case for extension function nodeset()
+ else if (fname.getLocalPart().equals("nodeset") || fname.getLocalPart().equals("node-set")) {
+ parser.setCallsNodeset(true); // implies MultiDOM
+ result = new FunctionCall(fname, argl);
+ } else {
+ result = new FunctionCall(fname, argl);
+ }
+ parser_result = new Symbol(16, (parser_stack.get(parser_top - 3)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 71: // RelativeLocationPath ::= AbbreviatedRelativeLocationPath
- {
- Expression RESULT = null;
- int arlpleft = (parser_stack.get(parser_top-0)).left;
- int arlpright = (parser_stack.get(parser_top-0)).right;
- Expression arlp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = arlp;
- parser_result = new Symbol(21/*RelativeLocationPath*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 70: // RelativeLocationPath ::= RelativeLocationPath SLASH Step
- {
- Expression RESULT = null;
- int rlpleft = (parser_stack.get(parser_top-2)).left;
- int rlpright = (parser_stack.get(parser_top-2)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-2)).value;
- int stepleft = (parser_stack.get(parser_top-0)).left;
- int stepright = (parser_stack.get(parser_top-0)).right;
- Expression step = (Expression)(parser_stack.get(parser_top-0)).value;
-
- if (rlp instanceof Step && ((Step) rlp).isAbbreviatedDot()) {
- RESULT = step; // Remove './' from the middle
- }
- else if (((Step) step).isAbbreviatedDot()) {
- RESULT = rlp; // Remove '/.' from the end
- }
- else {
- RESULT =
- new ParentLocationPath((RelativeLocationPath) rlp, step);
- }
-
- parser_result = new Symbol(21/*RelativeLocationPath*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 69: // RelativeLocationPath ::= Step
- {
- Expression RESULT = null;
- int stepleft = (parser_stack.get(parser_top-0)).left;
- int stepright = (parser_stack.get(parser_top-0)).right;
- Expression step = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = step;
- parser_result = new Symbol(21/*RelativeLocationPath*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 68: // LocationPath ::= AbsoluteLocationPath
- {
- Expression RESULT = null;
- int alpleft = (parser_stack.get(parser_top-0)).left;
- int alpright = (parser_stack.get(parser_top-0)).right;
- Expression alp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = alp;
- parser_result = new Symbol(4/*LocationPath*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 67: // LocationPath ::= RelativeLocationPath
- {
- Expression RESULT = null;
- int rlpleft = (parser_stack.get(parser_top-0)).left;
- int rlpright = (parser_stack.get(parser_top-0)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = rlp;
- parser_result = new Symbol(4/*LocationPath*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 66: // PathExpr ::= FilterExpr DSLASH RelativeLocationPath
- {
- Expression RESULT = null;
- int fexpleft = (parser_stack.get(parser_top-2)).left;
- int fexpright = (parser_stack.get(parser_top-2)).right;
- Expression fexp = (Expression)(parser_stack.get(parser_top-2)).value;
- int rlpleft = (parser_stack.get(parser_top-0)).left;
- int rlpright = (parser_stack.get(parser_top-0)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-0)).value;
+ return parser_result;
- //
- // Expand '//' into '/descendant-or-self::node()/' or
- // into /descendant-or-self::*/
- //
- int nodeType = DOM.NO_TYPE;
- if (rlp instanceof Step &&
- parser.isElementAxis(((Step) rlp).getAxis()))
- {
- nodeType = DTM.ELEMENT_NODE;
- }
- final Step step = new Step(Axis.DESCENDANTORSELF, nodeType, null);
- FilterParentPath fpp = new FilterParentPath(fexp, step);
- fpp = new FilterParentPath(fpp, rlp);
- if (fexp instanceof KeyCall == false) {
- fpp.setDescendantAxis();
- }
- RESULT = fpp;
-
- parser_result = new Symbol(19/*PathExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 65: // PathExpr ::= FilterExpr SLASH RelativeLocationPath
- {
- Expression RESULT = null;
- int fexpleft = (parser_stack.get(parser_top-2)).left;
- int fexpright = (parser_stack.get(parser_top-2)).right;
- Expression fexp = (Expression)(parser_stack.get(parser_top-2)).value;
- int rlpleft = (parser_stack.get(parser_top-0)).left;
- int rlpright = (parser_stack.get(parser_top-0)).right;
- Expression rlp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new FilterParentPath(fexp, rlp);
- parser_result = new Symbol(19/*PathExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 64: // PathExpr ::= FilterExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 110: // NonemptyArgumentList ::= Argument
{
- Expression RESULT = null;
- int fexpleft = (parser_stack.get(parser_top-0)).left;
- int fexpright = (parser_stack.get(parser_top-0)).right;
- Expression fexp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = fexp;
- parser_result = new Symbol(19/*PathExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression arg = (Expression) (parser_stack.get(parser_top - 0)).value;
+ List<Expression> temp = new ArrayList<>();
+ temp.add(arg);
+ parser_result = new Symbol(36, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, temp);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 63: // PathExpr ::= LocationPath
- {
- Expression RESULT = null;
- int lpleft = (parser_stack.get(parser_top-0)).left;
- int lpright = (parser_stack.get(parser_top-0)).right;
- Expression lp = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = lp;
- parser_result = new Symbol(19/*PathExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 62: // UnionExpr ::= PathExpr VBAR UnionExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 111: // NonemptyArgumentList ::= Argument COMMA NonemptyArgumentList
{
- Expression RESULT = null;
- int peleft = (parser_stack.get(parser_top-2)).left;
- int peright = (parser_stack.get(parser_top-2)).right;
- Expression pe = (Expression)(parser_stack.get(parser_top-2)).value;
- int restleft = (parser_stack.get(parser_top-0)).left;
- int restright = (parser_stack.get(parser_top-0)).right;
- Expression rest = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new UnionPathExpr(pe, rest);
- parser_result = new Symbol(18/*UnionExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression arg = (Expression) (parser_stack.get(parser_top - 2)).value;
+ @SuppressWarnings("unchecked")
+ List<Expression> argl = (ArrayList<Expression>) (parser_stack.get(parser_top - 0)).value;
+ argl.add(0, arg);
+ parser_result = new Symbol(36, (parser_stack.get(parser_top - 2)).left,
+ (parser_stack.get(parser_top - 0)).right, argl);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 61: // UnionExpr ::= PathExpr
- {
- Expression RESULT = null;
- int peleft = (parser_stack.get(parser_top-0)).left;
- int peright = (parser_stack.get(parser_top-0)).right;
- Expression pe = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = pe;
- parser_result = new Symbol(18/*UnionExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 60: // UnaryExpr ::= MINUS UnaryExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 112: // FunctionName ::= QName
{
- Expression RESULT = null;
- int ueleft = (parser_stack.get(parser_top-0)).left;
- int ueright = (parser_stack.get(parser_top-0)).right;
- Expression ue = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new UnaryOpExpr(ue);
- parser_result = new Symbol(14/*UnaryExpr*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 59: // UnaryExpr ::= UnionExpr
- {
- Expression RESULT = null;
- int ueleft = (parser_stack.get(parser_top-0)).left;
- int ueright = (parser_stack.get(parser_top-0)).right;
- Expression ue = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = ue;
- parser_result = new Symbol(14/*UnaryExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName fname = (QName) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(38, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, fname);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 58: // MultiplicativeExpr ::= MultiplicativeExpr MOD UnaryExpr
- {
- Expression RESULT = null;
- int meleft = (parser_stack.get(parser_top-2)).left;
- int meright = (parser_stack.get(parser_top-2)).right;
- Expression me = (Expression)(parser_stack.get(parser_top-2)).value;
- int ueleft = (parser_stack.get(parser_top-0)).left;
- int ueright = (parser_stack.get(parser_top-0)).right;
- Expression ue = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new BinOpExpr(BinOpExpr.MOD, me, ue);
- parser_result = new Symbol(13/*MultiplicativeExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 57: // MultiplicativeExpr ::= MultiplicativeExpr DIV UnaryExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 113: // VariableName ::= QName
{
- Expression RESULT = null;
- int meleft = (parser_stack.get(parser_top-2)).left;
- int meright = (parser_stack.get(parser_top-2)).right;
- Expression me = (Expression)(parser_stack.get(parser_top-2)).value;
- int ueleft = (parser_stack.get(parser_top-0)).left;
- int ueright = (parser_stack.get(parser_top-0)).right;
- Expression ue = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new BinOpExpr(BinOpExpr.DIV, me, ue);
- parser_result = new Symbol(13/*MultiplicativeExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName vname = (QName) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(39, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, vname);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 56: // MultiplicativeExpr ::= MultiplicativeExpr STAR UnaryExpr
- {
- Expression RESULT = null;
- int meleft = (parser_stack.get(parser_top-2)).left;
- int meright = (parser_stack.get(parser_top-2)).right;
- Expression me = (Expression)(parser_stack.get(parser_top-2)).value;
- int ueleft = (parser_stack.get(parser_top-0)).left;
- int ueright = (parser_stack.get(parser_top-0)).right;
- Expression ue = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new BinOpExpr(BinOpExpr.TIMES, me, ue);
- parser_result = new Symbol(13/*MultiplicativeExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 55: // MultiplicativeExpr ::= UnaryExpr
- {
- Expression RESULT = null;
- int ueleft = (parser_stack.get(parser_top-0)).left;
- int ueright = (parser_stack.get(parser_top-0)).right;
- Expression ue = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = ue;
- parser_result = new Symbol(13/*MultiplicativeExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 54: // AdditiveExpr ::= AdditiveExpr MINUS MultiplicativeExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 114: // Argument ::= Expr
{
- Expression RESULT = null;
- int aeleft = (parser_stack.get(parser_top-2)).left;
- int aeright = (parser_stack.get(parser_top-2)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-2)).value;
- int meleft = (parser_stack.get(parser_top-0)).left;
- int meright = (parser_stack.get(parser_top-0)).right;
- Expression me = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new BinOpExpr(BinOpExpr.MINUS, ae, me);
- parser_result = new Symbol(12/*AdditiveExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Expression ex = (Expression) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(3, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, ex);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 53: // AdditiveExpr ::= AdditiveExpr PLUS MultiplicativeExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 115: // NodeTest ::= NameTest
{
- Expression RESULT = null;
- int aeleft = (parser_stack.get(parser_top-2)).left;
- int aeright = (parser_stack.get(parser_top-2)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-2)).value;
- int meleft = (parser_stack.get(parser_top-0)).left;
- int meright = (parser_stack.get(parser_top-0)).right;
- Expression me = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new BinOpExpr(BinOpExpr.PLUS, ae, me);
- parser_result = new Symbol(12/*AdditiveExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object nt = parser_stack.get(parser_top - 0).value;
+ parser_result = new Symbol(25, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, nt);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 52: // AdditiveExpr ::= MultiplicativeExpr
- {
- Expression RESULT = null;
- int meleft = (parser_stack.get(parser_top-0)).left;
- int meright = (parser_stack.get(parser_top-0)).right;
- Expression me = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = me;
- parser_result = new Symbol(12/*AdditiveExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 51: // RelationalExpr ::= RelationalExpr GE AdditiveExpr
- {
- Expression RESULT = null;
- int releft = (parser_stack.get(parser_top-2)).left;
- int reright = (parser_stack.get(parser_top-2)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-2)).value;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new RelationalExpr(Operators.GE, re, ae);
- parser_result = new Symbol(11/*RelationalExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 50: // RelationalExpr ::= RelationalExpr LE AdditiveExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 116: // NodeTest ::= NODE
{
- Expression RESULT = null;
- int releft = (parser_stack.get(parser_top-2)).left;
- int reright = (parser_stack.get(parser_top-2)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-2)).value;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new RelationalExpr(Operators.LE, re, ae);
- parser_result = new Symbol(11/*RelationalExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = NodeTest.ANODE;
+ parser_result = new Symbol(25, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 49: // RelationalExpr ::= RelationalExpr GT AdditiveExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 117: // NodeTest ::= TEXT
{
- Expression RESULT = null;
- int releft = (parser_stack.get(parser_top-2)).left;
- int reright = (parser_stack.get(parser_top-2)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-2)).value;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new RelationalExpr(Operators.GT, re, ae);
- parser_result = new Symbol(11/*RelationalExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = NodeTest.TEXT;
+ parser_result = new Symbol(25, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 48: // RelationalExpr ::= RelationalExpr LT AdditiveExpr
- {
- Expression RESULT = null;
- int releft = (parser_stack.get(parser_top-2)).left;
- int reright = (parser_stack.get(parser_top-2)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-2)).value;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new RelationalExpr(Operators.LT, re, ae);
- parser_result = new Symbol(11/*RelationalExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 47: // RelationalExpr ::= AdditiveExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 118: // NodeTest ::= COMMENT
{
- Expression RESULT = null;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = ae;
- parser_result = new Symbol(11/*RelationalExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = NodeTest.COMMENT;
+ parser_result = new Symbol(25, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 46: // EqualityExpr ::= EqualityExpr NE RelationalExpr
- {
- Expression RESULT = null;
- int eeleft = (parser_stack.get(parser_top-2)).left;
- int eeright = (parser_stack.get(parser_top-2)).right;
- Expression ee = (Expression)(parser_stack.get(parser_top-2)).value;
- int releft = (parser_stack.get(parser_top-0)).left;
- int reright = (parser_stack.get(parser_top-0)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new EqualityExpr(Operators.NE, ee, re);
- parser_result = new Symbol(10/*EqualityExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 45: // EqualityExpr ::= EqualityExpr EQ RelationalExpr
- {
- Expression RESULT = null;
- int eeleft = (parser_stack.get(parser_top-2)).left;
- int eeright = (parser_stack.get(parser_top-2)).right;
- Expression ee = (Expression)(parser_stack.get(parser_top-2)).value;
- int releft = (parser_stack.get(parser_top-0)).left;
- int reright = (parser_stack.get(parser_top-0)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new EqualityExpr(Operators.EQ, ee, re);
- parser_result = new Symbol(10/*EqualityExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 44: // EqualityExpr ::= RelationalExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 119: // NodeTest ::= PIPARAM LPAREN Literal RPAREN
{
- Expression RESULT = null;
- int releft = (parser_stack.get(parser_top-0)).left;
- int reright = (parser_stack.get(parser_top-0)).right;
- Expression re = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = re;
- parser_result = new Symbol(10/*EqualityExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ String l = (String) (parser_stack.get(parser_top - 1)).value;
+ QName name = parser.getQNameIgnoreDefaultNs("name");
+ Expression exp = new EqualityExpr(Operators.EQ,
+ new NameCall(name),
+ new LiteralExpr(l));
+ List<Predicate> predicates = new ArrayList<>();
+ predicates.add(new Predicate(exp));
+ Object result = new Step(Axis.CHILD, NodeTest.PI, predicates);
+ parser_result = new Symbol(25, (parser_stack.get(parser_top - 3)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 43: // AndExpr ::= AndExpr AND EqualityExpr
- {
- Expression RESULT = null;
- int aeleft = (parser_stack.get(parser_top-2)).left;
- int aeright = (parser_stack.get(parser_top-2)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-2)).value;
- int eeleft = (parser_stack.get(parser_top-0)).left;
- int eeright = (parser_stack.get(parser_top-0)).right;
- Expression ee = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new LogicalExpr(LogicalExpr.AND, ae, ee);
- parser_result = new Symbol(9/*AndExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 42: // AndExpr ::= EqualityExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 120: // NodeTest ::= PI
{
- Expression RESULT = null;
- int eleft = (parser_stack.get(parser_top-0)).left;
- int eright = (parser_stack.get(parser_top-0)).right;
- Expression e = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = e;
- parser_result = new Symbol(9/*AndExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = NodeTest.PI;
+ parser_result = new Symbol(25, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 41: // OrExpr ::= OrExpr OR AndExpr
- {
- Expression RESULT = null;
- int oeleft = (parser_stack.get(parser_top-2)).left;
- int oeright = (parser_stack.get(parser_top-2)).right;
- Expression oe = (Expression)(parser_stack.get(parser_top-2)).value;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = new LogicalExpr(LogicalExpr.OR, oe, ae);
- parser_result = new Symbol(8/*OrExpr*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 40: // OrExpr ::= AndExpr
- {
- Expression RESULT = null;
- int aeleft = (parser_stack.get(parser_top-0)).left;
- int aeright = (parser_stack.get(parser_top-0)).right;
- Expression ae = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = ae;
- parser_result = new Symbol(8/*OrExpr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 39: // Expr ::= OrExpr
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 121: // NameTest ::= STAR
{
- Expression RESULT = null;
- int exleft = (parser_stack.get(parser_top-0)).left;
- int exright = (parser_stack.get(parser_top-0)).right;
- Expression ex = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = ex;
- parser_result = new Symbol(2/*Expr*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ Object result = null;
+ parser_result = new Symbol(26, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 38: // Predicate ::= LBRACK Expr RBRACK
- {
- Expression RESULT = null;
- int eleft = (parser_stack.get(parser_top-1)).left;
- int eright = (parser_stack.get(parser_top-1)).right;
- Expression e = (Expression)(parser_stack.get(parser_top-1)).value;
-
- RESULT = new Predicate(e);
-
- parser_result = new Symbol(5/*Predicate*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 37: // Predicates ::= Predicate Predicates
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 122: // NameTest ::= QName
{
- List<Expression> RESULT = null;
- int pleft = (parser_stack.get(parser_top-1)).left;
- int pright = (parser_stack.get(parser_top-1)).right;
- Expression p = (Expression)(parser_stack.get(parser_top-1)).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Expression> pp = (ArrayList<Expression>)(parser_stack.get(parser_top-0)).value;
- pp.add(0, p); RESULT = pp;
- parser_result = new Symbol(35/*Predicates*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName qn = (QName) (parser_stack.get(parser_top - 0)).value;
+ parser_result = new Symbol(26, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, qn);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 36: // Predicates ::= Predicate
- {
- List<Expression> RESULT = null;
- int pleft = (parser_stack.get(parser_top-0)).left;
- int pright = (parser_stack.get(parser_top-0)).right;
- Expression p = (Expression)(parser_stack.get(parser_top-0)).value;
+ return parser_result;
- List<Expression> temp = new ArrayList<>();
- temp.add(p);
- RESULT = temp;
-
- parser_result = new Symbol(35/*Predicates*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 35: // ChildOrAttributeAxisSpecifier ::= ATTRIBUTE DCOLON
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 123: // QName ::= QNAME
{
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.ATTRIBUTE);
- parser_result = new Symbol(42/*ChildOrAttributeAxisSpecifier*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ String qname = (String) (parser_stack.get(parser_top - 0)).value;
+ QName result = parser.getQNameIgnoreDefaultNs(qname);
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 34: // ChildOrAttributeAxisSpecifier ::= CHILD DCOLON
- {
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.CHILD);
- parser_result = new Symbol(42/*ChildOrAttributeAxisSpecifier*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 33: // ChildOrAttributeAxisSpecifier ::= ATSIGN
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 124: // QName ::= DIV
{
- Integer RESULT = null;
- RESULT = Integer.valueOf(Axis.ATTRIBUTE);
- parser_result = new Symbol(42/*ChildOrAttributeAxisSpecifier*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("div");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 32: // NameTestPattern ::= QName
- {
- Object RESULT = null;
- int qnleft = (parser_stack.get(parser_top-0)).left;
- int qnright = (parser_stack.get(parser_top-0)).right;
- QName qn = (QName)(parser_stack.get(parser_top-0)).value;
- RESULT = qn;
- parser_result = new Symbol(34/*NameTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 31: // NameTestPattern ::= STAR
- {
- Object RESULT = null;
- RESULT = null;
- parser_result = new Symbol(34/*NameTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 30: // NodeTestPattern ::= PI
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 125: // QName ::= MOD
{
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.PI);
- parser_result = new Symbol(33/*NodeTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 29: // NodeTestPattern ::= COMMENT
- {
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.COMMENT);
- parser_result = new Symbol(33/*NodeTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("mod");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 28: // NodeTestPattern ::= TEXT
- {
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.TEXT);
- parser_result = new Symbol(33/*NodeTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 27: // NodeTestPattern ::= NODE
- {
- Object RESULT = null;
- RESULT = Integer.valueOf(NodeTest.ANODE);
- parser_result = new Symbol(33/*NodeTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 26: // NodeTestPattern ::= NameTestPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 126: // QName ::= KEY
{
- Object RESULT = null;
- int ntleft = (parser_stack.get(parser_top-0)).left;
- int ntright = (parser_stack.get(parser_top-0)).right;
- Object nt = parser_stack.get(parser_top-0).value;
- RESULT = nt;
- parser_result = new Symbol(33/*NodeTestPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("key");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 25: // StepPattern ::= ChildOrAttributeAxisSpecifier ProcessingInstructionPattern Predicates
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 127: // QName ::= ANCESTOR
{
- StepPattern RESULT = null;
- int axisleft = (parser_stack.get(parser_top-2)).left;
- int axisright = (parser_stack.get(parser_top-2)).right;
- Integer axis = (Integer)(parser_stack.get(parser_top-2)).value;
- int pipleft = (parser_stack.get(parser_top-1)).left;
- int pipright = (parser_stack.get(parser_top-1)).right;
- StepPattern pip = (StepPattern)(parser_stack.get(parser_top-1)).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Predicate> pp = (ArrayList<Predicate>)(parser_stack.get(parser_top-0)).value;
+ QName result = parser.getQNameIgnoreDefaultNs("child");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
+ }
+ return parser_result;
- // TODO: report error if axis is attribute
- RESULT = (ProcessingInstructionPattern)pip.setPredicates(pp);
-
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 24: // StepPattern ::= ChildOrAttributeAxisSpecifier ProcessingInstructionPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 128: // QName ::= ANCESTORORSELF
{
- StepPattern RESULT = null;
- int axisleft = (parser_stack.get(parser_top-1)).left;
- int axisright = (parser_stack.get(parser_top-1)).right;
- Integer axis = (Integer)(parser_stack.get(parser_top-1)).value;
- int pipleft = (parser_stack.get(parser_top-0)).left;
- int pipright = (parser_stack.get(parser_top-0)).right;
- StepPattern pip = (StepPattern)(parser_stack.get(parser_top-0)).value;
-
- RESULT = pip; // TODO: report error if axis is attribute
-
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 23: // StepPattern ::= ChildOrAttributeAxisSpecifier NodeTestPattern Predicates
- {
- StepPattern RESULT = null;
- int axisleft = (parser_stack.get(parser_top-2)).left;
- int axisright = (parser_stack.get(parser_top-2)).right;
- Integer axis = (Integer)(parser_stack.get(parser_top-2)).value;
- int ntleft = (parser_stack.get(parser_top-1)).left;
- int ntright = (parser_stack.get(parser_top-1)).right;
- Object nt = parser_stack.get(parser_top-1).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Predicate>pp = (ArrayList<Predicate>)(parser_stack.get(parser_top-0)).value;
-
- RESULT = parser.createStepPattern(axis.intValue(), nt, pp);
-
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("ancestor-or-self");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 22: // StepPattern ::= ChildOrAttributeAxisSpecifier NodeTestPattern
- {
- StepPattern RESULT = null;
- int axisleft = (parser_stack.get(parser_top-1)).left;
- int axisright = (parser_stack.get(parser_top-1)).right;
- Integer axis = (Integer)(parser_stack.get(parser_top-1)).value;
- int ntleft = (parser_stack.get(parser_top-0)).left;
- int ntright = (parser_stack.get(parser_top-0)).right;
- Object nt = parser_stack.get(parser_top-0).value;
-
- RESULT = parser.createStepPattern(axis.intValue(), nt, null);
-
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 21: // StepPattern ::= ProcessingInstructionPattern Predicates
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 129: // QName ::= ATTRIBUTE
{
- StepPattern RESULT = null;
- int pipleft = (parser_stack.get(parser_top-1)).left;
- int pipright = (parser_stack.get(parser_top-1)).right;
- StepPattern pip = (StepPattern)(parser_stack.get(parser_top-1)).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Predicate> pp = (ArrayList<Predicate>)(parser_stack.get(parser_top-0)).value;
- RESULT = (ProcessingInstructionPattern)pip.setPredicates(pp);
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("attribute");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 20: // StepPattern ::= ProcessingInstructionPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 130: // QName ::= CHILD
{
- StepPattern RESULT = null;
- int pipleft = (parser_stack.get(parser_top-0)).left;
- int pipright = (parser_stack.get(parser_top-0)).right;
- StepPattern pip = (StepPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = pip;
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("child");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 19: // StepPattern ::= NodeTestPattern Predicates
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 131: // QName ::= DESCENDANT
{
- StepPattern RESULT = null;
- int ntleft = (parser_stack.get(parser_top-1)).left;
- int ntright = (parser_stack.get(parser_top-1)).right;
- Object nt = parser_stack.get(parser_top-1).value;
- int ppleft = (parser_stack.get(parser_top-0)).left;
- int ppright = (parser_stack.get(parser_top-0)).right;
- @SuppressWarnings("unchecked")
- List<Predicate> pp = (ArrayList<Predicate>)(parser_stack.get(parser_top-0)).value;
-
- RESULT = parser.createStepPattern(Axis.CHILD, nt, pp);
-
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("decendant");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 18: // StepPattern ::= NodeTestPattern
- {
- StepPattern RESULT = null;
- int ntleft = (parser_stack.get(parser_top-0)).left;
- int ntright = (parser_stack.get(parser_top-0)).right;
- Object nt = parser_stack.get(parser_top-0).value;
-
- RESULT = parser.createStepPattern(Axis.CHILD, nt, null);
-
- parser_result = new Symbol(32/*StepPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 17: // RelativePathPattern ::= StepPattern DSLASH RelativePathPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 132: // QName ::= DESCENDANTORSELF
{
- RelativePathPattern RESULT = null;
- int spleft = (parser_stack.get(parser_top-2)).left;
- int spright = (parser_stack.get(parser_top-2)).right;
- StepPattern sp = (StepPattern)(parser_stack.get(parser_top-2)).value;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new AncestorPattern(sp, rpp);
- parser_result = new Symbol(31/*RelativePathPattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("decendant-or-self");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 16: // RelativePathPattern ::= StepPattern SLASH RelativePathPattern
- {
- RelativePathPattern RESULT = null;
- int spleft = (parser_stack.get(parser_top-2)).left;
- int spright = (parser_stack.get(parser_top-2)).right;
- StepPattern sp = (StepPattern)(parser_stack.get(parser_top-2)).value;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new ParentPattern(sp, rpp);
- parser_result = new Symbol(31/*RelativePathPattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 15: // RelativePathPattern ::= StepPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 133: // QName ::= FOLLOWING
{
- RelativePathPattern RESULT = null;
- int spleft = (parser_stack.get(parser_top-0)).left;
- int spright = (parser_stack.get(parser_top-0)).right;
- StepPattern sp = (StepPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = sp;
- parser_result = new Symbol(31/*RelativePathPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("following");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 14: // ProcessingInstructionPattern ::= PIPARAM LPAREN Literal RPAREN
- {
- StepPattern RESULT = null;
- int lleft = (parser_stack.get(parser_top-1)).left;
- int lright = (parser_stack.get(parser_top-1)).right;
- String l = (String)(parser_stack.get(parser_top-1)).value;
- RESULT = new ProcessingInstructionPattern(l);
- parser_result = new Symbol(30/*ProcessingInstructionPattern*/, (parser_stack.get(parser_top-3)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 13: // IdKeyPattern ::= KEY LPAREN Literal COMMA Literal RPAREN
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 134: // QName ::= FOLLOWINGSIBLING
{
- IdKeyPattern RESULT = null;
- int l1left = (parser_stack.get(parser_top-3)).left;
- int l1right = (parser_stack.get(parser_top-3)).right;
- String l1 = (String)(parser_stack.get(parser_top-3)).value;
- int l2left = (parser_stack.get(parser_top-1)).left;
- int l2right = (parser_stack.get(parser_top-1)).right;
- String l2 = (String)(parser_stack.get(parser_top-1)).value;
- RESULT = new KeyPattern(l1, l2);
- parser_result = new Symbol(27/*IdKeyPattern*/, (parser_stack.get(parser_top-5)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("following-sibling");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 12: // IdKeyPattern ::= ID LPAREN Literal RPAREN
- {
- IdKeyPattern RESULT = null;
- int lleft = (parser_stack.get(parser_top-1)).left;
- int lright = (parser_stack.get(parser_top-1)).right;
- String l = (String)(parser_stack.get(parser_top-1)).value;
- RESULT = new IdPattern(l);
- parser.setHasIdCall(true);
-
- parser_result = new Symbol(27/*IdKeyPattern*/, (parser_stack.get(parser_top-3)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 11: // LocationPathPattern ::= RelativePathPattern
- {
- Pattern RESULT = null;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = rpp;
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 10: // LocationPathPattern ::= DSLASH RelativePathPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 135: // QName ::= NAMESPACE
{
- Pattern RESULT = null;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new AncestorPattern(rpp);
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("namespace");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 9: // LocationPathPattern ::= IdKeyPattern DSLASH RelativePathPattern
- {
- Pattern RESULT = null;
- int ikpleft = (parser_stack.get(parser_top-2)).left;
- int ikpright = (parser_stack.get(parser_top-2)).right;
- IdKeyPattern ikp = (IdKeyPattern)(parser_stack.get(parser_top-2)).value;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new AncestorPattern(ikp, rpp);
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 8: // LocationPathPattern ::= IdKeyPattern SLASH RelativePathPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 136: // QName ::= PARENT
{
- Pattern RESULT = null;
- int ikpleft = (parser_stack.get(parser_top-2)).left;
- int ikpright = (parser_stack.get(parser_top-2)).right;
- IdKeyPattern ikp = (IdKeyPattern)(parser_stack.get(parser_top-2)).value;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new ParentPattern(ikp, rpp);
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("parent");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 7: // LocationPathPattern ::= IdKeyPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 137: // QName ::= PRECEDING
{
- Pattern RESULT = null;
- int ikpleft = (parser_stack.get(parser_top-0)).left;
- int ikpright = (parser_stack.get(parser_top-0)).right;
- IdKeyPattern ikp = (IdKeyPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = ikp;
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("preceding");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 6: // LocationPathPattern ::= SLASH RelativePathPattern
- {
- Pattern RESULT = null;
- int rppleft = (parser_stack.get(parser_top-0)).left;
- int rppright = (parser_stack.get(parser_top-0)).right;
- RelativePathPattern rpp = (RelativePathPattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new AbsolutePathPattern(rpp);
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 5: // LocationPathPattern ::= SLASH
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 138: // QName ::= PRECEDINGSIBLING
{
- Pattern RESULT = null;
- RESULT = new AbsolutePathPattern(null);
- parser_result = new Symbol(29/*LocationPathPattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("preceding-sibling");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 4: // Pattern ::= LocationPathPattern VBAR Pattern
- {
- Pattern RESULT = null;
- int lppleft = (parser_stack.get(parser_top-2)).left;
- int lppright = (parser_stack.get(parser_top-2)).right;
- Pattern lpp = (Pattern)(parser_stack.get(parser_top-2)).value;
- int pleft = (parser_stack.get(parser_top-0)).left;
- int pright = (parser_stack.get(parser_top-0)).right;
- Pattern p = (Pattern)(parser_stack.get(parser_top-0)).value;
- RESULT = new AlternativePattern(lpp, p);
- parser_result = new Symbol(28/*Pattern*/, (parser_stack.get(parser_top-2)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 3: // Pattern ::= LocationPathPattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 139: // QName ::= SELF
{
- Pattern RESULT = null;
- int lppleft = (parser_stack.get(parser_top-0)).left;
- int lppright = (parser_stack.get(parser_top-0)).right;
- Pattern lpp = (Pattern)(parser_stack.get(parser_top-0)).value;
- RESULT = lpp;
- parser_result = new Symbol(28/*Pattern*/, (parser_stack.get(parser_top-0)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("self");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 2: // TopLevel ::= EXPRESSION Expr
- {
- SyntaxTreeNode RESULT = null;
- int exprleft = (parser_stack.get(parser_top-0)).left;
- int exprright = (parser_stack.get(parser_top-0)).right;
- Expression expr = (Expression)(parser_stack.get(parser_top-0)).value;
- RESULT = expr;
- parser_result = new Symbol(1/*TopLevel*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- return parser_result;
-
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 1: // TopLevel ::= PATTERN Pattern
+ /*. . . . . . . . . . . . . . . . . . . .*/
+ case 140: // QName ::= ID
{
- SyntaxTreeNode RESULT = null;
- int patternleft = (parser_stack.get(parser_top-0)).left;
- int patternright = (parser_stack.get(parser_top-0)).right;
- Pattern pattern = (Pattern)(parser_stack.get(parser_top-0)).value;
- RESULT = pattern;
- parser_result = new Symbol(1/*TopLevel*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
+ QName result = parser.getQNameIgnoreDefaultNs("id");
+ parser_result = new Symbol(37, (parser_stack.get(parser_top - 0)).left,
+ (parser_stack.get(parser_top - 0)).right, result);
}
- return parser_result;
+ return parser_result;
- /*. . . . . . . . . . . . . . . . . . . .*/
- case 0: // $START ::= TopLevel EOF
- {
- Object RESULT = null;
- int start_valleft = (parser_stack.get(parser_top-1)).left;
- int start_valright = (parser_stack.get(parser_top-1)).right;
- SyntaxTreeNode start_val = (SyntaxTreeNode)(parser_stack.get(parser_top-1)).value;
- RESULT = start_val;
- parser_result = new Symbol(0/*$START*/, (parser_stack.get(parser_top-1)).left, (parser_stack.get(parser_top-0)).right, RESULT);
- }
- /* ACCEPT */
- parser_parser.done_parsing();
- return parser_result;
-
- /* . . . . . .*/
- default:
- throw new Exception(
- "Invalid action number found in internal parse table");
+ /* . . . . . .*/
+ default:
+ throw new Exception(
+ "Invalid action number found in internal parse table");
}
}
--- a/src/java.xml/share/legal/jcup.md Thu Nov 21 17:51:11 2019 +0000
+++ b/src/java.xml/share/legal/jcup.md Mon Nov 25 15:16:29 2019 +0000
@@ -1,9 +1,9 @@
-## CUP Parser Generator for Java v 0.10k
+## CUP Parser Generator for Java v 0.11b
### CUP Parser Generator License
<pre>
-Copyright 1996-1999 by Scott Hudson, Frank Flannery, C. Scott Ananian
+Copyright 1996-2015 by Scott Hudson, Frank Flannery, C. Scott Ananian, Michael Petter
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted, provided
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Main.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/Main.java Mon Nov 25 15:16:29 2019 +0000
@@ -51,6 +51,7 @@
import org.graalvm.compiler.hotspot.HotSpotGraalCompilerFactory;
import org.graalvm.compiler.hotspot.HotSpotGraalOptionValues;
import org.graalvm.compiler.hotspot.HotSpotGraalRuntime;
+import org.graalvm.compiler.hotspot.HotSpotGraalRuntime.HotSpotGC;
import org.graalvm.compiler.hotspot.HotSpotHostBackend;
import org.graalvm.compiler.hotspot.meta.HotSpotInvokeDynamicPlugin;
import org.graalvm.compiler.java.GraphBuilderPhase;
@@ -223,7 +224,11 @@
System.gc();
}
- int gc = runtime.getGarbageCollector().ordinal() + 1;
+ HotSpotGC graal_gc = runtime.getGarbageCollector();
+ int def = graal_gc.ordinal() + 1;
+ String name = "CollectedHeap::" + graal_gc.name();
+ int gc = graalHotSpotVMConfig.getConstant(name, Integer.class, def);
+
BinaryContainer binaryContainer = new BinaryContainer(graalOptions, graalHotSpotVMConfig, graphBuilderConfig, gc, JVM_VERSION);
DataBuilder dataBuilder = new DataBuilder(this, backend, classes, binaryContainer);
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/file/FSInfo.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/file/FSInfo.java Mon Nov 25 15:16:29 2019 +0000
@@ -25,9 +25,11 @@
package com.sun.tools.javac.file;
+import java.io.IOError;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
+import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.spi.FileSystemProvider;
import java.util.ArrayList;
@@ -109,10 +111,14 @@
for (StringTokenizer st = new StringTokenizer(path);
st.hasMoreTokens(); ) {
String elt = st.nextToken();
- Path f = FileSystems.getDefault().getPath(elt);
- if (!f.isAbsolute() && parent != null)
- f = parent.resolve(f).toAbsolutePath();
- list.add(f);
+ try {
+ Path f = FileSystems.getDefault().getPath(elt);
+ if (!f.isAbsolute() && parent != null)
+ f = parent.resolve(f).toAbsolutePath();
+ list.add(f);
+ } catch (InvalidPathException | IOError e) {
+ throw new IOException(e);
+ }
}
return list;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java Mon Nov 25 15:16:29 2019 +0000
@@ -232,6 +232,7 @@
// Supported GCs
Serial(true, "UseSerialGC"),
Parallel(true, "UseParallelGC", "UseParallelOldGC"),
+ CMS(true, "UseConcMarkSweepGC"),
G1(true, "UseG1GC"),
// Unsupported GCs
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/DocFilesHandlerImpl.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/DocFilesHandlerImpl.java Mon Nov 25 15:16:29 2019 +0000
@@ -169,7 +169,7 @@
private void handleHtmlFile(DocFile srcfile, DocPath dstPath) throws DocFileIOException {
Utils utils = configuration.utils;
FileObject fileObject = srcfile.getFileObject();
- DocFileElement dfElement = new DocFileElement(element, fileObject);
+ DocFileElement dfElement = new DocFileElement(utils, element, fileObject);
DocPath dfilePath = dstPath.resolve(srcfile.getName());
HtmlDocletWriter docletWriter = new DocFileWriter(configuration, dfilePath, element);
@@ -181,8 +181,8 @@
String title = getWindowTitle(docletWriter, dfElement).trim();
HtmlTree htmlContent = docletWriter.getBody(title);
docletWriter.addTop(htmlContent);
- PackageElement pkg = (PackageElement) element;
- this.navBar = new Navigation(pkg, configuration, docletWriter.fixedNavDiv,
+ PackageElement pkg = dfElement.getPackageElement();
+ this.navBar = new Navigation(element, configuration, docletWriter.fixedNavDiv,
PageMode.DOCFILE, docletWriter.path);
Content mdleLinkContent = docletWriter.getModuleLink(utils.elementUtils.getModuleOf(pkg),
docletWriter.contents.moduleLabel);
@@ -299,8 +299,6 @@
private static class DocFileWriter extends HtmlDocletWriter {
- final PackageElement pkg;
-
/**
* Constructor to construct the HtmlDocletWriter object.
*
@@ -312,7 +310,7 @@
super(configuration, path);
switch (e.getKind()) {
case PACKAGE:
- pkg = (PackageElement)e;
+ case MODULE:
break;
default:
throw new AssertionError("unsupported element: " + e.getKind());
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/DocFileElement.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/DocFileElement.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,19 +25,12 @@
package jdk.javadoc.internal.doclets.toolkit;
-import java.lang.annotation.Annotation;
-import java.util.Set;
-
-import javax.lang.model.element.AnnotationMirror;
import javax.lang.model.element.Element;
-import javax.lang.model.element.ElementKind;
-import javax.lang.model.element.ElementVisitor;
-import javax.lang.model.element.Name;
+import javax.lang.model.element.ModuleElement;
import javax.lang.model.element.PackageElement;
-import javax.lang.model.type.TypeMirror;
import javax.tools.FileObject;
-import jdk.javadoc.doclet.DocletEnvironment;
+import jdk.javadoc.internal.doclets.toolkit.util.Utils;
/**
* This is a pseudo element wrapper for doc-files html contents, essentially to
@@ -50,26 +43,30 @@
*/
public class DocFileElement implements DocletElement {
- private final Element element;
+ private final PackageElement packageElement;
private final FileObject fo;
- public DocFileElement(Element element, FileObject fo) {
- this.element = element;
+ public DocFileElement(Utils utils, Element element, FileObject fo) {
this.fo = fo;
+
+ switch(element.getKind()) {
+ case MODULE:
+ ModuleElement moduleElement = (ModuleElement) element;
+ packageElement = utils.elementUtils.getPackageElement(moduleElement, "");
+ break;
+
+ case PACKAGE:
+ packageElement = (PackageElement) element;
+ break;
+
+ default:
+ throw new AssertionError("unknown kind: " + element.getKind());
+ }
}
@Override
public PackageElement getPackageElement() {
- switch(element.getKind()) {
- case MODULE:
- // uncomment to support doc-files in modules
- // return configuration.workArounds.getUnnamedPackage();
- throw new UnsupportedOperationException("not implemented");
- case PACKAGE:
- return (PackageElement)element;
- default:
- throw new AssertionError("unknown kind: " + element.getKind());
- }
+ return packageElement;
}
@Override
--- a/src/jdk.jdeps/share/classes/com/sun/tools/javap/AttributeWriter.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jdeps/share/classes/com/sun/tools/javap/AttributeWriter.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -126,9 +126,6 @@
@Override
public Void visitDefault(DefaultAttribute attr, Void ignore) {
- if (attr.reason != null) {
- report(attr.reason);
- }
byte[] data = attr.info;
int i = 0;
int j = 0;
@@ -140,7 +137,11 @@
print("attribute name = #" + attr.attribute_name_index);
}
print(": ");
- println("length = 0x" + toHex(attr.info.length));
+ print("length = 0x" + toHex(attr.info.length));
+ if (attr.reason != null) {
+ print(" (" + attr.reason + ")");
+ }
+ println();
print(" ");
--- a/src/jdk.jfr/share/classes/jdk/jfr/consumer/EventStream.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/consumer/EventStream.java Mon Nov 25 15:16:29 2019 +0000
@@ -139,7 +139,7 @@
*/
public static EventStream openRepository() throws IOException {
Utils.checkAccessFlightRecorder();
- return new EventDirectoryStream(AccessController.getContext(), null, SecuritySupport.PRIVILIGED, false);
+ return new EventDirectoryStream(AccessController.getContext(), null, SecuritySupport.PRIVILIGED, null);
}
/**
@@ -162,7 +162,7 @@
public static EventStream openRepository(Path directory) throws IOException {
Objects.nonNull(directory);
AccessControlContext acc = AccessController.getContext();
- return new EventDirectoryStream(acc, directory, FileAccess.UNPRIVILIGED, false);
+ return new EventDirectoryStream(acc, directory, FileAccess.UNPRIVILIGED, null);
}
/**
--- a/src/jdk.jfr/share/classes/jdk/jfr/consumer/RecordingStream.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/consumer/RecordingStream.java Mon Nov 25 15:16:29 2019 +0000
@@ -88,7 +88,8 @@
this.recording = new Recording();
this.recording.setFlushInterval(Duration.ofMillis(1000));
try {
- this.directoryStream = new EventDirectoryStream(acc, null, SecuritySupport.PRIVILIGED, true);
+ PlatformRecording pr = PrivateAccess.getInstance().getPlatformRecording(recording);
+ this.directoryStream = new EventDirectoryStream(acc, null, SecuritySupport.PRIVILIGED, pr);
} catch (IOException ioe) {
this.recording.close();
throw new IllegalStateException(ioe.getMessage());
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/JVM.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/JVM.java Mon Nov 25 15:16:29 2019 +0000
@@ -43,7 +43,6 @@
static final long RESERVED_CLASS_ID_LIMIT = 400;
- private volatile boolean recording;
private volatile boolean nativeOK;
private static native void registerNatives();
@@ -69,6 +68,15 @@
}
/**
+ * Marks current chunk as final
+ * <p>
+ * This allows streaming clients to read the chunk header and
+ * close the stream when no more data will be written into
+ * the current repository.
+ */
+ public native void markChunkFinal();
+
+ /**
* Begin recording events
*
* Requires that JFR has been started with {@link #createNativeJFR()}
@@ -76,6 +84,19 @@
public native void beginRecording();
/**
+ * Return true if the JVM is recording
+ */
+ public native boolean isRecording();
+
+ /**
+ * End recording events, which includes flushing data in thread buffers
+ *
+ * Requires that JFR has been started with {@link #createNativeJFR()}
+ *
+ */
+ public native void endRecording();
+
+ /**
* Return ticks
*
* @return the time, in ticks
@@ -97,13 +118,7 @@
*/
public native boolean emitEvent(long eventTypeId, long timestamp, long when);
- /**
- * End recording events, which includes flushing data in thread buffers
- *
- * Requires that JFR has been started with {@link #createNativeJFR()}
- *
- */
- public native void endRecording();
+
/**
* Return a list of all classes deriving from {@link jdk.internal.event.Event}
@@ -354,20 +369,6 @@
*/
public native void storeMetadataDescriptor(byte[] bytes);
- public void endRecording_() {
- endRecording();
- recording = false;
- }
-
- public void beginRecording_() {
- beginRecording();
- recording = true;
- }
-
- public boolean isRecording() {
- return recording;
- }
-
/**
* If the JVM supports JVM TI and retransformation has not been disabled this
* method will return true. This flag can not change during the lifetime of
@@ -558,4 +559,5 @@
*@return start time of the recording in nanos, -1 in case of in-memory
*/
public native long getChunkStartNanos();
+
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecorder.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecorder.java Mon Nov 25 15:16:29 2019 +0000
@@ -31,6 +31,7 @@
import static jdk.jfr.internal.LogTag.JFR;
import static jdk.jfr.internal.LogTag.JFR_SYSTEM;
+import java.io.IOException;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.time.Duration;
@@ -53,6 +54,7 @@
import jdk.jfr.RecordingState;
import jdk.jfr.events.ActiveRecordingEvent;
import jdk.jfr.events.ActiveSettingEvent;
+import jdk.jfr.internal.SecuritySupport.SafePath;
import jdk.jfr.internal.SecuritySupport.SecureRecorderListener;
import jdk.jfr.internal.instrument.JDKEvents;
@@ -70,6 +72,7 @@
private long recordingCounter = 0;
private RepositoryChunk currentChunk;
+ private boolean inShutdown;
public PlatformRecorder() throws Exception {
repository = Repository.getRepository();
@@ -176,6 +179,10 @@
}
}
+ synchronized void setInShutDown() {
+ this.inShutdown = true;
+ }
+
// called by shutdown hook
synchronized void destroy() {
try {
@@ -198,7 +205,7 @@
if (jvm.hasNativeJFR()) {
if (jvm.isRecording()) {
- jvm.endRecording_();
+ jvm.endRecording();
}
jvm.destroyNativeJFR();
}
@@ -236,7 +243,7 @@
MetadataRepository.getInstance().setOutput(null);
}
currentChunk = newChunk;
- jvm.beginRecording_();
+ jvm.beginRecording();
startNanos = jvm.getChunkStartNanos();
recording.setState(RecordingState.RUNNING);
updateSettings();
@@ -289,11 +296,15 @@
}
}
OldObjectSample.emit(recording);
+ recording.setFinalStartnanos(jvm.getChunkStartNanos());
if (endPhysical) {
RequestEngine.doChunkEnd();
if (recording.isToDisk()) {
if (currentChunk != null) {
+ if (inShutdown) {
+ jvm.markChunkFinal();
+ }
MetadataRepository.getInstance().setOutput(null);
finishChunk(currentChunk, now, null);
currentChunk = null;
@@ -302,7 +313,7 @@
// last memory
dumpMemoryToDestination(recording);
}
- jvm.endRecording_();
+ jvm.endRecording();
disableEvents();
} else {
RepositoryChunk newChunk = null;
@@ -327,7 +338,6 @@
} else {
RequestEngine.setFlushInterval(Long.MAX_VALUE);
}
-
recording.setState(RecordingState.STOPPED);
}
@@ -357,17 +367,7 @@
MetadataRepository.getInstance().setSettings(list);
}
- public synchronized void rotateIfRecordingToDisk() {
- boolean disk = false;
- for (PlatformRecording s : getRecordings()) {
- if (RecordingState.RUNNING == s.getState() && s.isToDisk()) {
- disk = true;
- }
- }
- if (disk) {
- rotateDisk();
- }
- }
+
synchronized void rotateDisk() {
Instant now = Instant.now();
@@ -584,6 +584,19 @@
target.setInternalDuration(Duration.between(startTime, endTime));
}
-
-
+ public synchronized void migrate(SafePath repo) throws IOException {
+ // Must set repository while holding recorder lock so
+ // the final chunk in repository gets marked correctly
+ Repository.getRepository().setBasePath(repo);
+ boolean disk = false;
+ for (PlatformRecording s : getRecordings()) {
+ if (RecordingState.RUNNING == s.getState() && s.isToDisk()) {
+ disk = true;
+ }
+ }
+ if (disk) {
+ jvm.markChunkFinal();
+ rotateDisk();
+ }
+ }
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecording.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecording.java Mon Nov 25 15:16:29 2019 +0000
@@ -85,6 +85,7 @@
private AccessControlContext noDestinationDumpOnExitAccessControlContext;
private boolean shuoldWriteActiveRecordingEvent = true;
private Duration flushInterval = Duration.ofSeconds(1);
+ private long finalStartChunkNanos = Long.MIN_VALUE;
PlatformRecording(PlatformRecorder recorder, long id) {
// Typically the access control context is taken
@@ -811,4 +812,12 @@
return Long.MAX_VALUE;
}
}
+
+ public long getFinalChunkStartNanos() {
+ return finalStartChunkNanos;
+ }
+
+ public void setFinalStartnanos(long chunkStartNanos) {
+ this.finalStartChunkNanos = chunkStartNanos;
+ }
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/Repository.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/Repository.java Mon Nov 25 15:16:29 2019 +0000
@@ -85,6 +85,7 @@
if (!SecuritySupport.existDirectory(repository)) {
this.repository = createRepository(baseLocation);
jvm.setRepositoryLocation(repository.toString());
+ SecuritySupport.setProperty(JFR_REPOSITORY_LOCATION_PROPERTY, repository.toString());
cleanupDirectories.add(repository);
}
return new RepositoryChunk(repository, timestamp);
@@ -115,9 +116,7 @@
if (i == MAX_REPO_CREATION_RETRIES) {
throw new IOException("Unable to create JFR repository directory using base location (" + basePath + ")");
}
- SafePath canonicalRepositoryPath = SecuritySupport.toRealPath(f);
- SecuritySupport.setProperty(JFR_REPOSITORY_LOCATION_PROPERTY, canonicalRepositoryPath.toString());
- return canonicalRepositoryPath;
+ return SecuritySupport.toRealPath(f);
}
private static SafePath createRealBasePath(SafePath safePath) throws IOException {
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/ShutdownHook.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/ShutdownHook.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@
// starting any "real" operations. In low memory situations,
// we would like to take an OOM as early as possible.
tlabDummyObject = new Object();
-
+ recorder.setInShutDown();
for (PlatformRecording recording : recorder.getRecordings()) {
if (recording.getDumpOnExit() && recording.getState() == RecordingState.RUNNING) {
dump(recording);
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/AbstractEventStream.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/AbstractEventStream.java Mon Nov 25 15:16:29 2019 +0000
@@ -40,6 +40,7 @@
import jdk.jfr.internal.LogLevel;
import jdk.jfr.internal.LogTag;
import jdk.jfr.internal.Logger;
+import jdk.jfr.internal.PlatformRecording;
import jdk.jfr.internal.SecuritySupport;
/*
@@ -50,19 +51,19 @@
private final static AtomicLong counter = new AtomicLong(1);
private final Object terminated = new Object();
- private final boolean active;
private final Runnable flushOperation = () -> dispatcher().runFlushActions();
private final AccessControlContext accessControllerContext;
private final StreamConfiguration configuration = new StreamConfiguration();
+ private final PlatformRecording recording;
private volatile Thread thread;
private Dispatcher dispatcher;
private volatile boolean closed;
- AbstractEventStream(AccessControlContext acc, boolean active) throws IOException {
+ AbstractEventStream(AccessControlContext acc, PlatformRecording recording) throws IOException {
this.accessControllerContext = Objects.requireNonNull(acc);
- this.active = active;
+ this.recording = recording;
}
@Override
@@ -229,7 +230,7 @@
if (configuration.started) {
throw new IllegalStateException("Event stream can only be started once");
}
- if (active && configuration.startTime == null) {
+ if (recording != null && configuration.startTime == null) {
configuration.setStartNanos(startNanos);
}
configuration.setStarted(true);
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkHeader.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkHeader.java Mon Nov 25 15:16:29 2019 +0000
@@ -39,8 +39,10 @@
private static final long CHUNK_SIZE_POSITION = 8;
private static final long DURATION_NANOS_POSITION = 40;
private static final long FILE_STATE_POSITION = 64;
+ private static final long FLAG_BYTE_POSITION = 67;
private static final long METADATA_TYPE_ID = 0;
private static final byte[] FILE_MAGIC = { 'F', 'L', 'R', '\0' };
+ private static final int MASK_FINAL_CHUNK = 1 << 1;
private final short major;
private final short minor;
@@ -58,6 +60,7 @@
private long absoluteChunkEnd;
private boolean isFinished;
private boolean finished;
+ private boolean finalChunk;
public ChunkHeader(RecordingInput input) throws IOException {
this(input, 0, 0);
@@ -101,8 +104,7 @@
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Chunk: startTicks=" + chunkStartTicks);
ticksPerSecond = input.readRawLong();
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Chunk: ticksPerSecond=" + ticksPerSecond);
- input.readRawInt(); // features, not used
-
+ input.readRawInt(); // ignore file state and flag bits
refresh();
input.position(absoluteEventStart);
}
@@ -123,6 +125,8 @@
long durationNanos = input.readPhysicalLong();
input.positionPhysical(absoluteChunkStart + FILE_STATE_POSITION);
byte fileState2 = input.readPhysicalByte();
+ input.positionPhysical(absoluteChunkStart + FLAG_BYTE_POSITION);
+ int flagByte = input.readPhysicalByte();
if (fileState1 == fileState2) { // valid header
finished = fileState1 == 0;
if (metadataPosition != 0) {
@@ -150,6 +154,8 @@
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Chunk: generation=" + fileState2);
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Chunk: finished=" + isFinished);
Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Chunk: fileSize=" + input.size());
+ this.finalChunk = (flagByte & MASK_FINAL_CHUNK) != 0;
+ Logger.log(LogTag.JFR_SYSTEM_PARSER, LogLevel.INFO, "Chunk: finalChunk=" + finalChunk);
absoluteChunkEnd = absoluteChunkStart + chunkSize;
return;
}
@@ -183,6 +189,10 @@
return input.getFileSize() == absoluteChunkEnd;
}
+ public boolean isFinalChunk() {
+ return finalChunk;
+ }
+
public boolean isFinished() throws IOException {
return isFinished;
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkParser.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ChunkParser.java Mon Nov 25 15:16:29 2019 +0000
@@ -448,4 +448,8 @@
return chunkHeader.getStartNanos();
}
+ public boolean isFinalChunk() {
+ return chunkHeader.isFinalChunk();
+ }
+
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/EventDirectoryStream.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/EventDirectoryStream.java Mon Nov 25 15:16:29 2019 +0000
@@ -35,6 +35,7 @@
import jdk.jfr.consumer.RecordedEvent;
import jdk.jfr.internal.JVM;
+import jdk.jfr.internal.PlatformRecording;
import jdk.jfr.internal.Utils;
import jdk.jfr.internal.consumer.ChunkParser.ParserConfiguration;
@@ -43,12 +44,12 @@
* with chunk files.
*
*/
-public final class EventDirectoryStream extends AbstractEventStream {
+public class EventDirectoryStream extends AbstractEventStream {
private final static Comparator<? super RecordedEvent> EVENT_COMPARATOR = JdkJfrConsumer.instance().eventComparator();
private final RepositoryFiles repositoryFiles;
- private final boolean active;
+ private final PlatformRecording recording;
private final FileAccess fileAccess;
private ChunkParser currentParser;
@@ -56,10 +57,10 @@
private RecordedEvent[] sortedCache;
private int threadExclusionLevel = 0;
- public EventDirectoryStream(AccessControlContext acc, Path p, FileAccess fileAccess, boolean active) throws IOException {
- super(acc, active);
+ public EventDirectoryStream(AccessControlContext acc, Path p, FileAccess fileAccess, PlatformRecording recording) throws IOException {
+ super(acc, recording);
this.fileAccess = Objects.requireNonNull(fileAccess);
- this.active = active;
+ this.recording = recording;
this.repositoryFiles = new RepositoryFiles(fileAccess, p);
}
@@ -104,7 +105,7 @@
Dispatcher disp = dispatcher();
Path path;
- boolean validStartTime = active || disp.startTime != null;
+ boolean validStartTime = recording != null || disp.startTime != null;
if (validStartTime) {
path = repositoryFiles.firstPath(disp.startNanos);
} else {
@@ -139,8 +140,17 @@
return;
}
}
+ if (isLastChunk()) {
+ // Recording was stopped/closed externally, and there is no more data to process.
+ return;
+ }
+ if (repositoryFiles.hasFixedPath() && currentParser.isFinalChunk()) {
+ // JVM process exited/crashed, or repository migrated to an unknown location
+ return;
+ }
if (isClosed()) {
+ // Stream was closed
return;
}
long durationNanos = currentParser.getChunkDuration();
@@ -162,6 +172,13 @@
}
}
+ private boolean isLastChunk() {
+ if (recording == null) {
+ return false;
+ }
+ return recording.getFinalChunkStartNanos() >= currentParser.getStartNanos();
+ }
+
private boolean processOrdered(Dispatcher c, boolean awaitNewEvents) throws IOException {
if (sortedCache == null) {
sortedCache = new RecordedEvent[100_000];
@@ -206,4 +223,5 @@
}
}
}
+
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/EventFileStream.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/EventFileStream.java Mon Nov 25 15:16:29 2019 +0000
@@ -50,7 +50,7 @@
private RecordedEvent[] cacheSorted;
public EventFileStream(AccessControlContext acc, Path path) throws IOException {
- super(acc, false);
+ super(acc, null);
Objects.requireNonNull(path);
this.input = new RecordingInput(path.toFile(), FileAccess.UNPRIVILIGED);
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/RepositoryFiles.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/RepositoryFiles.java Mon Nov 25 15:16:29 2019 +0000
@@ -227,4 +227,8 @@
waitObject.notify();
}
}
+
+ public boolean hasFixedPath() {
+ return repository != null;
+ }
}
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/dcmd/DCmdConfigure.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/dcmd/DCmdConfigure.java Mon Nov 25 15:16:29 2019 +0000
@@ -32,6 +32,7 @@
import jdk.jfr.internal.LogTag;
import jdk.jfr.internal.Logger;
import jdk.jfr.internal.Options;
+import jdk.jfr.internal.PlatformRecorder;
import jdk.jfr.internal.PrivateAccess;
import jdk.jfr.internal.Repository;
import jdk.jfr.internal.SecuritySupport.SafePath;
@@ -89,11 +90,12 @@
if (repositoryPath != null) {
try {
SafePath s = new SafePath(repositoryPath);
- Repository.getRepository().setBasePath(s);
+ if (FlightRecorder.isInitialized()) {
+ PrivateAccess.getInstance().getPlatformRecorder().migrate(s);
+ } else {
+ Repository.getRepository().setBasePath(s);
+ }
Logger.log(LogTag.JFR, LogLevel.INFO, "Base repository path set to " + repositoryPath);
- if (FlightRecorder.isInitialized()) {
- PrivateAccess.getInstance().getPlatformRecorder().rotateIfRecordingToDisk();;
- }
} catch (Exception e) {
throw new DCmdException("Could not use " + repositoryPath + " as repository. " + e.getMessage(), e);
}
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/JarFileSystem.java Thu Nov 21 17:51:11 2019 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,202 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.nio.zipfs;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.Runtime.Version;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.function.Consumer;
-import java.util.function.Function;
-import java.util.jar.Attributes;
-import java.util.jar.Manifest;
-
-/**
- * Adds aliasing to ZipFileSystem to support multi-release jar files. An alias map
- * is created by {@link JarFileSystem#createVersionedLinks(int)}. The map is then
- * consulted when an entry is looked up in {@link JarFileSystem#getInode(byte[])}
- * to determine if the entry has a corresponding versioned entry. If so, the
- * versioned entry is returned.
- *
- * @author Steve Drach
- */
-class JarFileSystem extends ZipFileSystem {
- // lookup needs to be initialized because isMultiReleaseJar is called before createVersionedLinks
- private Function<byte[], byte[]> lookup = path -> path;
-
- @Override
- IndexNode getInode(byte[] path) {
- // check for an alias to a versioned entry
- return super.getInode(lookup.apply(path));
- }
-
- JarFileSystem(ZipFileSystemProvider provider, Path zfpath, Map<String,?> env) throws IOException {
- super(provider, zfpath, env);
- Object o = getRuntimeVersion(env);
- if (isMultiReleaseJar() && (o != null)) {
- int version;
- if (o instanceof String) {
- String s = (String)o;
- if (s.equals("runtime")) {
- version = Runtime.version().feature();
- } else if (s.matches("^[1-9][0-9]*$")) {
- version = Version.parse(s).feature();
- } else {
- throw new IllegalArgumentException("Invalid runtime version");
- }
- } else if (o instanceof Integer) {
- version = Version.parse(((Integer)o).toString()).feature();
- } else if (o instanceof Version) {
- version = ((Version)o).feature();
- } else {
- throw new IllegalArgumentException("env parameter must be String, Integer, "
- + "or Version");
- }
- createVersionedLinks(version < 0 ? 0 : version);
- setReadOnly();
- }
- }
-
- /**
- * Utility method to get the release version for a multi-release JAR. It
- * first checks the documented property {@code releaseVersion} and if not
- * found checks the original property {@code multi-release}
- * @param env ZIP FS map
- * @return release version or null if it is not specified
- */
- private Object getRuntimeVersion(Map<String, ?> env) {
- Object o = null;
- if (env.containsKey(ZipFileSystemProvider.PROPERTY_RELEASE_VERSION)) {
- o = env.get(ZipFileSystemProvider.PROPERTY_RELEASE_VERSION);
- } else {
- o = env.get(ZipFileSystemProvider.PROPERTY_MULTI_RELEASE);
- }
- return o;
- }
-
- private boolean isMultiReleaseJar() throws IOException {
- try (InputStream is = newInputStream(getBytes("/META-INF/MANIFEST.MF"))) {
- String multiRelease = new Manifest(is).getMainAttributes()
- .getValue(Attributes.Name.MULTI_RELEASE);
- return "true".equalsIgnoreCase(multiRelease);
- } catch (NoSuchFileException x) {
- return false;
- }
- }
-
- /**
- * create a map of aliases for versioned entries, for example:
- * version/PackagePrivate.class -> META-INF/versions/9/version/PackagePrivate.class
- * version/PackagePrivate.java -> META-INF/versions/9/version/PackagePrivate.java
- * version/Version.class -> META-INF/versions/10/version/Version.class
- * version/Version.java -> META-INF/versions/10/version/Version.java
- *
- * then wrap the map in a function that getEntry can use to override root
- * entry lookup for entries that have corresponding versioned entries
- */
- private void createVersionedLinks(int version) {
- IndexNode verdir = getInode(getBytes("/META-INF/versions"));
- // nothing to do, if no /META-INF/versions
- if (verdir == null) {
- return;
- }
- // otherwise, create a map and for each META-INF/versions/{n} directory
- // put all the leaf inodes, i.e. entries, into the alias map
- // possibly shadowing lower versioned entries
- HashMap<IndexNode, byte[]> aliasMap = new HashMap<>();
- getVersionMap(version, verdir).values().forEach(versionNode ->
- walk(versionNode.child, entryNode ->
- aliasMap.put(
- getOrCreateInode(getRootName(entryNode, versionNode), entryNode.isdir),
- entryNode.name))
- );
- lookup = path -> {
- byte[] entry = aliasMap.get(IndexNode.keyOf(path));
- return entry == null ? path : entry;
- };
- }
-
- /**
- * create a sorted version map of version -> inode, for inodes <= max version
- * 9 -> META-INF/versions/9
- * 10 -> META-INF/versions/10
- */
- private TreeMap<Integer, IndexNode> getVersionMap(int version, IndexNode metaInfVersions) {
- TreeMap<Integer,IndexNode> map = new TreeMap<>();
- IndexNode child = metaInfVersions.child;
- while (child != null) {
- Integer key = getVersion(child, metaInfVersions);
- if (key != null && key <= version) {
- map.put(key, child);
- }
- child = child.sibling;
- }
- return map;
- }
-
- /**
- * extract the integer version number -- META-INF/versions/9 returns 9
- */
- private Integer getVersion(IndexNode inode, IndexNode metaInfVersions) {
- try {
- byte[] fullName = inode.name;
- return Integer.parseInt(getString(Arrays
- .copyOfRange(fullName, metaInfVersions.name.length + 1, fullName.length)));
- } catch (NumberFormatException x) {
- // ignore this even though it might indicate issues with the JAR structure
- return null;
- }
- }
-
- /**
- * walk the IndexNode tree processing all leaf nodes
- */
- private void walk(IndexNode inode, Consumer<IndexNode> consumer) {
- if (inode == null) return;
- if (inode.isDir()) {
- walk(inode.child, consumer);
- } else {
- consumer.accept(inode);
- }
- walk(inode.sibling, consumer);
- }
-
- /**
- * extract the root name from a versioned entry name
- * given inode for META-INF/versions/9/foo/bar.class
- * and prefix META-INF/versions/9/
- * returns foo/bar.class
- */
- private byte[] getRootName(IndexNode inode, IndexNode prefix) {
- byte[] fullName = inode.name;
- return Arrays.copyOfRange(fullName, prefix.name.length, fullName.length);
- }
-}
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/JarFileSystemProvider.java Thu Nov 21 17:51:11 2019 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.nio.zipfs;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.FileSystem;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-class JarFileSystemProvider extends ZipFileSystemProvider {
-
- @Override
- public String getScheme() {
- return "jar";
- }
-
- @Override
- protected Path uriToPath(URI uri) {
- String scheme = uri.getScheme();
- if ((scheme == null) || !scheme.equalsIgnoreCase(getScheme())) {
- throw new IllegalArgumentException("URI scheme is not '" + getScheme() + "'");
- }
- try {
- String uristr = uri.toString();
- int end = uristr.indexOf("!/");
- uristr = uristr.substring(4, (end == -1) ? uristr.length() : end);
- uri = new URI(uristr);
- return Paths.get(new URI("file", uri.getHost(), uri.getPath(), null))
- .toAbsolutePath();
- } catch (URISyntaxException e) {
- throw new AssertionError(e); //never thrown
- }
- }
-
- @Override
- public Path getPath(URI uri) {
- FileSystem fs = getFileSystem(uri);
- String path = uri.getFragment();
- if (path == null) {
- String uristr = uri.toString();
- int off = uristr.indexOf("!/");
- if (off != -1)
- path = uristr.substring(off + 2);
- }
- if (path != null)
- return fs.getPath(path);
- throw new IllegalArgumentException("URI: "
- + uri
- + " does not contain path fragment ex. jar:///c:/foo.zip!/BAR");
- }
-}
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java Mon Nov 25 15:16:29 2019 +0000
@@ -33,6 +33,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.lang.Runtime.Version;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
@@ -50,6 +51,10 @@
import java.util.*;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.jar.Attributes;
+import java.util.jar.Manifest;
import java.util.regex.Pattern;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
@@ -85,6 +90,11 @@
private static final String PROPERTY_DEFAULT_OWNER = "defaultOwner";
private static final String PROPERTY_DEFAULT_GROUP = "defaultGroup";
private static final String PROPERTY_DEFAULT_PERMISSIONS = "defaultPermissions";
+ // Property used to specify the entry version to use for a multi-release JAR
+ private static final String PROPERTY_RELEASE_VERSION = "releaseVersion";
+ // Original property used to specify the entry version to use for a
+ // multi-release JAR which is kept for backwards compatibility.
+ private static final String PROPERTY_MULTI_RELEASE = "multi-release";
private static final Set<PosixFilePermission> DEFAULT_PERMISSIONS =
PosixFilePermissions.fromString("rwxrwxrwx");
@@ -112,6 +122,9 @@
private final int defaultCompressionMethod; // METHOD_STORED if "noCompression=true"
// METHOD_DEFLATED otherwise
+ // entryLookup is identity by default, will be overridden for multi-release jars
+ private Function<byte[], byte[]> entryLookup = Function.identity();
+
// POSIX support
final boolean supportPosix;
private final UserPrincipal defaultOwner;
@@ -167,6 +180,8 @@
}
this.provider = provider;
this.zfpath = zfpath;
+
+ initializeReleaseVersion(env);
}
/**
@@ -1349,6 +1364,142 @@
}
}
+ /**
+ * If a version property has been specified and the file represents a multi-release JAR,
+ * determine the requested runtime version and initialize the ZipFileSystem instance accordingly.
+ *
+ * Checks if the Zip File System property "releaseVersion" has been specified. If it has,
+ * use its value to determine the requested version. If not use the value of the "multi-release" property.
+ */
+ private void initializeReleaseVersion(Map<String, ?> env) throws IOException {
+ Object o = env.containsKey(PROPERTY_RELEASE_VERSION) ?
+ env.get(PROPERTY_RELEASE_VERSION) :
+ env.get(PROPERTY_MULTI_RELEASE);
+
+ if (o != null && isMultiReleaseJar()) {
+ int version;
+ if (o instanceof String) {
+ String s = (String)o;
+ if (s.equals("runtime")) {
+ version = Runtime.version().feature();
+ } else if (s.matches("^[1-9][0-9]*$")) {
+ version = Version.parse(s).feature();
+ } else {
+ throw new IllegalArgumentException("Invalid runtime version");
+ }
+ } else if (o instanceof Integer) {
+ version = Version.parse(((Integer)o).toString()).feature();
+ } else if (o instanceof Version) {
+ version = ((Version)o).feature();
+ } else {
+ throw new IllegalArgumentException("env parameter must be String, " +
+ "Integer, or Version");
+ }
+ createVersionedLinks(version < 0 ? 0 : version);
+ setReadOnly();
+ }
+ }
+
+ /**
+ * Returns true if the Manifest main attribute "Multi-Release" is set to true; false otherwise.
+ */
+ private boolean isMultiReleaseJar() throws IOException {
+ try (InputStream is = newInputStream(getBytes("/META-INF/MANIFEST.MF"))) {
+ String multiRelease = new Manifest(is).getMainAttributes()
+ .getValue(Attributes.Name.MULTI_RELEASE);
+ return "true".equalsIgnoreCase(multiRelease);
+ } catch (NoSuchFileException x) {
+ return false;
+ }
+ }
+
+ /**
+ * Create a map of aliases for versioned entries, for example:
+ * version/PackagePrivate.class -> META-INF/versions/9/version/PackagePrivate.class
+ * version/PackagePrivate.java -> META-INF/versions/9/version/PackagePrivate.java
+ * version/Version.class -> META-INF/versions/10/version/Version.class
+ * version/Version.java -> META-INF/versions/10/version/Version.java
+ *
+ * Then wrap the map in a function that getEntry can use to override root
+ * entry lookup for entries that have corresponding versioned entries.
+ */
+ private void createVersionedLinks(int version) {
+ IndexNode verdir = getInode(getBytes("/META-INF/versions"));
+ // nothing to do, if no /META-INF/versions
+ if (verdir == null) {
+ return;
+ }
+ // otherwise, create a map and for each META-INF/versions/{n} directory
+ // put all the leaf inodes, i.e. entries, into the alias map
+ // possibly shadowing lower versioned entries
+ HashMap<IndexNode, byte[]> aliasMap = new HashMap<>();
+ getVersionMap(version, verdir).values().forEach(versionNode ->
+ walk(versionNode.child, entryNode ->
+ aliasMap.put(
+ getOrCreateInode(getRootName(entryNode, versionNode), entryNode.isdir),
+ entryNode.name))
+ );
+ entryLookup = path -> {
+ byte[] entry = aliasMap.get(IndexNode.keyOf(path));
+ return entry == null ? path : entry;
+ };
+ }
+
+ /**
+ * Create a sorted version map of version -> inode, for inodes <= max version.
+ * 9 -> META-INF/versions/9
+ * 10 -> META-INF/versions/10
+ */
+ private TreeMap<Integer, IndexNode> getVersionMap(int version, IndexNode metaInfVersions) {
+ TreeMap<Integer,IndexNode> map = new TreeMap<>();
+ IndexNode child = metaInfVersions.child;
+ while (child != null) {
+ Integer key = getVersion(child, metaInfVersions);
+ if (key != null && key <= version) {
+ map.put(key, child);
+ }
+ child = child.sibling;
+ }
+ return map;
+ }
+
+ /**
+ * Extract the integer version number -- META-INF/versions/9 returns 9.
+ */
+ private Integer getVersion(IndexNode inode, IndexNode metaInfVersions) {
+ try {
+ byte[] fullName = inode.name;
+ return Integer.parseInt(getString(Arrays
+ .copyOfRange(fullName, metaInfVersions.name.length + 1, fullName.length)));
+ } catch (NumberFormatException x) {
+ // ignore this even though it might indicate issues with the JAR structure
+ return null;
+ }
+ }
+
+ /**
+ * Walk the IndexNode tree processing all leaf nodes.
+ */
+ private void walk(IndexNode inode, Consumer<IndexNode> consumer) {
+ if (inode == null) return;
+ if (inode.isDir()) {
+ walk(inode.child, consumer);
+ } else {
+ consumer.accept(inode);
+ }
+ walk(inode.sibling, consumer);
+ }
+
+ /**
+ * Extract the root name from a versioned entry name.
+ * E.g. given inode 'META-INF/versions/9/foo/bar.class'
+ * and prefix 'META-INF/versions/9/' returns 'foo/bar.class'.
+ */
+ private byte[] getRootName(IndexNode inode, IndexNode prefix) {
+ byte[] fullName = inode.name;
+ return Arrays.copyOfRange(fullName, prefix.name.length, fullName.length);
+ }
+
// Reads zip file central directory. Returns the file position of first
// CEN header, otherwise returns -1 if an error occurred. If zip->msg != NULL
// then the error was a zip format error and zip->msg has the error text.
@@ -1644,15 +1795,15 @@
hasUpdate = false; // clear
}
- IndexNode getInode(byte[] path) {
- return inodes.get(IndexNode.keyOf(Objects.requireNonNull(path, "path")));
+ private IndexNode getInode(byte[] path) {
+ return inodes.get(IndexNode.keyOf(Objects.requireNonNull(entryLookup.apply(path), "path")));
}
/**
* Return the IndexNode from the root tree. If it doesn't exist,
* it gets created along with all parent directory IndexNodes.
*/
- IndexNode getOrCreateInode(byte[] path, boolean isdir) {
+ private IndexNode getOrCreateInode(byte[] path, boolean isdir) {
IndexNode node = getInode(path);
// if node exists, return it
if (node != null) {
@@ -2248,7 +2399,7 @@
private static final ThreadLocal<IndexNode> cachedKey = new ThreadLocal<>();
- final static IndexNode keyOf(byte[] name) { // get a lookup key;
+ static final IndexNode keyOf(byte[] name) { // get a lookup key;
IndexNode key = cachedKey.get();
if (key == null) {
key = new IndexNode(name, -1);
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystemProvider.java Thu Nov 21 17:51:11 2019 +0000
+++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystemProvider.java Mon Nov 25 15:16:29 2019 +0000
@@ -52,12 +52,6 @@
* @author Xueming Shen, Rajendra Gutupalli, Jaya Hangal
*/
public class ZipFileSystemProvider extends FileSystemProvider {
-
- // Property used to specify the entry version to use for a multi-release JAR
- static final String PROPERTY_RELEASE_VERSION = "releaseVersion";
- // Original property used to specify the entry version to use for a
- // multi-release JAR which is kept for backwards compatibility.
- static final String PROPERTY_MULTI_RELEASE = "multi-release";
private final Map<Path, ZipFileSystem> filesystems = new HashMap<>();
public ZipFileSystemProvider() {}
@@ -127,21 +121,14 @@
}
private ZipFileSystem getZipFileSystem(Path path, Map<String, ?> env) throws IOException {
- ZipFileSystem zipfs;
try {
- if (env.containsKey(PROPERTY_RELEASE_VERSION) ||
- env.containsKey(PROPERTY_MULTI_RELEASE)) {
- zipfs = new JarFileSystem(this, path, env);
- } else {
- zipfs = new ZipFileSystem(this, path, env);
- }
+ return new ZipFileSystem(this, path, env);
} catch (ZipException ze) {
String pname = path.toString();
if (pname.endsWith(".zip") || pname.endsWith(".jar"))
throw ze;
throw new UnsupportedOperationException();
}
- return zipfs;
}
@Override
--- a/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -27,7 +27,6 @@
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@@ -108,14 +107,14 @@
{}
virtual void main_run() {
- while (OrderAccess::load_acquire(_continue_running)) {
+ while (Atomic::load_acquire(_continue_running)) {
uint id = _set->claim_par_id();
_set->release_par_id(id);
++_allocations;
ThreadBlockInVM tbiv(this); // Safepoint check.
}
tty->print_cr("%u allocations: " SIZE_FORMAT, _thread_number, _allocations);
- Atomic::add(_allocations, _total_allocations);
+ Atomic::add(_total_allocations, _allocations);
}
};
@@ -147,7 +146,7 @@
ThreadInVMfromNative invm(this_thread);
this_thread->sleep(milliseconds_to_run);
}
- OrderAccess::release_store(&continue_running, false);
+ Atomic::release_store(&continue_running, false);
for (uint i = 0; i < nthreads; ++i) {
ThreadInVMfromNative invm(this_thread);
post.wait_with_safepoint_check(this_thread);
--- a/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/gc/shared/test_ptrQueueBufferAllocator.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -26,7 +26,7 @@
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/semaphore.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalCounter.inline.hpp"
@@ -150,14 +150,14 @@
{}
virtual void main_run() {
- while (OrderAccess::load_acquire(_continue_running)) {
+ while (Atomic::load_acquire(_continue_running)) {
BufferNode* node = _allocator->allocate();
_cbl->push(node);
++_allocations;
ThreadBlockInVM tbiv(this); // Safepoint check.
}
tty->print_cr("allocations: " SIZE_FORMAT, _allocations);
- Atomic::add(_allocations, _total_allocations);
+ Atomic::add(_total_allocations, _allocations);
}
};
@@ -184,7 +184,7 @@
BufferNode* node = _cbl->pop();
if (node != NULL) {
_allocator->release(node);
- } else if (!OrderAccess::load_acquire(_continue_running)) {
+ } else if (!Atomic::load_acquire(_continue_running)) {
return;
}
ThreadBlockInVM tbiv(this); // Safepoint check.
@@ -226,12 +226,12 @@
ThreadInVMfromNative invm(this_thread);
this_thread->sleep(milliseconds_to_run);
}
- OrderAccess::release_store(&allocator_running, false);
+ Atomic::release_store(&allocator_running, false);
for (uint i = 0; i < nthreads; ++i) {
ThreadInVMfromNative invm(this_thread);
post.wait_with_safepoint_check(this_thread);
}
- OrderAccess::release_store(&processor_running, false);
+ Atomic::release_store(&processor_running, false);
for (uint i = 0; i < nthreads; ++i) {
ThreadInVMfromNative invm(this_thread);
post.wait_with_safepoint_check(this_thread);
--- a/test/hotspot/gtest/runtime/test_globals.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/runtime/test_globals.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,10 +54,6 @@
TEST_FLAG(ConcGCThreads, uint, 1337);
}
-TEST_VM(FlagGuard, uintx_flag) {
- TEST_FLAG(GCTaskTimeStampEntries, uint, 1337);
-}
-
TEST_VM(FlagGuard, size_t_flag) {
TEST_FLAG(HeapSizePerGCThread, size_t, 1337);
}
--- a/test/hotspot/gtest/utilities/test_globalCounter.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/utilities/test_globalCounter.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -23,7 +23,6 @@
#include "precompiled.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalCounter.inline.hpp"
@@ -48,14 +47,14 @@
_wrt_start->signal();
while (!_exit) {
GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(this);
- volatile TestData* test = OrderAccess::load_acquire(_test);
- long value = OrderAccess::load_acquire(&test->test_value);
+ volatile TestData* test = Atomic::load_acquire(_test);
+ long value = Atomic::load_acquire(&test->test_value);
ASSERT_EQ(value, GOOD_VALUE);
GlobalCounter::critical_section_end(this, cs_context);
{
GlobalCounter::CriticalSection cs(this);
- volatile TestData* test = OrderAccess::load_acquire(_test);
- long value = OrderAccess::load_acquire(&test->test_value);
+ volatile TestData* test = Atomic::load_acquire(_test);
+ long value = Atomic::load_acquire(&test->test_value);
ASSERT_EQ(value, GOOD_VALUE);
}
}
@@ -82,7 +81,7 @@
TestData* tmp = new TestData();
tmp->test_value = GOOD_VALUE;
- OrderAccess::release_store_fence(&test, tmp);
+ Atomic::release_store_fence(&test, tmp);
reader1->doit();
reader2->doit();
@@ -99,7 +98,7 @@
volatile TestData* free_tmp = test;
tmp = new TestData();
tmp->test_value = GOOD_VALUE;
- OrderAccess::release_store(&test, tmp);
+ Atomic::release_store(&test, tmp);
GlobalCounter::write_synchronize();
free_tmp->test_value = BAD_VALUE;
delete free_tmp;
--- a/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "metaprogramming/isRegisteredEnum.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalCounter.inline.hpp"
@@ -57,21 +56,21 @@
~RCUNestedThread() {}
void set_state(NestedTestState new_state) {
- OrderAccess::release_store(&_state, new_state);
+ Atomic::release_store(&_state, new_state);
}
void wait_with_state(NestedTestState new_state) {
SpinYield spinner;
- OrderAccess::release_store(&_state, new_state);
- while (!OrderAccess::load_acquire(&_proceed)) {
+ Atomic::release_store(&_state, new_state);
+ while (!Atomic::load_acquire(&_proceed)) {
spinner.wait();
}
- OrderAccess::release_store(&_proceed, false);
+ Atomic::release_store(&_proceed, false);
}
public:
NestedTestState state() const {
- return OrderAccess::load_acquire(&_state);
+ return Atomic::load_acquire(&_state);
}
void wait_for_state(NestedTestState goal) {
@@ -82,7 +81,7 @@
}
void proceed() {
- OrderAccess::release_store(&_proceed, true);
+ Atomic::release_store(&_proceed, true);
}
};
--- a/test/hotspot/gtest/utilities/test_lockFreeStack.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/utilities/test_lockFreeStack.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"
#include "threadHelper.inline.hpp"
@@ -226,21 +225,21 @@
{}
virtual void main_run() {
- OrderAccess::release_store_fence(&_ready, true);
+ Atomic::release_store_fence(&_ready, true);
while (true) {
Element* e = _from->pop();
if (e != NULL) {
_to->push(*e);
Atomic::inc(_processed);
++_local_processed;
- } else if (OrderAccess::load_acquire(_processed) == _process_limit) {
+ } else if (Atomic::load_acquire(_processed) == _process_limit) {
tty->print_cr("thread %u processed " SIZE_FORMAT, _id, _local_processed);
return;
}
}
}
- bool ready() const { return OrderAccess::load_acquire(&_ready); }
+ bool ready() const { return Atomic::load_acquire(&_ready); }
};
TEST_VM(LockFreeStackTest, stress) {
--- a/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@@ -56,14 +56,14 @@
virtual void main_run() {
size_t iterations = 0;
size_t values_changed = 0;
- while (OrderAccess::load_acquire(_continue_running) != 0) {
+ while (Atomic::load_acquire(_continue_running) != 0) {
{ ThreadBlockInVM tbiv(this); } // Safepoint check outside critical section.
++iterations;
SingleWriterSynchronizer::CriticalSection cs(_synchronizer);
- uintx value = OrderAccess::load_acquire(_synchronized_value);
+ uintx value = Atomic::load_acquire(_synchronized_value);
uintx new_value = value;
for (uint i = 0; i < reader_iterations; ++i) {
- new_value = OrderAccess::load_acquire(_synchronized_value);
+ new_value = Atomic::load_acquire(_synchronized_value);
// A reader can see either the value it first read after
// entering the critical section, or that value + 1. No other
// values are possible.
@@ -97,7 +97,7 @@
{}
virtual void main_run() {
- while (OrderAccess::load_acquire(_continue_running) != 0) {
+ while (Atomic::load_acquire(_continue_running) != 0) {
++*_synchronized_value;
_synchronizer->synchronize();
{ ThreadBlockInVM tbiv(this); } // Safepoint check.
--- a/test/hotspot/gtest/utilities/test_waitBarrier.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/gtest/utilities/test_waitBarrier.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -49,9 +49,9 @@
// Similar to how a JavaThread would stop in a safepoint.
while (!_exit) {
// Load the published tag.
- tag = OrderAccess::load_acquire(&wait_tag);
+ tag = Atomic::load_acquire(&wait_tag);
// Publish the tag this thread is going to wait for.
- OrderAccess::release_store(&_on_barrier, tag);
+ Atomic::release_store(&_on_barrier, tag);
if (_on_barrier == 0) {
SpinPause();
continue;
@@ -60,9 +60,9 @@
// Wait until we are woken.
_wait_barrier->wait(tag);
// Verify that we do not see an invalid value.
- vv = OrderAccess::load_acquire(&valid_value);
+ vv = Atomic::load_acquire(&valid_value);
ASSERT_EQ((vv & 0x1), 0);
- OrderAccess::release_store(&_on_barrier, 0);
+ Atomic::release_store(&_on_barrier, 0);
}
}
};
@@ -104,7 +104,7 @@
// Arm next tag.
wb.arm(next_tag);
// Publish tag.
- OrderAccess::release_store_fence(&wait_tag, next_tag);
+ Atomic::release_store_fence(&wait_tag, next_tag);
// Wait until threads picked up new tag.
while (reader1->_on_barrier != wait_tag ||
@@ -115,12 +115,12 @@
}
// Set an invalid value.
- OrderAccess::release_store(&valid_value, valid_value + 1); // odd
+ Atomic::release_store(&valid_value, valid_value + 1); // odd
os::naked_yield();
// Set a valid value.
- OrderAccess::release_store(&valid_value, valid_value + 1); // even
+ Atomic::release_store(&valid_value, valid_value + 1); // even
// Publish inactive tag.
- OrderAccess::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
+ Atomic::release_store_fence(&wait_tag, 0); // Stores in WB must not float up.
wb.disarm();
// Wait until threads done valid_value verification.
--- a/test/hotspot/jtreg/compiler/c2/Test6857159.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/compiler/c2/Test6857159.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,67 +27,59 @@
* @summary local schedule failed with checkcast of Thread.currentThread()
* @library /test/lib
* @modules java.base/jdk.internal.misc
- * java.management
*
- * @run driver compiler.c2.Test6857159
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ * -Xbatch -XX:CompileCommand=compileonly,compiler.c2.Test6857159$ct0::run
+ * compiler.c2.Test6857159
*/
package compiler.c2;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
+import sun.hotspot.WhiteBox;
-public class Test6857159 {
- public static void main(String[] args) throws Throwable {
- String className = Test.class.getName();
- OutputAnalyzer analyzer = ProcessTools.executeTestJvm("-Xbatch",
- "-XX:+PrintCompilation",
- "-XX:CompileOnly="+ className + "$ct::run",
- className);
- analyzer.shouldNotContain("COMPILE SKIPPED");
- analyzer.shouldContain(className + "$ct0::run (16 bytes)");
- analyzer.shouldHaveExitValue(0);
+public class Test6857159 extends Thread {
+ public static void main(String[] args) throws Exception {
+ var whiteBox = WhiteBox.getWhiteBox();
+ var method = ct0.class.getDeclaredMethod("run");
+ for (int i = 0; i < 20000; i++) {
+ Thread t = null;
+ switch (i % 3) {
+ case 0:
+ t = new ct0();
+ break;
+ case 1:
+ t = new ct1();
+ break;
+ case 2:
+ t = new ct2();
+ break;
+ }
+ t.start();
+ t.join();
+ }
+ if (!whiteBox.isMethodCompiled(method)) {
+ throw new AssertionError(method + " didn't get compiled");
+ }
}
- static class Test extends Thread {
- static class ct0 extends Test {
- public void message() {
- }
-
- public void run() {
- message();
- ct0 ct = (ct0) Thread.currentThread();
- ct.message();
- }
- }
-
- static class ct1 extends ct0 {
- public void message() {
- }
- }
+ static class ct0 extends Test6857159 {
+ public void message() { }
- static class ct2 extends ct0 {
- public void message() {
- }
- }
-
- public static void main(String[] args) throws Exception {
- for (int i = 0; i < 20000; i++) {
- Thread t = null;
- switch (i % 3) {
- case 0:
- t = new ct0();
- break;
- case 1:
- t = new ct1();
- break;
- case 2:
- t = new ct2();
- break;
- }
- t.start();
- t.join();
- }
+ public void run() {
+ message();
+ ct0 ct = (ct0) Thread.currentThread();
+ ct.message();
}
}
+
+ static class ct1 extends ct0 {
+ public void message() { }
+ }
+
+ static class ct2 extends ct0 {
+ public void message() { }
+ }
}
--- a/test/hotspot/jtreg/gc/parallel/TestPrintGCDetailsVerbose.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/gc/parallel/TestPrintGCDetailsVerbose.java Mon Nov 25 15:16:29 2019 +0000
@@ -31,7 +31,6 @@
* @requires vm.gc.Parallel
* @modules java.base/jdk.internal.misc
* @run main/othervm -Xmx50m -XX:+UseParallelGC -Xlog:gc*=trace gc.parallel.TestPrintGCDetailsVerbose
- * @run main/othervm -Xmx50m -XX:+UseParallelGC -XX:GCTaskTimeStampEntries=1 -Xlog:gc*=trace gc.parallel.TestPrintGCDetailsVerbose
*/
public class TestPrintGCDetailsVerbose {
--- a/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CompressedOops/CompressedClassPointers.java Mon Nov 25 15:16:29 2019 +0000
@@ -25,7 +25,7 @@
* @test
* @bug 8024927
* @summary Testing address of compressed class pointer space as best as possible.
- * @requires vm.bits == 64 & vm.opt.final.UseCompressedOops == true
+ * @requires vm.bits == 64 & vm.opt.final.UseCompressedOops == true & os.family != "windows"
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
--- a/test/hotspot/jtreg/runtime/cds/appcds/TestCommon.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/TestCommon.java Mon Nov 25 15:16:29 2019 +0000
@@ -662,4 +662,24 @@
}
return linkedJar;
}
+
+ // Remove all UL log messages from a JVM's STDOUT (such as those printed by -Xlog:cds)
+ static Pattern logPattern = Pattern.compile("^\\[[0-9. ]*s\\].*");
+ public static String filterOutLogs(String stdout) {
+ StringBuilder sb = new StringBuilder();
+ String prefix = "";
+ for (String line : stdout.split("\n")) {
+ if (logPattern.matcher(line).matches()) {
+ continue;
+ }
+ sb.append(prefix);
+ sb.append(line);
+ prefix = "\n";
+ }
+ if (stdout.endsWith("\n")) {
+ // String.split("A\n") returns {"A"}, not {"A", ""}.
+ sb.append("\n");
+ }
+ return sb.toString();
+ }
}
--- a/test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchivedModuleCompareTest.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchivedModuleCompareTest.java Mon Nov 25 15:16:29 2019 +0000
@@ -49,12 +49,12 @@
output = TestCommon.execOff("-cp", appJar, "PrintSystemModulesApp");
output.shouldHaveExitValue(0);
- String bootModules1 = output.getStdout();
+ String bootModules1 = TestCommon.filterOutLogs(output.getStdout());
output = TestCommon.exec(appJar, "PrintSystemModulesApp");
TestCommon.checkExec(output);
if (output.getStderr().contains("sharing")) {
- String bootModules2 = output.getStdout();
+ String bootModules2 = TestCommon.filterOutLogs(output.getStdout());
TestCommon.checkOutputStrings(bootModules1, bootModules2, ", ");
}
@@ -66,14 +66,14 @@
"--show-module-resolution",
"-version");
output.shouldHaveExitValue(0);
- String moduleResolutionOut1 = output.getStdout();
+ String moduleResolutionOut1 = TestCommon.filterOutLogs(output.getStdout());
output = TestCommon.exec(appJar,
"--show-module-resolution",
"-version");
TestCommon.checkExec(output);
if (output.getStderr().contains("sharing")) {
- String moduleResolutionOut2 = output.getStdout();
+ String moduleResolutionOut2 = TestCommon.filterOutLogs(output.getStdout());
TestCommon.checkOutputStrings(
moduleResolutionOut1, moduleResolutionOut2, "\n");
}
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/GetLocalVariable/getlocal003.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/GetLocalVariable/getlocal003.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -203,9 +203,11 @@
public double meth01() {
float f = 6.0f;
double d = 7.0;
+ instMeth();
return d + f;
}
+ native void instMeth();
native static void getMeth();
native static void checkLoc(Thread thr);
native static int getRes();
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/GetLocalVariable/getlocal003/getlocal003.cpp Thu Nov 21 17:51:11 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/unit/GetLocalVariable/getlocal003/getlocal003.cpp Mon Nov 25 15:16:29 2019 +0000
@@ -231,8 +231,44 @@
}
JNIEXPORT void JNICALL
+Java_nsk_jvmti_unit_GetLocalVariable_getlocal003_instMeth(JNIEnv *env, jobject inst) {
+ jvmtiError err;
+ jobject obj = NULL;
+
+ printf("\n Native instMeth: started\n");
+
+ // Test GetLocalInstance with native instance method instMeth() frame
+ err = jvmti->GetLocalInstance(NULL, 0, &obj);
+ printf(" Native instMeth: GetLocalInstance: %s (%d)\n", TranslateError(err), err);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("FAIL: GetLocalInstance failed to get instance for native instance method frame\n");
+ result = STATUS_FAILED;
+ }
+ if (env->IsSameObject(inst, obj) == JNI_FALSE) {
+ printf("FAIL: GetLocalInstance returned unexpected instance for native instance method frame\n");
+ result = STATUS_FAILED;
+ }
+
+ // Test GetLocalInstance with java instance method meth01() frame
+ err = jvmti->GetLocalInstance(NULL, 1, &obj);
+ printf(" Native instMeth: GetLocalInstance: %s (%d)\n", TranslateError(err), err);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("FAIL: GetLocalInstance failed to get instance for java instance method frame\n");
+ result = STATUS_FAILED;
+ }
+ if (env->IsSameObject(inst, obj) == JNI_FALSE) {
+ printf("FAIL: GetLocalInstance returned unexpected instance for java instance method frame\n");
+ result = STATUS_FAILED;
+ }
+ printf(" Native instMeth: finished\n\n");
+}
+
+JNIEXPORT void JNICALL
Java_nsk_jvmti_unit_GetLocalVariable_getlocal003_getMeth(JNIEnv *env, jclass cls) {
jvmtiError err;
+ jobject obj = NULL;
+
+ printf("\n Native getMeth: started\n");
if (jvmti == NULL) {
printf("JVMTI client was not properly loaded!\n");
@@ -261,6 +297,24 @@
TranslateError(err), err);
result = STATUS_FAILED;
}
+
+ // Test GetLocalInstance with native static method getMeth() frame
+ err = jvmti->GetLocalInstance(NULL, 0, &obj);
+ printf(" Native getMeth: GetLocalInstance: %s (%d)\n", TranslateError(err), err);
+ if (err != JVMTI_ERROR_INVALID_SLOT) {
+ printf("FAIL: GetLocalInstance failed to return JVMTI_ERROR_INVALID_SLOT for native static method frame\n");
+ result = STATUS_FAILED;
+ }
+
+ // Test GetLocalInstance with java static method run() frame
+ err = jvmti->GetLocalInstance(NULL, 1, &obj);
+ printf(" Native getMeth: GetLocalInstance: %s (%d)\n", TranslateError(err), err);
+ if (err != JVMTI_ERROR_INVALID_SLOT) {
+ printf("FAIL: GetLocalInstance failed to return JVMTI_ERROR_INVALID_SLOT for java static method frame\n");
+ result = STATUS_FAILED;
+ }
+
+ printf(" Native getMeth: finished\n\n");
fflush(stdout);
}
--- a/test/jdk/java/lang/module/customfs/ModulesInCustomFileSystem.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/jdk/java/lang/module/customfs/ModulesInCustomFileSystem.java Mon Nov 25 15:16:29 2019 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,9 +54,9 @@
private static final Path HERE = Paths.get("");
/**
- * Test exploded modules in a JAR file system.
+ * Test exploded modules in a Zip file system.
*/
- public void testExplodedModulesInJarFileSystem() throws Exception {
+ public void testExplodedModulesInZipFileSystem() throws Exception {
Path m1 = findModuleDirectory("m1");
Path m2 = findModuleDirectory("m2");
Path mlib = m1.getParent();
@@ -65,13 +65,13 @@
// create JAR file containing m1/** and m2/**
Path jar = Files.createTempDirectory(HERE, "mlib").resolve("modules.jar");
JarUtils.createJarFile(jar, mlib);
- testJarFileSystem(jar);
+ testZipFileSystem(jar);
}
/**
- * Test modular JARs in a JAR file system
+ * Test modular JARs in a Zip file system.
*/
- public void testModularJARsInJarFileSystem() throws Exception {
+ public void testModularJARsInZipFileSystem() throws Exception {
Path m1 = findModuleDirectory("m1");
Path m2 = findModuleDirectory("m2");
Path contents = Files.createTempDirectory(HERE, "contents");
@@ -81,15 +81,14 @@
// create JAR file containing m1.jar and m2.jar
Path jar = Files.createTempDirectory(HERE, "mlib").resolve("modules.jar");
JarUtils.createJarFile(jar, contents);
- testJarFileSystem(jar);
+ testZipFileSystem(jar);
}
/**
* Opens a JAR file as a file system
*/
- private void testJarFileSystem(Path jar) throws Exception {
- ClassLoader scl = ClassLoader.getSystemClassLoader();
- try (FileSystem fs = FileSystems.newFileSystem(jar, scl)) {
+ private void testZipFileSystem(Path zip) throws Exception {
+ try (FileSystem fs = FileSystems.newFileSystem(zip)) {
// ModuleFinder to find modules in top-level directory
Path top = fs.getPath("/");
ModuleFinder finder = ModuleFinder.of(top);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/net/MulticastSocket/SetLoopbackOption.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8233296
+ * @summary Check that MulticastSocket::setOption and MulticastSocket::getOption
+ * return the correct result for StandardSocketOptions.IP_MULTICAST_LOOP.
+ * The test sets a DatagramSocketImplFactory and needs to run in /othervm
+ * mode.
+ * @run testng/othervm SetLoopbackOption
+ * @run testng/othervm -Djava.net.preferIPv4Stack=true SetLoopbackOption
+ * @run testng/othervm -Djava.net.preferIPv6Addresses=true SetLoopbackOption
+ */
+
+import java.io.FileDescriptor;
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.net.DatagramSocketImpl;
+import java.net.DatagramSocketImplFactory;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.MulticastSocket;
+import java.net.NetworkInterface;
+import java.net.SocketAddress;
+import java.net.SocketException;
+import java.net.SocketOption;
+import java.net.SocketOptions;
+import java.net.StandardSocketOptions;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.testng.annotations.Test;
+import static org.testng.Assert.*;
+
+import static java.lang.System.out;
+
+public class SetLoopbackOption {
+
+ final InetAddress loopbackAddress = InetAddress.getLoopbackAddress();
+
+ @Test
+ public void run() throws Exception {
+ var bindAddress = new InetSocketAddress(loopbackAddress, 0);
+ try (MulticastSocket sock = new MulticastSocket(null)) {
+ out.println("Testing unbound socket");
+ test(sock, null);
+ out.printf("\nBinding socket to %s and testing again%n", bindAddress);
+ sock.bind(bindAddress);
+ test(sock, null);
+ }
+ TestDatagramSocketImplFactory factory = new TestDatagramSocketImplFactory();
+ DatagramSocket.setDatagramSocketImplFactory(factory);
+ try (MulticastSocket sock = new MulticastSocket(null)) {
+ out.println("\nTesting unbound socket with custom impl");
+ TestDatagramSocketImpl impl = factory.last;
+ test(sock, impl);
+ out.printf("\nBinding socket to %s and testing again%n", bindAddress);
+ sock.bind(new InetSocketAddress(loopbackAddress, 0));
+ test(sock, impl);
+ }
+ }
+
+ private void test(MulticastSocket sock, TestDatagramSocketImpl impl) throws Exception {
+ out.println("Testing with " + sock.getClass() + (impl == null ? "" : ", " + impl.getClass()));
+ var op = StandardSocketOptions.IP_MULTICAST_LOOP;
+ var opId = SocketOptions.IP_MULTICAST_LOOP;
+ boolean enable = sock.getOption(op);
+ assertTrue(enable, "Initial Value for " + op);
+ boolean disable = sock.getLoopbackMode();
+ assertFalse(disable, "Initial Value for getLoopbackMode()");
+ if (impl != null) {
+ assertFalse((Boolean)impl.getOption(opId));
+ assertTrue((Boolean)impl.getOption(op));
+ }
+
+ out.println("Setting " + op + " to " + false);
+ if (impl != null) {
+ // allows setOption(SocketOption, Object) to be called
+ impl.allowAllSetOptions(true);
+ }
+ sock.setOption(op, false);
+ enable = sock.getOption(op);
+ assertFalse(enable, "Value for " + op);
+ disable = sock.getLoopbackMode();
+ assertTrue(disable, "Value for getLoopbackMode()");
+ if (impl != null) {
+ assertTrue((Boolean)impl.getOption(opId));
+ assertFalse((Boolean)impl.getOption(op));
+ }
+ out.println("Setting " + op + " to " + true);
+ sock.setOption(op, true);
+ enable = sock.getOption(op);
+ assertTrue(enable, "Value for " + op);
+ disable = sock.getLoopbackMode();
+ assertFalse(disable, "Value for getLoopbackMode()");
+ if (impl != null) {
+ assertFalse((Boolean)impl.getOption(opId));
+ assertTrue((Boolean)impl.getOption(op));
+ }
+
+ out.println("Calling setLoopbackMode(true)");
+ if (impl != null) {
+ // for backward compatibility reason, setLoopbackMode
+ // should call setOption(int, Object), not setOption(SocketOption, Object)
+ // Make sure that an exception is thrown if the latter is ever called.
+ impl.allowAllSetOptions(false);
+ }
+ sock.setLoopbackMode(true);
+ enable = sock.getOption(op);
+ assertFalse(enable, "Value for " + op);
+ disable = sock.getLoopbackMode();
+ assertTrue(disable, "Value for getLoopbackMode()");
+ if (impl != null) {
+ assertTrue((Boolean)impl.getOption(opId));
+ assertFalse((Boolean)impl.getOption(op));
+ }
+ out.println("Calling setLoopbackMode(false)");
+ sock.setLoopbackMode(false);
+ enable = sock.getOption(op);
+ assertTrue(enable, "Value for " + op);
+ disable = sock.getLoopbackMode();
+ assertFalse(disable, "Value for getLoopbackMode()");
+ if (impl != null) {
+ assertFalse((Boolean)impl.getOption(opId));
+ assertTrue((Boolean)impl.getOption(op));
+ }
+ }
+
+ // Used to attempt to control what is called/passed to the impl.
+ static class TestDatagramSocketImplFactory implements DatagramSocketImplFactory {
+ TestDatagramSocketImpl last;
+ public synchronized DatagramSocketImpl createDatagramSocketImpl() {
+ TestDatagramSocketImpl last = this.last;
+ if (last == null) {
+ return (last = this.last = new TestDatagramSocketImpl());
+ } else {
+ throw new AssertionError("Only one instance should be created");
+ }
+ }
+ }
+
+ // Used to attempt to control what is called/passed to the impl.
+ static class TestDatagramSocketImpl extends DatagramSocketImpl {
+ InetAddress address;
+ private boolean allowAllSetOptions;
+
+ @Override
+ protected void create() throws SocketException {
+ legacyOptions.put(SocketOptions.IP_MULTICAST_LOOP, false);
+ options.put(StandardSocketOptions.IP_MULTICAST_LOOP, true);
+ }
+
+ final Map<Integer, Object> legacyOptions = new HashMap<>();
+ final Map<SocketOption<?>, Object> options = new HashMap<>();
+
+ static <T> T shouldNotComeHere() {
+ throw new AssertionError("should not come here");
+ }
+
+ @Override
+ protected void bind(int lport, InetAddress laddr) throws SocketException {
+ this.localPort = (lport == 0 ? 6789 : lport);
+ this.address = laddr;
+ }
+
+ @Override
+ protected void send(DatagramPacket p) throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected int peek(InetAddress i) throws IOException {
+ return shouldNotComeHere();
+ }
+
+ @Override
+ protected int peekData(DatagramPacket p) throws IOException {
+ return shouldNotComeHere();
+ }
+
+ @Override
+ protected void receive(DatagramPacket p) throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected void setTTL(byte ttl) throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected byte getTTL() throws IOException {
+ return shouldNotComeHere();
+ }
+
+ @Override
+ protected void setTimeToLive(int ttl) throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected int getTimeToLive() throws IOException {
+ return shouldNotComeHere();
+ }
+
+ @Override
+ protected void join(InetAddress inetaddr) throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected void leave(InetAddress inetaddr) throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf)
+ throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected void leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf)
+ throws IOException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected void close() {
+
+ }
+
+ @Override
+ public void setOption(int optID, Object value) throws SocketException {
+ legacyOptions.put(optID, value);
+ if (optID == SocketOptions.IP_MULTICAST_LOOP) {
+ boolean disable = (Boolean) value;
+ options.put(StandardSocketOptions.IP_MULTICAST_LOOP, !disable);
+ }
+ }
+
+ @Override
+ public Object getOption(int optID) throws SocketException {
+ return legacyOptions.get(optID);
+ }
+
+ @Override
+ protected Set<SocketOption<?>> supportedOptions() {
+ return Set.of(StandardSocketOptions.IP_MULTICAST_LOOP);
+ }
+
+ @Override
+ protected void connect(InetAddress address, int port) throws SocketException {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected void disconnect() {
+ shouldNotComeHere();
+ }
+
+ @Override
+ protected FileDescriptor getFileDescriptor() {
+ return super.getFileDescriptor();
+ }
+
+ @Override
+ protected <T> void setOption(SocketOption<T> name, T value) throws IOException {
+ if (!allowAllSetOptions) shouldNotComeHere();
+ options.put(name, value);
+ if (name.equals(StandardSocketOptions.IP_MULTICAST_LOOP)) {
+ boolean enable = (Boolean)value;
+ legacyOptions.put(SocketOptions.IP_MULTICAST_LOOP, !enable);
+ }
+ }
+
+ @Override
+ protected <T> T getOption(SocketOption<T> name) throws IOException {
+ return (T) options.get(name);
+ }
+
+ public void allowAllSetOptions(boolean allow) {
+ this.allowAllSetOptions = allow;
+ }
+ }
+
+ public static void main (String args[]) throws Exception {
+ new SetLoopbackOption().run();
+ }
+}
--- a/test/jdk/java/net/SocketOption/OptionsTest.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/jdk/java/net/SocketOption/OptionsTest.java Mon Nov 25 15:16:29 2019 +0000
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 8036979 8072384 8044773 8225214
+ * @bug 8036979 8072384 8044773 8225214 8233296
* @library /test/lib
* @requires !vm.graal.enabled
* @run main/othervm -Xcheck:jni OptionsTest
@@ -321,7 +321,7 @@
} else if (option.equals(StandardSocketOptions.IP_MULTICAST_TTL)) {
return Integer.valueOf(socket.getTimeToLive());
} else if (option.equals(StandardSocketOptions.IP_MULTICAST_LOOP)) {
- return Boolean.valueOf(socket.getLoopbackMode());
+ return !Boolean.valueOf(socket.getLoopbackMode());
} else {
throw new RuntimeException("unexpected socket option");
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/recordingstream/TestStoppedRecording.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.jfr.api.consumer.recordingstream;
+
+import java.util.concurrent.CountDownLatch;
+
+import jdk.jfr.Event;
+import jdk.jfr.FlightRecorder;
+import jdk.jfr.consumer.RecordingStream;
+
+/**
+ * @test
+ * @summary Tests that a RecordingStream is closed if the underlying Recording
+ * is stopped.
+ * @key jfr
+ * @requires vm.hasJFR
+ * @library /test/lib
+ * @run main/othervm jdk.jfr.api.consumer.recordingstream.TestStoppedRecording
+ */
+public class TestStoppedRecording {
+
+ private static final class StopEvent extends Event {
+ }
+
+ public static void main(String... args) throws Exception {
+ CountDownLatch latch = new CountDownLatch(1);
+ try (RecordingStream rs = new RecordingStream()) {
+ rs.onEvent(e -> {
+ FlightRecorder.getFlightRecorder().getRecordings().get(0).stop();
+ });
+ rs.onClose(() -> {
+ latch.countDown();
+ });
+ rs.startAsync();
+ StopEvent stop = new StopEvent();
+ stop.commit();
+ latch.await();
+ }
+ }
+}
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/streaming/TestCrossProcessStreaming.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.jfr.api.consumer.streaming;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.sun.tools.attach.VirtualMachine;
+import jdk.jfr.Event;
+import jdk.jfr.Recording;
+import jdk.jfr.consumer.EventStream;
+import jdk.test.lib.Asserts;
+import jdk.test.lib.process.ProcessTools;
+
+/**
+ * @test
+ * @summary Test scenario where JFR event producer is in a different process
+ * with respect to the JFR event stream consumer.
+ * @key jfr
+ * @requires vm.hasJFR
+ * @library /test/lib /test/jdk
+ * @modules jdk.attach
+ * jdk.jfr
+ * @run main jdk.jfr.api.consumer.streaming.TestCrossProcessStreaming
+ */
+
+public class TestCrossProcessStreaming {
+ static String MAIN_STARTED_TOKEN = "MAIN_STARTED";
+
+ public static class TestEvent extends Event {
+ }
+
+ public static class ResultEvent extends Event {
+ int nrOfEventsProduced;
+ }
+
+ public static class EventProducer {
+ public static void main(String... args) throws Exception {
+ CrossProcessSynchronizer sync = new CrossProcessSynchronizer();
+ log(MAIN_STARTED_TOKEN);
+
+ long pid = ProcessHandle.current().pid();
+ int nrOfEvents = 0;
+ boolean exitRequested = false;
+ while (!exitRequested) {
+ TestEvent e = new TestEvent();
+ e.commit();
+ nrOfEvents++;
+ if (nrOfEvents % 1000 == 0) {
+ Thread.sleep(100);
+ exitRequested = CrossProcessSynchronizer.exitRequested(pid);
+ }
+ }
+
+ ResultEvent re = new ResultEvent();
+ re.nrOfEventsProduced = nrOfEvents;
+ re.commit();
+
+ log("Number of TestEvents generated: " + nrOfEvents);
+ }
+ }
+
+
+ static class CrossProcessSynchronizer {
+ static void requestExit(long pid) throws Exception {
+ Files.createFile(file(pid));
+ }
+
+ static boolean exitRequested(long pid) throws Exception {
+ return Files.exists(file(pid));
+ }
+
+ static Path file(long pid) {
+ return Paths.get(".", "exit-requested-" + pid);
+ }
+ }
+
+
+ static class ConsumedEvents {
+ AtomicInteger total = new AtomicInteger(0);
+ AtomicInteger whileProducerAlive = new AtomicInteger(0);
+ AtomicInteger produced = new AtomicInteger(-1);
+ }
+
+
+ public static void main(String... args) throws Exception {
+ Process p = startProducerProcess("normal");
+ String repo = getJfrRepository(p);
+
+ ConsumedEvents ce = consumeEvents(p, repo);
+
+ p.waitFor();
+ Asserts.assertEquals(p.exitValue(), 0,
+ "Process exited abnormally, exitValue = " + p.exitValue());
+
+ Asserts.assertEquals(ce.total.get(), ce.produced.get(), "Some events were lost");
+
+ // Expected that some portion of events emitted by the producer are delivered
+ // to the consumer while producer is still alive, at least one event for certain.
+ Asserts.assertLTE(1, ce.whileProducerAlive.get(),
+ "Too few events are delivered while producer is alive");
+ }
+
+ static Process startProducerProcess(String extraParam) throws Exception {
+ ProcessBuilder pb =
+ ProcessTools.createJavaProcessBuilder(false,
+ "-XX:StartFlightRecording",
+ EventProducer.class.getName(),
+ extraParam);
+ Process p = ProcessTools.startProcess("Event-Producer", pb,
+ line -> line.equals(MAIN_STARTED_TOKEN),
+ 0, TimeUnit.SECONDS);
+ return p;
+ }
+
+ static String getJfrRepository(Process p) throws Exception {
+ String repo = null;
+
+ // It may take a little bit of time for the observed process to set the property after
+ // the process starts, therefore read the property in a loop.
+ while (repo == null) {
+ VirtualMachine vm = VirtualMachine.attach(String.valueOf(p.pid()));
+ repo = vm.getSystemProperties().getProperty("jdk.jfr.repository");
+ vm.detach();
+ }
+
+ log("JFR repository = " + repo);
+ return repo;
+ }
+
+ static ConsumedEvents consumeEvents(Process p, String repo) throws Exception {
+ ConsumedEvents result = new ConsumedEvents();
+
+ // wait for couple of JFR stream flushes before concluding the test
+ CountDownLatch flushed = new CountDownLatch(2);
+
+ // consume events produced by another process via event stream
+ try (EventStream es = EventStream.openRepository(Paths.get(repo))) {
+ es.onEvent(TestEvent.class.getName(),
+ e -> {
+ result.total.incrementAndGet();
+ if (p.isAlive()) {
+ result.whileProducerAlive.incrementAndGet();
+ }
+ });
+
+ es.onEvent(ResultEvent.class.getName(),
+ e -> result.produced.set(e.getInt("nrOfEventsProduced")));
+
+ es.onFlush( () -> flushed.countDown() );
+
+ // Setting start time to the beginning of the Epoch is a good way to start
+ // reading the stream from the very beginning.
+ es.setStartTime(Instant.EPOCH);
+ es.startAsync();
+
+ // wait for a certain number of flush events before concluding the test case
+ flushed.await();
+ CrossProcessSynchronizer.requestExit(p.pid());
+
+ es.awaitTermination();
+ }
+
+ return result;
+ }
+
+ private static final void log(String msg) {
+ System.out.println(msg);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/streaming/TestInProcessMigration.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.jfr.api.consumer.streaming;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.concurrent.CountDownLatch;
+
+import jdk.jfr.Event;
+import jdk.jfr.Recording;
+import jdk.jfr.consumer.EventStream;
+import jdk.jfr.jcmd.JcmdHelper;
+
+/**
+ * @test
+ * @summary Verifies that it is possible to stream from an in-process repository
+ * that is being moved.
+ * @key jfr
+ * @requires vm.hasJFR
+ * @library /test/lib /test/jdk
+ * @run main/othervm jdk.jfr.api.consumer.streaming.TestInProcessMigration
+ */
+public class TestInProcessMigration {
+ static class MigrationEvent extends Event {
+ int id;
+ }
+
+ public static void main(String... args) throws Exception {
+ Path newRepository = Paths.get("new-repository");
+ CountDownLatch event1 = new CountDownLatch(1);
+ CountDownLatch event2 = new CountDownLatch(1);
+
+ try (EventStream es = EventStream.openRepository()) {
+ es.setStartTime(Instant.EPOCH);
+ es.onEvent(e -> {
+ System.out.println(e);
+ if (e.getInt("id") == 1) {
+ event1.countDown();
+ }
+ if (e.getInt("id") == 2) {
+ event2.countDown();
+ }
+ });
+ es.startAsync();
+ System.out.println("Started es.startAsync()");
+
+ try (Recording r = new Recording()) {
+ r.setFlushInterval(Duration.ofSeconds(1));
+ r.start();
+ // Chunk in default repository
+ MigrationEvent e1 = new MigrationEvent();
+ e1.id = 1;
+ e1.commit();
+ event1.await();
+ System.out.println("Passed the event1.await()");
+ JcmdHelper.jcmd("JFR.configure", "repositorypath=" + newRepository.toAbsolutePath());
+ // Chunk in new repository
+ MigrationEvent e2 = new MigrationEvent();
+ e2.id = 2;
+ e2.commit();
+ r.stop();
+ event2.await();
+ System.out.println("Passed the event2.await()");
+ // Verify that it happened in new repository
+ if (!Files.exists(newRepository)) {
+ throw new AssertionError("Could not find repository " + newRepository);
+ }
+ System.out.println("Listing contents in new repository:");
+ boolean empty = true;
+ for (Path p : Files.newDirectoryStream(newRepository)) {
+ System.out.println(p.toAbsolutePath());
+ empty = false;
+ }
+ System.out.println();
+ if (empty) {
+ throw new AssertionError("Could not find contents in new repository location " + newRepository);
+ }
+ }
+ }
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/streaming/TestJVMCrash.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.jfr.api.consumer.streaming;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import jdk.jfr.consumer.EventStream;
+
+/**
+ * @test
+ * @summary Test that a stream ends/closes when an application crashes.
+ * @key jfr
+ * @requires vm.hasJFR
+ * @library /test/lib /test/jdk
+ * @modules jdk.jfr jdk.attach java.base/jdk.internal.misc
+ *
+ * @run main/othervm jdk.jfr.api.consumer.streaming.TestJVMCrash
+ */
+public class TestJVMCrash {
+
+ public static void main(String... args) throws Exception {
+ int id = 1;
+ while (true) {
+ try (TestProcess process = new TestProcess("crash-application-" + id++)) {
+ AtomicInteger eventCounter = new AtomicInteger();
+ try (EventStream es = EventStream.openRepository(process.getRepository())) {
+ // Start from first event in repository
+ es.setStartTime(Instant.EPOCH);
+ es.onEvent(e -> {
+ if (eventCounter.incrementAndGet() == TestProcess.NUMBER_OF_EVENTS) {
+ process.crash();
+ }
+ });
+ es.startAsync();
+ // If crash corrupts chunk in repository, retry in 30 seconds
+ es.awaitTermination(Duration.ofSeconds(30));
+ if (eventCounter.get() == TestProcess.NUMBER_OF_EVENTS) {
+ return;
+ }
+ System.out.println("Incorrect event count. Retrying...");
+ }
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/streaming/TestJVMExit.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.jfr.api.consumer.streaming;
+
+import java.time.Instant;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import jdk.jfr.consumer.EventStream;
+
+/**
+ * @test
+ * @summary Test that a stream ends/closes when an application exits.
+ * @key jfr
+ * @requires vm.hasJFR
+ * @library /test/lib /test/jdk
+ * @modules jdk.jfr jdk.attach java.base/jdk.internal.misc
+ *
+ * @run main/othervm jdk.jfr.api.consumer.streaming.TestJVMExit
+ */
+public class TestJVMExit {
+
+ public static void main(String... args) throws Exception {
+ try (TestProcess process = new TestProcess("exit-application")) {
+ AtomicInteger eventCounter = new AtomicInteger();
+ try (EventStream es = EventStream.openRepository(process.getRepository())) {
+ // Start from first event in repository
+ es.setStartTime(Instant.EPOCH);
+ es.onEvent(e -> {
+ if (eventCounter.incrementAndGet() == TestProcess.NUMBER_OF_EVENTS) {
+ process.exit();
+ }
+ });
+ es.start();
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/streaming/TestOutOfProcessMigration.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.jfr.api.consumer.streaming;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Instant;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import jdk.jfr.consumer.EventStream;
+import jdk.test.lib.dcmd.CommandExecutor;
+import jdk.test.lib.dcmd.PidJcmdExecutor;
+
+/**
+ * @test
+ * @summary Verifies that an out-of-process stream is closed when the repository
+ * is changed.
+ * @key jfr
+ * @requires vm.hasJFR
+ * @library /test/lib /test/jdk
+ * @modules jdk.jfr jdk.attach java.base/jdk.internal.misc
+ * @run main/othervm jdk.jfr.api.consumer.streaming.TestOutOfProcessMigration
+ */
+public class TestOutOfProcessMigration {
+ public static void main(String... args) throws Exception {
+ try (TestProcess process = new TestProcess("application")) {
+ AtomicInteger eventCounter = new AtomicInteger();
+ Path newRepo = Paths.get("new-repository").toAbsolutePath();
+ try (EventStream es = EventStream.openRepository(process.getRepository())) {
+ // Start from first event in repository
+ es.setStartTime(Instant.EPOCH);
+ es.onEvent(e -> {
+ if (eventCounter.incrementAndGet() == TestProcess.NUMBER_OF_EVENTS) {
+ System.out.println("Changing repository to " + newRepo + " ...");
+ CommandExecutor executor = new PidJcmdExecutor(String.valueOf(process.pid()));
+ // This should close stream
+ executor.execute("JFR.configure repositorypath=" + newRepo);
+ }
+ });
+ es.start();
+ process.exit();
+ // Wait for process to die, so files are cleaned up
+ process.awaitDeath();
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/consumer/streaming/TestProcess.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package jdk.jfr.api.consumer.streaming;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+
+import jdk.internal.misc.Unsafe;
+import jdk.jfr.Event;
+import jdk.test.lib.process.ProcessTools;
+
+import com.sun.tools.attach.VirtualMachine;
+
+/**
+ * Class that emits NUMBER_OF_EVENTS events and then awaits a crash or exit
+ *
+ * Requires jdk.attach module.
+ *
+ */
+public final class TestProcess implements AutoCloseable {
+
+ private static class TestEvent extends Event {
+ }
+
+ public final static int NUMBER_OF_EVENTS = 10;
+
+ private final Process process;
+ private final Path path;
+
+ public TestProcess(String name) throws IOException {
+ this.path = Paths.get("action-" + System.currentTimeMillis()).toAbsolutePath();
+ String[] args = {
+ "--add-exports",
+ "java.base/jdk.internal.misc=ALL-UNNAMED",
+ "-XX:StartFlightRecording:settings=none",
+ TestProcess.class.getName(), path.toString()
+ };
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(false, args);
+ process = ProcessTools.startProcess(name, pb);
+ }
+
+ public static void main(String... args) throws Exception {
+ for (int i = 0; i < NUMBER_OF_EVENTS; i++) {
+ TestEvent e = new TestEvent();
+ e.commit();
+ }
+
+ Path path = Paths.get(args[0]);
+ while (true) {
+ try {
+ String action = Files.readString(path);
+ if ("crash".equals(action)) {
+ System.out.println("About to crash...");
+ Unsafe.getUnsafe().putInt(0L, 0);
+ }
+ if ("exit".equals(action)) {
+ System.out.println("About to exit...");
+ System.exit(0);
+ }
+ } catch (Exception ioe) {
+ // Ignore
+ }
+ takeNap();
+ }
+ }
+
+ public Path getRepository() {
+ while (true) {
+ try {
+ VirtualMachine vm = VirtualMachine.attach(String.valueOf(process.pid()));
+ Properties p = vm.getSystemProperties();
+ vm.detach();
+ String repo = (String) p.get("jdk.jfr.repository");
+ if (repo != null) {
+ return Paths.get(repo);
+ }
+ } catch (Exception e) {
+ System.out.println("Attach failed: " + e.getMessage());
+ System.out.println("Retrying...");
+ }
+ takeNap();
+ }
+ }
+
+ private static void takeNap() {
+ try {
+ Thread.sleep(10);
+ } catch (InterruptedException ie) {
+ // ignore
+ }
+ }
+
+ public void crash() {
+ try {
+ Files.writeString(path, "crash");
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+ }
+
+ public void exit() {
+ try {
+ Files.writeString(path, "exit");
+ } catch (IOException ioe) {
+ ioe.printStackTrace();
+ }
+ }
+
+ public long pid() {
+ return process.pid();
+ }
+
+ @Override
+ public void close() throws Exception {
+ try {
+ if (path != null) {
+ Files.delete(path);
+ }
+ } catch(NoSuchFileException nfe) {
+ // ignore
+ }
+ }
+
+ public void awaitDeath() {
+ while (process.isAlive()) {
+ takeNap();
+ }
+ }
+}
--- a/test/jdk/jdk/jfr/api/consumer/streaming/TestRepositoryMigration.java Thu Nov 21 17:51:11 2019 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.api.consumer.streaming;
-
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.concurrent.CountDownLatch;
-
-import jdk.jfr.Event;
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.EventStream;
-import jdk.jfr.jcmd.JcmdHelper;
-
-/**
- * @test
- * @summary Verifies that is possible to stream from a repository that is being
- * moved.
- * @key jfr
- * @requires vm.hasJFR
- * @library /test/lib /test/jdk
- * @run main/othervm jdk.jfr.api.consumer.streaming.TestRepositoryMigration
- */
-public class TestRepositoryMigration {
- static class MigrationEvent extends Event {
- int id;
- }
-
- public static void main(String... args) throws Exception {
- Path newRepository = Paths.get("new-repository");
- CountDownLatch event1 = new CountDownLatch(1);
- CountDownLatch event2 = new CountDownLatch(1);
-
- try (EventStream es = EventStream.openRepository()) {
- es.setStartTime(Instant.EPOCH);
- es.onEvent(e -> {
- System.out.println(e);
- if (e.getInt("id") == 1) {
- event1.countDown();
- }
- if (e.getInt("id") == 2) {
- event2.countDown();
- }
- });
- es.startAsync();
- System.out.println("Started es.startAsync()");
-
- try (Recording r = new Recording()) {
- r.setFlushInterval(Duration.ofSeconds(1));
- r.start();
- // Chunk in default repository
- MigrationEvent e1 = new MigrationEvent();
- e1.id = 1;
- e1.commit();
- event1.await();
- System.out.println("Passed the event1.await()");
- JcmdHelper.jcmd("JFR.configure", "repositorypath=" + newRepository.toAbsolutePath());
- // Chunk in new repository
- MigrationEvent e2 = new MigrationEvent();
- e2.id = 2;
- e2.commit();
- r.stop();
- event2.await();
- System.out.println("Passed the event2.await()");
- // Verify that it happened in new repository
- if (!Files.exists(newRepository)) {
- throw new AssertionError("Could not find repository " + newRepository);
- }
- System.out.println("Listing contents in new repository:");
- boolean empty = true;
- for (Path p : Files.newDirectoryStream(newRepository)) {
- System.out.println(p.toAbsolutePath());
- empty = false;
- }
- System.out.println();
- if (empty) {
- throw new AssertionError("Could not find contents in new repository location " + newRepository);
- }
- }
- }
- }
-
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/event/oldobject/TestObjectAge.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package jdk.jfr.event.oldobject;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+
+import jdk.jfr.Recording;
+import jdk.jfr.consumer.RecordedEvent;
+import jdk.jfr.internal.test.WhiteBox;
+import jdk.test.lib.jfr.EventNames;
+import jdk.test.lib.jfr.Events;
+
+/**
+ * @test
+ * @key jfr
+ * @requires vm.hasJFR
+ * @requires vm.gc == "null"
+ * @library /test/lib /test/jdk
+ * @modules jdk.jfr/jdk.jfr.internal.test
+ * @run main/othervm -XX:TLABSize=2k jdk.jfr.event.oldobject.TestObjectAge
+ */
+public class TestObjectAge {
+
+ public final static List<Object[]> leak = new ArrayList<>(OldObjects.MIN_SIZE);
+
+ public static void main(String[] args) throws Exception {
+ WhiteBox.setWriteAllObjectSamples(true);
+
+ try (Recording recording = new Recording()) {
+ recording.enable(EventNames.OldObjectSample).withStackTrace().with("cutoff", "1 h");
+ recording.start();
+
+ // Allocate array to trigger sampling code path for interpreter / c1
+ for (int i = 0; i < OldObjects.MIN_SIZE; i++) {
+ leak.add(new Object[0]);
+ }
+ recording.stop();
+
+ List<RecordedEvent> events = Events.fromRecording(recording);
+ // OK if we sometimes don't get an event
+ for (RecordedEvent event : events) {
+ Duration objectAge = event.getDuration("objectAge");
+ // Sanity check
+ if (objectAge.isNegative() || objectAge.toMinutes() > 60) {
+ throw new Exception("Object age " + objectAge + " is not reasonable");
+ }
+ }
+ }
+ }
+}
--- a/test/jdk/jdk/net/Sockets/QuickAckTest.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/jdk/jdk/net/Sockets/QuickAckTest.java Mon Nov 25 15:16:29 2019 +0000
@@ -26,7 +26,7 @@
* @bug 8145635
* @summary Add TCP_QUICKACK socket option
* @modules jdk.net
- * @run main QuickAckTest
+ * @run main/othervm QuickAckTest
*/
import java.io.IOException;
import java.net.DatagramSocket;
--- a/test/jdk/jdk/nio/zipfs/jarfs/JFSTester.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/jdk/jdk/nio/zipfs/jarfs/JFSTester.java Mon Nov 25 15:16:29 2019 +0000
@@ -24,7 +24,7 @@
/*
* @test
* @bug 8164389 8222440
- * @summary walk entries in a jdk.nio.zipfs.JarFileSystem
+ * @summary walk entries in a multi-release jar file via jdk.zipfs
* @library /lib/testlibrary/java/util/jar
* @modules jdk.jartool
* jdk.zipfs
--- a/test/jdk/jdk/nio/zipfs/jarfs/MultiReleaseJarTest.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/jdk/jdk/nio/zipfs/jarfs/MultiReleaseJarTest.java Mon Nov 25 15:16:29 2019 +0000
@@ -50,6 +50,8 @@
public class MultiReleaseJarTest {
final private int MAJOR_VERSION = Runtime.version().feature();
+ private static final String PROPERTY_RELEASE_VERSION = "releaseVersion";
+ private static final String PROPERTY_MULTI_RELEASE = "multi-release";
final private String userdir = System.getProperty("user.dir",".");
final private CreateMultiReleaseTestJars creator = new CreateMultiReleaseTestJars();
@@ -88,56 +90,56 @@
@DataProvider(name="strings")
public Object[][] createStrings() {
return new Object[][]{
- {"runtime", MAJOR_VERSION},
- {null, 8},
- {"8", 8},
- {"9", 9},
- {Integer.toString(MAJOR_VERSION), MAJOR_VERSION},
- {Integer.toString(MAJOR_VERSION+1), MAJOR_VERSION},
- {"50", MAJOR_VERSION}
+ {"runtime", MAJOR_VERSION, "8"},
+ {null, 8, Integer.toString(MAJOR_VERSION)},
+ {"8", 8, "9"},
+ {"9", 9, null},
+ {Integer.toString(MAJOR_VERSION), MAJOR_VERSION, "8"},
+ {Integer.toString(MAJOR_VERSION+1), MAJOR_VERSION, "8"},
+ {"50", MAJOR_VERSION, "9"}
};
}
@DataProvider(name="integers")
public Object[][] createIntegers() {
return new Object[][] {
- {null, 8},
- {Integer.valueOf(8), 8},
- {Integer.valueOf(9), 9},
- {Integer.valueOf(MAJOR_VERSION), MAJOR_VERSION},
- {Integer.valueOf(MAJOR_VERSION + 1), MAJOR_VERSION},
- {Integer.valueOf(100), MAJOR_VERSION}
+ {null, 8, Integer.valueOf(9)},
+ {Integer.valueOf(8), 8, Integer.valueOf(9)},
+ {Integer.valueOf(9), 9, Integer.valueOf(MAJOR_VERSION)},
+ {Integer.valueOf(MAJOR_VERSION), MAJOR_VERSION, Integer.valueOf(8)},
+ {Integer.valueOf(MAJOR_VERSION + 1), MAJOR_VERSION, null},
+ {Integer.valueOf(100), MAJOR_VERSION, Integer.valueOf(8)}
};
}
@DataProvider(name="versions")
public Object[][] createVersions() {
return new Object[][] {
- {null, 8},
- {Version.parse("8"), 8},
- {Version.parse("9"), 9},
- {Version.parse(Integer.toString(MAJOR_VERSION)), MAJOR_VERSION},
- {Version.parse(Integer.toString(MAJOR_VERSION) + 1), MAJOR_VERSION},
- {Version.parse("100"), MAJOR_VERSION}
+ {null, 8, Version.parse("14")},
+ {Version.parse("8"), 8, Version.parse("7")},
+ {Version.parse("9"), 9, null},
+ {Version.parse(Integer.toString(MAJOR_VERSION)), MAJOR_VERSION, Version.parse("8")},
+ {Version.parse(Integer.toString(MAJOR_VERSION) + 1), MAJOR_VERSION, Version.parse("9")},
+ {Version.parse("100"), MAJOR_VERSION, Version.parse("14")}
};
}
@DataProvider(name="invalidVersions")
public Object[][] invalidVersions() {
return new Object[][] {
- {Map.of("releaseVersion", "")},
- {Map.of("releaseVersion", "invalid")},
- {Map.of("releaseVersion", "0")},
- {Map.of("releaseVersion", "-1")},
- {Map.of("releaseVersion", "11.0.1")},
- {Map.of("releaseVersion", new ArrayList<Long>())},
- {Map.of("releaseVersion", Integer.valueOf(0))},
- {Map.of("releaseVersion", Integer.valueOf(-1))}
+ {Map.of(PROPERTY_RELEASE_VERSION, "")},
+ {Map.of(PROPERTY_RELEASE_VERSION, "invalid")},
+ {Map.of(PROPERTY_RELEASE_VERSION, "0")},
+ {Map.of(PROPERTY_RELEASE_VERSION, "-1")},
+ {Map.of(PROPERTY_RELEASE_VERSION, "11.0.1")},
+ {Map.of(PROPERTY_RELEASE_VERSION, new ArrayList<Long>())},
+ {Map.of(PROPERTY_RELEASE_VERSION, Integer.valueOf(0))},
+ {Map.of(PROPERTY_RELEASE_VERSION, Integer.valueOf(-1))}
};
}
- // Not the best test but all I can do since ZipFileSystem and JarFileSystem
- // are not public, so I can't use (fs instanceof ...)
+ // Not the best test but all I can do since ZipFileSystem
+ // is not public, so I can't use (fs instanceof ...)
@Test
public void testNewFileSystem() throws Exception {
Map<String,String> env = new HashMap<>();
@@ -145,7 +147,7 @@
try (FileSystem fs = FileSystems.newFileSystem(mruri, env)) {
Assert.assertTrue(readAndCompare(fs, 8));
}
- env.put("releaseVersion", "runtime");
+ env.put(PROPERTY_RELEASE_VERSION, "runtime");
// a configuration and jar file is multi-release
try (FileSystem fs = FileSystems.newFileSystem(mruri, env)) {
Assert.assertTrue(readAndCompare(fs, MAJOR_VERSION));
@@ -163,28 +165,38 @@
}
@Test(dataProvider="strings")
- public void testStrings(String value, int expected) throws Throwable {
- stringEnv.put("releaseVersion", value);
+ public void testStrings(String value, int expected, String ignorable) throws Throwable {
+ stringEnv.clear();
+ stringEnv.put(PROPERTY_RELEASE_VERSION, value);
+ // we check that values for "multi-release" are ignored
+ stringEnv.put(PROPERTY_MULTI_RELEASE, ignorable);
runTest(stringEnv, expected);
}
@Test(dataProvider="integers")
- public void testIntegers(Integer value, int expected) throws Throwable {
- integerEnv.put("releaseVersion", value);
+ public void testIntegers(Integer value, int expected, Integer ignorable) throws Throwable {
+ integerEnv.clear();
+ integerEnv.put(PROPERTY_RELEASE_VERSION, value);
+ // we check that values for "multi-release" are ignored
+ integerEnv.put(PROPERTY_MULTI_RELEASE, ignorable);
runTest(integerEnv, expected);
}
@Test(dataProvider="versions")
- public void testVersions(Version value, int expected) throws Throwable {
- versionEnv.put("releaseVersion", value);
+ public void testVersions(Version value, int expected, Version ignorable) throws Throwable {
+ versionEnv.clear();
+ versionEnv.put(PROPERTY_RELEASE_VERSION, value);
+ // we check that values for "multi-release" are ignored
+ versionEnv.put(PROPERTY_MULTI_RELEASE, ignorable);
runTest(versionEnv, expected);
}
@Test
public void testShortJar() throws Throwable {
- integerEnv.put("releaseVersion", Integer.valueOf(MAJOR_VERSION));
+ integerEnv.clear();
+ integerEnv.put(PROPERTY_RELEASE_VERSION, Integer.valueOf(MAJOR_VERSION));
runTest(smruri, integerEnv, MAJOR_VERSION);
- integerEnv.put("releaseVersion", Integer.valueOf(9));
+ integerEnv.put(PROPERTY_RELEASE_VERSION, Integer.valueOf(9));
runTest(smruri, integerEnv, 8);
}
@@ -205,23 +217,23 @@
// The following tests are for backwards compatibility to validate that
// the original property still works
@Test(dataProvider="strings")
- public void testMRStrings(String value, int expected) throws Throwable {
+ public void testMRStrings(String value, int expected, String ignorable) throws Throwable {
stringEnv.clear();
- stringEnv.put("multi-release", value);
+ stringEnv.put(PROPERTY_MULTI_RELEASE, value);
runTest(stringEnv, expected);
}
@Test(dataProvider="integers")
- public void testMRIntegers(Integer value, int expected) throws Throwable {
+ public void testMRIntegers(Integer value, int expected, Integer ignorable) throws Throwable {
integerEnv.clear();
- integerEnv.put("multi-release", value);
+ integerEnv.put(PROPERTY_MULTI_RELEASE, value);
runTest(integerEnv, expected);
}
@Test(dataProvider="versions")
- public void testMRVersions(Version value, int expected) throws Throwable {
+ public void testMRVersions(Version value, int expected, Version ignorable) throws Throwable {
versionEnv.clear();
- versionEnv.put("multi-release", value);
+ versionEnv.put(PROPERTY_MULTI_RELEASE, value);
runTest(versionEnv, expected);
}
@@ -264,7 +276,7 @@
JarBuilder jb = new JarBuilder(jfname);
jb.addAttribute("Multi-Release", "true");
jb.build();
- Map<String,String> env = Map.of("releaseVersion", "runtime");
+ Map<String,String> env = Map.of(PROPERTY_RELEASE_VERSION, "runtime");
try (FileSystem fs = FileSystems.newFileSystem(uri, env)) {
Assert.assertTrue(true);
}
@@ -279,7 +291,7 @@
creator.buildCustomMultiReleaseJar(fileName, value, Map.of(),
/*addEntries*/true);
- Map<String,String> env = Map.of("releaseVersion", "runtime");
+ Map<String,String> env = Map.of(PROPERTY_RELEASE_VERSION, "runtime");
Path filePath = Paths.get(userdir, fileName);
String ssp = filePath.toUri().toString();
URI customJar = new URI("jar", ssp , null);
--- a/test/jdk/sun/security/tools/jarsigner/warnings/BadKeyUsageTest.java Thu Nov 21 17:51:11 2019 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.util.JarUtils;
-
-/**
- * @test
- * @bug 8024302 8026037
- * @summary Test for badKeyUsage warning
- * @library /test/lib ../
- * @ignore until 8026393 is fixed
- * @build jdk.test.lib.util.JarUtils
- * @run main BadKeyUsageTest
- */
-public class BadKeyUsageTest extends Test {
-
- /**
- * The test signs and verifies a jar that contains entries
- * whose signer certificate's KeyUsage extension
- * doesn't allow code signing (badKeyUsage).
- * Warning message is expected.
- */
- public static void main(String[] args) throws Throwable {
- BadKeyUsageTest test = new BadKeyUsageTest();
- test.start();
- }
-
- private void start() throws Throwable {
- // create a jar file that contains one class file
- Utils.createFiles(FIRST_FILE);
- JarUtils.createJar(UNSIGNED_JARFILE, FIRST_FILE);
-
- // create a certificate whose signer certificate's KeyUsage extension
- // doesn't allow code signing
- createAlias(CA_KEY_ALIAS);
- createAlias(KEY_ALIAS);
-
- issueCert(
- KEY_ALIAS,
- "-ext", "KeyUsage=keyAgreement",
- "-validity", Integer.toString(VALIDITY));
-
- // sign jar
- OutputAnalyzer analyzer = jarsigner(
- "-verbose",
- "-keystore", KEYSTORE,
- "-storepass", PASSWORD,
- "-keypass", PASSWORD,
- "-signedjar", SIGNED_JARFILE,
- UNSIGNED_JARFILE,
- KEY_ALIAS);
-
- checkSigning(analyzer, BAD_KEY_USAGE_SIGNING_WARNING);
-
- // verify signed jar
- analyzer = jarsigner(
- "-verify",
- "-verbose",
- "-keystore", KEYSTORE,
- "-storepass", PASSWORD,
- "-keypass", PASSWORD,
- SIGNED_JARFILE);
-
- checkVerifying(analyzer, 0, BAD_KEY_USAGE_VERIFYING_WARNING);
-
- // verify signed jar in strict mode
- analyzer = jarsigner(
- "-verify",
- "-verbose",
- "-strict",
- "-keystore", KEYSTORE,
- "-storepass", PASSWORD,
- "-keypass", PASSWORD,
- SIGNED_JARFILE);
-
- checkVerifying(analyzer, BAD_KEY_USAGE_EXIT_CODE,
- BAD_KEY_USAGE_VERIFYING_WARNING);
-
- System.out.println("Test passed");
- }
-
-}
--- a/test/langtools/jdk/javadoc/doclet/testDocFiles/TestDocFiles.java Thu Nov 21 17:51:11 2019 +0000
+++ b/test/langtools/jdk/javadoc/doclet/testDocFiles/TestDocFiles.java Mon Nov 25 15:16:29 2019 +0000
@@ -23,31 +23,109 @@
/*
* @test
- * @bug 8008949
- * @summary verify that doc-files get copied
- * @library ../../lib
+ * @bug 8008949 8234051
+ * @summary doclet crashes if HTML files in module doc-files directories
+ * @library /tools/lib ../../lib
* @modules jdk.javadoc/jdk.javadoc.internal.tool
- * @build javadoc.tester.*
+ * @build toolbox.ToolBox javadoc.tester.*
* @run main TestDocFiles
*/
+import java.io.IOException;
+import java.nio.file.Path;
+
+import toolbox.ToolBox;
import javadoc.tester.JavadocTester;
public class TestDocFiles extends JavadocTester {
public static void main(String... args) throws Exception {
TestDocFiles tester = new TestDocFiles();
- tester.runTests();
+ tester.runTests(m -> new Object[] { Path.of(m.getName()) });
+ }
+
+ ToolBox tb = new ToolBox();
+
+ /**
+ * Check doc-files support for a package that is not in a module.
+ * @param base the base directory for scratch files
+ * @throws IOException if an exception occurs
+ */
+ @Test
+ public void testPackage(Path base) throws IOException {
+ Path src = base.resolve("src");
+
+ // write the skeletal Java files
+ tb.writeJavaFiles(src,
+ "package p; public class C { }\n");
+
+ // write the doc files for the package
+ Path pkgDocFiles = src.resolve("p").resolve("doc-files");
+ tb.writeFile(pkgDocFiles.resolve("pkg-file.txt"),
+ "package text file\n");
+ tb.writeFile(pkgDocFiles.resolve("pkg-file.html"),
+ "<html>\n"
+ + "<head><title>Package HTML file</title></head>\n"
+ + "<body><h1>Package HTML file</h1>File content</body>\n"
+ + "</html>\n");
+
+ javadoc("-d", base.resolve("out").toString(),
+ "--source-path", src.toString(),
+ "p");
+ checkExit(Exit.OK);
+
+ checkOutput("p/doc-files/pkg-file.txt", true,
+ "package text file");
+ checkOutput("p/doc-files/pkg-file.html", true,
+ "Package HTML file");
}
+ /**
+ * Check doc-files support for a module and a package that is in a module.
+ * @param base the base directory for scratch files
+ * @throws IOException if an exception occurs
+ */
@Test
- public void test() {
- javadoc("-d", "out",
- "-sourcepath", testSrc,
- "pkg");
+ public void testModules(Path base) throws IOException {
+ Path src = base.resolve("src");
+
+ // write the skeletal Java files
+ tb.writeJavaFiles(src,
+ "module m { exports p; }\n",
+ "package p; public class C { }\n");
+
+ // write the doc files for the module
+ Path mdlDocFiles = src.resolve("doc-files");
+ tb.writeFile(mdlDocFiles.resolve("mdl-file.txt"),
+ "module text file\n");
+ tb.writeFile(mdlDocFiles.resolve("mdl-file.html"),
+ "<html>\n"
+ + "<head><title>Module HTML file</title></head>\n"
+ + "<body><h1>Module HTML file</h1>File content</body>\n"
+ + "</html>\n");
+
+ // write the doc files for a package in the module
+ Path pkgDocFiles = src.resolve("p").resolve("doc-files");
+ tb.writeFile(pkgDocFiles.resolve("pkg-file.txt"),
+ "package text file\n");
+ tb.writeFile(pkgDocFiles.resolve("pkg-file.html"),
+ "<html>\n"
+ + "<head><title>Package HTML file</title></head>\n"
+ + "<body><h1>Package HTML file</h1>File content</body>\n"
+ + "</html>\n");
+
+ javadoc("-d", base.resolve("out").toString(),
+ "--source-path", src.toString(),
+ "--module", "m");
checkExit(Exit.OK);
- checkOutput("pkg/doc-files/test.txt", true,
- "test file");
+ checkOutput("m/doc-files/mdl-file.txt", true,
+ "module text file");
+ checkOutput("m/doc-files/mdl-file.html", true,
+ "Module HTML file");
+ checkOutput("m/p/doc-files/pkg-file.txt", true,
+ "package text file");
+ checkOutput("m/p/doc-files/pkg-file.html", true,
+ "Package HTML file");
}
}
--- a/test/langtools/jdk/javadoc/doclet/testDocFiles/pkg/Test.java Thu Nov 21 17:51:11 2019 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package pkg;
-
-public class Test { }
-
--- a/test/langtools/jdk/javadoc/doclet/testDocFiles/pkg/doc-files/test.txt Thu Nov 21 17:51:11 2019 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-this is a test file
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/file/FSInfoTest.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.sun.tools.javac.file.FSInfo;
+import com.sun.tools.javac.util.Context;
+import org.testng.annotations.Test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Locale;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
+
+/*
+ * @test
+ * @bug 8232170
+ * @summary Test com.sun.tools.javac.file.FSInfo
+ * @modules jdk.compiler/com.sun.tools.javac.util
+ * jdk.compiler/com.sun.tools.javac.file
+ * @run testng FSInfoTest
+ */
+public class FSInfoTest {
+
+ /**
+ * Tests that if a jar file has a manifest with an invalid path value for the {@code Class-Path} attribute,
+ * then parsing such a jar file through {@link FSInfo#getJarClassPath(Path)} doesn't throw any
+ * exception other than {@link IOException}.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testInvalidClassPath() throws Exception {
+ final String invalidOSPath = System.getProperty("os.name").toLowerCase(Locale.ENGLISH).contains("windows")
+ ? "C:\\*" : "foo\u0000bar";
+ final Path jarFile = Files.createTempFile(null, ".jar");
+ jarFile.toFile().deleteOnExit();
+ final Manifest mf = new Manifest();
+ mf.getMainAttributes().putValue("Manifest-Version", "1.0");
+ // add Class-Path which points to an invalid path
+ System.out.println("Intentionally using an invalid Class-Path entry " + invalidOSPath + " in manifest");
+ mf.getMainAttributes().putValue("Class-Path", invalidOSPath + " " + "/some/other-random/path");
+
+ // create a jar file with the manifest
+ try (final JarOutputStream jar = new JarOutputStream(Files.newOutputStream(jarFile), mf)) {
+ }
+ final FSInfo fsInfo = FSInfo.instance(new Context());
+ try {
+ fsInfo.getJarClassPath(jarFile);
+ // we don't rely on fsInfo.getJarClassPath to throw an exception for invalid
+ // paths. Hence no Assert.fail(...) call here. But if it does throw some exception,
+ // then that exception should always be a IOException.
+ } catch (IOException ioe) {
+ // expected
+ System.out.println("(As expected) FSInfo.getJarClassPath threw an IOException - " + ioe.getMessage());
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javap/BadAttributeName.java Mon Nov 25 15:16:29 2019 +0000
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8234687
+ * @summary change javap reporting on unknown attributes
+ * @modules jdk.jdeps/com.sun.tools.javap
+ * @run main BadAttributeName
+ */
+
+
+import java.io.*;
+import java.nio.file.*;
+
+public class BadAttributeName {
+
+ public static String source = "public class Test {\n" +
+ " public static void main(String[] args) {}\n" +
+ "}";
+
+ public static void main(String[] args) throws Exception {
+ final File srcFile = new File("Test.java");
+ Files.writeString(Path.of("Test.java"), source);
+
+ final String[] javacOpts = {"Test.java"};
+
+ if (com.sun.tools.javac.Main.compile(javacOpts) != 0) {
+ throw new Exception("Can't compile embedded test.");
+ }
+
+ RandomAccessFile raf = new RandomAccessFile("Test.class", "rw");
+ String sourceFile = "SourceFile";
+ long namePos = getConstantPoolUTF8Pos(raf, sourceFile);
+ if (namePos < 0) {
+ throw new Exception("The class file contains no SourceFile attribute.");
+ }
+
+ raf.seek(namePos); // Jump to the SourceFile name
+ // Create a "custom" attribute by reusing/renaming an unimportant existing one
+ String customAttr = "CustomAttribute".substring(0, sourceFile.length());
+ raf.writeUTF(customAttr);
+ raf.close();
+
+ String[] opts = { "-v", "Test.class" };
+ StringWriter sw = new StringWriter();
+ PrintWriter pout = new PrintWriter(sw);
+
+ com.sun.tools.javap.Main.run(opts, pout);
+ pout.flush();
+
+ String expect = customAttr + ": length = 0x2 (unknown attribute)";
+ if (sw.toString().indexOf(expect) == -1) {
+ sw.toString().lines().forEach(System.out::println);
+ throw new Exception("expected text not found: " + expect);
+ }
+ }
+
+ private static long getConstantPoolUTF8Pos(RandomAccessFile cfile, String name) throws Exception {
+ cfile.seek(0);
+ int v1, v2;
+ v1 = cfile.readInt();
+ // System.out.println("Magic: " + String.format("%X", v1));
+
+ v1 = cfile.readUnsignedShort();
+ v2 = cfile.readUnsignedShort();
+ // System.out.println("Version: " + String.format("%d.%d", v1, v2));
+
+ v1 = cfile.readUnsignedShort();
+ // System.out.println("CPool size: " + v1);
+ // Exhaust the constant pool
+ for (; v1 > 1; v1--) {
+ // System.out.print(".");
+ byte tag = cfile.readByte();
+ switch (tag) {
+ case 7 : // Class
+ case 8 : // String
+ // Data is 2 bytes long
+ cfile.skipBytes(2);
+ break;
+ case 3 : // Integer
+ case 4 : // Float
+ case 9 : // FieldRef
+ case 10 : // MethodRef
+ case 11 : // InterfaceMethodRef
+ case 12 : // Name and Type
+ // Data is 4 bytes long
+ cfile.skipBytes(4);
+ break;
+ case 5 : // Long
+ case 6 : // Double
+ // Data is 8 bytes long
+ cfile.skipBytes(8);
+ break;
+ case 1 : // Utf8
+ long fp = cfile.getFilePointer();
+ String s = cfile.readUTF();
+ if (s.equals(name)) {
+ return fp;
+ }
+ break;
+ default :
+ throw new Exception("Unexpected tag in CPool: [" + tag + "] at "
+ + Long.toHexString(cfile.getFilePointer()));
+ }
+ }
+ // System.out.println();
+
+ // Bummer! Name not found!
+ return -1L;
+ }
+}