   enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)

  private:
   static OSThread* _starting_thread;
   static address _polling_page;
-  static volatile int32_t * _mem_serialize_page;
-  static uintptr_t _serialize_page_mask;
  public:
   static size_t _page_sizes[page_sizes_max];

  private:
   static void init_page_sizes(size_t default_page_size) {
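The two deleted fields above (together with the block removed further down) backed HotSpot's pseudo memory barrier: rather than execute a fence on every thread-state transition, a mutator thread does a plain store into a shared page, and the VM thread serializes all such stores by briefly revoking the page's write permission; a thread caught mid-store faults and blocks until serialization finishes (see block_on_serialize_page_trap below). A minimal sketch of that idea, assuming POSIX mprotect and a caller-supplied slot_offset; it is illustrative only, not the HotSpot code:

  // Illustrative sketch of the pseudo memory barrier; not HotSpot source.
  // Assumes POSIX mprotect(); error handling and locking omitted.
  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdint>

  static volatile int32_t* serialize_page;   // one shared read-write page

  // Mutator side: a plain store to this thread's slot stands in for a fence.
  void on_thread_state_change(uintptr_t slot_offset) {  // slot_offset is hypothetical
    *(volatile int32_t*)((uintptr_t)serialize_page + slot_offset) = 1;
  }

  // VM side: flipping the page read-only and back forces every in-flight
  // mutator store to either retire or fault (and block) before we proceed.
  void serialize_thread_states(size_t page_size) {
    mprotect((void*)serialize_page, page_size, PROT_READ);
    mprotect((void*)serialize_page, page_size, PROT_READ | PROT_WRITE);
  }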
   static void make_polling_page_readable();

   // Check if pointer points to readable memory (by 4-byte read access)
   static bool is_readable_pointer(const void* p);
   static bool is_readable_range(const void* from, const void* to);
-
-  // Routines used to serialize the thread state without using membars
-  static void serialize_thread_states();
-
-  // Since we write to the serialize page from every thread, we
-  // want stores to be on unique cache lines whenever possible
-  // in order to minimize CPU cross talk. We pre-compute the
-  // amount to shift the thread* to make this offset unique to
-  // each thread.
-  static int get_serialize_page_shift_count() {
-    return SerializePageShiftCount;
-  }
-
-  static void set_serialize_page_mask(uintptr_t mask) {
-    _serialize_page_mask = mask;
-  }
-
-  static unsigned int get_serialize_page_mask() {
-    return _serialize_page_mask;
-  }
-
-  static void set_memory_serialize_page(address page);
-
-  static address get_memory_serialize_page() {
-    return (address)_mem_serialize_page;
-  }
-
-  static inline void write_memory_serialize_page(JavaThread *thread) {
-    uintptr_t page_offset = ((uintptr_t)thread >>
-                             get_serialize_page_shift_count()) &
-                            get_serialize_page_mask();
-    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
-  }
-
-  static bool is_memory_serialize_page(JavaThread *thread, address addr) {
-    if (UseMembar) return false;
-    // Previously this function calculated the exact address of this
-    // thread's serialize page, and checked if the faulting address
-    // was equal. However, some platforms mask off faulting addresses
-    // to the page size, so now we just check that the address is
-    // within the page. This makes the thread argument unnecessary,
-    // but we retain the NULL check to preserve existing behavior.
-    if (thread == NULL) return false;
-    address page = (address) _mem_serialize_page;
-    return addr >= page && addr < (page + os::vm_page_size());
-  }
-
-  static void block_on_serialize_page_trap();

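As the deleted comment explains, each thread's slot is ((uintptr_t)thread >> shift) & mask, chosen so that different Thread* values land on different cache lines of the page. A worked example with made-up numbers; the shift and mask below are hypothetical stand-ins for SerializePageShiftCount and _serialize_page_mask, and the addresses are invented:

  // Worked example of the shift-and-mask slot computation; all values made up.
  #include <cstdint>
  #include <cstdio>

  int main() {
    const int       shift = 6;      // stand-in for SerializePageShiftCount
    const uintptr_t mask  = 0xFC0;  // keeps offsets 64-byte aligned within a 4K page

    // Invented Thread* addresses, 64 KB apart as a heap allocator might place them.
    const uintptr_t threads[] = { 0x7f3c2a410000, 0x7f3c2a420000, 0x7f3c2a430000 };
    for (uintptr_t t : threads) {
      uintptr_t offset = (t >> shift) & mask;  // 0x400, 0x800, 0xC00: distinct lines
      printf("thread %#lx -> page offset %#lx\n",
             (unsigned long)t, (unsigned long)offset);
    }
    return 0;
  }

With real thread addresses the offsets need not be unique ("whenever possible" in the original comment); a collision only means two threads share a cache line, which costs some performance but not correctness.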
   // threads

   enum ThreadType {
     vm_thread,