/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
24 |
|
25 #ifndef SHARE_UTILITIES_GLOBAL_COUNTER_HPP |
|
26 #define SHARE_UTILITIES_GLOBAL_COUNTER_HPP |
|
27 |
|
28 #include "memory/allocation.hpp" |
|
29 #include "memory/padded.hpp" |
|
30 |
|
31 class Thread; |
|
32 |
|
// The GlobalCounter provides a synchronization mechanism between threads for
// safe memory reclamation and other ABA problems. All readers must call
// critical_section_begin before reading the volatile data and
// critical_section_end afterwards. The write side must call write_synchronize
// before reclaiming the memory. The read-path only does an uncontended store
// to thread-local-storage and a fence to stop any loads from floating up, and
// is thus lightweight and wait-free. The write-side is heavier since it must
// check all readers and wait until they have left the generation. (A system
// memory barrier could be used on the write-side to remove the fence from the
// read-side; this is not implemented.)
|
class GlobalCounter : public AllStatic {
 private:
  // Since we do not know what we will end up next to in BSS, we make sure
  // the counter is on a separate cacheline.
  struct PaddedCounter {
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE/2, 0);
    volatile uintx _counter;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE/2, sizeof(volatile uintx));
  };

  // The global generation counter; readers snapshot it on entry to a
  // critical section and the writer advances it in write_synchronize.
  static PaddedCounter _global_counter;

  // Bit 0 is the active bit: set in a reader's thread-local counter while
  // that reader is inside a critical section.
  static const uintx COUNTER_ACTIVE = 1;
  // Since bit 0 is reserved for the active flag, the generation is
  // advanced by 2.
  static const uintx COUNTER_INCREMENT = 2;

  // The per-thread scanning closure used by the write-side to check
  // whether each reader has left the current generation.
  class CounterThreadCheck;

 public:
  // Must be called before accessing the data. Only threads accessible
  // lock-free can use this. Those included now are all Threads on the SMR
  // ThreadsList and the VMThread. Nesting is not yet supported.
  static void critical_section_begin(Thread *thread);

  // Must be called after finishing accessing the data.
  // Provides no fence, so loads/stores may move into the critical section.
  static void critical_section_end(Thread *thread);

  // Make the data inaccessible to readers before calling. When this call
  // returns it's safe to reclaim the data.
  static void write_synchronize();

  // A scoped RAII object for a read-side critical-section
  // (critical_section_begin on construction, critical_section_end on
  // destruction).
  class CriticalSection;
};
|
81 |
|
82 #endif // include guard |