#ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
#define SHARE_VM_MEMORY_SHAREDHEAP_HPP

#include "gc_interface/collectedHeap.hpp"

30 // A "SharedHeap" is an implementation of a java heap for HotSpot. This |
|
31 // is an abstract class: there may be many different kinds of heaps. This |
|
32 // class defines the functions that a heap must implement, and contains |
|
33 // infrastructure common to all heaps. |
|
34 |
|
// Note on the use of FlexibleWorkGangs for GC.
// There are three places where task completion is determined:
//  1) ParallelTaskTerminator::offer_termination(), where _n_threads
//  must be set to the correct value so that the count of workers that
//  have offered termination exactly matches the number working on the
//  task.  Tasks such as those derived from GCTask use
//  ParallelTaskTerminators.  Tasks that want load balancing by work
//  stealing use this method to gauge completion (see the sketch after
//  this list).
//  2) SubTasksDone has a variable _n_threads that is used in
//  all_tasks_completed() to determine completion.  all_tasks_completed()
//  counts the number of workers that have finished and then resets
//  the SubTasksDone so that it can be used again.  When the number of
//  tasks is set to the number of GC workers, then _n_threads must
//  be set to the number of active GC workers.  G1RootProcessor and
//  GenCollectedHeap have SubTasksDone.
//  3) SequentialSubTasksDone has an _n_threads that is used in
//  a way similar to SubTasksDone and has the same dependency on the
//  number of active GC workers.  CompactibleFreeListSpace and Space
//  have SequentialSubTasksDones.
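//
// As an illustrative sketch only (MyStealingTask, _queues, _seed and
// _terminator are hypothetical names, not HotSpot classes), a
// work-stealing worker drains its own queue, steals from peers, and
// only then offers termination:
//
//   void MyStealingTask::work(uint worker_id) {
//     oop obj;
//     do {
//       // Drain the local queue first, then try to steal from peers.
//       while (_queues->queue(worker_id)->pop_local(obj) ||
//              _queues->steal(worker_id, &_seed[worker_id], obj)) {
//         process(obj);
//       }
//       // offer_termination() returns true only after all _n_threads
//       // workers have offered termination, so _n_threads must match
//       // the number of workers executing this task.
//     } while (!_terminator->offer_termination());
//   }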
//
// Examples of using SubTasksDone and SequentialSubTasksDone:
//  G1RootProcessor and GenCollectedHeap::process_roots() use
//  SubTasksDone* _process_strong_tasks to claim tasks for workers.
//
//  GenCollectedHeap::gen_process_roots() calls
//      rem_set()->younger_refs_iterate()
//  to scan the card table, which eventually calls down into
//  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
//  uses SequentialSubTasksDone* _pst to claim tasks.
//  Both SubTasksDone and SequentialSubTasksDone call their method
//  all_tasks_completed() to count the number of GC workers that have
//  finished their work.  The logic is "when all the workers are
//  finished, the tasks are finished".
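//
// A minimal sketch of the claiming idiom (the task ids and root-scanning
// calls here are illustrative, not the exact HotSpot definitions):
//
//   enum {
//     SH_PS_Universe_oops_do,
//     SH_PS_JNIHandles_oops_do,
//     SH_PS_NumElements              // number of claimable subtasks
//   };
//
//   void process_roots(OopClosure* strong_roots) {
//     // is_task_claimed() returns false exactly once per task id, for
//     // the single worker that successfully claims that task.
//     if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
//       Universe::oops_do(strong_roots);
//     }
//     if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) {
//       JNIHandles::oops_do(strong_roots);
//     }
//     // The last of the _n_threads workers to get here resets the
//     // SubTasksDone so it can be used again in the next cycle.
//     _process_strong_tasks->all_tasks_completed();
//   }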
//
// The pattern that appears in the code is to set _n_threads
// to a value > 1 before a task that you would like executed in parallel
// and then to set it to 0 after that task has completed.  A value of
// 0 is a "special" value in set_n_threads() that translates to
// setting _n_threads to 1.
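//
// A sketch of that pattern, assuming a heap with a FlexibleWorkGang
// (MyRootsTask is a hypothetical AbstractGangTask):
//
//   uint n_workers = workers()->active_workers();
//   set_par_threads(n_workers);   // propagates to the _n_threads of the
//                                 // SubTasksDone used by the task
//   MyRootsTask task(this);
//   workers()->run_task(&task);   // parallel phase
//   set_par_threads(0);           // 0 is the "special" value: _n_threads
//                                 // is set back to 1 (serial)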
//
// Some code uses _n_termination to decide if work should be done in
// parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
// is an example of such code.  Look for the variable "is_par" for other
// examples.
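//
// Roughly, as a sketch (the exact closure arguments of
// possibly_parallel_oops_do vary by release):
//
//   bool is_par = n_par_threads() > 0;
//   if (is_par) {
//     // Workers claim individual threads using the parity scheme
//     // described below.
//     Threads::possibly_parallel_oops_do(strong_roots, cld_closure, blob_closure);
//   } else {
//     Threads::oops_do(strong_roots, cld_closure, blob_closure);
//   }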
//
// The active_workers value is not reset to 0 after a parallel phase.  Its
// value may be used in later phases, and in at least one instance
// (the parallel remark) it has to be used (the parallel remark depends
// on the partitioning done in the previous parallel scavenge).

class SharedHeap : public CollectedHeap {
  friend class VMStructs;

protected:
  // Full initialization is done in a concrete subtype's "initialize"
  // function.
  SharedHeap();

public:
  // Note, the below comment needs to be updated to reflect the changes
  // introduced by JDK-8076225. This should be done as part of JDK-8076289.
  //
  // Some collectors will perform "process_strong_roots" in parallel.
  // Such a call will involve claiming some fine-grained tasks, such as
  // the scanning of threads.  To make this process simpler, we provide the
  // "strong_roots_parity()" method.  Collectors that start parallel tasks
  // whose threads invoke "process_strong_roots" must call
  // "change_strong_roots_parity" in sequential code when starting such a
  // task.  (This also means that a parallel thread may only call
  // process_strong_roots once.)
  //
  // For calls to process_roots by sequential code, the parity is
  // updated automatically.
  //
  // The idea is that objects representing fine-grained tasks, such as
  // threads, will contain a "parity" field.  A task is claimed in the
  // current "process_roots" call only if its parity field is the
  // same as the "strong_roots_parity"; task claiming is accomplished by
  // updating the parity field to the strong_roots_parity with a CAS.
  //
  // If the client meets this spec, then strong_roots_parity() will have
  // the following properties:
  //   a) to return a different value than was returned before the last
  //      call to change_strong_roots_parity, and
  //   b) to never return a distinguished value (zero) with which such
  //      task-claiming variables may be initialized, to indicate "never
  //      claimed".
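  //
  // A sketch of the claiming CAS, close to (but not exactly) the code in
  // thread.cpp:
  //
  //   bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
  //     jint thread_parity = _oops_do_parity;
  //     if (thread_parity != strong_roots_parity) {
  //       // Race to flip this thread's parity; exactly one worker wins.
  //       jint res = Atomic::cmpxchg(strong_roots_parity,
  //                                  &_oops_do_parity, thread_parity);
  //       if (res == thread_parity) {
  //         return true;     // this worker claimed the thread
  //       }
  //     }
  //     return false;        // already claimed in this parity epoch
  //   }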
public:

  // Call these in sequential code around process_roots.
  // strong_roots_prologue calls change_strong_roots_parity, if
  // parallel tasks are enabled.
  class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
    SharedHeap* _sh;

  public:
    StrongRootsScope(SharedHeap* heap, bool activate = true);
    ~StrongRootsScope();
  };
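  // A sketch of the intended use, assuming a collector with a work gang
  // (MyRootsTask is hypothetical):
  //
  //   {
  //     StrongRootsScope srs(this);   // flips strong_roots_parity once
  //     MyRootsTask task(this);
  //     workers()->run_task(&task);   // workers call process_roots()
  //   }                               // parity claims stay valid until the
  //                                   // next scope changes the parity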
};

#endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP