 */

#ifndef SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
#define SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP

#include "gc_implementation/shared/markSweep.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/memRegion.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#endif // INCLUDE_ALL_GCS

36 void ObjArrayKlass::oop_follow_contents(oop obj, int index) { |
35 template <bool nv, typename T, class OopClosureType> |
37 if (UseCompressedOops) { |
36 void ObjArrayKlass::oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure) { |
38 objarray_follow_contents<narrowOop>(obj, index); |
37 T* p = (T*)a->base(); |
39 } else { |
38 T* const end = p + a->length(); |
40 objarray_follow_contents<oop>(obj, index); |
39 |
|
40 for (;p < end; p++) { |
|
41 Devirtualizer<nv>::do_oop(closure, p); |
41 } |
42 } |
42 } |
43 } |
43 |
44 |
44 template <class T> |
45 template <bool nv, typename T, class OopClosureType> |
45 void ObjArrayKlass::objarray_follow_contents(oop obj, int index) { |
46 void ObjArrayKlass::oop_oop_iterate_elements_specialized_bounded( |
46 objArrayOop a = objArrayOop(obj); |
47 objArrayOop a, OopClosureType* closure, void* low, void* high) { |
47 const size_t len = size_t(a->length()); |
|
48 const size_t beg_index = size_t(index); |
|
49 assert(beg_index < len || len == 0, "index too large"); |
|
50 |
48 |
51 const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride); |
49 T* const l = (T*)low; |
52 const size_t end_index = beg_index + stride; |
50 T* const h = (T*)high; |
53 T* const base = (T*)a->base(); |
|
54 T* const beg = base + beg_index; |
|
55 T* const end = base + end_index; |
|
56 |
51 |
57 // Push the non-NULL elements of the next stride on the marking stack. |
52 T* p = (T*)a->base(); |
58 for (T* e = beg; e < end; e++) { |
53 T* end = p + a->length(); |
59 MarkSweep::mark_and_push<T>(e); |
54 |
|
55 if (p < l) { |
|
56 p = l; |
|
57 } |
|
58 if (end > h) { |
|
59 end = h; |
60 } |
60 } |
61 |
61 |
62 if (end_index < len) { |
62 for (;p < end; ++p) { |
63 MarkSweep::push_objarray(a, end_index); // Push the continuation. |
63 Devirtualizer<nv>::do_oop(closure, p); |
64 } |
64 } |
65 } |
65 } |
66 |
66 |
67 #if INCLUDE_ALL_GCS |
67 template <bool nv, class OopClosureType> |
68 void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj, |
68 void ObjArrayKlass::oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure) { |
69 int index) { |
|
70 if (UseCompressedOops) { |
69 if (UseCompressedOops) { |
71 objarray_follow_contents<narrowOop>(cm, obj, index); |
70 oop_oop_iterate_elements_specialized<nv, narrowOop>(a, closure); |
72 } else { |
71 } else { |
73 objarray_follow_contents<oop>(cm, obj, index); |
72 oop_oop_iterate_elements_specialized<nv, oop>(a, closure); |
74 } |
73 } |
75 } |
74 } |
76 |
75 |
77 template <class T> |
76 template <bool nv, class OopClosureType> |
78 void ObjArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj, |
77 void ObjArrayKlass::oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr) { |
79 int index) { |
78 if (UseCompressedOops) { |
|
79 oop_oop_iterate_elements_specialized_bounded<nv, narrowOop>(a, closure, mr.start(), mr.end()); |
|
80 } else { |
|
81 oop_oop_iterate_elements_specialized_bounded<nv, oop>(a, closure, mr.start(), mr.end()); |
|
82 } |
|
83 } |
|
84 |
|
85 template <bool nv, typename OopClosureType> |
|
86 int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { |
|
87 assert (obj->is_array(), "obj must be array"); |
80 objArrayOop a = objArrayOop(obj); |
88 objArrayOop a = objArrayOop(obj); |
81 const size_t len = size_t(a->length()); |
|
82 const size_t beg_index = size_t(index); |
|
83 assert(beg_index < len || len == 0, "index too large"); |
|
84 |
89 |
85 const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride); |
90 // Get size before changing pointers. |
86 const size_t end_index = beg_index + stride; |
91 // Don't call size() or oop_size() since that is a virtual call. |
87 T* const base = (T*)a->base(); |
92 int size = a->object_size(); |
88 T* const beg = base + beg_index; |
93 if (Devirtualizer<nv>::do_metadata(closure)) { |
89 T* const end = base + end_index; |
94 Devirtualizer<nv>::do_klass(closure, obj->klass()); |
90 |
|
91 // Push the non-NULL elements of the next stride on the marking stack. |
|
92 for (T* e = beg; e < end; e++) { |
|
93 PSParallelCompact::mark_and_push<T>(cm, e); |
|
94 } |
95 } |
95 |
96 |
96 if (end_index < len) { |
97 oop_oop_iterate_elements<nv>(a, closure); |
97 cm->push_objarray(a, end_index); // Push the continuation. |
98 |
|
99 return size; |
|
100 } |
|
101 |
|
102 template <bool nv, typename OopClosureType> |
|
103 int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) { |
|
104 assert(obj->is_array(), "obj must be array"); |
|
105 objArrayOop a = objArrayOop(obj); |
|
106 |
|
107 // Get size before changing pointers. |
|
108 // Don't call size() or oop_size() since that is a virtual call |
|
109 int size = a->object_size(); |
|
110 |
|
111 if (Devirtualizer<nv>::do_metadata(closure)) { |
|
112 Devirtualizer<nv>::do_klass(closure, a->klass()); |
98 } |
113 } |
|
114 |
|
115 oop_oop_iterate_elements_bounded<nv>(a, closure, mr); |
|
116 |
|
117 return size; |
99 } |
118 } |
100 #endif // INCLUDE_ALL_GCS |
119 |
|
120 template <bool nv, typename T, class OopClosureType> |
|
121 void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) { |
|
122 if (Devirtualizer<nv>::do_metadata(closure)) { |
|
123 Devirtualizer<nv>::do_klass(closure, a->klass()); |
|
124 } |
|
125 |
|
126 T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr<T>(start); |
|
127 T* high = (T*)a->base() + end; |
|
128 |
|
129 oop_oop_iterate_elements_specialized_bounded<nv, T>(a, closure, low, high); |
|
130 } |
|
131 |
|
132 // Like oop_oop_iterate but only iterates over a specified range and only used |
|
133 // for objArrayOops. |
|
134 template <bool nv, class OopClosureType> |
|
135 int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) { |
|
136 assert(obj->is_array(), "obj must be array"); |
|
137 objArrayOop a = objArrayOop(obj); |
|
138 |
|
139 // Get size before changing pointers. |
|
140 // Don't call size() or oop_size() since that is a virtual call |
|
141 int size = a->object_size(); |
|
142 |
|
143 if (UseCompressedOops) { |
|
144 oop_oop_iterate_range_specialized<nv, narrowOop>(a, closure, start, end); |
|
145 } else { |
|
146 oop_oop_iterate_range_specialized<nv, oop>(a, closure, start, end); |
|
147 } |
|
148 |
|
149 return size; |
|
150 } |
|
151 |
|
152 |
|
153 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
|
154 \ |
|
155 int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ |
|
156 return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \ |
|
157 } |
|
158 |
|
159 #if INCLUDE_ALL_GCS |
|
160 #define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ |
|
161 int ObjArrayKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ |
|
162 /* No reverse implementation ATM. */ \ |
|
163 return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \ |
|
164 } |
|
165 #else |
|
166 #define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) |
|
167 #endif |
|
168 |
|
169 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ |
|
170 \ |
|
171 int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \ |
|
172 return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \ |
|
173 } |
|
174 |
|
175 #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \ |
|
176 \ |
|
177 int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \ |
|
178 return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end); \ |
|
179 } |
|
180 |
|
181 |
|
182 #define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
|
183 ObjArrayKlass_OOP_OOP_ITERATE_DEFN( OopClosureType, nv_suffix) \ |
|
184 ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ |
|
185 ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m( OopClosureType, nv_suffix) \ |
|
186 ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r( OopClosureType, nv_suffix) |
|
187 |

#endif // SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP