/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
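
// The fields read below are set lazily, after the InstanceKlass is published:
// a writer installs the pointer with a release-store and readers load it with
// a matching load-acquire, so a reader that observes a non-NULL value also
// observes the fully initialized data it points to.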
inline Klass* InstanceKlass::array_klasses_acquire() const {
  return (Klass*) OrderAccess::load_ptr_acquire(&_array_klasses);
}

inline void InstanceKlass::release_set_array_klasses(Klass* k) {
  OrderAccess::release_store_ptr(&_array_klasses, k);
}

inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
  return (jmethodID*)OrderAccess::load_ptr_acquire(&_methods_jmethod_ids);
}

inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
  OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths);
}

// The iteration over the oops in objects is a hot path in the GC code.
// By force-inlining the following functions, we get GC performance similar
// to that of the previous macro-based implementation.
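
// Applies the closure to every oop field described by a single OopMapBlock.
// T is narrowOop or oop, matching UseCompressedOops; the nv parameter lets
// Devirtualizer dispatch to the closure's non-virtual do_oop variant.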
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  T* const end = p + map->count();

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
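
// The reverse variant visits the same fields from the highest address down.
// It is only needed by the collectors that are built under INCLUDE_ALL_GCS.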
#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  T*       p     = start + map->count();

  while (start < p) {
    --p;
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
#endif // INCLUDE_ALL_GCS
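
// The bounded variant clips the field range [p, end) to the MemRegion mr,
// so the closure is only applied to fields that lie inside mr.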
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  T* end = p + map->count();

  T* const l = (T*)mr.start();
  T* const h = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");

  if (p < l) {
    p = l;
  }
  if (end > h) {
    end = h;
  }

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
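
// The *_specialized functions walk all nonstatic OopMapBlocks of this klass
// and hand each block to the corresponding single-map function above.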
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  }
}

#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();

  while (start_map < map) {
    --map;
    oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
  }
}
#endif // INCLUDE_ALL_GCS

template <bool nv, typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
  }
}
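
// The dispatchers below select the narrowOop or oop specialization once per
// object, based on UseCompressedOops, instead of testing it per field.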
template <bool nv, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
  }
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
  }
}
#endif // INCLUDE_ALL_GCS

template <bool nv, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
  } else {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
  }
}
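
// Main entry points for the closure-based iteration. They visit the klass
// (when the closure asks for metadata) and all oop fields, and return the
// object size in words so callers can step to the next object.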
template <bool nv, class OopClosureType>
ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    Devirtualizer<nv>::do_klass(closure, this);
  }

  oop_oop_iterate_oop_maps<nv>(obj, closure);

  return size_helper();
}
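
// Reverse iteration never visits the klass: closures used here are expected
// to return false from do_metadata(), as the assert below documents.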
#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer<nv>::do_metadata(closure),
         "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);

  return size_helper();
}
#endif // INCLUDE_ALL_GCS
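
// The bounded entry point only applies the closure to oop fields inside mr,
// and only visits the klass if the object's start lies within mr.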
template <bool nv, class OopClosureType>
ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer<nv>::do_klass(closure, this);
    }
  }

  oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);

  return size_helper();
}
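
// Instantiates the forward, bounded, and backwards oop_oop_iterate entry
// points for a given closure type.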
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)

#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP