 */

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

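// Note: cas_set_mark is the primitive beneath the locking and GC protocols
// that publish a new mark word only if the current value still equals
// old_mark; callers compare the returned markOop against old_mark to tell
// whether the swap succeeded.
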
inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    // The compressed klass can be NULL in CMS, but compressed oops are not
    // supported on CMS yet.
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

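// For reference, _metadata is a union declared in oop.hpp along these lines
// (a sketch inferred from the accessors in this file; see oop.hpp for the
// authoritative declaration):
//
//   union _metadata {
//     klassOop  _klass;             // full-width klass pointer (!UseCompressedOops)
//     narrowOop _compressed_klass;  // 32-bit compressed klass  (UseCompressedOops)
//   } _metadata;
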
inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline oop* oopDesc::klass_addr() {
  // Only used internally and by CMS; does not work with
  // UseCompressedOops.
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(klassOop k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    // zero the gap when the klass is set, by zeroing the pointer sized
    // part of the union.
    _metadata._klass = NULL;
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

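// Illustrative header layout on a 64-bit VM with UseCompressedOops (a sketch
// only; the real layout is defined by the offsets in oop.hpp):
//
//   bytes  0..7   _mark                       (markOop)
//   bytes  8..11  _metadata._compressed_klass (narrowOop)
//   bytes 12..15  "klass gap" -- reusable for the first int-sized field
//
// Writing NULL through the pointer-sized _metadata._klass member above zeroes
// both the compressed klass slot and the gap in a single store.
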
inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop_not_null(k);
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void oopDesc::init_mark()         { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint() const { return klass()->klass_part(); }

inline bool oopDesc::is_constantPoolCache() const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder() const  { return blueprint()->oop_is_compiledICHolder(); }

inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte* oopDesc::byte_field_addr(int offset)     const { return (jbyte*)   field_base(offset); }
inline jchar* oopDesc::char_field_addr(int offset)     const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)  const { return (jboolean*)field_base(offset); }
inline jint* oopDesc::int_field_addr(int offset)       const { return (jint*)    field_base(offset); }
inline jshort* oopDesc::short_field_addr(int offset)   const { return (jshort*)  field_base(offset); }
inline jlong* oopDesc::long_field_addr(int offset)     const { return (jlong*)   field_base(offset); }
inline jfloat* oopDesc::float_field_addr(int offset)   const { return (jfloat*)  field_base(offset); }
inline jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

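// Illustrative sketch of the pattern the comment above describes (a
// hypothetical helper, not part of this file): a loop templatized on the
// in-heap oop representation calls the same overload set for either T = oop
// or T = narrowOop, so no UseCompressedOops branch appears in the loop body.
//
//   template <class T> void follow_contents(T* p, T* end) {
//     while (p < end) {
//       if (!oopDesc::is_null(*p)) {
//         oop obj = oopDesc::load_decode_heap_oop_not_null(p);
//         // ... process obj ...
//       }
//       p++;
//     }
//   }
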
// Algorithm for encoding and decoding oops: a 64 bit pointer becomes a 32 bit
// offset from the heap base.  Omitting the null check saves instructions in
// inner GC loops, so the checked and not-null variants are kept separate.

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  address heap_base = Universe::heap_base();
  uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
  assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address heap_base = Universe::heap_base();
  return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)          { return v; }

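// Worked example (illustrative numbers only): with heap_base = 0x700000000
// and LogMinObjAlignmentInBytes = 3 (8-byte object alignment), an oop at
// 0x700000040 encodes as (0x700000040 - 0x700000000) >> 3 = 0x8, and
// decoding computes 0x700000000 + (0x8 << 3) = 0x700000040 again.  A 32-bit
// narrowOop with a 3-bit shift can thus span 2^32 * 8 bytes = 32 GB of heap.
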
// Load an oop out of the Java heap as is, without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap, accepting null.
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store an already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop, allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
  // A narrow heap oop is not pointer sized, so use the 32-bit release_store.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}

// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to narrowOop
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from narrowOop back to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare values from oop to narrowOop
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from narrowOop back to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}

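// A typical caller (elsewhere in the VM, e.g. the Unsafe.compareAndSwapObject
// intrinsic) CASes an oop field inside an instance; per the comment above,
// header words (mark, klass) never go through this path.
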
// In order to put or get a field out of an instance, one must first check
// whether the field has been compressed, and uncompress it if so.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_raw_put(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field(int offset) const                    { return (jbyte) *byte_field_addr(offset); }
inline void oopDesc::byte_field_put(int offset, jbyte contents)       { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const                 { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::float_field_put(int offset, jfloat contents)     { *float_field_addr(offset) = contents; }

inline jdouble oopDesc::double_field(int offset) const                { return *double_field_addr(offset); }
inline void oopDesc::double_field_put(int offset, jdouble contents)   { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const               { return *address_field_addr(offset); }
inline void oopDesc::address_field_put(int offset, address contents)  { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const              { return OrderAccess::load_acquire(byte_field_addr(offset)); }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const           { return OrderAccess::load_acquire(bool_field_addr(offset)); }

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set(p, v);
  }
}

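// Example instantiation (from obj_field_put above): with compressed oops the
// call is oop_store(obj_field_addr<narrowOop>(offset), value), so T deduces
// to narrowOop, encode_store_heap_oop compresses the value, and the store
// barrier still sees the field's raw address via update_barrier_set.
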
template <class T> inline void oop_store(volatile T* p, oop v) {
  // Used by release_obj_field_put, so use a release store.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);
}

template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \