 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
24 |
24 |
25 CompactibleSpace* DefNewGeneration::first_compaction_space() const { |
25 // Methods of protected closure types |
26 return eden(); |
26 |
|
27 template <class T> |
|
28 inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) { |
|
29 #ifdef ASSERT |
|
30 { |
|
31 // We never expect to see a null reference being processed |
|
32 // as a weak reference. |
|
33 assert (!oopDesc::is_null(*p), "expected non-null ref"); |
|
34 oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
|
35 assert (obj->is_oop(), "expected an oop while scanning weak refs"); |
|
36 } |
|
37 #endif // ASSERT |
|
38 |
|
39 _cl->do_oop_nv(p); |
|
40 |
|
41 // Card marking is trickier for weak refs. |
|
42 // This oop is a 'next' field which was filled in while we |
|
43 // were discovering weak references. While we might not need |
|
44 // to take a special action to keep this reference alive, we |
|
45 // will need to dirty a card as the field was modified. |
|
46 // |
|
47 // Alternatively, we could create a method which iterates through |
|
48 // each generation, allowing them in turn to examine the modified |
|
49 // field. |
|
50 // |
|
51 // We could check that p is also in an older generation, but |
|
52 // dirty cards in the youngest gen are never scanned, so the |
|
53 // extra check probably isn't worthwhile. |
|
54 if (Universe::heap()->is_in_reserved(p)) { |
|
55 oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
|
56 _rs->inline_write_ref_field_gc(p, obj); |
|
57 } |
27 } |
58 } |
28 |
59 |
29 HeapWord* DefNewGeneration::allocate(size_t word_size, |
60 template <class T> |
30 bool is_tlab) { |
61 inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) { |
31 // This is the slow-path allocation for the DefNewGeneration. |
62 #ifdef ASSERT |
32 // Most allocations are fast-path in compiled code. |
63 { |
33 // We try to allocate from the eden. If that works, we are happy. |
64 // We never expect to see a null reference being processed |
34 // Note that since DefNewGeneration supports lock-free allocation, we |
65 // as a weak reference. |
35 // have to use it here, as well. |
66 assert (!oopDesc::is_null(*p), "expected non-null ref"); |
36 HeapWord* result = eden()->par_allocate(word_size); |
67 oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
37 if (result != NULL) { |
68 assert (obj->is_oop(), "expected an oop while scanning weak refs"); |
38 return result; |
|
39 } |
69 } |
40 do { |
70 #endif // ASSERT |
41 HeapWord* old_limit = eden()->soft_end(); |
|
42 if (old_limit < eden()->end()) { |
|
43 // Tell the next generation we reached a limit. |
|
44 HeapWord* new_limit = |
|
45 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); |
|
46 if (new_limit != NULL) { |
|
47 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); |
|
48 } else { |
|
49 assert(eden()->soft_end() == eden()->end(), |
|
50 "invalid state after allocation_limit_reached returned null"); |
|
51 } |
|
52 } else { |
|
53 // The allocation failed and the soft limit is equal to the hard limit, |
|
54 // there are no reasons to do an attempt to allocate |
|
55 assert(old_limit == eden()->end(), "sanity check"); |
|
56 break; |
|
57 } |
|
58 // Try to allocate until succeeded or the soft limit can't be adjusted |
|
59 result = eden()->par_allocate(word_size); |
|
60 } while (result == NULL); |
|
61 |
71 |
62 // If the eden is full and the last collection bailed out, we are running |
72 _cl->do_oop_nv(p); |
63 // out of heap space, and we try to allocate the from-space, too. |
73 |
64 // allocate_from_space can't be inlined because that would introduce a |
74 // Optimized for Defnew generation if it's the youngest generation: |
65 // circular dependency at compile time. |
75 // we set a younger_gen card if we have an older->youngest |
66 if (result == NULL) { |
76 // generation pointer. |
67 result = allocate_from_space(word_size); |
77 oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
|
78 if (((HeapWord*)obj < _boundary) && Universe::heap()->is_in_reserved(p)) { |
|
79 _rs->inline_write_ref_field_gc(p, obj); |
68 } |
80 } |
69 return result; |
|
70 } |
81 } |
71 |
|
72 HeapWord* DefNewGeneration::par_allocate(size_t word_size, |
|
73 bool is_tlab) { |
|
74 return eden()->par_allocate(word_size); |
|
75 } |
|
76 |
|
77 void DefNewGeneration::gc_prologue(bool full) { |
|
78 // Ensure that _end and _soft_end are the same in eden space. |
|
79 eden()->set_soft_end(eden()->end()); |
|
80 } |
|
81 |
|
82 size_t DefNewGeneration::tlab_capacity() const { |
|
83 return eden()->capacity(); |
|
84 } |
|
85 |
|
86 size_t DefNewGeneration::unsafe_max_tlab_alloc() const { |
|
87 return unsafe_max_alloc_nogc(); |
|
88 } |
|