/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
53 inline HeapWord* |
53 inline HeapWord* |
54 G1OffsetTableContigSpace::block_start_const(const void* p) const { |
54 G1OffsetTableContigSpace::block_start_const(const void* p) const { |
55 return _offsets.block_start_const(p); |
55 return _offsets.block_start_const(p); |
56 } |
56 } |
57 |
57 |
|
// Resets this region's "next" marking bookkeeping at the start of a
// marking cycle: clears the per-cycle marked-byte count and records the
// current top() as NTAMS (next top-at-mark-start).
// NOTE(review): presumably called at a safepoint, so the three updates
// need not be atomic as a group — confirm against the caller.
inline void HeapRegion::note_start_of_marking() {
  init_top_at_conc_mark_count();
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}
|
63 |
|
// Folds the completed cycle's "next" marking data into the "prev"
// fields and clears the "next" byte count for the following cycle.
// Statement order matters: _next_marked_bytes must be copied into
// _prev_marked_bytes before it is zeroed.
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  // The marked bytes cannot exceed the size of [bottom, PTAMS).
  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}
|
73 |
|
74 inline void HeapRegion::note_start_of_copying(bool during_initial_mark) { |
|
75 if (during_initial_mark) { |
|
76 if (is_survivor()) { |
|
77 assert(false, "should not allocate survivors during IM"); |
|
78 } else { |
|
79 // During initial-mark we'll explicitly mark any objects on old |
|
80 // regions that are pointed to by roots. Given that explicit |
|
81 // marks only make sense under NTAMS it'd be nice if we could |
|
82 // check that condition if we wanted to. Given that we don't |
|
83 // know where the top of this region will end up, we simply set |
|
84 // NTAMS to the end of the region so all marks will be below |
|
85 // NTAMS. We'll set it to the actual top when we retire this region. |
|
86 _next_top_at_mark_start = end(); |
|
87 } |
|
88 } else { |
|
89 if (is_survivor()) { |
|
90 // This is how we always allocate survivors. |
|
91 assert(_next_top_at_mark_start == bottom(), "invariant"); |
|
92 } else { |
|
93 // We could have re-used this old region as to-space over a |
|
94 // couple of GCs since the start of the concurrent marking |
|
95 // cycle. This means that [bottom,NTAMS) will contain objects |
|
96 // copied up to and including initial-mark and [NTAMS, top) |
|
97 // will contain objects copied during the concurrent marking cycle. |
|
98 assert(top() >= _next_top_at_mark_start, "invariant"); |
|
99 } |
|
100 } |
|
101 } |
|
102 |
|
103 inline void HeapRegion::note_end_of_copying(bool during_initial_mark) { |
|
104 if (during_initial_mark) { |
|
105 if (is_survivor()) { |
|
106 assert(false, "should not allocate survivors during IM"); |
|
107 } else { |
|
108 // See the comment for note_start_of_copying() for the details |
|
109 // on this. |
|
110 assert(_next_top_at_mark_start == end(), "pre-condition"); |
|
111 _next_top_at_mark_start = top(); |
|
112 } |
|
113 } else { |
|
114 if (is_survivor()) { |
|
115 // This is how we always allocate survivors. |
|
116 assert(_next_top_at_mark_start == bottom(), "invariant"); |
|
117 } else { |
|
118 // See the comment for note_start_of_copying() for the details |
|
119 // on this. |
|
120 assert(top() >= _next_top_at_mark_start, "invariant"); |
|
121 } |
|
122 } |
|
123 } |
|
124 |
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP