author:      hseigel
date:        Mon, 05 Mar 2018 10:29:23 -0500
changeset:   49340:4e82736053ae
parent:      48007:ab3959df2115
child:       49373:47b5652f2928
permissions: -rw-r--r--
/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
#define SHARE_VM_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void    increment_count();
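  // Concurrency note (informal): entries are expected to be added while holding a
  // lock, with _count bumped only after the pc/handler slots are filled, so that
  // lock-free readers which load count() first never observe a half-filled slot.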

public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. find_pc_desc_internal has
  // returned wrong results. C++ compiler (namely xlC12) may duplicate
  // C++ field accesses if the elements are not volatile.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

class PcDescSearch {
private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const       { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const   { return _upper; }
};

class PcDescContainer VALUE_OBJ_CLASS_SPEC {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

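  // Note: find_pc_desc() below first checks the most recently found PcDesc
  // (last_pc_desc()) and only falls back to the full lookup in
  // find_pc_desc_internal() when that single-entry fast path misses.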
  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from CodeCache.
                     // Have to use far call instructions to call it from code in CodeCache.
  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method*   _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at this location described by
  // this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at this location described by this offset.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache * volatile _exception_cache;

  virtual void flush() = 0;
protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

public:
  virtual bool is_compiled() const                { return true; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  enum { not_installed = -1, // in construction, only the owner doing the construction is
                             // allowed to advance state
         in_use        = 0,  // executable nmethod
         not_used      = 1,  // not entrant, but revivable
         not_entrant   = 2,  // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie        = 3,  // no activations exist, nmethod is ready for purge
         unloaded      = 4   // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
  };
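
  // Note (informal): a not_used nmethod can be revived with make_entrant(),
  // whereas a not_entrant one can only proceed toward zombie/unloaded once
  // its activations are gone.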

  virtual bool  is_in_use() const = 0;
  virtual int   comp_level() const = 0;
  virtual int   compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int osr_entry_bci() const = 0;
  Method* method() const                          { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool  is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
  void  mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }
  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void set_original_pc(const frame* fr, address pc) = 0;

  // Exception cache support
  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache *ec);
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
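  // Note (informal): handler_for_exception_and_pc() only consults the cached
  // ExceptionCache chain; on a miss the caller is expected to compute the
  // handler and record it with add_handler_for_exception_and_pc().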

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  bool is_deopt_entry(address pc);
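  // Note (informal): when a frame is deoptimized its return pc is patched to
  // deopt_handler_begin() (or deopt_mh_handler_begin() for MethodHandle call
  // sites) and the original pc is saved so get_deopt_original_pc() can find it.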

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }
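  // Note (informal): the inline cache (receiver klass) check precedes the
  // verified entry point, so any address in [code_begin(), verified_entry_point())
  // is treated as belonging to that check.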

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  virtual address continuation_for_implicit_exception(address pc) { return NULL; }

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support
  void cleanup_inline_caches(bool clean_all = false);
  virtual void clear_inline_caches();
  void clear_ic_stubs();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  virtual bool is_evol_dependent_on(Klass* dependee) = 0;
  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(void f(Metadata*)) = 0;

  // GC support

  void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
  CompiledMethod* unloading_next()              { return _unloading_next; }

  void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);

  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();
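  // Note (informal): the per-nmethod clock appears to be compared against
  // global_unloading_clock() so GC code can tell whether this nmethod has
  // already been cleaned/unloaded in the current unloading cycle.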
|
380 |
||
381 |
protected: |
|
382 |
virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0; |
|
383 |
#if INCLUDE_JVMCI |
|
384 |
virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) = 0; |
|
385 |
#endif |
|
386 |
||
387 |
private: |
|
388 |
// GC support to help figure out if an nmethod has been |
|
389 |
// cleaned/unloaded by the current GC. |
|
390 |
static unsigned char _global_unloading_clock; |
|
391 |
||
392 |
volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod |
|
393 |
||
394 |
PcDesc* find_pc_desc(address pc, bool approximate) { |
|
395 |
return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end())); |
|
396 |
} |
|
397 |
||
398 |
protected: |
|
399 |
union { |
|
400 |
// Used by G1 to chain nmethods. |
|
401 |
CompiledMethod* _unloading_next; |
|
402 |
// Used by non-G1 GCs to chain nmethods. |
|
403 |
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods |
|
404 |
}; |
|
405 |
}; |
|
406 |
||
407 |
#endif //SHARE_VM_CODE_COMPILEDMETHOD_HPP |