author:       stefank
date:         Fri, 14 Feb 2014 09:29:56 +0100
changeset:    22883:5378704451dc
parent:       22248:34f19df648a2
child:        22872:b6902ee5bc8d
permissions:  -rw-r--r--

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p)           { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size)    throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)             { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}
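
// Illustrative sketch (hypothetical names, only the call shape is taken from this file):
// metadata classes reach the placement operator new above through their own factory
// methods, passing the owning ClassLoaderData and a size in words.
//
//   MyMetadata* MyMetadata::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
//     size_t words = compute_size_in_words(length);  // hypothetical size helper
//     return new (loader_data, words, /*read_only*/ false, MetaspaceObj::MyMetadataType, THREAD)
//         MyMetadata(length);
//   }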

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return ClassLoaderDataGraph::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {"INTPTR_FORMAT"}", this);
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}
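
// Illustrative sketch (mtInternal is just an example tag): ResourceObj subclasses choose
// their backing storage at the allocation site.  GrowableArray is one such subclass.
//
//   ResourceMark rm;
//   // Resource-area allocation: reclaimed automatically when 'rm' goes out of scope.
//   GrowableArray<int>* scratch = new GrowableArray<int>(16);
//   // C-heap allocation: routed through the C_HEAP case above, must be deleted explicitly.
//   GrowableArray<int>* persistent = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(16, true);
//   ...
//   delete persistent;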

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " PTR_FORMAT, res));
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}
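
// Descriptive summary of the encoding used above:
//  - _allocation_t[0] stores ~(object address + allocation_type), so the low
//    allocation_mask bits carry the type while the rest ties the word to 'this'
//    (checked by get_allocation_type()).
//  - _allocation_t[1] stores (address of _allocation_t[1]) + type as a verification
//    value, set only when operator new() ran; is_type_set() uses it to tell real
//    heap/arena allocations apart from stack or embedded objects whose fields start
//    out as uninitialized garbage.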

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep current _allocation_t value;
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};
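
// Note (illustrative, values not from this file): each pool caches chunks of exactly one
// byte size - one of the fixed Chunk payload sizes plus the aligned Chunk header - so
// Chunk::operator new below can route the four standard lengths to the matching pool and
// fall back to os::malloc for any other length, e.g.:
//
//   // hypothetical non-standard length: bypasses the pools entirely
//   Chunk* c = new (AllocFailStrategy::RETURN_NULL, 12345) Chunk(12345);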

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size to equal sizeof(Chunk); if it is not the proper size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default: os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // work around a rare race condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}
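
// Illustrative sketch (mtInternal is just an example tag): an Arena is itself C-heap
// allocated with an explicit memory type, hands out bump-pointer storage via Amalloc,
// and releases every chunk at once when destroyed.
//
//   Arena* ar  = new (mtInternal) Arena();   // binds the arena to a memory type
//   void*  buf = ar->Amalloc(64);            // carved from the current chunk, or grow()
//   ...
//   delete ar;                               // ~Arena() chops all chunks, then FreeHeap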

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
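
// Roughly how callers reach grow(): the inline fast path (declared in allocation.hpp;
// paraphrased sketch, not the verbatim header code) bumps _hwm while the current chunk
// has room and only falls back to grow() on overflow, mirroring internal_malloc_4()
// further below:
//
//   void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
//     x = ARENA_ALIGN(x);                  // keep arena allocations aligned
//     if (_hwm + x > _max) {
//       return grow(x, alloc_failmode);    // slow path: chain a new Chunk
//     } else {
//       char* old = _hwm;
//       _hwm += x;                         // fast path: bump pointer in current chunk
//       return old;
//     }
//   }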
|
572 |
||
573 |
||
574 |
||
575 |
// Reallocate storage in Arena. |
|
14083
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
576 |
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) { |
1 | 577 |
assert(new_size >= 0, "bad size"); |
578 |
if (new_size == 0) return NULL; |
|
579 |
#ifdef ASSERT |
|
580 |
if (UseMallocOnly) { |
|
581 |
// always allocate a new object (otherwise we'll free this one twice) |
|
14083
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
582 |
char* copy = (char*)Amalloc(new_size, alloc_failmode); |
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
583 |
if (copy == NULL) { |
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
584 |
return NULL; |
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
585 |
} |
1 | 586 |
size_t n = MIN2(old_size, new_size); |
587 |
if (n > 0) memcpy(copy, old_ptr, n); |
|
588 |
Afree(old_ptr,old_size); // Mostly done to keep stats accurate |
|
589 |
return copy; |
|
590 |
} |
|
591 |
#endif |
|
592 |
char *c_old = (char*)old_ptr; // Handy name |
|
593 |
// Stupid fast special case |
|
594 |
if( new_size <= old_size ) { // Shrink in-place |
|
595 |
if( c_old+old_size == _hwm) // Attempt to free the excess bytes |
|
596 |
_hwm = c_old+new_size; // Adjust hwm |
|
597 |
return c_old; |
|
598 |
} |
|
599 |
||
600 |
// make sure that new_size is legal |
|
601 |
size_t corrected_new_size = ARENA_ALIGN(new_size); |
|
602 |
||
603 |
// See if we can resize in-place |
|
604 |
if( (c_old+old_size == _hwm) && // Adjusting recent thing |
|
605 |
(c_old+corrected_new_size <= _max) ) { // Still fits where it sits |
|
606 |
_hwm = c_old+corrected_new_size; // Adjust hwm |
|
607 |
return c_old; // Return old pointer |
|
608 |
} |
|
609 |
||
610 |
// Oops, got to relocate guts |
|
14083
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
611 |
void *new_ptr = Amalloc(new_size, alloc_failmode); |
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
612 |
if (new_ptr == NULL) { |
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
613 |
return NULL; |
103054a71a30
8000617: It should be possible to allocate memory without the VM dying.
nloodin
parents:
13728
diff
changeset
|
614 |
} |
1 | 615 |
memcpy( new_ptr, c_old, old_size ); |
616 |
Afree(c_old,old_size); // Mostly done to keep stats accurate |
|
617 |
return new_ptr; |
|
618 |
} |
|
619 |
||
620 |
||
621 |
// Determine if pointer belongs to this Arena or not. |
|
622 |
bool Arena::contains( const void *ptr ) const { |
|
623 |
#ifdef ASSERT |
|
624 |
if (UseMallocOnly) { |
|
625 |
// really slow, but not easy to make fast |
|
626 |
if (_chunk == NULL) return false; |
|
627 |
char** bottom = (char**)_chunk->bottom(); |
|
628 |
for (char** p = (char**)_hwm - 1; p >= bottom; p--) { |
|
629 |
if (*p == ptr) return true; |
|
630 |
} |
|
631 |
for (Chunk *c = _first; c != NULL; c = c->next()) { |
|
632 |
if (c == _chunk) continue; // current chunk has been processed |
|
633 |
char** bottom = (char**)c->bottom(); |
|
634 |
for (char** p = (char**)c->top() - 1; p >= bottom; p--) { |
|
635 |
if (*p == ptr) return true; |
|
636 |
} |
|
637 |
} |
|
638 |
return false; |
|
639 |
} |
|
640 |
#endif |
|
641 |
if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm ) |
|
642 |
return true; // Check for in this chunk |
|
643 |
for (Chunk *c = _first; c; c = c->next()) { |
|
644 |
if (c == _chunk) continue; // current chunk has been processed |
|
645 |
if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) { |
|
646 |
return true; // Check for every chunk in Arena |
|
647 |
} |
|
648 |
} |
|
649 |
return false; // Not in any Chunk, so not in Arena |
|
650 |
} |
|
651 |
||
652 |
||
653 |
#ifdef ASSERT |
|
654 |
void* Arena::malloc(size_t size) { |
|
655 |
assert(UseMallocOnly, "shouldn't call"); |
|
656 |
// use malloc, but save pointer in res. area for later freeing |
|
657 |
char** save = (char**)internal_malloc_4(sizeof(char*)); |
|
13195 | 658 |
return (*save = (char*)os::malloc(size, mtChunk)); |
1 | 659 |
} |
660 |
||
661 |
// for debugging with UseMallocOnly |
|
662 |
void* Arena::internal_malloc_4(size_t x) { |
|
663 |
assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); |
|
8481
42a79b703814
6878713: Verifier heap corruption, relating to backward jsrs
kamg
parents:
8320
diff
changeset
|
664 |
check_for_overflow(x, "Arena::internal_malloc_4"); |
1 | 665 |
if (_hwm + x > _max) { |
666 |
return grow(x); |
|
667 |
} else { |
|
668 |
char *old = _hwm; |
|
669 |
_hwm += x; |
|
670 |
return old; |
|
671 |
} |
|
672 |
} |
|
673 |
#endif |
|
674 |
||
675 |
||
676 |
//-------------------------------------------------------------------------------------- |
|
677 |
// Non-product code |
|
678 |
||
679 |
#ifndef PRODUCT |
|
680 |
// The global operator new should never be called since it will usually indicate |
|
681 |
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit |
|
682 |
// that they're allocated on the C heap. |
|
683 |
// Commented out in product version to avoid conflicts with third-party C++ native code. |
|
17376
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
684 |
// On certain platforms, such as Mac OS X (Darwin), in debug version, new is being called |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
685 |
// from jdk source and causing data corruption. Such as |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
686 |
// Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
687 |
// define ALLOW_OPERATOR_NEW_USAGE for platform on which global operator new allowed. |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
688 |
// |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
689 |
#ifndef ALLOW_OPERATOR_NEW_USAGE |
19696
bd5a0131bde1
8021954: VM SIGSEGV during classloading on MacOS; hs_err_pid file produced
coleenp
parents:
18686
diff
changeset
|
690 |
void* operator new(size_t size) throw() { |
17376
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
691 |
assert(false, "Should not call global operator new"); |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
692 |
return 0; |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
693 |
} |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
694 |
|
19696
bd5a0131bde1
8021954: VM SIGSEGV during classloading on MacOS; hs_err_pid file produced
coleenp
parents:
18686
diff
changeset
|
695 |
void* operator new [](size_t size) throw() { |
17376
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
696 |
assert(false, "Should not call global operator new[]"); |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
697 |
return 0; |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
698 |
} |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
699 |
|
19696
bd5a0131bde1
8021954: VM SIGSEGV during classloading on MacOS; hs_err_pid file produced
coleenp
parents:
18686
diff
changeset
|
700 |
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() { |
17376
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
701 |
assert(false, "Should not call global operator new"); |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
702 |
return 0; |
1 | 703 |
} |
17376
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
704 |
|
19696
bd5a0131bde1
8021954: VM SIGSEGV during classloading on MacOS; hs_err_pid file produced
coleenp
parents:
18686
diff
changeset
|
705 |
void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() { |
17376
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
706 |
assert(false, "Should not call global operator new[]"); |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
707 |
return 0; |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
708 |
} |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
709 |
|
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
710 |
void operator delete(void* p) { |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
711 |
assert(false, "Should not call global delete"); |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
712 |
} |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
713 |
|
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
714 |
void operator delete [](void* p) { |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
715 |
assert(false, "Should not call global delete []"); |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
716 |
} |
4ee999c3c007
8012902: remove use of global operator new - take 2
minqi
parents:
17087
diff
changeset
|
717 |
#endif // ALLOW_OPERATOR_NEW_USAGE |
1 | 718 |
|
719 |
void AllocatedObj::print() const { print_on(tty); } |
|
720 |
void AllocatedObj::print_value() const { print_value_on(tty); } |
|
721 |
||
722 |
void AllocatedObj::print_on(outputStream* st) const { |
|
723 |
st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this); |
|
724 |
} |
|
725 |
||
726 |
void AllocatedObj::print_value_on(outputStream* st) const { |
|
727 |
st->print("AllocatedObj(" INTPTR_FORMAT ")", this); |
|
728 |
} |
|
729 |
||
8320 | 730 |
julong Arena::_bytes_allocated = 0; |
731 |
||
732 |
void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); } |
|
1 | 733 |
|
734 |
AllocStats::AllocStats() { |
|
8320 | 735 |
start_mallocs = os::num_mallocs; |
736 |
start_frees = os::num_frees; |
|
1 | 737 |
start_malloc_bytes = os::alloc_bytes; |
8320 | 738 |
start_mfree_bytes = os::free_bytes; |
739 |
start_res_bytes = Arena::_bytes_allocated; |
|
1 | 740 |
} |
741 |
||
8320 | 742 |
julong AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; } |
743 |
julong AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; } |
|
744 |
julong AllocStats::num_frees() { return os::num_frees - start_frees; } |
|
745 |
julong AllocStats::free_bytes() { return os::free_bytes - start_mfree_bytes; } |
|
746 |
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; } |
|
1 | 747 |
void AllocStats::print() { |
8320 | 748 |
tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), " |
749 |
UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc", |
|
750 |
num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M); |
|
1 | 751 |
} |
752 |
||
753 |
||
754 |
// debugging code |
|
755 |
inline void Arena::free_all(char** start, char** end) { |
|
756 |
for (char** p = start; p < end; p++) if (*p) os::free(*p); |
|
757 |
} |
|
758 |
||
759 |
void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) { |
|
760 |
assert(UseMallocOnly, "should not call"); |
|
761 |
// free all objects malloced since resource mark was created; resource area |
|
762 |
// contains their addresses |
|
763 |
if (chunk->next()) { |
|
764 |
// this chunk is full, and some others too |
|
765 |
for (Chunk* c = chunk->next(); c != NULL; c = c->next()) { |
|
766 |
char* top = c->top(); |
|
767 |
if (c->next() == NULL) { |
|
768 |
top = hwm2; // last junk is only used up to hwm2 |
|
769 |
assert(c->contains(hwm2), "bad hwm2"); |
|
770 |
} |
|
771 |
free_all((char**)c->bottom(), (char**)top); |
|
772 |
} |
|
773 |
assert(chunk->contains(hwm), "bad hwm"); |
|
774 |
assert(chunk->contains(max), "bad max"); |
|
775 |
free_all((char**)hwm, (char**)max); |
|
776 |
} else { |
|
777 |
// this chunk was partially used |
|
778 |
assert(chunk->contains(hwm), "bad hwm"); |
|
779 |
assert(chunk->contains(hwm2), "bad hwm2"); |
|
780 |
free_all((char**)hwm, (char**)hwm2); |
|
781 |
} |
|
782 |
} |
|
783 |
||
784 |
||
785 |
ReallocMark::ReallocMark() { |
|
786 |
#ifdef ASSERT |
|
787 |
Thread *thread = ThreadLocalStorage::get_thread_slow(); |
|
788 |
_nesting = thread->resource_area()->nesting(); |
|
789 |
#endif |
|
790 |
} |
|
791 |
||
792 |
void ReallocMark::check() { |
|
793 |
#ifdef ASSERT |
|
794 |
if (_nesting != Thread::current()->resource_area()->nesting()) { |
|
795 |
fatal("allocation bug: array could grow within nested ResourceMark"); |
|
796 |
} |
|
797 |
#endif |
|
798 |
} |
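
// Illustrative sketch (hypothetical class and members): ReallocMark guards containers
// whose backing storage lives in a resource area and may be reallocated later; calling
// check() before the reallocation catches growth inside a nested ResourceMark.
//
//   void MyResourceBuffer::grow_to(size_t new_size) {
//     _realloc_mark.check();   // _realloc_mark is a ReallocMark member (hypothetical)
//     _data = (char*)Thread::current()->resource_area()->Arealloc(_data, _size, new_size);
//     _size = new_size;
//   }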

#endif // Non-product