// may have been configured, can be read more accurately from proc fs etc.
#ifndef MAX_PID
#define MAX_PID INT_MAX
#endif
// Parenthesize the macro argument so expressions such as
// IS_VALID_PID(a ? b : c) expand with the intended precedence.
#define IS_VALID_PID(p) ((p) > 0 && (p) < MAX_PID)
|
55 |
|
// Some BSD-derived platforms only define MAP_ANON; alias it so the rest of
// this file can use the common MAP_ANONYMOUS name unconditionally.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

// Evaluate a VM check macro (assert/guarantee) and append the strerror text
// and symbolic name for the current errno to the failure message. errno is
// captured first so the check machinery cannot clobber it.
#define check_with_errno(check_type, cond, msg)                             \
  do {                                                                      \
    int err = errno;                                                        \
    check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
               os::errno_name(err));                                        \
  } while (false)

#define assert_with_errno(cond, msg) check_with_errno(assert, cond, msg)
#define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
54 |
69 |
55 // Check core dump limit and report possible place where core can be found |
70 // Check core dump limit and report possible place where core can be found |
56 void os::check_dump_limit(char* buffer, size_t bufferSize) { |
71 void os::check_dump_limit(char* buffer, size_t bufferSize) { |
57 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { |
72 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { |
58 jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line"); |
73 jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line"); |
143 void os::wait_for_keypress_at_exit(void) { |
158 void os::wait_for_keypress_at_exit(void) { |
144 // don't do anything on posix platforms |
159 // don't do anything on posix platforms |
145 return; |
160 return; |
146 } |
161 } |
147 |
162 |
|
163 int os::create_file_for_heap(const char* dir) { |
|
164 |
|
165 const char name_template[] = "/jvmheap.XXXXXX"; |
|
166 |
|
167 char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal); |
|
168 if (fullname == NULL) { |
|
169 vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno))); |
|
170 return -1; |
|
171 } |
|
172 (void)strncpy(fullname, dir, strlen(dir)+1); |
|
173 (void)strncat(fullname, name_template, strlen(name_template)); |
|
174 |
|
175 os::native_path(fullname); |
|
176 |
|
177 sigset_t set, oldset; |
|
178 int ret = sigfillset(&set); |
|
179 assert_with_errno(ret == 0, "sigfillset returned error"); |
|
180 |
|
181 // set the file creation mask. |
|
182 mode_t file_mode = S_IRUSR | S_IWUSR; |
|
183 |
|
184 // create a new file. |
|
185 int fd = mkstemp(fullname); |
|
186 |
|
187 if (fd < 0) { |
|
188 warning("Could not create file for heap with template %s", fullname); |
|
189 os::free(fullname); |
|
190 return -1; |
|
191 } |
|
192 |
|
193 // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted. |
|
194 ret = unlink(fullname); |
|
195 assert_with_errno(ret == 0, "unlink returned error"); |
|
196 |
|
197 os::free(fullname); |
|
198 return fd; |
|
199 } |
|
200 |
|
201 static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) { |
|
202 char * addr; |
|
203 int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS; |
|
204 if (requested_addr != NULL) { |
|
205 assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size"); |
|
206 flags |= MAP_FIXED; |
|
207 } |
|
208 |
|
209 // Map reserved/uncommitted pages PROT_NONE so we fail early if we |
|
210 // touch an uncommitted page. Otherwise, the read/write might |
|
211 // succeed if we have enough swap space to back the physical page. |
|
212 addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, |
|
213 flags, -1, 0); |
|
214 |
|
215 if (addr != MAP_FAILED) { |
|
216 MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC); |
|
217 return addr; |
|
218 } |
|
219 return NULL; |
|
220 } |
|
221 |
|
// Best-effort preallocation of 'len' bytes of disk space for 'fd'.
// Returns 0 on success, non-zero on failure.
static int util_posix_fallocate(int fd, off_t offset, off_t len) {
#ifdef __APPLE__
  // macOS has no posix_fallocate(); emulate it via fcntl(F_PREALLOCATE).
  // NOTE(review): this branch ignores 'offset' and sizes the file to 'len'
  // only — callers currently always pass offset == 0; confirm before reuse.
  fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
  // First we try to get a continuous chunk of disk space
  if (fcntl(fd, F_PREALLOCATE, &store) == -1) {
    // Maybe we are too fragmented, try to allocate non-continuous range
    store.fst_flags = F_ALLOCATEALL;
    if (fcntl(fd, F_PREALLOCATE, &store) == -1) {
      return -1;
    }
  }
  return ftruncate(fd, len);
#else
  return posix_fallocate(fd, offset, len);
#endif
}
|
240 |
|
241 // Map the given address range to the provided file descriptor. |
|
242 char* os::map_memory_to_file(char* base, size_t size, int fd) { |
|
243 assert(fd != -1, "File descriptor is not valid"); |
|
244 |
|
245 // allocate space for the file |
|
246 if (util_posix_fallocate(fd, 0, (off_t)size) != 0) { |
|
247 vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory.")); |
|
248 return NULL; |
|
249 } |
|
250 |
|
251 int prot = PROT_READ | PROT_WRITE; |
|
252 int flags = MAP_SHARED; |
|
253 if (base != NULL) { |
|
254 flags |= MAP_FIXED; |
|
255 } |
|
256 char* addr = (char*)mmap(base, size, prot, flags, fd, 0); |
|
257 |
|
258 if (addr == MAP_FAILED) { |
|
259 return NULL; |
|
260 } |
|
261 if (base != NULL && addr != base) { |
|
262 if (!os::release_memory(addr, size)) { |
|
263 warning("Could not release memory on unsuccessful file mapping"); |
|
264 } |
|
265 return NULL; |
|
266 } |
|
267 return addr; |
|
268 } |
|
269 |
|
270 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) { |
|
271 assert(fd != -1, "File descriptor is not valid"); |
|
272 assert(base != NULL, "Base cannot be NULL"); |
|
273 |
|
274 return map_memory_to_file(base, size, fd); |
|
275 } |
|
276 |
148 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED, |
277 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED, |
149 // so on posix, unmap the section at the start and at the end of the chunk that we mapped |
278 // so on posix, unmap the section at the start and at the end of the chunk that we mapped |
150 // rather than unmapping and remapping the whole chunk to get requested alignment. |
279 // rather than unmapping and remapping the whole chunk to get requested alignment. |
151 char* os::reserve_memory_aligned(size_t size, size_t alignment) { |
280 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) { |
152 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, |
281 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, |
153 "Alignment must be a multiple of allocation granularity (page size)"); |
282 "Alignment must be a multiple of allocation granularity (page size)"); |
154 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); |
283 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); |
155 |
284 |
156 size_t extra_size = size + alignment; |
285 size_t extra_size = size + alignment; |
157 assert(extra_size >= size, "overflow, size is too large to allow alignment"); |
286 assert(extra_size >= size, "overflow, size is too large to allow alignment"); |
158 |
287 |
159 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); |
288 char* extra_base; |
|
289 if (file_desc != -1) { |
|
290 // For file mapping, we do not call os:reserve_memory(extra_size, NULL, alignment, file_desc) because |
|
291 // we need to deal with shrinking of the file space later when we release extra memory after alignment. |
|
292 // We also cannot called os:reserve_memory() with file_desc set to -1 because on aix we might get SHM memory. |
|
293 // So here to call a helper function while reserve memory for us. After we have a aligned base, |
|
294 // we will replace anonymous mapping with file mapping. |
|
295 extra_base = reserve_mmapped_memory(extra_size, NULL); |
|
296 if (extra_base != NULL) { |
|
297 MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC); |
|
298 } |
|
299 } else { |
|
300 extra_base = os::reserve_memory(extra_size, NULL, alignment); |
|
301 } |
160 |
302 |
161 if (extra_base == NULL) { |
303 if (extra_base == NULL) { |
162 return NULL; |
304 return NULL; |
163 } |
305 } |
164 |
306 |