   50  #endif
   51  #ifdef TARGET_OS_FAMILY_bsd
   52  # include "os_bsd.inline.hpp"
   53  #endif
   54
-  55  void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }
+  55  void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
   56  void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
-  57  void* StackObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; }
+  57  void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
   58  void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
   59
-  60  void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }
+  60  void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
   61  void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
-  62  void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; }
+  62  void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
   63  void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
   64
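A note for context (editor's addition, not part of the diff): the pattern throughout this change is that every user-defined operator new which can return NULL gets an empty exception specification. Without throw(), the compiler may assume a new-expression never yields NULL and drop callers' null checks; with throw(), a NULL return from operator new makes the whole new-expression evaluate to NULL without running the constructor. A minimal stand-alone sketch of that contract, using a hypothetical Example class rather than HotSpot code:

    #include <cstddef>
    #include <cstdio>

    struct Example {
      // Declared throw(), so returning NULL is well-defined for callers.
      void* operator new(std::size_t size) throw() { return 0; }
      void  operator delete(void* p) { }
      Example() { std::printf("constructor runs only if allocation succeeded\n"); }
    };

    int main() {
      Example* e = new Example();   // operator new returns NULL ...
      if (e == NULL) {              // ... so this check is meaningful and is kept.
        std::printf("allocation failed\n");
      }
      return 0;
    }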
   65  void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
   66                                   size_t word_size, bool read_only,
-  67                                   MetaspaceObj::Type type, TRAPS) {
+  67                                   MetaspaceObj::Type type, TRAPS) throw() {
   68    // Klass has its own operator new
   69    return Metaspace::allocate(loader_data, word_size, read_only,
   70                               type, CHECK_NULL);
   71  }
   72
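A sketch for readers unfamiliar with the TRAPS convention used above (editor's addition, not part of the diff): callers reach this operator new through placement-new syntax, passing the class loader data, a word size they computed themselves, and the current THREAD; CHECK_NULL makes the allocation return NULL with an exception pending instead of throwing. The MyMetadata type below is hypothetical; only the placement-new shape is taken from HotSpot:

    // Hypothetical metadata type; word_size is assumed to be computed by the caller.
    class MyMetadata : public MetaspaceObj {
     public:
      static MyMetadata* create(ClassLoaderData* loader_data, size_t word_size, TRAPS) {
        // Resolves to MetaspaceObj::operator new(size, loader_data, word_size,
        // read_only, type, THREAD) shown above; returns NULL on allocation failure.
        return new (loader_data, word_size, /* read_only */ false,
                    MetaspaceObj::SymbolType, THREAD) MyMetadata();
      }
    };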
   81
   82  void MetaspaceObj::print_address_on(outputStream* st) const {
   83    st->print(" {"INTPTR_FORMAT"}", this);
   84  }
   85
-  86  void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
+  86  void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
   87    address res;
   88    switch (type) {
   89     case C_HEAP:
   90      res = (address)AllocateHeap(size, flags, CALLER_PC);
   91      DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
   98      ShouldNotReachHere();
   99    }
  100    return res;
  101  }
  102
- 103  void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
+ 103  void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  104    return (address) operator new(size, type, flags);
  105  }
  106
  107  void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
- 108                                  allocation_type type, MEMFLAGS flags) {
+ 108                                  allocation_type type, MEMFLAGS flags) throw() {
  109    // Should only be called with std::nothrow; use the other operator new() otherwise.
  110    address res;
  111    switch (type) {
  112     case C_HEAP:
  113      res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
  122    }
  123    return res;
  124  }
  125
  126  void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
- 127                                     allocation_type type, MEMFLAGS flags) {
+ 127                                     allocation_type type, MEMFLAGS flags) throw() {
  128    return (address)operator new(size, nothrow_constant, type, flags);
  129  }
  130
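For context (editor's sketch, not part of the diff): the two-argument placement form above is how a ResourceObj subclass is placed explicitly on the C heap instead of the default resource area. Foo is hypothetical; ResourceObj::C_HEAP and the mtInternal NMT flag are the existing HotSpot names:

    class Foo : public ResourceObj {
     public:
      int _value;
      Foo() : _value(0) { }
    };

    void foo_example() {
      Foo* a = new Foo();                                    // default: current thread's resource area
      Foo* b = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C heap, tracked under mtInternal
      delete b;                                              // only the C-heap copy may be deleted
      // 'a' is reclaimed when the enclosing ResourceMark goes out of scope.
    }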
  131  void ResourceObj::operator delete(void* p) {
  132    assert(((ResourceObj *)p)->allocated_on_C_heap(),
  371  };
  372
  373  //--------------------------------------------------------------------------------------
  374  // Chunk implementation
  375
- 376  void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
+ 376  void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  377    // requested_size is equal to sizeof(Chunk) but in order for the arena
  378    // allocations to come out aligned as expected the size must be aligned
  379    // to expected arena alignment.
  380    // We expect requested_size == sizeof(Chunk); if sizeof(Chunk) is not the proper size, we must align it.
  381    assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
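For context (editor's sketch, not part of the diff): the assert relies on ARENA_ALIGN() rounding the requested size up to the arena's expected alignment. A free-standing round-up helper with the same shape, shown only to make the comment concrete; the real macro and its alignment constant are defined alongside the arena code:

    #include <stddef.h>
    #include <assert.h>

    // Round sz up to a power-of-two alignment, the same idea as ARENA_ALIGN().
    static inline size_t align_up_sketch(size_t sz, size_t alignment) {
      return (sz + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      assert(align_up_sketch(40, 16) == 48);  // e.g. a 40-byte header with 16-byte arena alignment
      return 0;
    }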
  479  Arena::~Arena() {
  480    destruct_contents();
  481    NOT_PRODUCT(Atomic::dec(&_instance_count);)
  482  }
  483
- 484  void* Arena::operator new(size_t size) {
+ 484  void* Arena::operator new(size_t size) throw() {
  485    assert(false, "Use dynamic memory type binding");
  486    return NULL;
  487  }
  488
- 489  void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
+ 489  void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  490    assert(false, "Use dynamic memory type binding");
  491    return NULL;
  492  }
  493
  494  // dynamic memory type binding
- 495  void* Arena::operator new(size_t size, MEMFLAGS flags) {
+ 495  void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
  496  #ifdef ASSERT
  497    void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  498    if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  499    return p;
  500  #else
  501    return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
  502  #endif
  503  }
  504
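For context (editor's sketch, not part of the diff): "dynamic memory type binding" means the NMT memory flag is supplied at the new-expression instead of being baked into the class, which is why the flag-less overloads above only assert. The usual idiom looks roughly like this (mtCompiler is one of the existing MEMFLAGS values; the destructor is the Arena::~Arena() shown at line 479):

    void arena_example() {
      // The flag travels with the allocation: the arena's own header block is
      // tagged mtCompiler|otArena by Arena::operator new(size_t, MEMFLAGS).
      Arena* scratch = new (mtCompiler) Arena();
      // ... allocate from the arena ...
      delete scratch;
    }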
- 505  void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
+ 505  void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
  506  #ifdef ASSERT
  507    void* p = os::malloc(size, flags|otArena, CALLER_PC);
  508    if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  509    return p;
  510  #else
  689  // from jdk source and causing data corruption. Such as
  690  // Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair
  691  // Define ALLOW_OPERATOR_NEW_USAGE on platforms where the global operator new is allowed.
  692  //
  693  #ifndef ALLOW_OPERATOR_NEW_USAGE
- 694  void* operator new(size_t size){
+ 694  void* operator new(size_t size) throw() {
  695    assert(false, "Should not call global operator new");
  696    return 0;
  697  }
  698
- 699  void* operator new [](size_t size){
+ 699  void* operator new [](size_t size) throw() {
  700    assert(false, "Should not call global operator new[]");
  701    return 0;
  702  }
  703
- 704  void* operator new(size_t size, const std::nothrow_t& nothrow_constant){
+ 704  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  705    assert(false, "Should not call global operator new");
  706    return 0;
  707  }
  708
- 709  void* operator new [](size_t size, std::nothrow_t& nothrow_constant){
+ 709  void* operator new [](size_t size, std::nothrow_t& nothrow_constant) throw() {
  710    assert(false, "Should not call global operator new[]");
  711    return 0;
  712  }
  713
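For context (editor's sketch, not part of the diff): the guarded definitions above exist to catch HotSpot code that forgets to declare an allocation strategy. Classes are expected to derive from one of the allocation base classes (CHeapObj, StackObj, ResourceObj, and so on); anything else that reaches the global operator new asserts in debug builds. RightObj and WrongObj are hypothetical, while CHeapObj and mtInternal are the existing HotSpot names:

    class WrongObj {                                // no allocation base class
     public:
      int _v;
    };

    class RightObj : public CHeapObj<mtInternal> {  // C-heap allocated, NMT-tagged mtInternal
     public:
      int _v;
    };

    void guard_example() {
      RightObj* r = new RightObj();    // resolves to CHeapObj<mtInternal>::operator new
      delete r;
      // WrongObj* w = new WrongObj(); // debug build: asserts "Should not call global operator new"
    }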
  714  void operator delete(void* p) {