692 // NarrowOopHeapBaseMin + heap_size < 4Gb |
692 // NarrowOopHeapBaseMin + heap_size < 4Gb |
693 // ZeroBased - Use zero based compressed oops with encoding when |
693 // ZeroBased - Use zero based compressed oops with encoding when |
694 // NarrowOopHeapBaseMin + heap_size < 32Gb |
694 // NarrowOopHeapBaseMin + heap_size < 32Gb |
695 // HeapBased - Use compressed oops with heap base + encoding. |
695 // HeapBased - Use compressed oops with heap base + encoding. |
696 |
696 |
697 // 4Gb |
|
698 static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1); |
|
699 // 32Gb |
|
700 // OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes; |
|
701 |
|
702 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) { |
|
703 assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be"); |
|
704 assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be"); |
|
705 assert(is_size_aligned(heap_size, alignment), "Must be"); |
|
706 |
|
707 uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment); |
|
708 |
|
709 size_t base = 0; |
|
710 #ifdef _LP64 |
|
711 if (UseCompressedOops) { |
|
712 assert(mode == UnscaledNarrowOop || |
|
713 mode == ZeroBasedNarrowOop || |
|
714 mode == HeapBasedNarrowOop, "mode is invalid"); |
|
715 const size_t total_size = heap_size + heap_base_min_address_aligned; |
|
716 // Return specified base for the first request. |
|
717 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) { |
|
718 base = heap_base_min_address_aligned; |
|
719 |
|
720 // If the total size is small enough to allow UnscaledNarrowOop then |
|
721 // just use UnscaledNarrowOop. |
|
722 } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { |
|
723 if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) && |
|
724 (Universe::narrow_oop_shift() == 0)) { |
|
725 // Use 32-bits oops without encoding and |
|
726 // place heap's top on the 4Gb boundary |
|
727 base = (UnscaledOopHeapMax - heap_size); |
|
728 } else { |
|
729 // Can't reserve with NarrowOopShift == 0 |
|
730 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); |
|
731 |
|
732 if (mode == UnscaledNarrowOop || |
|
733 mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) { |
|
734 |
|
735 // Use zero based compressed oops with encoding and |
|
736 // place heap's top on the 32Gb boundary in case |
|
737 // total_size > 4Gb or failed to reserve below 4Gb. |
|
738 uint64_t heap_top = OopEncodingHeapMax; |
|
739 |
|
740 // For small heaps, save some space for compressed class pointer |
|
741 // space so it can be decoded with no base. |
|
742 if (UseCompressedClassPointers && !UseSharedSpaces && |
|
743 OopEncodingHeapMax <= 32*G) { |
|
744 |
|
745 uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment); |
|
746 assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space, |
|
747 alignment), "difference must be aligned too"); |
|
748 uint64_t new_top = OopEncodingHeapMax-class_space; |
|
749 |
|
750 if (total_size <= new_top) { |
|
751 heap_top = new_top; |
|
752 } |
|
753 } |
|
754 |
|
755 // Align base to the adjusted top of the heap |
|
756 base = heap_top - heap_size; |
|
757 } |
|
758 } |
|
759 } else { |
|
760 // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or |
|
761 // HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb. |
|
762 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); |
|
763 } |
|
764 |
|
765 // Set narrow_oop_base and narrow_oop_use_implicit_null_checks |
|
766 // used in ReservedHeapSpace() constructors. |
|
767 // The final values will be set in initialize_heap() below. |
|
768 if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) { |
|
769 // Use zero based compressed oops |
|
770 Universe::set_narrow_oop_base(NULL); |
|
771 // Don't need guard page for implicit checks in indexed |
|
772 // addressing mode with zero based Compressed Oops. |
|
773 Universe::set_narrow_oop_use_implicit_null_checks(true); |
|
774 } else { |
|
775 // Set to a non-NULL value so the ReservedSpace ctor computes |
|
776 // the correct no-access prefix. |
|
777 // The final value will be set in initialize_heap() below. |
|
778 Universe::set_narrow_oop_base((address)UnscaledOopHeapMax); |
|
779 #if defined(_WIN64) || defined(AIX) |
|
780 if (UseLargePages) { |
|
781 // Cannot allocate guard pages for implicit checks in indexed |
|
782 // addressing mode when large pages are specified on windows. |
|
783 Universe::set_narrow_oop_use_implicit_null_checks(false); |
|
784 } |
|
785 #endif // _WIN64 |
|
786 } |
|
787 } |
|
788 #endif |
|
789 |
|
790 assert(is_ptr_aligned((char*)base, alignment), "Must be"); |
|
791 return (char*)base; // also return NULL (don't care) for 32-bit VM |
|
792 } |
|
793 |
|
794 jint Universe::initialize_heap() { |
697 jint Universe::initialize_heap() { |
795 |
698 |
796 if (UseParallelGC) { |
699 if (UseParallelGC) { |
797 #if INCLUDE_ALL_GCS |
700 #if INCLUDE_ALL_GCS |
798 Universe::_collectedHeap = new ParallelScavengeHeap(); |
701 Universe::_collectedHeap = new ParallelScavengeHeap(); |
842 // This also makes implicit null checking work, because the |
745 // This also makes implicit null checking work, because the |
843 // memory+1 page below heap_base needs to cause a signal. |
746 // memory+1 page below heap_base needs to cause a signal. |
844 // See needs_explicit_null_check. |
747 // See needs_explicit_null_check. |
845 // Only set the heap base for compressed oops because it indicates |
748 // Only set the heap base for compressed oops because it indicates |
846 // compressed oops for pstack code. |
749 // compressed oops for pstack code. |
847 if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) { |
750 if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) { |
848 // Can't reserve heap below 32Gb. |
751 // Didn't reserve heap below 4Gb. Must shift. |
849 // keep the Universe::narrow_oop_base() set in Universe::reserve_heap() |
|
850 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); |
752 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); |
851 #ifdef AIX |
753 } |
852 // There is no protected page before the heap. This assures all oops |
754 if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) { |
853 // are decoded so that NULL is preserved, so this page will not be accessed. |
755 // Did reserve heap below 32Gb. Can use base == 0; |
854 Universe::set_narrow_oop_use_implicit_null_checks(false); |
|
855 #endif |
|
856 } else { |
|
857 Universe::set_narrow_oop_base(0); |
756 Universe::set_narrow_oop_base(0); |
858 #ifdef _WIN64 |
|
859 if (!Universe::narrow_oop_use_implicit_null_checks()) { |
|
860 // Don't need guard page for implicit checks in indexed addressing |
|
861 // mode with zero based Compressed Oops. |
|
862 Universe::set_narrow_oop_use_implicit_null_checks(true); |
|
863 } |
|
864 #endif // _WIN64 |
|
865 if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) { |
|
866 // Can't reserve heap below 4Gb. |
|
867 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); |
|
868 } else { |
|
869 Universe::set_narrow_oop_shift(0); |
|
870 } |
|
871 } |
757 } |
872 |
758 |
873 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base()); |
759 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base()); |
874 |
760 |
875 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { |
761 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { |
876 Universe::print_compressed_oops_mode(); |
762 Universe::print_compressed_oops_mode(); |
877 } |
763 } |
|
764 |
|
765 // Tell tests in which mode we run. |
|
766 Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode", |
|
767 narrow_oop_mode_to_string(narrow_oop_mode()), |
|
768 false)); |
878 } |
769 } |
879 // Universe::narrow_oop_base() is one page below the heap. |
770 // Universe::narrow_oop_base() is one page below the heap. |
880 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - |
771 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - |
881 os::vm_page_size()) || |
772 os::vm_page_size()) || |
882 Universe::narrow_oop_base() == NULL, "invalid value"); |
773 Universe::narrow_oop_base() == NULL, "invalid value"); |
901 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M); |
792 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M); |
902 |
793 |
903 tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode())); |
794 tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode())); |
904 |
795 |
905 if (Universe::narrow_oop_base() != 0) { |
796 if (Universe::narrow_oop_base() != 0) { |
906 tty->print(":" PTR_FORMAT, Universe::narrow_oop_base()); |
797 tty->print(": " PTR_FORMAT, Universe::narrow_oop_base()); |
907 } |
798 } |
908 |
799 |
909 if (Universe::narrow_oop_shift() != 0) { |
800 if (Universe::narrow_oop_shift() != 0) { |
910 tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift()); |
801 tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift()); |
911 } |
802 } |
912 |
803 |
|
804 if (!Universe::narrow_oop_use_implicit_null_checks()) { |
|
805 tty->print(", no protected page in front of the heap"); |
|
806 } |
|
807 |
913 tty->cr(); |
808 tty->cr(); |
914 tty->cr(); |
809 tty->cr(); |
915 } |
810 } |
916 |
811 |
917 // Reserve the Java heap, which is now the same for all GCs. |
|
918 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { |
812 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) { |
|
813 |
919 assert(alignment <= Arguments::conservative_max_heap_alignment(), |
814 assert(alignment <= Arguments::conservative_max_heap_alignment(), |
920 err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT, |
815 err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT, |
921 alignment, Arguments::conservative_max_heap_alignment())); |
816 alignment, Arguments::conservative_max_heap_alignment())); |
|
817 |
922 size_t total_reserved = align_size_up(heap_size, alignment); |
818 size_t total_reserved = align_size_up(heap_size, alignment); |
923 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), |
819 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())), |
924 "heap size is too big for compressed oops"); |
820 "heap size is too big for compressed oops"); |
925 |
821 |
926 bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size()); |
822 bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size()); |
927 assert(!UseLargePages |
823 assert(!UseLargePages |
928 || UseParallelGC |
824 || UseParallelGC |
929 || use_large_pages, "Wrong alignment to use large pages"); |
825 || use_large_pages, "Wrong alignment to use large pages"); |
930 |
826 |
931 char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop); |
827 // Now create the space. |
932 |
828 ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages); |
933 ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr); |
829 |
934 |
830 if (total_rs.is_reserved()) { |
935 if (UseCompressedOops) { |
831 assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0), |
936 if (addr != NULL && !total_rs.is_reserved()) { |
832 "must be exactly of required size and alignment"); |
937 // Failed to reserve at specified address - the requested memory |
833 // We are good. |
938 // region is taken already, for example, by 'java' launcher. |
834 |
939 // Try again to reserver heap higher. |
835 if (UseCompressedOops) { |
940 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop); |
836 // Universe::initialize_heap() will reset this to NULL if unscaled |
941 |
837 // or zero-based narrow oops are actually used. |
942 ReservedHeapSpace total_rs0(total_reserved, alignment, |
838 // Else heap start and base MUST differ, so that NULL can be encoded nonambigous. |
943 use_large_pages, addr); |
839 Universe::set_narrow_oop_base((address)total_rs.compressed_oop_base()); |
944 |
840 } |
945 if (addr != NULL && !total_rs0.is_reserved()) { |
841 |
946 // Failed to reserve at specified address again - give up. |
|
947 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop); |
|
948 assert(addr == NULL, ""); |
|
949 |
|
950 ReservedHeapSpace total_rs1(total_reserved, alignment, |
|
951 use_large_pages, addr); |
|
952 total_rs = total_rs1; |
|
953 } else { |
|
954 total_rs = total_rs0; |
|
955 } |
|
956 } |
|
957 } |
|
958 |
|
959 if (!total_rs.is_reserved()) { |
|
960 vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K)); |
|
961 return total_rs; |
842 return total_rs; |
962 } |
843 } |
963 |
844 |
964 if (UseCompressedOops) { |
845 vm_exit_during_initialization( |
965 // Universe::initialize_heap() will reset this to NULL if unscaled |
846 err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", |
966 // or zero-based narrow oops are actually used. |
847 total_reserved/K)); |
967 address base = (address)(total_rs.base() - os::vm_page_size()); |
848 |
968 Universe::set_narrow_oop_base(base); |
849 // satisfy compiler |
969 } |
850 ShouldNotReachHere(); |
970 return total_rs; |
851 return ReservedHeapSpace(0, 0, false); |
971 } |
852 } |
972 |
853 |
973 |
854 |
// It's the caller's responsibility to ensure glitch-freedom
// (if required).