   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
   // test to see if it has a finalizer or is malformed in some way
   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
   __ jcc(Assembler::notZero, slow_case);
 
-  //
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  //    (creates a new TLAB, etc.)
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
+  //
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
 
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
 
   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
@@ ... @@
 
   if (UseTLAB) {
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
-    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
+    __ jcc(Assembler::above, slow_case);
     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
     if (ZeroTLAB) {
       // the fields have been already cleared
       __ jmp(initialize_header);
     } else {
       // initialize both the header and fields
       __ jmp(initialize_object);
     }
-  }
-
-  // Allocation in the shared Eden, if allowed.
-  //
-  // rdx: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    ExternalAddress heap_top((address)Universe::heap()->top_addr());
-    ExternalAddress heap_end((address)Universe::heap()->end_addr());
-
-    Label retry;
-    __ bind(retry);
-    __ movptr(rax, heap_top);
-    __ lea(rbx, Address(rax, rdx, Address::times_1));
-    __ cmpptr(rbx, heap_end);
-    __ jcc(Assembler::above, slow_case);
-
-    // Compare rax, with the top addr, and if still equal, store the new
-    // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
-    // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
-    //
-    // rax,: object begin
-    // rbx,: object end
-    // rdx: instance size in bytes
-    __ locked_cmpxchgptr(rbx, heap_top);
-
-    // if someone beat us on the allocation, try again, otherwise continue
-    __ jcc(Assembler::notEqual, retry);
-
-    __ incr_allocated_bytes(thread, rdx, 0);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // rdx: instance size in bytes
+    if (allow_shared_alloc) {
+      ExternalAddress heap_top((address)Universe::heap()->top_addr());
+      ExternalAddress heap_end((address)Universe::heap()->end_addr());
+
+      Label retry;
+      __ bind(retry);
+      __ movptr(rax, heap_top);
+      __ lea(rbx, Address(rax, rdx, Address::times_1));
+      __ cmpptr(rbx, heap_end);
+      __ jcc(Assembler::above, slow_case);
+
+      // Compare rax, with the top addr, and if still equal, store the new
+      // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
+      // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
+      //
+      // rax,: object begin
+      // rbx,: object end
+      // rdx: instance size in bytes
+      __ locked_cmpxchgptr(rbx, heap_top);
+
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ jcc(Assembler::notEqual, retry);
+
+      __ incr_allocated_bytes(thread, rdx, 0);
+    }
+  }
+
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header. If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
     __ decrement(rdx, sizeof(oopDesc));
     __ jcc(Assembler::zero, initialize_header);
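The MacroAssembler idioms above can be hard to follow outside the assembler context, so here is a minimal, self-contained C++ sketch of the two allocation paths the new version of the code emits. This is not HotSpot code: the names Tlab, eden, eden_top, eden_end, tlab_allocate, and eden_allocate are hypothetical stand-ins, and the sketch assumes a single preallocated eden buffer. A TLAB allocation is a plain bump of a thread-local top pointer (no atomics, since the buffer belongs to one thread); a shared-eden allocation bumps a global top with a compare-and-swap and retries when another thread wins the race, which is what the locked_cmpxchgptr / retry loop above does.

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for a thread-local allocation buffer.
struct Tlab {
  char* top;   // next free byte in this thread's buffer
  char* end;   // one past the last usable byte
};

// Hypothetical shared eden, analogous to the heap's top_addr()/end_addr().
static char eden[1 << 20];
static std::atomic<char*> eden_top{eden};
static char*              eden_end = eden + sizeof(eden);

// TLAB path: a plain bump of the thread-local top pointer, the analogue of the
// tlab_top_offset()/tlab_end_offset() sequence. No CAS is needed because the
// buffer is owned by exactly one thread.
void* tlab_allocate(Tlab& tlab, std::size_t size_in_bytes) {
  char* obj     = tlab.top;
  char* new_top = obj + size_in_bytes;
  if (new_top > tlab.end) {
    return nullptr;              // would jump to slow_case in the stub
  }
  tlab.top = new_top;
  return obj;
}

// Shared-eden path: the analogue of the locked_cmpxchgptr(rbx, heap_top) retry
// loop. If another thread advanced the top first, the CAS fails and we retry
// from the value that beat us.
void* eden_allocate(std::size_t size_in_bytes) {
  char* obj = eden_top.load();
  for (;;) {
    char* new_top = obj + size_in_bytes;
    if (new_top > eden_end) {
      return nullptr;            // heap end reached: slow_case
    }
    // On failure, compare_exchange_weak reloads the current top into 'obj'.
    if (eden_top.compare_exchange_weak(obj, new_top)) {
      return obj;
    }
  }
}

The behavioral difference the diff introduces is also visible in this shape: previously a TLAB overflow fell through to allocate_shared when inline contiguous allocation was supported, whereas now it jumps straight to slow_case, so the shared-eden bump path is only emitted for the case where UseTLAB is off.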