src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
changeset 49010 9010e596f391
parent 47216 71c04702a3d5
child 49018 f7caa2aecc86
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp	Fri Feb 16 11:22:53 2018 -0800
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp	Fri Feb 16 14:23:30 2018 -0800
@@ -994,8 +994,8 @@
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
 
-        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
-            UseTLAB && FastTLABRefill) {
+        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && UseTLAB
+            && Universe::heap()->supports_inline_contig_alloc()) {
           Label slow_path;
           Register obj_size = rcx;
           Register t1       = rbx;
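
Background for the new guard: the eden fallback below is only legal when the
active collector can hand out eden memory by bumping a shared top pointer. A
simplified sketch of the CollectedHeap hooks this relies on (names match
HotSpot of this era, bodies illustrative only):

    class HeapWord;  // opaque heap word, as in HotSpot

    class CollectedHeap {
     public:
      // True if stub/compiled code may allocate straight out of eden by
      // bumping *top_addr() toward *end_addr(); false by default, so
      // collectors without a contiguous eden fall through to slow_path.
      virtual bool supports_inline_contig_alloc() const { return false; }
      virtual HeapWord* volatile* top_addr() const { return nullptr; }
      virtual HeapWord** end_addr() const { return nullptr; }
    };
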
@@ -1030,21 +1030,8 @@
           // if we got here then the TLAB allocation failed, so try
           // allocating directly from eden.
           Label retry_tlab, try_eden;
-          const Register thread =
-            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
-
-          __ bind(retry_tlab);
-
-          // get the instance size (size is positive so movl is fine for 64bit)
-          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
-
-          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
-
-          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
-          __ verify_oop(obj);
-          __ pop(rbx);
-          __ pop(rdi);
-          __ ret(0);
+          const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+          NOT_LP64(__ get_thread(thread));
 
           __ bind(try_eden);
           // get the instance size (size is positive so movl is fine for 64bit)
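
The replacement lines lean on HotSpot's word-size selection macros; a minimal
sketch of how they expand (simplified from globalDefinitions.hpp):

    #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
    #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
    #endif

So the two added lines compile down to

    // 64-bit: the JavaThread* is pinned in r15 for the whole VM
    const Register thread = r15_thread;

    // 32-bit: no pinned register, so materialize the thread into rdi
    const Register thread = rdi;
    __ get_thread(thread);
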
@@ -1128,24 +1115,13 @@
         }
 #endif // ASSERT
 
-        if (UseTLAB && FastTLABRefill) {
+        // If we got here, the TLAB allocation failed, so try allocating from
+        // eden if inline contiguous allocations are supported.
+        if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
           Register arr_size = rsi;
           Register t1       = rcx;  // must be rcx for use as shift count
           Register t2       = rdi;
           Label slow_path;
-          assert_different_registers(length, klass, obj, arr_size, t1, t2);
-
-          // check that array length is small enough for fast path.
-          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
-          __ jcc(Assembler::above, slow_path);
-
-          // if we got here then the TLAB allocation failed, so try
-          // refilling the TLAB or allocating directly from eden.
-          Label retry_tlab, try_eden;
-          const Register thread =
-            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
-
-          __ bind(retry_tlab);
 
           // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
           // since size is positive movl does right thing on 64bit
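
Restating the computation named in the comment above as plain C++ (a sketch
only; 16 and 0xFF stand in for Klass::_lh_header_size_shift and
Klass::_lh_header_size_mask, and the literal mask for
MinObjAlignmentInBytesMask):

    #include <cstddef>
    #include <cstdint>

    static size_t array_alloc_size(uint32_t layout_helper, uint32_t length) {
      // element payload: length << (layout_helper & 0x1F)
      size_t arr_size = (size_t)length << (layout_helper & 0x1F);
      // the header size is encoded in bits [16..23] of the layout helper
      arr_size += (layout_helper >> 16) & 0xFF;
      // align up to the minimum object alignment (8 bytes -> mask 7)
      const size_t align_mask = 7;
      return (arr_size + align_mask) & ~align_mask;
    }
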
@@ -1160,36 +1136,11 @@
           __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
           __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
 
-          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
-
-          __ initialize_header(obj, klass, length, t1, t2);
-          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
-          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
-          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
-          __ andptr(t1, Klass::_lh_header_size_mask);
-          __ subptr(arr_size, t1);  // body length
-          __ addptr(t1, obj);       // body start
-          if (!ZeroTLAB) {
-            __ initialize_body(t1, arr_size, 0, t2);
-          }
-          __ verify_oop(obj);
-          __ ret(0);
+          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
 
-          __ bind(try_eden);
-          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
-          // since size is positive movl does right thing on 64bit
-          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
-          // since size is positive movl does right thing on 64bit
-          __ movl(arr_size, length);
-          assert(t1 == rcx, "fixed register usage");
-          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
-          __ shrptr(t1, Klass::_lh_header_size_shift);
-          __ andptr(t1, Klass::_lh_header_size_mask);
-          __ addptr(arr_size, t1);
-          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
-          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
-
-          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
+          // Using t2 for non 64-bit.
+          const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread);
+          NOT_LP64(__ get_thread(thread));
           __ incr_allocated_bytes(thread, arr_size, 0);
 
           __ initialize_header(obj, klass, length, t1, t2);
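
For context, eden_allocate() emits the inline equivalent of a CAS-based bump
allocation against the shared eden top, and incr_allocated_bytes() then bumps
the per-thread allocated-bytes counter, which is why this path needs the
thread register at all. A hedged C++ sketch of the allocation scheme (names
and types illustrative, not HotSpot's code):

    #include <atomic>
    #include <cstddef>

    static char* eden_try_allocate(std::atomic<char*>& top, char* end,
                                   size_t size_in_bytes) {
      char* old_top = top.load(std::memory_order_relaxed);
      for (;;) {
        char* new_top = old_top + size_in_bytes;
        if (new_top > end) {
          return nullptr;  // eden exhausted -> the stub jumps to slow_path
        }
        // CAS keeps two allocating threads from being handed the same chunk;
        // on failure old_top is refreshed and the bump is retried.
        if (top.compare_exchange_weak(old_top, new_top)) {
          return old_top;  // start address of the new object
        }
      }
    }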