
  // test to see if it has a finalizer or is malformed in some way
  // Klass::_lh_instance_slow_path_bit is really a bit mask, not bit number
  __ tbnz(Rsize, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);
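  // (Rsize holds the instance layout helper here: the allocation size in
  // bytes, with the low bit doubling as the slow-path flag, hence a single
  // bit test suffices.)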

  // Allocate the instance:
  //  If TLAB is enabled:
  //    Try to allocate in the TLAB.
  //    If fails, go to the slow path.
  //  Else If inline contiguous allocations are enabled:
  //    Try to allocate in eden.
  //    If fails due to heap end, go to slow path.
  //
  //  If TLAB is enabled OR inline contiguous is enabled:
  //    Initialize the allocation.
  //    Exit.
  //
  //  Go to slow path.

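  // In pseudocode, the TLAB fast path below is roughly (a sketch, not the
  // generated code itself):
  //
  //   obj     = thread->tlab_top;
  //   new_top = obj + size;
  //   if (new_top > thread->tlab_end) goto slow_case;
  //   thread->tlab_top = new_top;
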
  if (UseTLAB) {
    const Register Rtlab_top = R1_tmp;
    const Register Rtlab_end = R2_tmp;
    assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end);

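    // Robj receives the current tlab_top, i.e. the new object's address;
    // Rtlab_top holds the candidate new top after adding Rsize.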
    __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset()));
    __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_end_offset())));
    __ add(Rtlab_top, Robj, Rsize);
    __ cmp(Rtlab_top, Rtlab_end);
    __ b(slow_case, hi);
    __ str(Rtlab_top, Address(Rthread, JavaThread::tlab_top_offset()));
    if (ZeroTLAB) {
      // the fields have been already cleared
      __ b(initialize_header);
    } else {
      // initialize both the header and fields
      __ b(initialize_object);
    }
  } else {
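    // Note: UseTLAB is evaluated at code-generation time, so the interpreter
    // emits either the TLAB fast path above or the eden path below, not both.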
    // Allocation in the shared Eden, if allowed.
    if (allow_shared_alloc) {
      const Register Rheap_top_addr = R2_tmp;
      const Register Rheap_top = R5_tmp;
      const Register Rheap_end = Rtemp;
      assert_different_registers(Robj, Rklass, Rsize, Rheap_top_addr, Rheap_top, Rheap_end, LR);

      // heap_end now (re)loaded in the loop since also used as a scratch register in the CAS
      __ ldr_literal(Rheap_top_addr, Lheap_top_addr);

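      // Fast path for eden: atomically bump the shared heap top by Rsize;
      // racing allocators are handled by the retry loop below.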
      Label retry;
      __ bind(retry);

#ifdef AARCH64
      __ ldxr(Robj, Rheap_top_addr);
#else
      __ ldr(Robj, Address(Rheap_top_addr));
#endif // AARCH64

      __ ldr(Rheap_end, Address(Rheap_top_addr, (intptr_t)Universe::heap()->end_addr()-(intptr_t)Universe::heap()->top_addr()));
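      // The Address offset above is the byte distance between the heap's top
      // and end fields, so this reloads *end_addr() without a second literal.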
      __ add(Rheap_top, Robj, Rsize);
      __ cmp(Rheap_top, Rheap_end);
      __ b(slow_case, hi);

      // Update heap top atomically.
      // If someone beats us on the allocation, try again, otherwise continue.
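      // AArch64 relies on the ldxr/stxr exclusive pair started above; 32-bit
      // ARM re-checks with a CAS on Robj and loops back if another thread won.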
#ifdef AARCH64
      __ stxr(Rtemp2, Rheap_top, Rheap_top_addr);
      __ cbnz_w(Rtemp2, retry);
#else
      __ atomic_cas_bool(Robj, Rheap_top, Rheap_top_addr, 0, Rheap_end/*scratched*/);
      __ b(retry, ne);
#endif // AARCH64

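      // Only direct eden allocations need explicit accounting here; TLAB
      // allocations are credited in bulk when the TLAB is retired.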
      __ incr_allocated_bytes(Rsize, Rtemp);
    }
  }

  if (UseTLAB || allow_shared_alloc) {
    const Register Rzero0 = R1_tmp;
    const Register Rzero1 = R2_tmp;