--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -991,8 +991,8 @@
int offset = -1;
switch (c->type()) {
+ case T_FLOAT: type = T_INT; // Float constants are stored by int store instructions.
case T_INT:
- case T_FLOAT:
case T_ADDRESS: {
LIR_Opr tmp = FrameMap::O7_opr;
int value = c->as_jint_bits();
@@ -1202,6 +1202,7 @@
__ stw(tmp, to.base(), to.disp());
break;
}
+ case T_ADDRESS:
case T_OBJECT: {
Register tmp = O7;
Address from = frame_map()->address_for_slot(src->single_stack_ix());
@@ -1355,7 +1356,6 @@
}
}
-
void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
bool wide, bool unaligned) {
@@ -2265,10 +2265,10 @@
op->obj()->as_register() == O0 &&
op->klass()->as_register() == G5, "must be");
if (op->init_check()) {
+ add_debug_info_for_null_check_here(op->stub()->info());
__ ldub(op->klass()->as_register(),
in_bytes(InstanceKlass::init_state_offset()),
op->tmp1()->as_register());
- add_debug_info_for_null_check_here(op->stub()->info());
__ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
__ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
__ delayed()->nop();
--- a/src/hotspot/cpu/sparc/c1_globals_sparc.hpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_globals_sparc.hpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,32 +32,32 @@
// (see c1_globals.hpp)
#ifndef TIERED
-define_pd_global(bool, BackgroundCompilation, true );
-define_pd_global(bool, CICompileOSR, true );
-define_pd_global(bool, InlineIntrinsics, true );
-define_pd_global(bool, PreferInterpreterNativeStubs, false);
-define_pd_global(bool, ProfileTraps, false);
-define_pd_global(bool, UseOnStackReplacement, true );
-define_pd_global(bool, TieredCompilation, false);
-define_pd_global(intx, CompileThreshold, 1000 ); // Design center runs on 1.3.1
+define_pd_global(bool, BackgroundCompilation, true );
+define_pd_global(bool, CICompileOSR, true );
+define_pd_global(bool, InlineIntrinsics, true );
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps, false);
+define_pd_global(bool, UseOnStackReplacement, true );
+define_pd_global(bool, TieredCompilation, false);
+define_pd_global(intx, CompileThreshold, 1000 ); // Design center runs on 1.3.1
-define_pd_global(intx, OnStackReplacePercentage, 1400 );
-define_pd_global(bool, UseTLAB, true );
-define_pd_global(bool, ProfileInterpreter, false);
-define_pd_global(intx, FreqInlineSize, 325 );
-define_pd_global(bool, ResizeTLAB, true );
-define_pd_global(intx, ReservedCodeCacheSize, 32*M );
-define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
-define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(intx, CodeCacheExpansionSize, 32*K );
-define_pd_global(uintx, CodeCacheMinBlockLength, 1);
-define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
-define_pd_global(size_t, MetaspaceSize, 12*M );
-define_pd_global(bool, NeverActAsServerClassMachine, true );
-define_pd_global(size_t, NewSizeThreadIncrease, 16*K );
-define_pd_global(uint64_t, MaxRAM, 1ULL*G);
-define_pd_global(intx, InitialCodeCacheSize, 160*K);
+define_pd_global(intx, OnStackReplacePercentage, 1400 );
+define_pd_global(bool, UseTLAB, true );
+define_pd_global(bool, ProfileInterpreter, false);
+define_pd_global(intx, FreqInlineSize, 325 );
+define_pd_global(bool, ResizeTLAB, true );
+define_pd_global(uintx, ReservedCodeCacheSize, 32*M );
+define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
+define_pd_global(uintx, CodeCacheExpansionSize, 32*K );
+define_pd_global(uintx, CodeCacheMinBlockLength, 1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(size_t, MetaspaceSize, 12*M );
+define_pd_global(bool, NeverActAsServerClassMachine, true );
+define_pd_global(size_t, NewSizeThreadIncrease, 16*K );
+define_pd_global(uint64_t, MaxRAM, 1ULL*G);
+define_pd_global(uintx, InitialCodeCacheSize, 160*K);
#endif // !TIERED
define_pd_global(bool, UseTypeProfile, false);
--- a/src/hotspot/cpu/sparc/c2_globals_sparc.hpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/sparc/c2_globals_sparc.hpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,12 +71,12 @@
// sequence of instructions to load a 64 bit pointer.
//
// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, ReservedCodeCacheSize, 48*M);
-define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
-define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(intx, CodeCacheExpansionSize, 64*K);
+define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
+define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
+define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
--- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,8 +107,8 @@
#ifdef ASSERT
// read the value once
- intptr_t data = method_holder->data();
- address destination = jump->jump_destination();
+ volatile intptr_t data = method_holder->data();
+ volatile address destination = jump->jump_destination();
assert(data == 0 || data == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(destination == (address)-1 || destination == entry,
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -837,6 +837,20 @@
case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
+ Register tmp = O5;
+ assert_different_registers(addr, count, tmp);
+ Label filtered;
+ // Is marking active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
+ } else {
+ guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
+ "Assumption");
+ __ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
+ }
+ // If marking is not active, skip the pre-barrier runtime call.
+ __ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
+
__ save_frame(0);
// Save the necessary global regs... will be used after.
if (addr->is_global()) {
@@ -856,6 +870,9 @@
__ mov(L1, count);
}
__ restore();
+
+ __ bind(filtered);
+ DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
}
break;
case BarrierSet::CardTableForRS:
--- a/src/hotspot/cpu/x86/assembler_x86.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1510,11 +1510,11 @@
}
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
- assert(entry != NULL, "call most probably wrong");
InstructionMark im(this);
emit_int8((unsigned char)0xE8);
intptr_t disp = entry - (pc() + sizeof(int32_t));
- assert(is_simm32(disp), "must be 32bit offset (call2)");
+ // Entry is NULL in case of a scratch emit.
+ assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
// Technically, should use call32_operand, but this format is
// implied by the fact that we're emitting a call instruction.
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1543,10 +1543,10 @@
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
if (op->init_check()) {
+ add_debug_info_for_null_check_here(op->stub()->info());
__ cmpb(Address(op->klass()->as_register(),
InstanceKlass::init_state_offset()),
InstanceKlass::fully_initialized);
- add_debug_info_for_null_check_here(op->stub()->info());
__ jcc(Assembler::notEqual, *op->stub()->entry());
}
__ allocate_object(op->obj()->as_register(),
@@ -2580,7 +2580,9 @@
move_regs(lreg, rax);
int idivl_offset = __ corrected_idivl(rreg);
- add_debug_info_for_div0(idivl_offset, info);
+ if (ImplicitDiv0Checks) {
+ add_debug_info_for_div0(idivl_offset, info);
+ }
if (code == lir_irem) {
move_regs(rdx, dreg); // result is in rdx
} else {
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -572,6 +572,8 @@
if (!ImplicitDiv0Checks) {
__ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
__ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
+ // Idiv/irem cannot trap (passing info would generate an assertion).
+ info = NULL;
}
LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
if (x->op() == Bytecodes::_irem) {
--- a/src/hotspot/cpu/x86/c1_globals_x86.hpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_globals_x86.hpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,15 +45,15 @@
define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, FreqInlineSize, 325 );
define_pd_global(size_t, NewSizeThreadIncrease, 4*K );
-define_pd_global(intx, InitialCodeCacheSize, 160*K);
-define_pd_global(intx, ReservedCodeCacheSize, 32*M );
-define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
-define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
+define_pd_global(uintx, InitialCodeCacheSize, 160*K);
+define_pd_global(uintx, ReservedCodeCacheSize, 32*M );
+define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
+define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
+define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
-define_pd_global(intx, CodeCacheExpansionSize, 32*K );
-define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
-define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(uintx, CodeCacheExpansionSize, 32*K );
+define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(size_t, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
--- a/src/hotspot/cpu/x86/c2_globals_x86.hpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/c2_globals_x86.hpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -48,26 +48,26 @@
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, LoopPercentProfileLimit, 30);
#ifdef AMD64
-define_pd_global(intx, INTPRESSURE, 13);
-define_pd_global(intx, FLOATPRESSURE, 14);
-define_pd_global(intx, InteriorEntryAlignment, 16);
-define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
-define_pd_global(intx, LoopUnrollLimit, 60);
+define_pd_global(intx, INTPRESSURE, 13);
+define_pd_global(intx, FLOATPRESSURE, 14);
+define_pd_global(intx, InteriorEntryAlignment, 16);
+define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
+define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, CodeCacheExpansionSize, 64*K);
+define_pd_global(uintx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
#else
-define_pd_global(intx, INTPRESSURE, 6);
-define_pd_global(intx, FLOATPRESSURE, 6);
-define_pd_global(intx, InteriorEntryAlignment, 4);
+define_pd_global(intx, INTPRESSURE, 6);
+define_pd_global(intx, FLOATPRESSURE, 6);
+define_pd_global(intx, InteriorEntryAlignment, 4);
define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
-define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
+define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
// InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(intx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(intx, CodeCacheExpansionSize, 32*K);
+define_pd_global(uintx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
@@ -84,10 +84,10 @@
define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(bool, IdealizeClearArrayNode, true);
-define_pd_global(intx, ReservedCodeCacheSize, 48*M);
-define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
-define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
+define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
+define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -160,8 +160,8 @@
#ifdef ASSERT
// read the value once
- intptr_t data = method_holder->data();
- address destination = jump->jump_destination();
+ volatile intptr_t data = method_holder->data();
+ volatile address destination = jump->jump_destination();
assert(data == 0 || data == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(destination == (address)-1 || destination == entry,
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -589,7 +589,7 @@
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
if (!TraceMethodHandles) return;
- BLOCK_COMMENT("trace_method_handle {");
+ BLOCK_COMMENT(err_msg("trace_method_handle %s {", adaptername));
__ enter();
__ andptr(rsp, -16); // align stack if needed for FPU state
__ pusha();
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -145,7 +145,7 @@
// We assume the caller already has the return address slot on the stack
// We push rbp twice in this sequence because we want the real rbp,
// to be under the return like a normal enter and we want to use pusha
- // We push by hand instead of pusing push
+ // We push by hand instead of using push.
__ enter();
__ pusha();
__ pushf();
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -679,10 +679,28 @@
case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!uninitialized_target) {
+ Register thread = rax;
+ Label filtered;
+ __ push(thread);
+ __ get_thread(thread);
+ Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+ SATBMarkQueue::byte_offset_of_active()));
+ // Is marking active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ cmpl(in_progress, 0);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ cmpb(in_progress, 0);
+ }
+ __ pop(thread);
+ __ jcc(Assembler::equal, filtered);
+
__ pusha(); // push registers
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
start, count);
__ popa();
+
+ __ bind(filtered);
}
break;
case BarrierSet::CardTableForRS:
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1201,6 +1201,18 @@
case BarrierSet::G1SATBCTLogging:
// With G1, don't generate the call if we statically know that the target is uninitialized
if (!dest_uninitialized) {
+ Label filtered;
+ Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+ SATBMarkQueue::byte_offset_of_active()));
+ // Is marking active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ cmpl(in_progress, 0);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ cmpb(in_progress, 0);
+ }
+ __ jcc(Assembler::equal, filtered);
+
__ pusha(); // push registers
if (count == c_rarg0) {
if (addr == c_rarg1) {
@@ -1216,6 +1228,8 @@
}
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ popa();
+
+ __ bind(filtered);
}
break;
case BarrierSet::CardTableForRS:
--- a/src/hotspot/cpu/x86/x86_32.ad Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/cpu/x86/x86_32.ad Fri Jan 19 17:01:34 2018 +0100
@@ -1,5 +1,5 @@
//
-// Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
-// archtecture.
+// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
--- a/src/hotspot/share/runtime/globals.hpp Mon Jan 22 12:04:12 2018 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Fri Jan 19 17:01:34 2018 +0100
@@ -78,21 +78,21 @@
define_pd_global(intx, CompileThreshold, 0);
-define_pd_global(intx, OnStackReplacePercentage, 0);
-define_pd_global(bool, ResizeTLAB, false);
-define_pd_global(intx, FreqInlineSize, 0);
+define_pd_global(intx, OnStackReplacePercentage, 0);
+define_pd_global(bool, ResizeTLAB, false);
+define_pd_global(intx, FreqInlineSize, 0);
define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
-define_pd_global(intx, InlineClassNatives, true);
-define_pd_global(intx, InlineUnsafeOps, true);
-define_pd_global(intx, InitialCodeCacheSize, 160*K);
-define_pd_global(intx, ReservedCodeCacheSize, 32*M);
-define_pd_global(intx, NonProfiledCodeHeapSize, 0);
-define_pd_global(intx, ProfiledCodeHeapSize, 0);
-define_pd_global(intx, NonNMethodCodeHeapSize, 32*M);
+define_pd_global(bool, InlineClassNatives, true);
+define_pd_global(bool, InlineUnsafeOps, true);
+define_pd_global(uintx, InitialCodeCacheSize, 160*K);
+define_pd_global(uintx, ReservedCodeCacheSize, 32*M);
+define_pd_global(uintx, NonProfiledCodeHeapSize, 0);
+define_pd_global(uintx, ProfiledCodeHeapSize, 0);
+define_pd_global(uintx, NonNMethodCodeHeapSize, 32*M);
-define_pd_global(intx, CodeCacheExpansionSize, 32*K);
-define_pd_global(intx, CodeCacheMinBlockLength, 1);
-define_pd_global(intx, CodeCacheMinimumUseSpace, 200*K);
+define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
+define_pd_global(uintx, CodeCacheMinBlockLength, 1);
+define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K);
define_pd_global(size_t, MetaspaceSize, ScaleForWordSize(4*M));
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t,MaxRAM, 1ULL*G);