--- a/make/hotspot/symbols/symbols-unix Fri Mar 09 00:28:50 2018 +0100
+++ b/make/hotspot/symbols/symbols-unix Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
JVM_ActiveProcessorCount
JVM_ArrayCopy
JVM_AssertionStatusDirectives
+JVM_BeforeHalt
JVM_CallStackWalk
JVM_Clone
JVM_ConstantPoolGetClassAt
--- a/make/mapfiles/libjava/mapfile-vers Fri Mar 09 00:28:50 2018 +0100
+++ b/make/mapfiles/libjava/mapfile-vers Fri Mar 02 21:00:12 2018 +0100
@@ -206,6 +206,7 @@
Java_java_lang_Runtime_totalMemory;
Java_java_lang_Runtime_availableProcessors;
Java_java_lang_SecurityManager_getClassContext;
+ Java_java_lang_Shutdown_beforeHalt;
Java_java_lang_Shutdown_halt0;
Java_java_lang_StackTraceElement_initStackTraceElement;
Java_java_lang_StackTraceElement_initStackTraceElements;
--- a/src/hotspot/cpu/aarch64/aarch64.ad Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, Red Hat Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -995,6 +995,7 @@
source_hpp %{
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "opto/addnode.hpp"
@@ -4438,8 +4439,8 @@
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
- %}
-
+ %}
+
// The only difference between aarch64_enc_cmpxchg and
// aarch64_enc_cmpxchg_acq is that we use load-acquire in the
@@ -5845,7 +5846,7 @@
%{
// Get base of card map
predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
- (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+ (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
match(ConP);
op_cost(0);
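
Context for the predicate change above: the card table state (byte_map_base and friends) moves off CardTableModRefBS onto a separate CardTable object, so call sites now go through card_table(). A minimal standalone model of the new shape, with illustrative types rather than the real HotSpot classes:

    #include <cstdint>

    struct CardTable {
      int8_t* _byte_map_base;
      int8_t* byte_map_base() const { return _byte_map_base; }
      static const int card_shift = 9;             // 512-byte cards, as in HotSpot
      static int8_t dirty_card_val() { return 0; }
    };

    struct CardTableModRefBS {
      CardTable* _card_table;
      CardTable* card_table() const { return _card_table; }
    };

    // Old: ((CardTableModRefBS*)bs)->byte_map_base
    // New: ((CardTableModRefBS*)bs)->card_table()->byte_map_base()
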
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -2048,21 +2048,21 @@
starti;
f(0,31), f((int)T & 1, 30);
f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
- f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0);
+ f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
}
void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
int imm, int op1, int op2) {
starti;
f(0,31), f((int)T & 1, 30);
f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
- f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0);
+ f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
}
void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
Register Xm, int op1, int op2) {
starti;
f(0,31), f((int)T & 1, 30);
f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
- f((int)T >> 1, 11, 10), rf(Xn, 5), rf(Vt, 0);
+ f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
}
void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2) {
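
The rf to srf switch above concerns AArch64 register number 31, which decodes as zr in data-operand positions but as sp when it is the base register of a load/store; encoding Xn with the stack-register accessor lets these vector load/store forms take sp as a base. A toy decode illustrating the convention (field layout assumed per the ARMv8 base-register field at bits 9:5):

    #include <cstdint>

    // The 5-bit base-register field of a load/store sits at bits 9:5.
    // Value 31 names sp in this position; the same value in a data field
    // (e.g. Rt at bits 4:0) would name zr instead.
    const char* base_reg_name(uint32_t insn) {
      uint32_t rn = (insn >> 5) & 31u;
      return rn == 31u ? "sp" : "x-register";
    }
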
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,6 +30,8 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
@@ -42,6 +44,7 @@
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -1162,10 +1165,6 @@
// arg0: store_address
Address store_addr(rfp, 2*BytesPerWord);
- BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
Label done;
Label runtime;
@@ -1186,13 +1185,13 @@
assert_different_registers(card_offset, byte_map_base, rscratch1);
f.load_argument(0, card_offset);
- __ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);
+ __ lsr(card_offset, card_offset, CardTable::card_shift);
__ load_byte_map_base(byte_map_base);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
- __ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
__ br(Assembler::EQ, done);
- assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
+ assert((int)CardTable::dirty_card_val() == 0, "must be 0");
__ membar(Assembler::StoreLoad);
__ ldrb(rscratch1, Address(byte_map_base, card_offset));
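
For reference, the rewritten barrier above computes the card as byte_map_base + (addr >> card_shift), compares against the young-card value, and only re-reads and dirties after a StoreLoad barrier. The addressing arithmetic in standalone form, assuming HotSpot's usual 512-byte cards:

    #include <cstdint>

    int8_t* card_for(int8_t* byte_map_base, uintptr_t store_addr) {
      const int card_shift = 9;                    // 2^9 = 512-byte cards
      return byte_map_base + (store_addr >> card_shift);
    }
    // Two stores 512 bytes apart map to adjacent card bytes; dirty == 0,
    // which is why the code can assert dirty_card_val() == 0 and store zero.
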
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -29,8 +29,9 @@
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
-
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
@@ -42,10 +43,12 @@
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -1794,18 +1797,63 @@
void MacroAssembler::membar(Membar_mask_bits order_constraint) {
address prev = pc() - NativeMembar::instruction_size;
- if (prev == code()->last_membar()) {
+ address last = code()->last_insn();
+ if (last != NULL && nativeInstruction_at(last)->is_Membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev);
// We are merging two memory barrier instructions. On AArch64 we
// can do this simply by ORing them together.
bar->set_kind(bar->get_kind() | order_constraint);
BLOCK_COMMENT("merged membar");
} else {
- code()->set_last_membar(pc());
+ code()->set_last_insn(pc());
dmb(Assembler::barrier(order_constraint));
}
}
+bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
+ if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
+ merge_ldst(rt, adr, size_in_bytes, is_store);
+ code()->clear_last_insn();
+ return true;
+ } else {
+ assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte load/store is supported.");
+ const unsigned mask = size_in_bytes - 1;
+ if (adr.getMode() == Address::base_plus_offset &&
+ (adr.offset() & mask) == 0) { // only supports base_plus_offset.
+ code()->set_last_insn(pc());
+ }
+ return false;
+ }
+}
+
+void MacroAssembler::ldr(Register Rx, const Address &adr) {
+ // We always try to merge two adjacent loads into one ldp.
+ if (!try_merge_ldst(Rx, adr, 8, false)) {
+ Assembler::ldr(Rx, adr);
+ }
+}
+
+void MacroAssembler::ldrw(Register Rw, const Address &adr) {
+ // We always try to merge two adjacent loads into one ldp.
+ if (!try_merge_ldst(Rw, adr, 4, false)) {
+ Assembler::ldrw(Rw, adr);
+ }
+}
+
+void MacroAssembler::str(Register Rx, const Address &adr) {
+ // We always try to merge two adjacent stores into one stp.
+ if (!try_merge_ldst(Rx, adr, 8, true)) {
+ Assembler::str(Rx, adr);
+ }
+}
+
+void MacroAssembler::strw(Register Rw, const Address &adr) {
+ // We always try to merge two adjacent stores into one stp.
+ if (!try_merge_ldst(Rw, adr, 4, true)) {
+ Assembler::strw(Rw, adr);
+ }
+}
+
// MacroAssembler routines found actually to be needed
void MacroAssembler::push(Register src)
@@ -2576,6 +2624,143 @@
return Address(base, offset);
}
+// Checks whether offset is aligned.
+// Returns true if it is, else false.
+bool MacroAssembler::merge_alignment_check(Register base,
+ size_t size,
+ long cur_offset,
+ long prev_offset) const {
+ if (AvoidUnalignedAccesses) {
+ if (base == sp) {
+ // Checks whether the low offset is aligned to a pair of registers.
+ long pair_mask = size * 2 - 1;
+ long offset = prev_offset > cur_offset ? cur_offset : prev_offset;
+ return (offset & pair_mask) == 0;
+ } else { // If base is not sp, we can't guarantee the access is aligned.
+ return false;
+ }
+ } else {
+ long mask = size - 1;
+ // Load/store pair instruction only supports element size aligned offset.
+ return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
+ }
+}
+
+// Checks whether current and previous loads/stores can be merged.
+// Returns true if they can be merged, else false.
+bool MacroAssembler::ldst_can_merge(Register rt,
+ const Address &adr,
+ size_t cur_size_in_bytes,
+ bool is_store) const {
+ address prev = pc() - NativeInstruction::instruction_size;
+ address last = code()->last_insn();
+
+ if (last == NULL || !nativeInstruction_at(last)->is_Imm_LdSt()) {
+ return false;
+ }
+
+ if (adr.getMode() != Address::base_plus_offset || prev != last) {
+ return false;
+ }
+
+ NativeLdSt* prev_ldst = NativeLdSt_at(prev);
+ size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
+
+ assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
+ assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
+
+ if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
+ return false;
+ }
+
+ long max_offset = 63 * prev_size_in_bytes;
+ long min_offset = -64 * prev_size_in_bytes;
+
+ assert(prev_ldst->is_not_pre_post_index(), "merging pre-index or post-index accesses is not supported.");
+
+ // Only same base can be merged.
+ if (adr.base() != prev_ldst->base()) {
+ return false;
+ }
+
+ long cur_offset = adr.offset();
+ long prev_offset = prev_ldst->offset();
+ size_t diff = abs(cur_offset - prev_offset);
+ if (diff != prev_size_in_bytes) {
+ return false;
+ }
+
+ // The following cases cannot be merged:
+ // ldr x2, [x2, #8]
+ // ldr x3, [x2, #16]
+ // or:
+ // ldr x2, [x3, #8]
+ // ldr x2, [x3, #16]
+ // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
+ if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
+ return false;
+ }
+
+ long low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
+ // Offset range must be in ldp/stp instruction's range.
+ if (low_offset > max_offset || low_offset < min_offset) {
+ return false;
+ }
+
+ if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
+ return true;
+ }
+
+ return false;
+}
+
+// Merge current load/store with previous load/store into ldp/stp.
+void MacroAssembler::merge_ldst(Register rt,
+ const Address &adr,
+ size_t cur_size_in_bytes,
+ bool is_store) {
+
+ assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be mergeable.");
+
+ Register rt_low, rt_high;
+ address prev = pc() - NativeInstruction::instruction_size;
+ NativeLdSt* prev_ldst = NativeLdSt_at(prev);
+
+ long offset;
+
+ if (adr.offset() < prev_ldst->offset()) {
+ offset = adr.offset();
+ rt_low = rt;
+ rt_high = prev_ldst->target();
+ } else {
+ offset = prev_ldst->offset();
+ rt_low = prev_ldst->target();
+ rt_high = rt;
+ }
+
+ Address adr_p = Address(prev_ldst->base(), offset);
+ // Overwrite previous generated binary.
+ code_section()->set_end(prev);
+
+ const int sz = prev_ldst->size_in_bytes();
+ assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
+ if (!is_store) {
+ BLOCK_COMMENT("merged ldr pair");
+ if (sz == 8) {
+ ldp(rt_low, rt_high, adr_p);
+ } else {
+ ldpw(rt_low, rt_high, adr_p);
+ }
+ } else {
+ BLOCK_COMMENT("merged str pair");
+ if (sz == 8) {
+ stp(rt_low, rt_high, adr_p);
+ } else {
+ stpw(rt_low, rt_high, adr_p);
+ }
+ }
+}
+
/**
* Multiply 64 bit by 64 bit first loop.
*/
@@ -3433,16 +3618,16 @@
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableForRS ||
- bs->kind() == BarrierSet::CardTableExtension,
+ assert(bs->kind() == BarrierSet::CardTableModRef,
"Wrong barrier set kind");
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
- lsr(obj, obj, CardTableModRefBS::card_shift);
-
- assert(CardTableModRefBS::dirty_card_val() == 0, "must be");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ lsr(obj, obj, CardTable::card_shift);
+
+ assert(CardTable::dirty_card_val() == 0, "must be");
load_byte_map_base(rscratch1);
@@ -3944,8 +4129,9 @@
DirtyCardQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@@ -3962,20 +4148,20 @@
// storing region crossing non-NULL, is card already dirty?
- ExternalAddress cardtable((address) ct->byte_map_base);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ ExternalAddress cardtable((address) ct->byte_map_base());
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
const Register card_addr = tmp;
- lsr(card_addr, store_addr, CardTableModRefBS::card_shift);
+ lsr(card_addr, store_addr, CardTable::card_shift);
// get the address of the card
load_byte_map_base(tmp2);
add(card_addr, card_addr, tmp2);
ldrb(tmp2, Address(card_addr));
- cmpw(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ cmpw(tmp2, (int)G1CardTable::g1_young_card_val());
br(Assembler::EQ, done);
- assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");
+ assert((int)CardTable::dirty_card_val() == 0, "must be 0");
membar(Assembler::StoreLoad);
@@ -4152,7 +4338,7 @@
bind(loop);
sub(len, len, unroll);
for (int i = -unroll; i < 0; i++)
- str(zr, Address(t1, i * wordSize));
+ Assembler::str(zr, Address(t1, i * wordSize));
bind(entry);
add(t1, t1, unroll * wordSize);
cbnz(len, loop);
@@ -4329,7 +4515,7 @@
void MacroAssembler::load_byte_map_base(Register reg) {
jbyte *byte_map_base =
- ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base;
+ ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base();
if (is_valid_AArch64_address((address)byte_map_base)) {
// Strictly speaking the byte_map_base isn't an address at all,
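
Most of the new code above is a peephole: the macro assembler remembers the last emitted instruction and, when the next ldr/str has the same size and base and an offset exactly one element away, rewinds the code section and emits a single ldp/stp instead. The mergeability test reduces to the checks in this standalone sketch (it mirrors ldst_can_merge() but is not the HotSpot code):

    #include <cstdlib>

    bool can_merge(long cur_off, long prev_off, long size /* 4 or 8 */) {
      if (labs(cur_off - prev_off) != size) return false;   // must be adjacent
      if (cur_off % size || prev_off % size) return false;  // element-aligned
      long low = cur_off < prev_off ? cur_off : prev_off;
      return low >= -64 * size && low <= 63 * size;         // signed imm7, scaled
    }
    // can_merge(8, 16, 8) -> true:
    //   ldr x1, [x0, #8]; ldr x2, [x0, #16]  ==>  ldp x1, x2, [x0, #8]
    // can_merge(8, 24, 8) -> false (offsets not adjacent)

Note the explicit Assembler::str(zr, ...) in the zeroing loop above: qualifying the call bypasses the new merging overload, leaving that loop's emission unchanged.
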
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -150,11 +150,19 @@
void bind(Label& L) {
Assembler::bind(L);
- code()->clear_last_membar();
+ code()->clear_last_insn();
}
void membar(Membar_mask_bits order_constraint);
+ using Assembler::ldr;
+ using Assembler::str;
+
+ void ldr(Register Rx, const Address &adr);
+ void ldrw(Register Rw, const Address &adr);
+ void str(Register Rx, const Address &adr);
+ void strw(Register Rx, const Address &adr);
+
// Frame creation and destruction shared between JITs.
void build_frame(int framesize);
void remove_frame(int framesize);
@@ -1290,6 +1298,17 @@
// Uses rscratch2 if the address is not directly reachable
Address spill_address(int size, int offset, Register tmp=rscratch2);
+ bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;
+
+ // Check whether two loads/stores can be merged into ldp/stp.
+ bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;
+
+ // Merge current load/store with previous load/store into ldp/stp.
+ void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store);
+
+ // Try to merge two loads/stores into ldp/stp. Returns true on success, false otherwise.
+ bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store);
+
public:
void spill(Register Rx, bool is64, int offset) {
if (is64) {
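
The using-declarations above are required, not cosmetic: declaring ldr/str overloads in MacroAssembler would otherwise hide every inherited Assembler::ldr/str overload under C++ name-hiding rules. A minimal demo with toy classes:

    struct Base {
      void f(int)    {}
      void f(double) {}
    };
    struct Derived : Base {
      using Base::f;       // without this, the overload below hides BOTH Base::f
      void f(double) {}    // analogous to MacroAssembler::ldr(Register, Address)
    };
    int main() {
      Derived d;
      d.f(1);    // Base::f(int), only found thanks to the using-declaration
      d.f(1.5);  // Derived::f(double)
    }
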
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -131,6 +131,13 @@
return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
}
+
+ bool is_Imm_LdSt() {
+ unsigned int insn = uint_at(0);
+ return Instruction_aarch64::extract(insn, 29, 27) == 0b111 &&
+ Instruction_aarch64::extract(insn, 23, 23) == 0b0 &&
+ Instruction_aarch64::extract(insn, 26, 25) == 0b00;
+ }
};
inline NativeInstruction* nativeInstruction_at(address address) {
@@ -532,4 +539,57 @@
return (NativeMembar*)addr;
}
+class NativeLdSt : public NativeInstruction {
+private:
+ int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); }
+ // Check whether instruction is with unscaled offset.
+ bool is_ldst_ur() {
+ return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 ||
+ Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) &&
+ Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00;
+ }
+ bool is_ldst_unsigned_offset() {
+ return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 ||
+ Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100;
+ }
+public:
+ Register target() {
+ uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0);
+ return r == 0x1f ? zr : as_Register(r);
+ }
+ Register base() {
+ uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5);
+ return b == 0x1f ? sp : as_Register(b);
+ }
+ int64_t offset() {
+ if (is_ldst_ur()) {
+ return Instruction_aarch64::sextract(uint_at(0), 20, 12);
+ } else if (is_ldst_unsigned_offset()) {
+ return Instruction_aarch64::extract(uint_at(0), 21, 10) << size();
+ } else {
+ // other addressing modes, e.g. pre-index or post-index.
+ ShouldNotReachHere();
+ return 0;
+ }
+ }
+ size_t size_in_bytes() { return 1 << size(); }
+ bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
+ bool is_load() {
+ assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
+ Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");
+
+ return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01;
+ }
+ bool is_store() {
+ assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
+ Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");
+
+ return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00;
+ }
+};
+
+inline NativeLdSt *NativeLdSt_at(address addr) {
+ assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found");
+ return (NativeLdSt*)addr;
+}
#endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
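
NativeLdSt classifies an instruction purely from its bit pattern: element size in bits 31:30, target register in 4:0, base in 9:5, and an immediate that is either sign-extended (unscaled form) or zero-extended and shifted left by the size (scaled form). The scaled decode in plain C++, a sketch where extract() stands in for Instruction_aarch64::extract:

    #include <cstdint>

    static uint32_t extract(uint32_t insn, int hi, int lo) {
      return (insn >> lo) & ((1u << (hi - lo + 1)) - 1u);
    }

    // Scaled "unsigned offset" form: imm12 at bits 21:10, shifted by the
    // element size in bits 31:30 (0 -> 1B, 1 -> 2B, 2 -> 4B, 3 -> 8B).
    static int64_t scaled_offset(uint32_t insn) {
      uint32_t size = extract(insn, 31, 30);
      return (int64_t)extract(insn, 21, 10) << size;
    }
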
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,6 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/instanceOop.hpp"
@@ -652,9 +654,7 @@
__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
__ pop(saved_regs, sp);
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@@ -695,16 +695,16 @@
__ pop(saved_regs, sp);
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
- __ lsr(start, start, CardTableModRefBS::card_shift);
- __ lsr(end, end, CardTableModRefBS::card_shift);
+ __ lsr(start, start, CardTable::card_shift);
+ __ lsr(end, end, CardTable::card_shift);
__ sub(end, end, start); // number of bytes to copy
const Register count = end; // 'end' register contains bytes count now
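
The arraycopy post-barrier above shifts both bounds right by card_shift and subtracts, yielding the number of card bytes to walk; the ppc variant later in this patch makes the inclusive +1 explicit with addic_. The same arithmetic standalone, assuming 512-byte cards:

    #include <cstddef>
    #include <cstdint>

    size_t cards_spanned(uintptr_t first_byte, uintptr_t last_byte) {
      const int card_shift = 9;
      return (last_byte >> card_shift) - (first_byte >> card_shift) + 1;
    }
    // e.g. copying 1 KiB of oops starting at 0x10000 spans
    // (0x103ff >> 9) - (0x10000 >> 9) + 1 = 0x81 - 0x80 + 1 = 2 cards.
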
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -184,8 +184,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
if (val == noreg) {
__ store_heap_oop_null(obj);
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -193,7 +193,9 @@
}
// Enable vendor specific features
- if (_cpu == CPU_CAVIUM) {
+
+ // ThunderX
+ if (_cpu == CPU_CAVIUM && (_model == 0xA1)) {
if (_variant == 0) _features |= CPU_DMB_ATOMICS;
if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
@@ -202,6 +204,20 @@
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0));
}
}
+ // ThunderX2
+ if ((_cpu == CPU_CAVIUM && (_model == 0xAF)) ||
+ (_cpu == CPU_BROADCOM && (_model == 0x516))) {
+ if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
+ FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
+ }
+ if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
+ FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
+ }
+ if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+ FLAG_SET_DEFAULT(UseFPUForSpilling, true);
+ }
+ }
+
if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC;
if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
// If an old-style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
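
The dispatch above keys on the MIDR-style (implementer, part) pair, and ThunderX2 is matched under two implementers because the design changed hands (Broadcom's Vulcan shipped as Cavium's ThunderX2). A hedged sketch of the predicate, with the implementer constants assumed to be the MIDR codes 'C' and 'B':

    // Part numbers as used in the diff: Cavium 0xAF, Broadcom 0x516.
    bool is_thunderx2(int cpu, int model) {
      const int CPU_CAVIUM = 'C', CPU_BROADCOM = 'B';  // assumed implementer codes
      return (cpu == CPU_CAVIUM   && model == 0xAF) ||
             (cpu == CPU_BROADCOM && model == 0x516);
    }
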
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
@@ -475,22 +477,21 @@
}
void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
- assert(CardTableModRefBS::dirty_card_val() == 0,
+ assert(CardTable::dirty_card_val() == 0,
"Cannot use ZR register (aarch64) or the register containing the card table base address directly (aarch32) otherwise");
#ifdef AARCH64
// AARCH64 has a register that is constant zero. We can use that one to set the
// value in the card table to dirty.
__ move(FrameMap::ZR_opr, card_addr);
#else // AARCH64
- CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
- if(((intx)ct->byte_map_base & 0xff) == 0) {
+ if((ci_card_table_address_as<intx>() & 0xff) == 0) {
// If the card table base address is aligned to 256 bytes, we can use the register
// that contains the card_table_base_address.
__ move(value, card_addr);
} else {
// Otherwise we need to create a register containing that value.
LIR_Opr tmp_zero = new_register(T_INT);
- __ move(LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()), tmp_zero);
+ __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
__ move(tmp_zero, card_addr);
}
#endif // AARCH64
@@ -510,14 +511,14 @@
}
#ifdef AARCH64
- LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
+ LIR_Address* shifted_reg_operand = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BYTE);
LIR_Opr tmp2 = tmp;
- __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTableModRefBS::card_shift)
+ __ add(tmp, LIR_OprFact::address(shifted_reg_operand), tmp2); // tmp2 = tmp + (addr >> CardTable::card_shift)
LIR_Address* card_addr = new LIR_Address(tmp2, T_BYTE);
#else
// Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
// byte instruction does not support the addressing mode we need.
- LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BOOLEAN);
+ LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
#endif
if (UseCondCardMark) {
if (UseConcMarkSweepGC) {
@@ -527,7 +528,7 @@
__ move(card_addr, cur_value);
LabelObj* L_already_dirty = new LabelObj();
- __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val()));
+ __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
__ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
set_card(tmp, card_addr);
__ branch_destination(L_already_dirty->label());
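
set_card() above leans on two facts: dirty_card_val() is 0, and a 256-byte-aligned card table base has a zero low byte, so the register already holding the base can double as the dirty value (a byte store writes only the low byte). In standalone form, a sketch rather than the HotSpot code:

    #include <cassert>
    #include <cstdint>

    void mark_dirty(uint8_t* byte_map_base, uintptr_t addr) {
      const int card_shift = 9;
      assert(((uintptr_t)byte_map_base & 0xff) == 0);  // the aligned fast path
      uint8_t* card = byte_map_base + (addr >> card_shift);
      *card = (uint8_t)(uintptr_t)byte_map_base;       // low byte == 0 == dirty
    }
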
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,9 @@
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -608,8 +612,6 @@
__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
- BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
Label done;
Label recheck;
Label runtime;
@@ -619,8 +621,7 @@
Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_buf()));
- AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
// save at least the registers that need saving if the runtime is called
#ifdef AARCH64
@@ -649,12 +650,12 @@
// explicitly specify that 'cardtable' has a relocInfo::none
// type.
__ lea(r_card_base_1, cardtable);
- __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTableModRefBS::card_shift));
+ __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
// first quick check without barrier
__ ldrb(r_tmp2, Address(r_card_addr_0));
- __ cmp(r_tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
__ b(recheck, ne);
__ bind(done);
@@ -675,14 +676,14 @@
// reload card state after the barrier that ensures the stored oop was visible
__ ldrb(r_tmp2, Address(r_card_addr_0));
- assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
+ assert(CardTable::dirty_card_val() == 0, "adjust this code");
__ cbz(r_tmp2, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
- assert(0 == (int)CardTableModRefBS::dirty_card_val(), "adjust this code");
- if (((intptr_t)ct->byte_map_base & 0xff) == 0) {
+ assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
+ if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
// Card table is aligned so the lowest byte of the table address base is zero.
__ strb(r_card_base_1, Address(r_card_addr_0));
} else {
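
ci_card_table_address() and ci_card_table_address_as<T>() used above come from the newly included ci/ciUtilities.hpp; judging by the call sites, the first returns the jbyte* byte map base via the compiler interface and the second is a casting convenience. A plausible reconstruction (the real declarations live in ciUtilities.hpp):

    typedef signed char jbyte;        // JVM typedef, repeated for self-containment

    jbyte* ci_card_table_address();   // fetches byte_map_base through the CI

    template <typename T>
    T ci_card_table_address_as() {
      return reinterpret_cast<T>(ci_card_table_address());
    }
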
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interp_masm_arm.hpp"
@@ -410,12 +411,12 @@
void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
// Check barrier set type (should be card table) and element size
BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableForRS ||
- bs->kind() == BarrierSet::CardTableExtension,
+ assert(bs->kind() == BarrierSet::CardTableModRef,
"Wrong barrier set kind");
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "Adjust store check code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
// Load card table base address.
@@ -433,19 +434,19 @@
rarely accessed area of thread descriptor).
*/
// TODO-AARCH64 Investigate if mov_slow is faster than ldr from Rthread on AArch64
- mov_address(card_table_base, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
+ mov_address(card_table_base, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
}
// The 2nd part of the store check.
void InterpreterMacroAssembler::store_check_part2(Register obj, Register card_table_base, Register tmp) {
assert_different_registers(obj, card_table_base, tmp);
- assert(CardTableModRefBS::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
+ assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
#ifdef AARCH64
- add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTableModRefBS::card_shift));
+ add(card_table_base, card_table_base, AsmOperand(obj, lsr, CardTable::card_shift));
Address card_table_addr(card_table_base);
#else
- Address card_table_addr(card_table_base, obj, lsr, CardTableModRefBS::card_shift);
+ Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
#endif
if (UseCondCardMark) {
@@ -472,8 +473,9 @@
#ifdef AARCH64
strb(ZR, card_table_addr);
#else
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- if ((((uintptr_t)ct->byte_map_base & 0xff) == 0)) {
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+ CardTable* ct = ctbs->card_table();
+ if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
// This works only if the code is not saved for later use, possibly
// in a context where the base would no longer be aligned.
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "ci/ciEnv.hpp"
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@@ -43,6 +44,7 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -2265,7 +2267,8 @@
DirtyCardQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
Label done;
Label runtime;
@@ -2286,18 +2289,18 @@
// storing region crossing non-NULL, is card already dirty?
const Register card_addr = tmp1;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
- mov_address(tmp2, (address)ct->byte_map_base, symbolic_Relocation::card_table_reference);
- add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTableModRefBS::card_shift));
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
+ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));
ldrb(tmp2, Address(card_addr));
- cmp(tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ cmp(tmp2, (int)G1CardTable::g1_young_card_val());
b(done, eq);
membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);
- assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
+ assert(CardTable::dirty_card_val() == 0, "adjust this code");
ldrb(tmp2, Address(card_addr));
cbz(tmp2, done);
@@ -3023,7 +3026,6 @@
}
#endif // COMPILER2
-
// Must preserve condition codes, or C2 encodeKlass_not_null rule
// must be changed.
void MacroAssembler::encode_klass_not_null(Register r) {
@@ -3261,4 +3263,3 @@
}
#endif // COMPILER2
-
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/instanceOop.hpp"
@@ -2907,8 +2909,7 @@
__ pop(saved_regs | R9ifScratched);
#endif // AARCH64
}
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@@ -2961,12 +2962,12 @@
#endif // !AARCH64
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
BLOCK_COMMENT("CardTablePostBarrier");
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_cardtable_loop, L_done;
@@ -2975,12 +2976,12 @@
__ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
__ sub(count, count, BytesPerHeapOop); // last addr
- __ logical_shift_right(addr, addr, CardTableModRefBS::card_shift);
- __ logical_shift_right(count, count, CardTableModRefBS::card_shift);
+ __ logical_shift_right(addr, addr, CardTable::card_shift);
+ __ logical_shift_right(count, count, CardTable::card_shift);
__ sub(count, count, addr); // nb of cards
// warning: Rthread has not been preserved
- __ mov_address(tmp, (address) ct->byte_map_base, symbolic_Relocation::card_table_reference);
+ __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
__ add(addr,tmp, addr);
Register zero = __ zero_register(tmp);
@@ -2992,8 +2993,6 @@
__ BIND(L_done);
}
break;
- case BarrierSet::ModRef:
- break;
default:
ShouldNotReachHere();
}
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -228,8 +228,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
if (is_null) {
__ store_heap_oop_null(new_val, obj);
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -795,7 +799,7 @@
Register tmp = R0;
Register addr = R14;
Register tmp2 = R15;
- jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+ jbyte* byte_map_base = ci_card_table_address();
Label restart, refill, ret;
@@ -803,26 +807,26 @@
__ std(addr, -8, R1_SP);
__ std(tmp2, -16, R1_SP);
- __ srdi(addr, R0, CardTableModRefBS::card_shift); // Addr is passed in R0.
+ __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
__ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
__ add(addr, tmp2, addr);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
// Return if young card.
- __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::g1_young_card_val());
+ __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
__ beq(CCR0, ret);
// Return if sequential consistent value is already dirty.
__ membar(Assembler::StoreLoad);
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
- __ cmpwi(CCR0, tmp, G1SATBCardTableModRefBS::dirty_card_val());
+ __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
__ beq(CCR0, ret);
// Not dirty.
// First, dirty it.
- __ li(tmp, G1SATBCardTableModRefBS::dirty_card_val());
+ __ li(tmp, G1CardTable::dirty_card_val());
__ stb(tmp, 0, addr);
int dirty_card_q_index_byte_offset =
--- a/src/hotspot/cpu/ppc/frame_ppc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/ppc/frame_ppc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,6 +32,7 @@
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@@ -43,6 +44,7 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -3036,20 +3038,20 @@
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- assert(bs->kind() == BarrierSet::CardTableForRS ||
- bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+ assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+ CardTable* ct = bs->card_table();
#ifdef ASSERT
cmpdi(CCR0, Rnew_val, 0);
asm_assert_ne("null oop not allowed", 0x321);
#endif
- card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
+ card_table_write(ct->byte_map_base(), Rtmp, Rstore_addr);
}
// Write the card table byte.
void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
assert_different_registers(Robj, Rtmp, R0);
load_const_optimized(Rtmp, (address)byte_map_base, R0);
- srdi(Robj, Robj, CardTableModRefBS::card_shift);
+ srdi(Robj, Robj, CardTable::card_shift);
li(R0, 0); // dirty
if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
stbx(R0, Rtmp, Robj);
@@ -3171,6 +3173,7 @@
G1SATBCardTableLoggingModRefBS* bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+ CardTable* ct = bs->card_table();
// Does store cross heap regions?
if (G1RSBarrierRegionFilter) {
@@ -3187,26 +3190,26 @@
#endif
// Storing region crossing non-NULL, is card already dirty?
- assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
const Register Rcard_addr = Rtmp1;
Register Rbase = Rtmp2;
- load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
-
- srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
+ load_const_optimized(Rbase, (address)ct->byte_map_base(), /*temp*/ Rtmp3);
+
+ srdi(Rcard_addr, Rstore_addr, CardTable::card_shift);
// Get the address of the card.
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
- cmpwi(CCR0, Rtmp3, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ cmpwi(CCR0, Rtmp3, (int)G1CardTable::g1_young_card_val());
beq(CCR0, filtered);
membar(Assembler::StoreLoad);
lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr); // Reload after membar.
- cmpwi(CCR0, Rtmp3 /* card value */, CardTableModRefBS::dirty_card_val());
+ cmpwi(CCR0, Rtmp3 /* card value */, CardTable::dirty_card_val());
beq(CCR0, filtered);
// Storing a region crossing, non-NULL oop, card is clean.
// Dirty card and log.
- li(Rtmp3, CardTableModRefBS::dirty_card_val());
+ li(Rtmp3, CardTable::dirty_card_val());
//release(); // G1: oops are allowed to get visible after dirty marking.
stbx(Rtmp3, Rbase, Rcard_addr);
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
@@ -667,9 +669,7 @@
__ bind(filtered);
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@@ -703,8 +703,7 @@
__ restore_LR_CR(R0);
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
Label Lskip_loop, Lstore_loop;
if (UseConcMarkSweepGC) {
@@ -712,19 +711,20 @@
__ release();
}
- CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* const ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
__ sldi(count, count, LogBytesPerHeapOop);
__ addi(count, count, -BytesPerHeapOop);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
- __ srdi(addr, addr, CardTableModRefBS::card_shift);
- __ srdi(count, count, CardTableModRefBS::card_shift);
+ __ srdi(addr, addr, CardTable::card_shift);
+ __ srdi(count, count, CardTable::card_shift);
__ subf(count, addr, count);
assert_different_registers(R0, addr, count, tmp);
- __ load_const(tmp, (address)ct->byte_map_base);
+ __ load_const(tmp, (address)ct->byte_map_base());
__ addic_(count, count, 1);
__ beq(CCR0, Lskip_loop);
__ li(R0, 0);
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -103,8 +103,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
Label Lnull, Ldone;
if (Rval != noreg) {
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
@@ -40,6 +43,7 @@
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -845,7 +849,7 @@
Register r1 = Z_R6; // Must be saved/restored.
Register r2 = Z_R7; // Must be saved/restored.
Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
- jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
+ jbyte* byte_map_base = ci_card_table_address();
// Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
__ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
@@ -854,17 +858,17 @@
// Calculate address of card corresponding to the updated oop slot.
AddressLiteral rs(byte_map_base);
- __ z_srlg(addr_card, addr_oop, CardTableModRefBS::card_shift);
+ __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
addr_oop = noreg; // dead now
__ load_const_optimized(cardtable, rs); // cardtable := <card table base>
__ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
- __ z_cli(0, addr_card, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
__ z_bre(young_card);
__ z_sync(); // Required to support concurrent cleaning.
- __ z_cli(0, addr_card, (int)CardTableModRefBS::dirty_card_val());
+ __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
__ z_brne(not_already_dirty);
__ bind(young_card);
@@ -877,7 +881,7 @@
__ bind(not_already_dirty);
// First, dirty it: [addr_card] := 0
- __ z_mvi(0, addr_card, CardTableModRefBS::dirty_card_val());
+ __ z_mvi(0, addr_card, CardTable::dirty_card_val());
Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
Register buf = r2;
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,7 @@
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
@@ -50,6 +51,7 @@
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -3502,12 +3504,13 @@
// Write to card table for modification at store_addr - register is destroyed afterwards.
void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
- CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableForRS ||
- bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
assert_different_registers(store_addr, tmp);
- z_srlg(store_addr, store_addr, CardTableModRefBS::card_shift);
- load_absolute_address(tmp, (address)bs->byte_map_base);
+ z_srlg(store_addr, store_addr, CardTable::card_shift);
+ load_absolute_address(tmp, (address)ct->byte_map_base());
z_agr(store_addr, tmp);
z_mvi(0, store_addr, 0); // Store byte 0.
}
@@ -3707,6 +3710,7 @@
assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.
G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
+ CardTable* ct = bs->card_table();
assert(bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
BLOCK_COMMENT("g1_write_barrier_post {");
@@ -3733,33 +3737,33 @@
Rnew_val = noreg; // end of lifetime
// Storing region crossing non-NULL, is card already dirty?
- assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
// Make sure not to use Z_R0 for any of these registers.
Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
Register Rbase = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;
// calculate address of card
- load_const_optimized(Rbase, (address)bs->byte_map_base); // Card table base.
- z_srlg(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift); // Index into card table.
+ load_const_optimized(Rbase, (address)ct->byte_map_base()); // Card table base.
+ z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift); // Index into card table.
z_algr(Rcard_addr, Rbase); // Explicit calculation needed for cli.
Rbase = noreg; // end of lifetime
// Filter young.
- assert((unsigned int)G1SATBCardTableModRefBS::g1_young_card_val() <= 255, "otherwise check this code");
- z_cli(0, Rcard_addr, (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ assert((unsigned int)G1CardTable::g1_young_card_val() <= 255, "otherwise check this code");
+ z_cli(0, Rcard_addr, (int)G1CardTable::g1_young_card_val());
z_bre(filtered);
// Check the card value. If dirty, we're done.
// This also avoids false sharing of the (already dirty) card.
z_sync(); // Required to support concurrent cleaning.
- assert((unsigned int)CardTableModRefBS::dirty_card_val() <= 255, "otherwise check this code");
- z_cli(0, Rcard_addr, CardTableModRefBS::dirty_card_val()); // Reload after membar.
+ assert((unsigned int)CardTable::dirty_card_val() <= 255, "otherwise check this code");
+ z_cli(0, Rcard_addr, CardTable::dirty_card_val()); // Reload after membar.
z_bre(filtered);
// Storing a region crossing, non-NULL oop, card is clean.
// Dirty card and log.
- z_mvi(0, Rcard_addr, CardTableModRefBS::dirty_card_val());
+ z_mvi(0, Rcard_addr, CardTable::dirty_card_val());
Register Rcard_addr_x = Rcard_addr;
Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,6 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "nativeInst_s390.hpp"
@@ -722,8 +724,7 @@
__ bind(filtered);
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
case BarrierSet::ModRef:
break;
default:
@@ -761,14 +762,14 @@
}
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
// These cases were formerly known as
// void array_store_check(Register addr, Register count, bool branchToEnd).
{
NearLabel doXC, done;
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(Z_R0, Z_R1, addr, count);
// Nothing to do if count <= 0.
@@ -787,11 +788,11 @@
__ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
// Get base address of card table.
- __ load_const_optimized(Z_R1, (address)ct->byte_map_base);
+ __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
// count = (count>>shift) - (addr>>shift)
- __ z_srlg(addr, addr, CardTableModRefBS::card_shift);
- __ z_srlg(count, count, CardTableModRefBS::card_shift);
+ __ z_srlg(addr, addr, CardTable::card_shift);
+ __ z_srlg(count, count, CardTable::card_shift);
// Prefetch first elements of card table for update.
if (VM_Version::has_Prefetch()) {
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -260,8 +260,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
if (val_is_null) {
__ store_heap_oop_null(val, offset, base);
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -35,6 +35,7 @@
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolder.hpp"
@@ -38,6 +41,7 @@
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -843,22 +847,22 @@
Register cardtable = G5;
Register tmp = G1_scratch;
Register tmp2 = G3_scratch;
- jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
+ jbyte* byte_map_base = ci_card_table_address();
Label not_already_dirty, restart, refill, young_card;
- __ srlx(addr, CardTableModRefBS::card_shift, addr);
+ __ srlx(addr, CardTable::card_shift, addr);
AddressLiteral rs(byte_map_base);
__ set(rs, cardtable); // cardtable := <card table base>
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
- __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+ __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
- assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+ assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@@ -35,6 +36,7 @@
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
@@ -44,6 +46,7 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -658,7 +661,7 @@
void MacroAssembler::card_table_write(jbyte* byte_map_base,
Register tmp, Register obj) {
- srlx(obj, CardTableModRefBS::card_shift, obj);
+ srlx(obj, CardTable::card_shift, obj);
assert(tmp != obj, "need separate temp reg");
set((address) byte_map_base, tmp);
stb(G0, tmp, obj);
@@ -3574,17 +3577,17 @@
Label not_already_dirty, restart, refill, young_card;
- __ srlx(O0, CardTableModRefBS::card_shift, O0);
+ __ srlx(O0, CardTable::card_shift, O0);
AddressLiteral addrlit(byte_map_base);
__ set(addrlit, O1); // O1 := <card table base>
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
- __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+ __ cmp_and_br_short(O2, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
__ ldub(O0, O1, O2); // O2 := [O0 + O1]
- assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+ assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
__ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
__ bind(young_card);
@@ -3664,6 +3667,7 @@
G1SATBCardTableLoggingModRefBS* bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(Universe::heap()->barrier_set());
+ CardTable* ct = bs->card_table();
if (G1RSBarrierRegionFilter) {
xor3(store_addr, new_val, tmp);
@@ -3704,7 +3708,8 @@
if (dirty_card_log_enqueue == 0) {
G1SATBCardTableLoggingModRefBS* bs =
barrier_set_cast<G1SATBCardTableLoggingModRefBS>(heap->barrier_set());
- generate_dirty_card_log_enqueue(bs->byte_map_base);
+ CardTable *ct = bs->card_table();
+ generate_dirty_card_log_enqueue(ct->byte_map_base());
assert(dirty_card_log_enqueue != 0, "postcondition.");
}
if (satb_log_enqueue_with_frame == 0) {
@@ -3726,9 +3731,10 @@
if (new_val == G0) return;
CardTableModRefBS* bs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- assert(bs->kind() == BarrierSet::CardTableForRS ||
- bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
- card_table_write(bs->byte_map_base, tmp, store_addr);
+ CardTable* ct = bs->card_table();
+
+ assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+ card_table_write(ct->byte_map_base(), tmp, store_addr);
}
// ((OopHandle)result).resolve();
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
@@ -875,9 +877,7 @@
DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@@ -908,11 +908,11 @@
__ restore();
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
assert_different_registers(addr, count, tmp);
Label L_loop, L_done;
@@ -923,10 +923,10 @@
__ sub(count, BytesPerHeapOop, count);
__ add(count, addr, count);
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
- __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
- __ srl_ptr(count, CardTableModRefBS::card_shift, count);
+ __ srl_ptr(addr, CardTable::card_shift, addr);
+ __ srl_ptr(count, CardTable::card_shift, count);
__ sub(count, addr, count);
- AddressLiteral rs(ct->byte_map_base);
+ AddressLiteral rs(ct->byte_map_base());
__ set(rs, tmp);
__ BIND(L_loop);
__ stb(G0, tmp, addr);
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -90,8 +90,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,9 @@
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
@@ -39,6 +42,7 @@
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif
@@ -1632,10 +1636,6 @@
// arg0: store_address
Address store_addr(rbp, 2*BytesPerWord);
- CardTableModRefBS* ct =
- barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
Label done;
Label enqueued;
Label runtime;
@@ -1657,25 +1657,25 @@
const Register card_addr = rcx;
f.load_argument(0, card_addr);
- __ shrptr(card_addr, CardTableModRefBS::card_shift);
+ __ shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
- __ movptr(cardtable, (intptr_t)ct->byte_map_base);
+ __ movptr(cardtable, ci_card_table_address_as<intptr_t>());
__ addptr(card_addr, cardtable);
NOT_LP64(__ get_thread(thread);)
- __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
__ jcc(Assembler::equal, done);
__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
- __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+ __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
__ jcc(Assembler::equal, done);
// storing region crossing non-NULL, card is clean.
// dirty card and log.
- __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+ __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
const Register tmp = rdx;
__ push(rdx);
--- a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -346,8 +346,9 @@
_from -= Interpreter::stackElementSize;
if (_num_args < Argument::n_float_register_parameters_c-1) {
+ assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range");
*_reg_args++ = from_obj;
- *_fp_identifiers |= (intptr_t)(0x01 << (_num_args*2)); // mark as float
+ *_fp_identifiers |= ((intptr_t)0x01 << (_num_args*2)); // mark as float
_num_args++;
} else {
*_to++ = from_obj;
@@ -360,8 +361,9 @@
_from -= 2*Interpreter::stackElementSize;
if (_num_args < Argument::n_float_register_parameters_c-1) {
+ assert((_num_args*2) < BitsPerWord, "_num_args*2 is out of range");
*_reg_args++ = from_obj;
- *_fp_identifiers |= (intptr_t)(0x3 << (_num_args*2)); // mark as double
+ *_fp_identifiers |= ((intptr_t)0x3 << (_num_args*2)); // mark as double
_num_args++;
} else {
*_to++ = from_obj;
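The interpreterRT change above is a correctness fix rather than a rename: the old code shifted the 32-bit literal first and widened afterwards, so once _num_args*2 reached 31 the shift overflowed the int (undefined behavior). A minimal illustration, outside HotSpot:

#include <cstdint>

// Widen to the destination type before shifting; shifting a 32-bit int
// by 31 or more bits is undefined behavior.
intptr_t mark_double_sketch(intptr_t fp_identifiers, int num_args) {
  // broken form: (intptr_t)(0x3 << (num_args * 2)) -- shift happens in int
  return fp_identifiers | ((intptr_t)0x3 << (num_args * 2));
}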
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -27,6 +27,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
@@ -45,6 +46,7 @@
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -5407,9 +5409,10 @@
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_buf()));
- CardTableModRefBS* ct =
+ CardTableModRefBS* ctbs =
barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label done;
Label runtime;
@@ -5432,24 +5435,24 @@
const Register cardtable = tmp2;
movptr(card_addr, store_addr);
- shrptr(card_addr, CardTableModRefBS::card_shift);
+ shrptr(card_addr, CardTable::card_shift);
// Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
// a valid address and therefore is not properly handled by the relocation code.
- movptr(cardtable, (intptr_t)ct->byte_map_base);
+ movptr(cardtable, (intptr_t)ct->byte_map_base());
addptr(card_addr, cardtable);
- cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
jcc(Assembler::equal, done);
membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
- cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
jcc(Assembler::equal, done);
// storing a region crossing, non-NULL oop, card is clean.
// dirty card and log.
- movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
cmpl(queue_index, 0);
jcc(Assembler::equal, runtime);
@@ -5494,14 +5497,14 @@
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableForRS ||
- bs->kind() == BarrierSet::CardTableExtension,
+ assert(bs->kind() == BarrierSet::CardTableModRef,
"Wrong barrier set kind");
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
- shrptr(obj, CardTableModRefBS::card_shift);
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ shrptr(obj, CardTable::card_shift);
Address card_addr;
@@ -5510,7 +5513,7 @@
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
- intptr_t disp = (intptr_t) ct->byte_map_base;
+ intptr_t disp = (intptr_t) ct->byte_map_base();
if (is_simm32(disp)) {
card_addr = Address(noreg, obj, Address::times_1, disp);
} else {
@@ -5518,12 +5521,12 @@
// displacement and done in a single instruction given favorable mapping and a
// smarter version of as_Address. However, 'ExternalAddress' generates a relocation
// entry and that entry is not properly handled by the relocation code.
- AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
+ AddressLiteral cardtable((address)ct->byte_map_base(), relocInfo::none);
Address index(noreg, obj, Address::times_1);
card_addr = as_Address(ArrayAddress(cardtable, index));
}
- int dirty = CardTableModRefBS::dirty_card_val();
+ int dirty = CardTable::dirty_card_val();
if (UseCondCardMark) {
Label L_already_dirty;
if (UseConcMarkSweepGC) {
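For reference while reading these stubs: the interpreted store check is just a shift plus a byte store. A C-level sketch of what the assembly above computes, assuming the standard 512-byte cards (card_shift == 9) and a byte_map_base pre-biased by the heap start:

#include "gc/shared/cardTable.hpp"

// Sketch of the card mark emitted above; UseCondCardMark adds a
// compare-before-store to avoid redundant writes.
static inline void card_mark_sketch(jbyte* byte_map_base, uintptr_t store_addr) {
  jbyte* card = byte_map_base + (store_addr >> CardTable::card_shift);
  *card = (jbyte)CardTable::dirty_card_val();
}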
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@@ -705,9 +707,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
break;
default :
ShouldNotReachHere();
@@ -739,22 +739,22 @@
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
Label L_loop;
const Register end = count; // elements count; end == start+count-1
assert_different_registers(start, end);
__ lea(end, Address(start, count, Address::times_ptr, -wordSize));
- __ shrptr(start, CardTableModRefBS::card_shift);
- __ shrptr(end, CardTableModRefBS::card_shift);
+ __ shrptr(start, CardTable::card_shift);
+ __ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> count
__ BIND(L_loop);
- intptr_t disp = (intptr_t) ct->byte_map_base;
+ intptr_t disp = (intptr_t) ct->byte_map_base();
Address cardtable(start, count, Address::times_1, disp);
__ movb(cardtable, 0);
__ decrement(count);
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -25,6 +25,9 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@@ -1232,9 +1235,7 @@
__ bind(filtered);
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
break;
default:
ShouldNotReachHere();
@@ -1272,12 +1273,8 @@
__ popa();
}
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
Label L_loop, L_done;
const Register end = count;
@@ -1286,11 +1283,11 @@
__ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
__ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
- __ shrptr(start, CardTableModRefBS::card_shift);
- __ shrptr(end, CardTableModRefBS::card_shift);
+ __ shrptr(start, CardTable::card_shift);
+ __ shrptr(end, CardTable::card_shift);
__ subptr(end, start); // end --> cards count
- int64_t disp = (int64_t) ct->byte_map_base;
+ int64_t disp = ci_card_table_address_as<int64_t>();
__ mov64(scratch, disp);
__ addptr(start, scratch);
__ BIND(L_loop);
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -198,8 +198,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
{
if (val == noreg) {
__ store_heap_oop_null(obj);
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -41,6 +41,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
--- a/src/hotspot/os/linux/osContainer_linux.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/os/linux/osContainer_linux.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -414,9 +414,9 @@
}
-char * OSContainer::container_type() {
+const char * OSContainer::container_type() {
if (is_containerized()) {
- return (char *)"cgroupv1";
+ return "cgroupv1";
} else {
return NULL;
}
--- a/src/hotspot/os/linux/osContainer_linux.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/os/linux/osContainer_linux.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -40,7 +40,7 @@
public:
static void init();
static inline bool is_containerized();
- static char * container_type();
+ static const char * container_type();
static jlong memory_limit_in_bytes();
static jlong memory_and_swap_limit_in_bytes();
--- a/src/hotspot/os/linux/os_linux.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/os/linux/os_linux.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -177,20 +177,17 @@
if (OSContainer::is_containerized()) {
jlong mem_limit, mem_usage;
- if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
- if ((mem_usage = OSContainer::memory_usage_in_bytes()) > 0) {
- if (mem_limit > mem_usage) {
- avail_mem = (julong)mem_limit - (julong)mem_usage;
- } else {
- avail_mem = 0;
- }
- log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
- return avail_mem;
- } else {
- log_debug(os,container)("container memory usage call failed: " JLONG_FORMAT, mem_usage);
- }
- } else {
- log_debug(os,container)("container memory unlimited or failed: " JLONG_FORMAT, mem_limit);
+ if ((mem_limit = OSContainer::memory_limit_in_bytes()) < 1) {
+ log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
+ mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
+ }
+ if (mem_limit > 0 && (mem_usage = OSContainer::memory_usage_in_bytes()) < 1) {
+ log_debug(os, container)("container memory usage failed: " JLONG_FORMAT ", using host value", mem_usage);
+ }
+ if (mem_limit > 0 && mem_usage > 0) {
+ avail_mem = mem_limit > mem_usage ? (julong)mem_limit - (julong)mem_usage : 0;
+ log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
+ return avail_mem;
}
}
@@ -201,22 +198,18 @@
}
julong os::physical_memory() {
+ jlong phys_mem = 0;
if (OSContainer::is_containerized()) {
jlong mem_limit;
if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
- return (julong)mem_limit;
- } else {
- if (mem_limit == OSCONTAINER_ERROR) {
- log_debug(os,container)("container memory limit call failed");
- }
- if (mem_limit == -1) {
- log_debug(os,container)("container memory unlimited, using host value");
- }
+ return mem_limit;
}
- }
-
- jlong phys_mem = Linux::physical_memory();
+ log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", using host value",
+ mem_limit == OSCONTAINER_ERROR ? "failed" : "unlimited", mem_limit);
+ }
+
+ phys_mem = Linux::physical_memory();
log_trace(os)("total system memory: " JLONG_FORMAT, phys_mem);
return phys_mem;
}
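Both os_linux.cpp hunks above converge on one pattern: trust the cgroup value only when it is a positive limit; on OSCONTAINER_ERROR (call failed) or -1 (unlimited), log once and fall back to the host value. A condensed sketch of physical_memory() after this change:

#include "osContainer_linux.hpp"

// Condensed sketch of the container fallback pattern introduced above.
julong physical_memory_sketch() {
  if (OSContainer::is_containerized()) {
    jlong mem_limit = OSContainer::memory_limit_in_bytes();
    if (mem_limit > 0) {
      return (julong)mem_limit;  // a positive container limit wins
    }
    // mem_limit is OSCONTAINER_ERROR or -1 (unlimited): fall through.
  }
  return (julong)os::Linux::physical_memory();  // host value
}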
@@ -2135,63 +2128,54 @@
}
void os::Linux::print_container_info(outputStream* st) {
- if (OSContainer::is_containerized()) {
- st->print("container (cgroup) information:\n");
-
- char *p = OSContainer::container_type();
- if (p == NULL)
- st->print("container_type() failed\n");
- else {
- st->print("container_type: %s\n", p);
- }
-
- p = OSContainer::cpu_cpuset_cpus();
- if (p == NULL)
- st->print("cpu_cpuset_cpus() failed\n");
- else {
- st->print("cpu_cpuset_cpus: %s\n", p);
- free(p);
- }
-
- p = OSContainer::cpu_cpuset_memory_nodes();
- if (p < 0)
- st->print("cpu_memory_nodes() failed\n");
- else {
- st->print("cpu_memory_nodes: %s\n", p);
- free(p);
- }
-
- int i = OSContainer::active_processor_count();
- if (i < 0)
- st->print("active_processor_count() failed\n");
- else
- st->print("active_processor_count: %d\n", i);
-
- i = OSContainer::cpu_quota();
- st->print("cpu_quota: %d\n", i);
-
- i = OSContainer::cpu_period();
- st->print("cpu_period: %d\n", i);
-
- i = OSContainer::cpu_shares();
- st->print("cpu_shares: %d\n", i);
-
- jlong j = OSContainer::memory_limit_in_bytes();
- st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);
-
- j = OSContainer::memory_and_swap_limit_in_bytes();
- st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);
-
- j = OSContainer::memory_soft_limit_in_bytes();
- st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);
-
- j = OSContainer::OSContainer::memory_usage_in_bytes();
- st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);
-
- j = OSContainer::OSContainer::memory_max_usage_in_bytes();
- st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
- st->cr();
- }
+ if (!OSContainer::is_containerized()) {
+ return;
+ }
+
+ st->print("container (cgroup) information:\n");
+
+ const char *p_ct = OSContainer::container_type();
+ st->print("container_type: %s\n", p_ct != NULL ? p_ct : "failed");
+
+ char *p = OSContainer::cpu_cpuset_cpus();
+ st->print("cpu_cpuset_cpus: %s\n", p != NULL ? p : "failed");
+ free(p);
+
+ p = OSContainer::cpu_cpuset_memory_nodes();
+ st->print("cpu_memory_nodes: %s\n", p != NULL ? p : "failed");
+ free(p);
+
+ int i = OSContainer::active_processor_count();
+ if (i > 0) {
+ st->print("active_processor_count: %d\n", i);
+ } else {
+ st->print("active_processor_count: failed\n");
+ }
+
+ i = OSContainer::cpu_quota();
+ st->print("cpu_quota: %d\n", i);
+
+ i = OSContainer::cpu_period();
+ st->print("cpu_period: %d\n", i);
+
+ i = OSContainer::cpu_shares();
+ st->print("cpu_shares: %d\n", i);
+
+ jlong j = OSContainer::memory_limit_in_bytes();
+ st->print("memory_limit_in_bytes: " JLONG_FORMAT "\n", j);
+
+ j = OSContainer::memory_and_swap_limit_in_bytes();
+ st->print("memory_and_swap_limit_in_bytes: " JLONG_FORMAT "\n", j);
+
+ j = OSContainer::memory_soft_limit_in_bytes();
+ st->print("memory_soft_limit_in_bytes: " JLONG_FORMAT "\n", j);
+
+ j = OSContainer::memory_usage_in_bytes();
+ st->print("memory_usage_in_bytes: " JLONG_FORMAT "\n", j);
+
+ j = OSContainer::memory_max_usage_in_bytes();
+ st->print("memory_max_usage_in_bytes: " JLONG_FORMAT "\n", j);
+ st->cr();
}
void os::print_memory_info(outputStream* st) {
@@ -3069,10 +3053,12 @@
return res != (uintptr_t) MAP_FAILED;
}
-static address get_stack_commited_bottom(address bottom, size_t size) {
- address nbot = bottom;
- address ntop = bottom + size;
-
+// If no page is mapped/committed, top (bottom + size) is returned.
+static address get_stack_mapped_bottom(address bottom,
+ size_t size,
+ bool committed_only /* must have backing pages */) {
+ // address used to test if the page is mapped/committed
+ address test_addr = bottom + size;
size_t page_sz = os::vm_page_size();
unsigned pages = size / page_sz;
@@ -3084,38 +3070,39 @@
while (imin < imax) {
imid = (imax + imin) / 2;
- nbot = ntop - (imid * page_sz);
+ test_addr = bottom + (imid * page_sz);
// Use a trick with mincore to check whether the page is mapped or not.
// mincore sets vec to 1 if page resides in memory and to 0 if page
// is swapped out, but if the page we are asking for is unmapped
// it returns -1 with errno set to ENOMEM.
- mincore_return_value = mincore(nbot, page_sz, vec);
-
- if (mincore_return_value == -1) {
- // Page is not mapped go up
- // to find first mapped page
- if (errno != EAGAIN) {
- assert(errno == ENOMEM, "Unexpected mincore errno");
- imax = imid;
+ mincore_return_value = mincore(test_addr, page_sz, vec);
+
+ if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
+ // Page is not mapped/committed; go up
+ // to find the first mapped/committed page.
+ if ((mincore_return_value == -1 && errno != EAGAIN)
+ || (committed_only && (vec[0] & 0x01) == 0)) {
+ assert(mincore_return_value != -1 || errno == ENOMEM, "Unexpected mincore errno");
+
+ imin = imid + 1;
}
} else {
- // Page is mapped go down
- // to find first not mapped page
- imin = imid + 1;
+ // mapped/committed, go down
+ imax = imid;
}
}
- nbot = nbot + page_sz;
-
- // Adjust stack bottom one page up if last checked page is not mapped
- if (mincore_return_value == -1) {
- nbot = nbot + page_sz;
- }
-
- return nbot;
-}
-
+ // Adjust stack bottom one page up if last checked page is not mapped/committed
+ if (mincore_return_value == -1 || (committed_only && (vec[0] & 0x01) == 0)) {
+ assert(mincore_return_value != -1 || (errno != EAGAIN && errno != ENOMEM),
+ "Should not get to here");
+
+ test_addr = test_addr + page_sz;
+ }
+
+ return test_addr;
+}
// Linux uses a growable mapping for the stack, and if the mapping for
// the stack guard pages is not removed when we detach a thread the
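The binary search above keys off two per-page states that mincore() can report: failure with ENOMEM means the probed page is not mapped at all, while a clear low bit in vec means mapped but with no resident backing page (which the committed_only mode treats as not committed). A standalone Linux sketch of that single-page probe:

#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>

enum PageState { page_unmapped, page_mapped_only, page_resident };

// One probe of the kind the binary search in get_stack_mapped_bottom()
// performs per iteration.
static PageState probe_page(char* addr) {
  unsigned char vec[1];
  size_t page_sz = (size_t)sysconf(_SC_PAGESIZE);
  if (mincore(addr, page_sz, vec) == -1) {
    // ENOMEM: not mapped; EAGAIN is transient and handled by the caller.
    return page_unmapped;
  }
  return (vec[0] & 0x01) ? page_resident : page_mapped_only;
}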
@@ -3153,9 +3140,9 @@
if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
// Fallback to slow path on all errors, including EAGAIN
- stack_extent = (uintptr_t) get_stack_commited_bottom(
- os::Linux::initial_thread_stack_bottom(),
- (size_t)addr - stack_extent);
+ stack_extent = (uintptr_t) get_stack_mapped_bottom(os::Linux::initial_thread_stack_bottom(),
+ (size_t)addr - stack_extent,
+ false /* committed_only */);
}
if (stack_extent < (uintptr_t)addr) {
@@ -3182,6 +3169,11 @@
return os::uncommit_memory(addr, size);
}
+size_t os::committed_stack_size(address bottom, size_t size) {
+ address bot = get_stack_mapped_bottom(bottom, size, true /* committed_only */);
+ return size_t(bottom + size - bot);
+}
+
// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
--- a/src/hotspot/os/posix/os_posix.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/os/posix/os_posix.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -331,8 +331,15 @@
return aligned_base;
}
-int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
- return vsnprintf(buf, len, fmt, args);
+int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
+ // All supported POSIX platforms provide C99 semantics.
+ int result = ::vsnprintf(buf, len, fmt, args);
+ // If an encoding error occurred (result < 0) then it's not clear
+ // whether the buffer is NUL terminated, so ensure it is.
+ if ((result < 0) && (len > 0)) {
+ buf[len - 1] = '\0';
+ }
+ return result;
}
int os::get_fileno(FILE* fp) {
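The C99 contract the new wrapper relies on: vsnprintf returns the length the complete output would have had, regardless of truncation, so callers can detect truncation or size a buffer for a second pass. A small usage sketch against os::vsnprintf:

#include <cstdarg>
#include "runtime/os.hpp"

// Returns true only when the formatted output fit entirely in buf.
static bool format_fits(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int needed = os::vsnprintf(buf, len, fmt, args);  // full length, even if truncated
  va_end(args);
  return needed >= 0 && (size_t)needed < len;
}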
--- a/src/hotspot/os/windows/os_windows.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/os/windows/os_windows.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -363,6 +363,25 @@
return sz;
}
+size_t os::committed_stack_size(address bottom, size_t size) {
+ MEMORY_BASIC_INFORMATION minfo;
+ address top = bottom + size;
+ size_t committed_size = 0;
+
+ while (committed_size < size) {
+ // top is exclusive
+ VirtualQuery(top - 1, &minfo, sizeof(minfo));
+ if ((minfo.State & MEM_COMMIT) != 0) {
+ committed_size += minfo.RegionSize;
+ top -= minfo.RegionSize;
+ } else {
+ break;
+ }
+ }
+
+ return MIN2(committed_size, size);
+}
+
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
const struct tm* time_struct_ptr = localtime(clock);
if (time_struct_ptr != NULL) {
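With this hunk the Windows port matches the new Linux code path: both now answer how much of a reserved stack range currently has committed backing. An illustrative caller (not part of this patch):

#include "runtime/os.hpp"

// Report the committed slice of a thread's reserved stack.
size_t committed_stack_sketch(address stack_bottom, size_t stack_reserved) {
  size_t committed = os::committed_stack_size(stack_bottom, stack_reserved);
  assert(committed <= stack_reserved, "cannot commit more than is reserved");
  return committed;
}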
@@ -1494,13 +1513,39 @@
if (nl != NULL) *nl = '\0';
}
-int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
- int ret = vsnprintf(buf, len, fmt, args);
- // Get the correct buffer size if buf is too small
- if (ret < 0) {
- return _vscprintf(fmt, args);
- }
- return ret;
+int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
+#if _MSC_VER >= 1900
+ // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
+ int result = ::vsnprintf(buf, len, fmt, args);
+ // If an encoding error occurred (result < 0) then it's not clear
+ // whether the buffer is NUL terminated, so ensure it is.
+ if ((result < 0) && (len > 0)) {
+ buf[len - 1] = '\0';
+ }
+ return result;
+#else
+ // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
+ // _vsnprintf, whose behavior seems to be *mostly* consistent across
+ // versions. However, when len == 0, avoid _vsnprintf too, and just
+ // go straight to _vscprintf. The output is going to be truncated in
+ // that case, except in the unusual case of empty output. More
+ // importantly, the documentation for various versions of Visual Studio
+ // is inconsistent about the behavior of _vsnprintf when len == 0,
+ // including it possibly being an error.
+ int result = -1;
+ if (len > 0) {
+ result = _vsnprintf(buf, len, fmt, args);
+ // If output (including NUL terminator) is truncated, the buffer
+ // won't be NUL terminated. Add the trailing NUL specified by C99.
+ if ((result < 0) || (result >= len)) {
+ buf[len - 1] = '\0';
+ }
+ }
+ if (result < 0) {
+ result = _vscprintf(fmt, args);
+ }
+ return result;
+#endif // _MSC_VER dispatch
}
static inline time_t get_mtime(const char* filename) {
--- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
@@ -42,7 +43,7 @@
}
if (bs->is_a(BarrierSet::CardTableModRef)) {
- _card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base);
+ _card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->card_table()->byte_map_base());
} else {
_card_table_base = NULL;
}
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -25,7 +25,10 @@
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.hpp"
+#include "ci/ciUtilities.hpp"
#include "classfile/javaAssertions.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/abstractInterpreter.hpp"
@@ -539,8 +542,7 @@
_lib_symbols_initialized = true;
CollectedHeap* heap = Universe::heap();
- CardTableModRefBS* ct = (CardTableModRefBS*)(heap->barrier_set());
- SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ct->byte_map_base);
+ SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ci_card_table_address());
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
--- a/src/hotspot/share/asm/codeBuffer.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/asm/codeBuffer.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -380,7 +380,7 @@
OopRecorder _default_oop_recorder; // override with initialize_oop_recorder
Arena* _overflow_arena;
- address _last_membar; // used to merge consecutive memory barriers
+ address _last_insn; // used to merge consecutive memory barriers, loads or stores.
address _decode_begin; // start address for decode
address decode_begin();
@@ -395,7 +395,7 @@
_decode_begin = NULL;
_overflow_arena = NULL;
_code_strings = CodeStrings();
- _last_membar = NULL;
+ _last_insn = NULL;
}
void initialize(address code_start, csize_t code_size) {
@@ -587,9 +587,9 @@
OopRecorder* oop_recorder() const { return _oop_recorder; }
CodeStrings& strings() { return _code_strings; }
- address last_membar() const { return _last_membar; }
- void set_last_membar(address a) { _last_membar = a; }
- void clear_last_membar() { set_last_membar(NULL); }
+ address last_insn() const { return _last_insn; }
+ void set_last_insn(address a) { _last_insn = a; }
+ void clear_last_insn() { set_last_insn(NULL); }
void free_strings() {
if (!_code_strings.is_null()) {
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
+#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1461,11 +1463,7 @@
G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- // No pre barriers
- break;
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
// No pre barriers
break;
default :
@@ -1481,13 +1479,9 @@
G1SATBCardTableModRef_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
CardTableModRef_post_barrier(addr, new_val);
break;
- case BarrierSet::ModRef:
- // No post barriers
- break;
default :
ShouldNotReachHere();
}
@@ -1616,9 +1610,7 @@
////////////////////////////////////////////////////////////////////////
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(_bs);
- assert(sizeof(*(ct->byte_map_base)) == sizeof(jbyte), "adjust this code");
- LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base);
+ LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
// ptr cannot be an object because we use this barrier for array card marks
@@ -1640,9 +1632,9 @@
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
__ move(addr, tmp);
- __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
+ __ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
} else {
- __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
+ __ unsigned_shift_right(addr, CardTable::card_shift, tmp);
}
LIR_Address* card_addr;
@@ -1652,7 +1644,7 @@
card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
}
- LIR_Opr dirty = LIR_OprFact::intConst(CardTableModRefBS::dirty_card_val());
+ LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
if (UseCondCardMark) {
LIR_Opr cur_value = new_register(T_INT);
if (UseConcMarkSweepGC) {
--- a/src/hotspot/share/ci/ciEnv.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/ci/ciEnv.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,7 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/init.hpp"
#include "runtime/reflection.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "trace/tracing.hpp"
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "runtime/fieldDescriptor.hpp"
+#include "runtime/jniHandles.inline.hpp"
// ciInstanceKlass
//
--- a/src/hotspot/share/ci/ciObject.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/ci/ciObject.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "ci/ciUtilities.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"
// ciObject
//
@@ -98,6 +99,14 @@
}
// ------------------------------------------------------------------
+// ciObject::get_oop
+//
+// Get the oop of this ciObject.
+oop ciObject::get_oop() const {
+ return JNIHandles::resolve_non_null(_handle);
+}
+
+// ------------------------------------------------------------------
// ciObject::klass
//
// Get the ciKlass of this ciObject.
--- a/src/hotspot/share/ci/ciObject.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/ci/ciObject.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,10 +67,7 @@
jobject handle() const { return _handle; }
// Get the VM oop that this object holds.
- oop get_oop() const {
- assert(_handle != NULL, "null oop");
- return JNIHandles::resolve_non_null(_handle);
- }
+ oop get_oop() const;
void init_flags_from(oop x);
--- a/src/hotspot/share/ci/ciUtilities.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/ci/ciUtilities.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "memory/universe.hpp"
// ciUtilities
//
@@ -43,3 +46,13 @@
char c = type2char(t);
return c ? c : 'X';
}
+
+// ------------------------------------------------------------------
+// ci_card_table_address
+jbyte *ci_card_table_address() {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
+ return ct->byte_map_base();
+}
--- a/src/hotspot/share/ci/ciUtilities.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/ci/ciUtilities.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -27,6 +27,7 @@
#include "ci/ciEnv.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "utilities/globalDefinitions.hpp"
// The following routines and definitions are used internally in the
// compiler interface.
@@ -114,4 +115,9 @@
const char* basictype_to_str(BasicType t);
const char basictype_to_char(BasicType t);
+jbyte *ci_card_table_address();
+template <typename T> T ci_card_table_address_as() {
+ return reinterpret_cast<T>(ci_card_table_address());
+}
+
#endif // SHARE_VM_CI_CIUTILITIES_HPP
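For orientation, both forms of the new helper are exercised at call sites elsewhere in this patch:

jbyte*   base = ci_card_table_address();               // typed pointer (c1_LIRGenerator.cpp)
intptr_t imm  = ci_card_table_address_as<intptr_t>();  // immediate operand (c1_Runtime1_x86.cpp)
int64_t  disp = ci_card_table_address_as<int64_t>();   // 64-bit displacement (stubGenerator_x86_64.cpp)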
--- a/src/hotspot/share/classfile/javaClasses.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/classfile/javaClasses.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -53,6 +53,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
--- a/src/hotspot/share/classfile/modules.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/classfile/modules.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/reflection.hpp"
#include "utilities/stringUtils.hpp"
#include "utilities/utf8.hpp"
--- a/src/hotspot/share/classfile/verifier.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/classfile/verifier.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.hpp"
--- a/src/hotspot/share/code/debugInfo.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/code/debugInfo.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.hpp"
// Constructors
--- a/src/hotspot/share/code/dependencies.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/code/dependencies.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
#include "oops/objArrayKlass.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
--- a/src/hotspot/share/code/nmethod.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -44,6 +44,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/share/code/oopRecorder.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/code/oopRecorder.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -29,6 +29,7 @@
#include "code/oopRecorder.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"
#ifdef ASSERT
template <class T> int ValueRecorder<T>::_find_index_calls = 0;
--- a/src/hotspot/share/code/relocInfo_ext.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/code/relocInfo_ext.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/relocInfo_ext.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
@@ -59,8 +60,9 @@
}
case symbolic_Relocation::card_table_reference: {
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- return (address)ct->byte_map_base;
+ CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTable* ct = ctbs->card_table();
+ return (address)ct->byte_map_base();
}
case symbolic_Relocation::mark_bits_reference: {
return (address)Universe::verify_mark_bits();
--- a/src/hotspot/share/compiler/disassembler.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/compiler/disassembler.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,11 @@
*/
#include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "code/codeCache.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
@@ -318,7 +320,7 @@
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->is_a(BarrierSet::CardTableModRef) &&
- adr == (address)(barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base)) {
+ adr == ci_card_table_address_as<address>()) {
st->print("word_map_base");
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
return;
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,9 @@
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
"CompactibleFreeListSpace._dict_par_lock", true,
Monitor::_safepoint_check_never),
- _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+ _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
CMSRescanMultiple),
- _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
+ _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
CMSConcMarkMultiple),
_collector(NULL),
_preconsumptionDirtyCardClosure(NULL)
@@ -609,7 +609,7 @@
FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
CMSCollector* collector,
ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) :
FilteringDCTOC(sp, cl, precision, boundary),
@@ -693,7 +693,7 @@
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
@@ -2828,7 +2828,7 @@
}
const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
- const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
+ const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
return ergo_max;
}
@@ -2865,15 +2865,15 @@
// The "size" of each task is fixed according to rescan_task_size.
assert(n_threads > 0, "Unexpected n_threads argument");
const size_t task_size = marking_task_size();
- assert(task_size > CardTableModRefBS::card_size_in_words &&
- (task_size % CardTableModRefBS::card_size_in_words == 0),
+ assert(task_size > CardTable::card_size_in_words &&
+ (task_size % CardTable::card_size_in_words == 0),
"Otherwise arithmetic below would be incorrect");
MemRegion span = _old_gen->reserved();
if (low != NULL) {
if (span.contains(low)) {
// Align low down to a card boundary so that
// we can use block_offset_careful() on span boundaries.
- HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
+ HeapWord* aligned_low = align_down(low, CardTable::card_size);
// Clip span prefix at aligned_low
span = span.intersection(MemRegion(aligned_low, span.end()));
} else if (low > span.end()) {
@@ -2881,7 +2881,7 @@
} // else use entire span
}
assert(span.is_empty() ||
- ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
+ ((uintptr_t)span.start() % CardTable::card_size == 0),
"span should start at a card boundary");
size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
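The arithmetic behind these assertions, on a 64-bit VM: card_size is 512 bytes, so card_size_in_words is 64, and one mod-union-table word (one bit per card, BitsPerWord == 64) covers 64 * 64 = 4096 heap words. Keeping task sizes a multiple of the card size lets worker threads claim whole MUT words without synchronization. Sketch of the resulting task count:

#include "gc/shared/cardTable.hpp"

// Ceiling division over card-aligned task sizes, as asserted above.
size_t n_tasks_for(size_t span_words, size_t task_size_words) {
  assert(task_size_words % CardTable::card_size_in_words == 0, "card aligned");
  return (span_words + task_size_words - 1) / task_size_words;
}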
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "gc/cms/adaptiveFreeList.hpp"
#include "gc/cms/promotionInfo.hpp"
#include "gc/shared/blockOffsetTable.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "logging/log.hpp"
#include "memory/binaryTreeDictionary.hpp"
@@ -432,7 +433,7 @@
// Override: provides a DCTO_CL specific to this kind of space.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -448,7 +448,7 @@
_start_sampling(false),
_between_prologue_and_epilogue(false),
_markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
- _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
+ _modUnionTable((CardTable::card_shift - LogHeapWordSize),
-1 /* lock-free */, "No_lock" /* dummy */),
_modUnionClosurePar(&_modUnionTable),
// Adjust my span to cover old (cms) gen
@@ -900,7 +900,7 @@
// card size.
MemRegion mr(start,
align_up(start + obj_size,
- CardTableModRefBS::card_size /* bytes */));
+ CardTable::card_size /* bytes */));
if (par) {
_modUnionTable.par_mark_range(mr);
} else {
@@ -3223,7 +3223,7 @@
if (sp->used_region().contains(_restart_addr)) {
// Align down to a card boundary for the start of 0th task
// for this space.
- aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
+ aligned_start = align_down(_restart_addr, CardTable::card_size);
}
size_t chunk_size = sp->marking_task_size();
@@ -4026,17 +4026,16 @@
startTimer();
sample_eden();
// Get and clear dirty region from card table
- dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
- MemRegion(nextAddr, endAddr),
- true,
- CardTableModRefBS::precleaned_card_val());
+ dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
+ true,
+ CardTable::precleaned_card_val());
assert(dirtyRegion.start() >= nextAddr,
"returned region inconsistent?");
}
lastAddr = dirtyRegion.end();
numDirtyCards =
- dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
+ dirtyRegion.word_size()/CardTable::card_size_in_words;
if (!dirtyRegion.is_empty()) {
stopTimer();
@@ -4050,7 +4049,7 @@
if (stop_point != NULL) {
assert((_collectorState == AbortablePreclean && should_abort_preclean()),
"Should only be AbortablePreclean.");
- _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
+ _ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
if (should_abort_preclean()) {
break; // out of preclean loop
} else {
@@ -4577,7 +4576,7 @@
SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
assert(pst->valid(), "Uninitialized use?");
uint nth_task = 0;
- const int alignment = CardTableModRefBS::card_size * BitsPerWord;
+ const int alignment = CardTable::card_size * BitsPerWord;
MemRegion span = sp->used_region();
HeapWord* start_addr = span.start();
HeapWord* end_addr = align_up(span.end(), alignment);
@@ -4603,7 +4602,7 @@
// precleaned, and setting the corresponding bits in the mod union
// table. Since we have been careful to partition at Card and MUT-word
// boundaries no synchronization is needed between parallel threads.
- _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
+ _collector->_ct->dirty_card_iterate(this_span,
&modUnionClosure);
// Having transferred these marks into the modUnionTable,
@@ -4914,16 +4913,14 @@
// mod union table.
{
ModUnionClosure modUnionClosure(&_modUnionTable);
- _ct->ct_bs()->dirty_card_iterate(
- _cmsGen->used_region(),
- &modUnionClosure);
+ _ct->dirty_card_iterate(_cmsGen->used_region(),
+ &modUnionClosure);
}
// Having transferred these marks into the modUnionTable, we just need
// to rescan the marked objects on the dirty cards in the modUnionTable.
// The initial marking may have been done during an asynchronous
// collection so there may be dirty bits in the mod-union table.
- const int alignment =
- CardTableModRefBS::card_size * BitsPerWord;
+ const int alignment = CardTable::card_size * BitsPerWord;
{
// ... First handle dirty cards in CMS gen
markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
@@ -5633,9 +5630,9 @@
}
assert(sz > 0, "size must be nonzero");
HeapWord* next_block = addr + sz;
- HeapWord* next_card = align_up(next_block, CardTableModRefBS::card_size);
- assert(align_down((uintptr_t)addr, CardTableModRefBS::card_size) <
- align_down((uintptr_t)next_card, CardTableModRefBS::card_size),
+ HeapWord* next_card = align_up(next_block, CardTable::card_size);
+ assert(align_down((uintptr_t)addr, CardTable::card_size) <
+ align_down((uintptr_t)next_card, CardTable::card_size),
"must be different cards");
return next_card;
}
@@ -6294,7 +6291,7 @@
assert(_markStack->isEmpty(), "would cause duplicates on stack");
assert(_span.contains(addr), "Out of bounds _finger?");
_finger = addr;
- _threshold = align_up(_finger, CardTableModRefBS::card_size);
+ _threshold = align_up(_finger, CardTable::card_size);
}
// Should revisit to see if this should be restructured for
@@ -6321,7 +6318,7 @@
// during the preclean or remark phase. (CMSCleanOnEnter)
if (CMSCleanOnEnter) {
size_t sz = _collector->block_size_using_printezis_bits(addr);
- HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
+ HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
// Bump _threshold to end_card_addr; note that
@@ -6408,9 +6405,9 @@
// _threshold is always kept card-aligned but _finger isn't
// always card-aligned.
HeapWord* old_threshold = _threshold;
- assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
+ assert(is_aligned(old_threshold, CardTable::card_size),
"_threshold should always be card-aligned");
- _threshold = align_up(_finger, CardTableModRefBS::card_size);
+ _threshold = align_up(_finger, CardTable::card_size);
MemRegion mr(old_threshold, _threshold);
assert(!mr.is_empty(), "Control point invariant");
assert(_span.contains(mr), "Should clear within span");
@@ -6520,9 +6517,9 @@
// _threshold is always kept card-aligned but _finger isn't
// always card-aligned.
HeapWord* old_threshold = _threshold;
- assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
+ assert(is_aligned(old_threshold, CardTable::card_size),
"_threshold should always be card-aligned");
- _threshold = align_up(_finger, CardTableModRefBS::card_size);
+ _threshold = align_up(_finger, CardTable::card_size);
MemRegion mr(old_threshold, _threshold);
assert(!mr.is_empty(), "Control point invariant");
assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
@@ -6890,7 +6887,7 @@
// are required.
if (obj->is_objArray()) {
size_t sz = obj->size();
- HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
+ HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
_mod_union_table->mark_range(redirty_range);
@@ -7003,15 +7000,15 @@
}
void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
- assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
+ assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
"mr should be aligned to start at a card boundary");
// We'd like to assert:
- // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
+ // assert(mr.word_size()%CardTable::card_size_in_words == 0,
// "mr should be a range of cards");
// However, that would be too strong in one case -- the last
// partition ends at _unallocated_block which, in general, can be
// an arbitrary boundary, not necessarily card aligned.
- _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
+ _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
_space->object_iterate_mem(mr, &_scan_cl);
}
@@ -7620,7 +7617,7 @@
// table.
if (obj->is_objArray()) {
size_t sz = obj->size();
- HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
+ HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
MemRegion redirty_range = MemRegion(addr, end_card_addr);
assert(!redirty_range.is_empty(), "Arithmetical tautology");
_collector->_modUnionTable.mark_range(redirty_range);
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -77,7 +77,7 @@
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0. and for the mod union table we have
-// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
+// shifter == CardTable::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -448,7 +448,7 @@
// This is superfluous except at the end of the space;
// we should do better than this XXX
MemRegion mr2(mr.start(), align_up(mr.end(),
- CardTableModRefBS::card_size /* bytes */));
+ CardTable::card_size /* bytes */));
_t->mark_range(mr2);
}
@@ -457,7 +457,7 @@
// This is superfluous except at the end of the space;
// we should do better than this XXX
MemRegion mr2(mr.start(), align_up(mr.end(),
- CardTableModRefBS::card_size /* bytes */));
+ CardTable::card_size /* bytes */));
_t->par_mark_range(mr2);
}
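
The mod-union marking above rounds range ends up to a card boundary so that
whole cards are marked. A minimal sketch of that alignment arithmetic
(align_down and align_up are reimplemented here for illustration, and the
512-byte card size is an assumed default):

    #include <cassert>
    #include <cstdint>

    static uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }
    static uintptr_t align_up(uintptr_t p, uintptr_t a)   { return (p + a - 1) & ~(a - 1); }

    int main() {
      const uintptr_t card_size = 512;               // bytes per card
      uintptr_t addr = 0x1000 + 100;                 // an address inside some card
      assert(align_down(addr, card_size) == 0x1000); // start of the enclosing card
      assert(align_up(addr, card_size)   == 0x1200); // start of the next card
      return 0;
    }
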
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
-void CardTableModRefBSForCTRS::
+void CardTableRS::
non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
OopsInGenClosure* cl,
CardTableRS* ct,
@@ -82,7 +82,7 @@
}
void
-CardTableModRefBSForCTRS::
+CardTableRS::
process_stride(Space* sp,
MemRegion used,
jint stride, int n_strides,
@@ -162,7 +162,7 @@
}
void
-CardTableModRefBSForCTRS::
+CardTableRS::
process_chunk_boundaries(Space* sp,
DirtyCardToOopClosure* dcto_cl,
MemRegion chunk_mr,
@@ -371,7 +371,7 @@
}
void
-CardTableModRefBSForCTRS::
+CardTableRS::
get_LNC_array_for_space(Space* sp,
jbyte**& lowest_non_clean,
uintptr_t& lowest_non_clean_base_chunk_index,
--- a/src/hotspot/share/gc/g1/g1CardCounts.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,12 +40,12 @@
size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) {
// We keep card counts for every card, so the size of the card counts table must
// be the same as the card table.
- return G1SATBCardTableLoggingModRefBS::compute_size(mem_region_size_in_words);
+ return G1CardTable::compute_size(mem_region_size_in_words);
}
size_t G1CardCounts::heap_map_factor() {
// See G1CardCounts::compute_size() why we reuse the card table value.
- return G1SATBCardTableLoggingModRefBS::heap_map_factor();
+ return G1CardTable::heap_map_factor();
}
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
@@ -72,8 +72,8 @@
// threshold limit is no more than this.
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
- _ct_bs = _g1h->g1_barrier_set();
- _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
+ _ct = _g1h->card_table();
+ _ct_bot = _ct->byte_for_const(_g1h->reserved_region().start());
_card_counts = (jubyte*) mapper->reserved().start();
_reserved_max_card_num = mapper->reserved().byte_size();
@@ -116,17 +116,17 @@
void G1CardCounts::clear_range(MemRegion mr) {
if (has_count_table()) {
- const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start());
+ const jbyte* from_card_ptr = _ct->byte_for_const(mr.start());
// We use the last address in the range as the range could represent the
// last region in the heap. In which case trying to find the card will be an
// OOB access to the card table.
- const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last());
+ const jbyte* last_card_ptr = _ct->byte_for_const(mr.last());
#ifdef ASSERT
- HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
+ HeapWord* start_addr = _ct->addr_for(from_card_ptr);
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
- HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
- assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
+ HeapWord* last_addr = _ct->addr_for(last_card_ptr);
+ assert((last_addr + G1CardTable::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card.");
#endif // ASSERT
// Clear the counts for the (exclusive) card range.
--- a/src/hotspot/share/gc/g1/g1CardCounts.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardCounts.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
#define SHARE_VM_GC_G1_G1CARDCOUNTS_HPP
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "memory/allocation.hpp"
#include "memory/virtualspace.hpp"
@@ -56,6 +57,7 @@
G1CardCountsMappingChangedListener _listener;
G1CollectedHeap* _g1h;
+ G1CardTable* _ct;
// The table of counts
jubyte* _card_counts;
@@ -66,9 +68,6 @@
// CardTable bottom.
const jbyte* _ct_bot;
- // Barrier set
- CardTableModRefBS* _ct_bs;
-
// Returns true if the card counts table has been reserved.
bool has_reserved_count_table() { return _card_counts != NULL; }
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,10 +68,10 @@
assert(max_capacity % num_max_regions == 0,
"Given capacity must be evenly divisible by region size.");
size_t region_size = max_capacity / num_max_regions;
- assert(region_size % (G1SATBCardTableModRefBS::card_size * BitsPerWord) == 0,
+ assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
"Region size must be evenly divisible by area covered by a single word.");
_max_capacity = max_capacity;
- _cards_per_region = region_size / G1SATBCardTableModRefBS::card_size;
+ _cards_per_region = region_size / G1CardTable::card_size;
_live_regions_size_in_bits = live_region_bitmap_size_in_bits();
_live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
@@ -85,11 +85,11 @@
}
size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
- return _max_capacity / (_cards_per_region << G1SATBCardTableModRefBS::card_shift);
+ return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
}
size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
- return _max_capacity >> G1SATBCardTableModRefBS::card_shift;
+ return _max_capacity >> G1CardTable::card_shift;
}
// Helper class that provides functionality to generate the Live Data Count
@@ -132,7 +132,7 @@
void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
- BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
+ BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
_card_bm.clear_range(start_idx, end_idx);
}
@@ -140,7 +140,7 @@
// Mark the card liveness bitmap for the object spanning from start to end.
void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
- BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTableModRefBS::card_size));
+ BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
@@ -168,7 +168,7 @@
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
// card table index.
- BitMap::idx_t card_num = uintptr_t(addr) >> CardTableModRefBS::card_shift;
+ BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
return card_num - _heap_card_bias;
}
@@ -262,7 +262,7 @@
// Calculate the card number for the bottom of the heap. Used
// in biasing indexes into the accounting card bitmaps.
_heap_card_bias =
- uintptr_t(base_address) >> CardTableModRefBS::card_shift;
+ uintptr_t(base_address) >> G1CardTable::card_shift;
}
};
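
The bias computed above turns an absolute card number into a zero-based index
into the accounting bitmaps. A compact sketch, assuming a 512-byte card
(card_shift = 9) and an arbitrary example heap base:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned  card_shift     = 9;                // log2(512)
      const uintptr_t heap_base      = 0x700000000ULL;   // example base (assumption)
      const uintptr_t heap_card_bias = heap_base >> card_shift;

      uintptr_t addr     = heap_base + 5 * 512 + 17;     // inside the sixth card
      uintptr_t card_num = addr >> card_shift;           // absolute card number
      assert(card_num - heap_card_bias == 5);            // zero-based bitmap index
      return 0;
    }
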
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTable.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/shared/memset_with_concurrent_readers.hpp"
+#include "logging/log.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+bool G1CardTable::mark_card_deferred(size_t card_index) {
+ jbyte val = _byte_map[card_index];
+ // It's already processed
+ if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
+ return false;
+ }
+
+ // Cached bit can be installed either on a clean card or on a claimed card.
+ jbyte new_val = val;
+ if (val == clean_card_val()) {
+ new_val = (jbyte)deferred_card_val();
+ } else {
+ if (val & claimed_card_val()) {
+ new_val = val | (jbyte)deferred_card_val();
+ }
+ }
+ if (new_val != val) {
+ Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
+ }
+ return true;
+}
+
+void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
+ jbyte *const first = byte_for(mr.start());
+ jbyte *const last = byte_after(mr.last());
+
+ memset_with_concurrent_readers(first, g1_young_gen, last - first);
+}
+
+#ifndef PRODUCT
+void G1CardTable::verify_g1_young_region(MemRegion mr) {
+ verify_region(mr, g1_young_gen, true);
+}
+#endif
+
+void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
+ // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
+ MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
+ _card_table->clear(mr);
+}
+
+void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
+ mapper->set_mapping_changed_listener(&_listener);
+
+ _byte_map_size = mapper->reserved().byte_size();
+
+ _guard_index = cards_required(_whole_heap.word_size()) - 1;
+ _last_valid_index = _guard_index - 1;
+
+ HeapWord* low_bound = _whole_heap.start();
+ HeapWord* high_bound = _whole_heap.end();
+
+ _cur_covered_regions = 1;
+ _covered[0] = _whole_heap;
+
+ _byte_map = (jbyte*) mapper->reserved().start();
+ _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+ assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+ assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+ log_trace(gc, barrier)("G1CardTable::G1CardTable: ");
+ log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+ p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+ log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
+}
+
+bool G1CardTable::is_in_young(oop obj) const {
+ volatile jbyte* p = byte_for(obj);
+ return *p == G1CardTable::g1_young_card_val();
+}
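
The deferred-marking logic that moved into G1CardTable above is easiest to
follow on plain bytes. A single-threaded model (the real code publishes the
new value with Atomic::cmpxchg and tolerates lost races; the byte encodings,
clean = -1, clean mask = -32, claimed = 2, deferred = 4, mirror the usual
CardTable values but are restated here as assumptions):

    #include <cassert>
    #include <cstdint>

    const int8_t clean_card      = -1;   // 0xFF
    const int8_t clean_card_mask = -32;  // 0xE0: zero where the flag bits live
    const int8_t claimed_card    =  2;
    const int8_t deferred_card   =  4;

    bool mark_card_deferred(int8_t& card) {
      if ((card & (clean_card_mask | deferred_card)) == deferred_card)
        return false;                    // already deferred: nothing to enqueue
      if (card == clean_card)
        card = deferred_card;            // clean -> deferred
      else if (card & claimed_card)
        card |= deferred_card;           // claimed -> claimed + deferred
      return true;
    }

    int main() {
      int8_t card = clean_card;
      assert(mark_card_deferred(card) && card == deferred_card);
      assert(!mark_card_deferred(card));                 // second call is a no-op
      card = claimed_card;
      assert(mark_card_deferred(card) && card == (claimed_card | deferred_card));
      return 0;
    }
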
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTable.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CARDTABLE_HPP
+#define SHARE_VM_GC_G1_G1CARDTABLE_HPP
+
+#include "gc/g1/g1RegionToSpaceMapper.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/macros.hpp"
+
+class G1CardTable;
+class G1RegionToSpaceMapper;
+
+class G1CardTableChangedListener : public G1MappingChangedListener {
+ private:
+ G1CardTable* _card_table;
+ public:
+ G1CardTableChangedListener() : _card_table(NULL) { }
+
+ void set_card_table(G1CardTable* card_table) { _card_table = card_table; }
+
+ virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+};
+
+class G1CardTable: public CardTable {
+ friend class VMStructs;
+ friend class G1CardTableChangedListener;
+
+ G1CardTableChangedListener _listener;
+
+ enum G1CardValues {
+ g1_young_gen = CT_MR_BS_last_reserved << 1
+ };
+
+public:
+ G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
+ _listener.set_card_table(this);
+ }
+ bool is_card_dirty(size_t card_index) {
+ return _byte_map[card_index] == dirty_card_val();
+ }
+
+ static jbyte g1_young_card_val() { return g1_young_gen; }
+
+/*
+ Claimed and deferred bits are used together in G1 during the evacuation
+ pause. These bits can have the following state transitions:
+ 1. The claimed bit can be put over any other card state, except that
+ the "dirty -> dirty and claimed" transition is checked for in
+ G1 code and is not used.
+ 2. The deferred bit can be set only if the previous state of the card
+ was either clean or claimed. mark_card_deferred() is wait-free.
+ We do not care whether the operation is successful, because if
+ it is not it will only result in a duplicate entry in the update
+ buffer because of the "cache-miss". So it's not worth spinning.
+ */
+
+ bool is_card_claimed(size_t card_index) {
+ jbyte val = _byte_map[card_index];
+ return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
+ }
+
+ inline void set_card_claimed(size_t card_index);
+
+ void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
+ void g1_mark_as_young(const MemRegion& mr);
+
+ bool mark_card_deferred(size_t card_index);
+
+ bool is_card_deferred(size_t card_index) {
+ jbyte val = _byte_map[card_index];
+ return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
+ }
+
+ static size_t compute_size(size_t mem_region_size_in_words) {
+ size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
+ return ReservedSpace::allocation_align_size_up(number_of_slots);
+ }
+
+ // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
+ static size_t heap_map_factor() { return card_size; }
+
+ void initialize() {}
+ void initialize(G1RegionToSpaceMapper* mapper);
+
+ virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+
+ virtual bool is_in_young(oop obj) const;
+};
+
+#endif // SHARE_VM_GC_G1_G1CARDTABLE_HPP
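
compute_size() above reserves one card-table byte per card of covered heap,
and heap_map_factor() states the same ratio from the other direction. A quick
sketch of the arithmetic (512-byte cards and 8-byte HeapWords are assumed
defaults; the ReservedSpace page rounding is omitted):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t card_size          = 512;  // heap bytes covered per table byte
      const size_t HeapWordSize       = 8;
      const size_t card_size_in_words = card_size / HeapWordSize;   // 64

      size_t heap_words = (1024u * 1024 * 1024) / HeapWordSize;     // a 1 GiB heap
      size_t slots      = heap_words / card_size_in_words;          // table bytes
      assert(slots == 2u * 1024 * 1024);  // 2 MiB of card table per 1 GiB of heap
      return 0;
    }
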
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTable.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
+#define SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
+
+#include "gc/g1/g1CardTable.hpp"
+
+void G1CardTable::set_card_claimed(size_t card_index) {
+ jbyte val = _byte_map[card_index];
+ if (val == clean_card_val()) {
+ val = (jbyte)claimed_card_val();
+ } else {
+ val |= (jbyte)claimed_card_val();
+ }
+ _byte_map[card_index] = val;
+}
+
+#endif // SHARE_VM_GC_G1_G1CARDTABLE_INLINE_HPP
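
set_card_claimed() above has one subtlety: a clean card is all ones (-1), so
the claimed bit cannot simply be ORed in. A single-threaded sketch with the
same assumed encodings (clean = -1, dirty = 0, claimed = 2):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int8_t clean_card = -1, dirty_card = 0, claimed_card = 2;

      auto set_card_claimed = [&](int8_t& card) {
        if (card == clean_card) card = claimed_card;   // replace the all-ones value
        else                    card |= claimed_card;  // otherwise just set the bit
      };

      int8_t a = clean_card;  set_card_claimed(a);  assert(a == claimed_card);
      int8_t b = dirty_card;  set_card_claimed(b);  assert(b == (dirty_card | claimed_card));
      return 0;
    }
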
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -52,6 +52,7 @@
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
@@ -103,10 +104,10 @@
private:
size_t _num_dirtied;
G1CollectedHeap* _g1h;
- G1SATBCardTableLoggingModRefBS* _g1_bs;
+ G1CardTable* _g1_ct;
HeapRegion* region_for_card(jbyte* card_ptr) const {
- return _g1h->heap_region_containing(_g1_bs->addr_for(card_ptr));
+ return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
}
bool will_become_free(HeapRegion* hr) const {
@@ -117,14 +118,14 @@
public:
RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
- _num_dirtied(0), _g1h(g1h), _g1_bs(g1h->g1_barrier_set()) { }
+ _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
HeapRegion* hr = region_for_card(card_ptr);
// Should only dirty cards in regions that won't be freed.
if (!will_become_free(hr)) {
- *card_ptr = CardTableModRefBS::dirty_card_val();
+ *card_ptr = G1CardTable::dirty_card_val();
_num_dirtied++;
}
@@ -1465,6 +1466,7 @@
_young_gen_sampling_thread(NULL),
_collector_policy(collector_policy),
_soft_ref_policy(),
+ _card_table(NULL),
_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_eden_pool(NULL),
@@ -1616,11 +1618,13 @@
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
// Create the barrier set for the entire reserved region.
- G1SATBCardTableLoggingModRefBS* bs
- = new G1SATBCardTableLoggingModRefBS(reserved_region());
+ G1CardTable* ct = new G1CardTable(reserved_region());
+ ct->initialize();
+ G1SATBCardTableLoggingModRefBS* bs = new G1SATBCardTableLoggingModRefBS(ct);
bs->initialize();
assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
set_barrier_set(bs);
+ _card_table = ct;
// Create the hot card cache.
_hot_card_cache = new G1HotCardCache(this);
@@ -1651,8 +1655,8 @@
G1RegionToSpaceMapper* cardtable_storage =
create_aux_memory_mapper("Card Table",
- G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
- G1SATBCardTableLoggingModRefBS::heap_map_factor());
+ G1CardTable::compute_size(g1_rs.size() / HeapWordSize),
+ G1CardTable::heap_map_factor());
G1RegionToSpaceMapper* card_counts_storage =
create_aux_memory_mapper("Card Counts Table",
@@ -1666,7 +1670,7 @@
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
- g1_barrier_set()->initialize(cardtable_storage);
+ _card_table->initialize(cardtable_storage);
// Do later initialization work for concurrent refinement.
_hot_card_cache->initialize(card_counts_storage);
@@ -1676,7 +1680,7 @@
guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
// Also create a G1 rem set.
- _g1_rem_set = new G1RemSet(this, g1_barrier_set(), _hot_card_cache);
+ _g1_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
_g1_rem_set->initialize(max_capacity(), max_regions());
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
@@ -2691,17 +2695,17 @@
if (!r->rem_set()->is_empty()) {
guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
"Found a not-small remembered set here. This is inconsistent with previous assumptions.");
- G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
+ G1CardTable* ct = g1h->card_table();
HeapRegionRemSetIterator hrrs(r->rem_set());
size_t card_index;
while (hrrs.has_next(card_index)) {
- jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
+ jbyte* card_ptr = (jbyte*)ct->byte_for_index(card_index);
// The remembered set might contain references to already freed
// regions. Filter out such entries to avoid failing card table
// verification.
- if (g1h->is_in_closed_subset(bs->addr_for(card_ptr))) {
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
- *card_ptr = CardTableModRefBS::dirty_card_val();
+ if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
+ *card_ptr = G1CardTable::dirty_card_val();
_dcq.enqueue(card_ptr);
}
}
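
The hunks above change ownership: the heap now creates and initializes the
G1CardTable itself, and the barrier set merely wraps a pointer to it. A
minimal model of that construction order (all types here are illustrative
stand-ins, not HotSpot classes):

    #include <cassert>

    struct CardTableModel {
      bool initialized = false;
      void initialize() { initialized = true; }    // real init takes a mapper
    };

    struct BarrierSetModel {
      CardTableModel* ct;                          // borrowed, not owned
      explicit BarrierSetModel(CardTableModel* t) : ct(t) {}
    };

    struct HeapModel {
      CardTableModel  card_table;                  // heap owns the card table
      BarrierSetModel barrier{&card_table};        // barrier set wraps it
      void init() { card_table.initialize(); }     // heap drives initialization
    };

    int main() {
      HeapModel h;
      h.init();
      assert(h.barrier.ct == &h.card_table && h.card_table.initialized);
      return 0;
    }
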
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -28,6 +28,7 @@
#include "gc/g1/evacuationInfo.hpp"
#include "gc/g1/g1AllocationContext.hpp"
#include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
@@ -150,6 +151,7 @@
WorkGang* _workers;
G1CollectorPolicy* _collector_policy;
+ G1CardTable* _card_table;
SoftRefPolicy _soft_ref_policy;
@@ -1178,6 +1180,10 @@
G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; }
+ G1CardTable* card_table() const {
+ return _card_table;
+ }
+
// Iteration functions.
// Iterate over all objects, calling "cl.do_object" on each.
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -123,7 +123,7 @@
assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
MemRegion mr(start, end);
- g1_barrier_set()->g1_mark_as_young(mr);
+ card_table()->g1_mark_as_young(mr);
}
inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,12 +38,12 @@
class UpdateRSetDeferred : public ExtendedOopClosure {
private:
G1CollectedHeap* _g1;
- DirtyCardQueue *_dcq;
- G1SATBCardTableModRefBS* _ct_bs;
+ DirtyCardQueue* _dcq;
+ G1CardTable* _ct;
public:
UpdateRSetDeferred(DirtyCardQueue* dcq) :
- _g1(G1CollectedHeap::heap()), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
+ _g1(G1CollectedHeap::heap()), _ct(_g1->card_table()), _dcq(dcq) {}
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
virtual void do_oop( oop* p) { do_oop_work(p); }
@@ -59,9 +59,9 @@
if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
return;
}
- size_t card_index = _ct_bs->index_for(p);
- if (_ct_bs->mark_card_deferred(card_index)) {
- _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
+ size_t card_index = _ct->index_for(p);
+ if (_ct->mark_card_deferred(card_index)) {
+ _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
}
}
};
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -112,7 +112,7 @@
hr->reset_gc_time_stamp();
hr->rem_set()->clear();
- _g1h->g1_barrier_set()->clear(MemRegion(hr->bottom(), hr->end()));
+ _g1h->card_table()->clear(MemRegion(hr->bottom(), hr->end()));
if (_g1h->g1_hot_card_cache()->use_cache()) {
_g1h->g1_hot_card_cache()->reset_card_counts(hr);
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -604,10 +604,9 @@
#ifndef PRODUCT
class G1VerifyCardTableCleanup: public HeapRegionClosure {
G1HeapVerifier* _verifier;
- G1SATBCardTableModRefBS* _ct_bs;
public:
- G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs)
- : _verifier(verifier), _ct_bs(ct_bs) { }
+ G1VerifyCardTableCleanup(G1HeapVerifier* verifier)
+ : _verifier(verifier) { }
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_survivor()) {
_verifier->verify_dirty_region(r);
@@ -620,16 +619,16 @@
void G1HeapVerifier::verify_card_table_cleanup() {
if (G1VerifyCTCleanup || VerifyAfterGC) {
- G1VerifyCardTableCleanup cleanup_verifier(this, _g1h->g1_barrier_set());
+ G1VerifyCardTableCleanup cleanup_verifier(this);
_g1h->heap_region_iterate(&cleanup_verifier);
}
}
void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) {
// All of the region should be clean.
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+ G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->end());
- ct_bs->verify_not_dirty_region(mr);
+ ct->verify_not_dirty_region(mr);
}
void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) {
@@ -640,12 +639,12 @@
// not dirty that area (one less thing to have to do while holding
// a lock). So we can only verify that [bottom(),pre_dummy_top()]
// is dirty.
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+ G1CardTable* ct = _g1h->card_table();
MemRegion mr(hr->bottom(), hr->pre_dummy_top());
if (hr->is_young()) {
- ct_bs->verify_g1_young_region(mr);
+ ct->verify_g1_young_region(mr);
} else {
- ct_bs->verify_dirty_region(mr);
+ ct->verify_dirty_region(mr);
}
}
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
: _g1h(g1h),
_refs(g1h->task_queue(worker_id)),
_dcq(&g1h->dirty_card_queue_set()),
- _ct_bs(g1h->g1_barrier_set()),
+ _ct(g1h->card_table()),
_closures(NULL),
_hash_seed(17),
_worker_id(worker_id),
@@ -390,7 +390,6 @@
return forward_ptr;
}
}
-
G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
_g1h(g1h),
_states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
- G1SATBCardTableModRefBS* _ct_bs;
+ G1CardTable* _ct;
G1EvacuationRootClosures* _closures;
G1PLABAllocator* _plab_allocator;
@@ -72,7 +72,7 @@
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
DirtyCardQueue& dirty_card_queue() { return _dcq; }
- G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
+ G1CardTable* ct() { return _ct; }
InCSetState dest(InCSetState original) const {
assert(original.is_valid(),
@@ -104,10 +104,10 @@
// If the field originates from the to-space, we don't need to include it
// in the remembered set updates.
if (!from->is_young()) {
- size_t card_index = ctbs()->index_for(p);
+ size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
- if (ctbs()->mark_card_deferred(card_index)) {
- dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+ if (ct()->mark_card_deferred(card_index)) {
+ dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
}
}
}
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1FromCardCache.hpp"
@@ -74,7 +75,7 @@
static size_t chunk_size() { return M; }
void work(uint worker_id) {
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
+ G1CardTable* ct = _g1h->card_table();
while (_cur_dirty_regions < _num_dirty_regions) {
size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
@@ -83,7 +84,7 @@
for (size_t i = next; i < max; i++) {
HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
if (!r->is_survivor()) {
- ct_bs->clear(MemRegion(r->bottom(), r->end()));
+ ct->clear(MemRegion(r->bottom(), r->end()));
}
}
}
@@ -280,12 +281,12 @@
};
G1RemSet::G1RemSet(G1CollectedHeap* g1,
- CardTableModRefBS* ct_bs,
+ G1CardTable* ct,
G1HotCardCache* hot_card_cache) :
_g1(g1),
_scan_state(new G1RemSetScanState()),
_num_conc_refined_cards(0),
- _ct_bs(ct_bs),
+ _ct(ct),
_g1p(_g1->g1_policy()),
_hot_card_cache(hot_card_cache),
_prev_period_summary() {
@@ -328,7 +329,7 @@
_worker_i(worker_i) {
_g1h = G1CollectedHeap::heap();
_bot = _g1h->bot();
- _ct_bs = _g1h->g1_barrier_set();
+ _ct = _g1h->card_table();
}
void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
@@ -345,7 +346,7 @@
}
void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
- _ct_bs->set_card_claimed(card_index);
+ _ct->set_card_claimed(card_index);
_scan_state->add_dirty_region(region_idx_for_card);
}
@@ -381,7 +382,7 @@
_cards_claimed++;
// If the card is dirty, then G1 will scan it during Update RS.
- if (_ct_bs->is_card_claimed(card_index) || _ct_bs->is_card_dirty(card_index)) {
+ if (_ct->is_card_claimed(card_index) || _ct->is_card_dirty(card_index)) {
continue;
}
@@ -535,15 +536,15 @@
_g1->heap_region_par_iterate_from_worker_offset(&scrub_cl, hrclaimer, worker_num);
}
-inline void check_card_ptr(jbyte* card_ptr, CardTableModRefBS* ct_bs) {
+inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
#ifdef ASSERT
G1CollectedHeap* g1 = G1CollectedHeap::heap();
- assert(g1->is_in_exact(ct_bs->addr_for(card_ptr)),
+ assert(g1->is_in_exact(ct->addr_for(card_ptr)),
"Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
p2i(card_ptr),
- ct_bs->index_for(ct_bs->addr_for(card_ptr)),
- p2i(ct_bs->addr_for(card_ptr)),
- g1->addr_to_region(ct_bs->addr_for(card_ptr)));
+ ct->index_for(ct->addr_for(card_ptr)),
+ p2i(ct->addr_for(card_ptr)),
+ g1->addr_to_region(ct->addr_for(card_ptr)));
#endif
}
@@ -551,15 +552,15 @@
uint worker_i) {
assert(!_g1->is_gc_active(), "Only call concurrently");
- check_card_ptr(card_ptr, _ct_bs);
+ check_card_ptr(card_ptr, _ct);
// If the card is no longer dirty, nothing to do.
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
return;
}
// Construct the region representing the card.
- HeapWord* start = _ct_bs->addr_for(card_ptr);
+ HeapWord* start = _ct->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start);
@@ -619,7 +620,7 @@
return;
} else if (card_ptr != orig_card_ptr) {
// Original card was inserted and an old card was evicted.
- start = _ct_bs->addr_for(card_ptr);
+ start = _ct->addr_for(card_ptr);
r = _g1->heap_region_containing(start);
// Check whether the region formerly in the cache should be
@@ -654,7 +655,7 @@
// Okay to clean and process the card now. There are still some
// stale card cases that may be detected by iteration and dealt with
// as iteration failure.
- *const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
+ *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
// This fence serves two purposes. First, the card must be cleaned
// before processing the contents. Second, we can't proceed with
@@ -666,7 +667,7 @@
// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
- HeapWord* end = start + CardTableModRefBS::card_size_in_words;
+ HeapWord* end = start + G1CardTable::card_size_in_words;
MemRegion dirty_region(start, MIN2(scan_limit, end));
assert(!dirty_region.is_empty(), "sanity");
@@ -683,8 +684,8 @@
if (!card_processed) {
// The card might have gotten re-dirtied and re-enqueued while we
// worked. (In fact, it's pretty likely.)
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
- *card_ptr = CardTableModRefBS::dirty_card_val();
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
+ *card_ptr = G1CardTable::dirty_card_val();
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
DirtyCardQueue* sdcq =
@@ -700,20 +701,20 @@
G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
assert(_g1->is_gc_active(), "Only call during GC");
- check_card_ptr(card_ptr, _ct_bs);
+ check_card_ptr(card_ptr, _ct);
// If the card is no longer dirty, nothing to do. This covers cards that were already
// scanned as parts of the remembered sets.
- if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+ if (*card_ptr != G1CardTable::dirty_card_val()) {
return false;
}
// We claim lazily (so races are possible but they're benign), which reduces the
// number of potential duplicate scans (multiple threads may enqueue the same card twice).
- *card_ptr = CardTableModRefBS::clean_card_val() | CardTableModRefBS::claimed_card_val();
+ *card_ptr = G1CardTable::clean_card_val() | G1CardTable::claimed_card_val();
// Construct the region representing the card.
- HeapWord* card_start = _ct_bs->addr_for(card_ptr);
+ HeapWord* card_start = _ct->addr_for(card_ptr);
// And find the region containing it.
uint const card_region_idx = _g1->addr_to_region(card_start);
@@ -726,7 +727,7 @@
// Don't use addr_for(card_ptr + 1) which can ask for
// a card beyond the heap.
- HeapWord* card_end = card_start + CardTableModRefBS::card_size_in_words;
+ HeapWord* card_end = card_start + G1CardTable::card_size_in_words;
MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
assert(!dirty_region.is_empty(), "sanity");
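
The refinement path above cleans the card and only then examines the heap,
with a StoreLoad fence in between, so any mutator write the scan misses is
guaranteed to re-dirty the card and re-enqueue it. A sketch of the idiom
using std::atomic (illustrative only: HotSpot uses its own OrderAccess
primitives, and the card byte and heap word here are stand-ins):

    #include <atomic>
    #include <cstdint>

    std::atomic<int8_t> card{0};        // 0 is dirty_card in the assumed encoding
    std::atomic<long>   heap_word{42};  // stand-in for the memory the card covers

    void refine() {
      if (card.load(std::memory_order_relaxed) != 0)
        return;                                           // no longer dirty
      card.store(-1, std::memory_order_relaxed);          // clean it (-1 is clean)
      std::atomic_thread_fence(std::memory_order_seq_cst);   // the StoreLoad fence
      // Only now read the contents: a write racing with this scan happens
      // after the card was cleaned, so its post-barrier re-dirties the card.
      (void)heap_word.load(std::memory_order_relaxed);
    }

    int main() { refine(); return 0; }
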
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardLiveData.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1RemSetSummary.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
@@ -72,7 +73,7 @@
G1CollectedHeap* _g1;
size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
- CardTableModRefBS* _ct_bs;
+ G1CardTable* _ct;
G1Policy* _g1p;
G1HotCardCache* _hot_card_cache;
@@ -93,7 +94,7 @@
void cleanupHRRS();
G1RemSet(G1CollectedHeap* g1,
- CardTableModRefBS* ct_bs,
+ G1CardTable* ct,
G1HotCardCache* hot_card_cache);
~G1RemSet();
@@ -162,7 +163,7 @@
CodeBlobClosure* _code_root_cl;
G1BlockOffsetTable* _bot;
- G1SATBCardTableModRefBS *_ct_bs;
+ G1CardTable *_ct;
double _strong_code_root_scan_time_sec;
uint _worker_i;
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -23,22 +23,20 @@
*/
#include "precompiled.hpp"
+#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
-#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"
G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(
- MemRegion whole_heap,
+ G1CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
- CardTableModRefBS(whole_heap, fake_rtti.add_tag(BarrierSet::G1SATBCT))
+ CardTableModRefBS(card_table, fake_rtti.add_tag(BarrierSet::G1SATBCT))
{ }
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
@@ -80,88 +78,17 @@
}
}
-bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
- jbyte val = _byte_map[card_index];
- // It's already processed
- if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
- return false;
- }
-
- // Cached bit can be installed either on a clean card or on a claimed card.
- jbyte new_val = val;
- if (val == clean_card_val()) {
- new_val = (jbyte)deferred_card_val();
- } else {
- if (val & claimed_card_val()) {
- new_val = val | (jbyte)deferred_card_val();
- }
- }
- if (new_val != val) {
- Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
- }
- return true;
-}
-
-void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
- jbyte *const first = byte_for(mr.start());
- jbyte *const last = byte_after(mr.last());
-
- memset_with_concurrent_readers(first, g1_young_gen, last - first);
-}
-
-#ifndef PRODUCT
-void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
- verify_region(mr, g1_young_gen, true);
-}
-#endif
-
-void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
- // Default value for a clean card on the card table is -1. So we cannot take advantage of the zero_filled parameter.
- MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
- _card_table->clear(mr);
-}
-
G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
- G1SATBCardTableModRefBS(whole_heap, BarrierSet::FakeRtti(G1SATBCTLogging)),
- _dcqs(JavaThread::dirty_card_queue_set()),
- _listener()
-{
- _listener.set_card_table(this);
-}
-
-void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) {
- initialize_deferred_card_mark_barriers();
- mapper->set_mapping_changed_listener(&_listener);
-
- _byte_map_size = mapper->reserved().byte_size();
-
- _guard_index = cards_required(_whole_heap.word_size()) - 1;
- _last_valid_index = _guard_index - 1;
-
- HeapWord* low_bound = _whole_heap.start();
- HeapWord* high_bound = _whole_heap.end();
-
- _cur_covered_regions = 1;
- _covered[0] = _whole_heap;
-
- _byte_map = (jbyte*) mapper->reserved().start();
- byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
- assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
- assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-
- log_trace(gc, barrier)("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: ");
- log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
- p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
- log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
-}
+G1SATBCardTableLoggingModRefBS(G1CardTable* card_table) :
+ G1SATBCardTableModRefBS(card_table, BarrierSet::FakeRtti(G1SATBCTLogging)),
+ _dcqs(JavaThread::dirty_card_queue_set()) {}
void G1SATBCardTableLoggingModRefBS::write_ref_field_post_slow(volatile jbyte* byte) {
// In the slow path, we know a card is not young
- assert(*byte != g1_young_gen, "slow path invoked without filtering");
+ assert(*byte != G1CardTable::g1_young_card_val(), "slow path invoked without filtering");
OrderAccess::storeload();
- if (*byte != dirty_card) {
- *byte = dirty_card;
+ if (*byte != G1CardTable::dirty_card_val()) {
+ *byte = G1CardTable::dirty_card_val();
Thread* thr = Thread::current();
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
@@ -174,16 +101,15 @@
}
}
-void
-G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
+void G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr) {
if (mr.is_empty()) {
return;
}
- volatile jbyte* byte = byte_for(mr.start());
- jbyte* last_byte = byte_for(mr.last());
+ volatile jbyte* byte = _card_table->byte_for(mr.start());
+ jbyte* last_byte = _card_table->byte_for(mr.last());
Thread* thr = Thread::current();
// skip all consecutive young cards
- for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+ for (; byte <= last_byte && *byte == G1CardTable::g1_young_card_val(); byte++);
if (byte <= last_byte) {
OrderAccess::storeload();
@@ -191,11 +117,11 @@
if (thr->is_Java_thread()) {
JavaThread* jt = (JavaThread*)thr;
for (; byte <= last_byte; byte++) {
- if (*byte == g1_young_gen) {
+ if (*byte == G1CardTable::g1_young_card_val()) {
continue;
}
- if (*byte != dirty_card) {
- *byte = dirty_card;
+ if (*byte != G1CardTable::dirty_card_val()) {
+ *byte = G1CardTable::dirty_card_val();
jt->dirty_card_queue().enqueue(byte);
}
}
@@ -203,11 +129,11 @@
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
for (; byte <= last_byte; byte++) {
- if (*byte == g1_young_gen) {
+ if (*byte == G1CardTable::g1_young_card_val()) {
continue;
}
- if (*byte != dirty_card) {
- *byte = dirty_card;
+ if (*byte != G1CardTable::dirty_card_val()) {
+ *byte = G1CardTable::dirty_card_val();
_dcqs.shared_dirty_card_queue()->enqueue(byte);
}
}
@@ -215,11 +141,6 @@
}
}
-bool G1SATBCardTableModRefBS::is_in_young(oop obj) const {
- volatile jbyte* p = byte_for((void*)obj);
- return *p == g1_young_card_val();
-}
-
void G1SATBCardTableLoggingModRefBS::on_thread_attach(JavaThread* thread) {
// This method initializes the SATB and dirty card queues before a
// JavaThread is added to the Java thread list. Right now, we don't
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -33,6 +33,8 @@
class DirtyCardQueueSet;
class G1SATBCardTableLoggingModRefBS;
+class CardTable;
+class G1CardTable;
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
@@ -40,16 +42,10 @@
class G1SATBCardTableModRefBS: public CardTableModRefBS {
friend class VMStructs;
protected:
- enum G1CardValues {
- g1_young_gen = CT_MR_BS_last_reserved << 1
- };
-
- G1SATBCardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
+ G1SATBCardTableModRefBS(G1CardTable* table, const BarrierSet::FakeRtti& fake_rtti);
~G1SATBCardTableModRefBS() { }
public:
- static int g1_young_card_val() { return g1_young_gen; }
-
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph.
static void enqueue(oop pre_val);
@@ -62,38 +58,6 @@
template <DecoratorSet decorators, typename T>
void write_ref_field_pre(T* field);
-
-/*
- Claimed and deferred bits are used together in G1 during the evacuation
- pause. These bits can have the following state transitions:
- 1. The claimed bit can be put over any other card state. Except that
- the "dirty -> dirty and claimed" transition is checked for in
- G1 code and is not used.
- 2. Deferred bit can be set only if the previous state of the card
- was either clean or claimed. mark_card_deferred() is wait-free.
- We do not care if the operation is be successful because if
- it does not it will only result in duplicate entry in the update
- buffer because of the "cache-miss". So it's not worth spinning.
- */
-
- bool is_card_claimed(size_t card_index) {
- jbyte val = _byte_map[card_index];
- return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
- }
-
- inline void set_card_claimed(size_t card_index);
-
- void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
- void g1_mark_as_young(const MemRegion& mr);
-
- bool mark_card_deferred(size_t card_index);
-
- bool is_card_deferred(size_t card_index) {
- jbyte val = _byte_map[card_index];
- return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
- }
-
- virtual bool is_in_young(oop obj) const;
};
template<>
@@ -106,42 +70,14 @@
typedef G1SATBCardTableModRefBS type;
};
-class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
- private:
- G1SATBCardTableLoggingModRefBS* _card_table;
- public:
- G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }
-
- void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }
-
- virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
-};
-
// Adds card-table logging to the post-barrier.
// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
- friend class G1SATBCardTableLoggingModRefBSChangedListener;
private:
- G1SATBCardTableLoggingModRefBSChangedListener _listener;
DirtyCardQueueSet& _dcqs;
public:
- static size_t compute_size(size_t mem_region_size_in_words) {
- size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
- return ReservedSpace::allocation_align_size_up(number_of_slots);
- }
-
- // Returns how many bytes of the heap a single byte of the Card Table corresponds to.
- static size_t heap_map_factor() {
- return CardTableModRefBS::card_size;
- }
-
- G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
-
- virtual void initialize() { }
- virtual void initialize(G1RegionToSpaceMapper* mapper);
-
- virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }
+ G1SATBCardTableLoggingModRefBS(G1CardTable* card_table);
// NB: if you do a whole-heap invalidation, the "usual invariant" defined
// above no longer applies.
@@ -157,10 +93,6 @@
virtual void on_thread_attach(JavaThread* thread);
virtual void on_thread_detach(JavaThread* thread);
- virtual bool card_mark_must_follow_store() const {
- return true;
- }
-
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = G1SATBCardTableLoggingModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {
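The header diff above is the core of the refactoring: G1SATBCardTableModRefBS stops inheriting card-table state (young-card values, claimed/deferred bits, resizing) and is instead constructed over a G1CardTable it merely consults. A rough sketch of that shape, with model class names that are not the real HotSpot declarations:

// Before: the barrier set inherited the byte map and resizing logic.
// After: it is constructed over a card table it merely consults.
class ModelCardTable { /* byte map, covered regions, resizing */ };
class ModelG1CardTable : public ModelCardTable { /* G1 card values */ };

class ModelG1SATBCardTableModRefBS {
  ModelG1CardTable* _card_table;   // consulted by the write barriers
 public:
  explicit ModelG1SATBCardTableModRefBS(ModelG1CardTable* table)
    : _card_table(table) { }
};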
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -25,8 +25,9 @@
#ifndef SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
#define SHARE_VM_GC_G1_G1SATBCARDTABLEMODREFBS_INLINE_HPP
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "gc/g1/g1SATBCardTableModRefBS.hpp"
template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) {
@@ -43,23 +44,13 @@
template <DecoratorSet decorators, typename T>
inline void G1SATBCardTableLoggingModRefBS::write_ref_field_post(T* field, oop new_val) {
- volatile jbyte* byte = byte_for(field);
- if (*byte != g1_young_gen) {
+ volatile jbyte* byte = _card_table->byte_for(field);
+ if (*byte != G1CardTable::g1_young_card_val()) {
// Take a slow path for cards in old
write_ref_field_post_slow(byte);
}
}
-void G1SATBCardTableModRefBS::set_card_claimed(size_t card_index) {
- jbyte val = _byte_map[card_index];
- if (val == clean_card_val()) {
- val = (jbyte)claimed_card_val();
- } else {
- val |= (jbyte)claimed_card_val();
- }
- _byte_map[card_index] = val;
-}
-
inline void G1SATBCardTableModRefBS::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
// Archive roots need to be enqueued since they add subgraphs to the
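The post-barrier fast path above now obtains the card byte from _card_table->byte_for(field) rather than from the barrier set itself. byte_for is plain shift-and-offset arithmetic; a self-contained model, assuming 512-byte cards (card shift 9) and writing the biasing of byte_map_base as an explicit heap-base subtraction:

#include <cstdint>

const int kCardShift = 9;   // 512-byte cards (assumed value)

// Model of byte_for: every 512-byte span of heap shares one card byte.
// The real code pre-biases byte_map_base so no subtraction is needed.
int8_t* byte_for(uintptr_t addr, uintptr_t heap_base, int8_t* byte_map) {
  return &byte_map[(addr - heap_base) >> kCardShift];
}

// byte_for(base + 0, ...) and byte_for(base + 511, ...) yield the same
// byte; byte_for(base + 512, ...) yields the next one.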
--- a/src/hotspot/share/gc/g1/heapRegion.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,7 +100,7 @@
guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once");
- CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
+ CardsPerRegion = GrainBytes >> G1CardTable::card_shift;
if (G1HeapRegionSize != GrainBytes) {
FLAG_SET_ERGO(size_t, G1HeapRegionSize, GrainBytes);
@@ -139,9 +139,8 @@
assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
- CardTableModRefBS* ct_bs =
- barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
- ct_bs->clear(MemRegion(bottom(), end()));
+ G1CardTable* ct = G1CollectedHeap::heap()->card_table();
+ ct->clear(MemRegion(bottom(), end()));
}
void HeapRegion::calc_gc_efficiency() {
@@ -463,7 +462,7 @@
class G1VerificationClosure : public OopClosure {
protected:
G1CollectedHeap* _g1h;
- CardTableModRefBS* _bs;
+  G1CardTable* _ct;
oop _containing_obj;
bool _failures;
int _n_failures;
@@ -473,7 +472,7 @@
// _vo == UseNextMarking -> use "next" marking information,
// _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
- _g1h(g1h), _bs(barrier_set_cast<CardTableModRefBS>(g1h->barrier_set())),
+ _g1h(g1h), _ct(g1h->card_table()),
_containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
}
@@ -576,9 +575,9 @@
if (from != NULL && to != NULL &&
from != to &&
!to->is_pinned()) {
- jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
- jbyte cv_field = *_bs->byte_for_const(p);
- const jbyte dirty = CardTableModRefBS::dirty_card_val();
+ jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
+ jbyte cv_field = *_ct->byte_for_const(p);
+ const jbyte dirty = G1CardTable::dirty_card_val();
bool is_bad = !(from->is_young()
|| to->rem_set()->contains_reference(p)
@@ -834,7 +833,6 @@
CompactibleSpace::clear(mangle_space);
reset_bot();
}
-
#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
mangle_unused_area_complete();
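The verification closure above reduces to one predicate: a cross-region reference missing from the remembered set is acceptable only while the card of the field or of the containing object is still dirty, meaning refinement has not yet caught up. A compact, illustrative restatement of that check:

#include <cstdint>

// Simplified; the real closure additionally treats object arrays
// slightly differently when deciding which card value to trust.
bool is_bad_reference(bool from_is_young, bool in_remembered_set,
                      int8_t cv_obj, int8_t cv_field, int8_t dirty) {
  return !(from_is_young
           || in_remembered_set
           || cv_field == dirty     // field's card awaits refinement
           || cv_obj == dirty);     // object's card awaits refinement
}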
--- a/src/hotspot/share/gc/g1/heapRegion.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "gc/g1/heapRegionType.hpp"
#include "gc/g1/survRateGroup.hpp"
#include "gc/shared/ageTable.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "utilities/macros.hpp"
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@
if (loc_hr->is_in_reserved(from)) {
size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
CardIdx_t from_card = (CardIdx_t)
- hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
+ hw_offset >> (G1CardTable::card_shift - LogHeapWordSize);
assert((size_t)from_card < HeapRegion::CardsPerRegion,
"Must be in range.");
@@ -170,7 +170,7 @@
bool contains_reference(OopOrNarrowOopStar from) const {
assert(hr()->is_in_reserved(from), "Precondition.");
size_t card_ind = pointer_delta(from, hr()->bottom(),
- CardTableModRefBS::card_size);
+ G1CardTable::card_size);
return _bm.at(card_ind);
}
@@ -354,7 +354,7 @@
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
uint cur_hrm_ind = _hr->hrm_index();
- int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
+ int from_card = (int)(uintptr_t(from) >> G1CardTable::card_shift);
if (G1FromCardCache::contains_or_replace(tid, cur_hrm_ind, from_card)) {
assert(contains_reference(from), "We just found " PTR_FORMAT " in the FromCardCache", p2i(from));
@@ -382,7 +382,7 @@
uintptr_t from_hr_bot_card_index =
uintptr_t(from_hr->bottom())
- >> CardTableModRefBS::card_shift;
+ >> G1CardTable::card_shift;
CardIdx_t card_index = from_card - from_hr_bot_card_index;
assert((size_t)card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
@@ -671,9 +671,9 @@
} else {
uintptr_t from_card =
- (uintptr_t(from) >> CardTableModRefBS::card_shift);
+ (uintptr_t(from) >> G1CardTable::card_shift);
uintptr_t hr_bot_card_index =
- uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
+ uintptr_t(hr->bottom()) >> G1CardTable::card_shift;
assert(from_card >= hr_bot_card_index, "Inv");
CardIdx_t card_index = from_card - hr_bot_card_index;
assert((size_t)card_index < HeapRegion::CardsPerRegion,
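All the hunks above perform the same two-step arithmetic: shift an address right by the card shift to get a global card number, then subtract the card number of the region bottom to get a region-relative index. Worked through with assumed constants (512-byte cards, 1 MiB regions, hence 2048 cards per region):

#include <cassert>
#include <cstdint>

const int    kCardShift      = 9;      // 512-byte cards (assumed value)
const size_t kCardsPerRegion = 2048;   // 1 MiB region / 512-byte cards (assumed)

// Region-relative card index of a reference at address 'from'.
size_t region_card_index(uintptr_t from, uintptr_t region_bottom) {
  uintptr_t from_card   = from          >> kCardShift;
  uintptr_t bottom_card = region_bottom >> kCardShift;
  assert(from_card >= bottom_card && "reference below region bottom");
  size_t index = (size_t)(from_card - bottom_card);
  assert(index < kCardsPerRegion && "Must be in range.");
  return index;
}

// Example: region_bottom = 0x100000 and from = 0x100A00 give
// (0x100A00 >> 9) - (0x100000 >> 9) = 2053 - 2048 = 5.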
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/g1/sparsePRT.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
// Check that the card array element type can represent all cards in the region.
// Choose a large SparsePRTEntry::card_elem_t (e.g. CardIdx_t) if required.
assert(((size_t)1 << (sizeof(SparsePRTEntry::card_elem_t) * BitsPerByte)) *
- G1SATBCardTableModRefBS::card_size >= HeapRegionBounds::max_size(), "precondition");
+ G1CardTable::card_size >= HeapRegionBounds::max_size(), "precondition");
assert(G1RSetSparseRegionEntries > 0, "precondition");
_region_ind = region_ind;
_next_index = RSHashTable::NullEntry;
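The precondition above is a capacity check: card_elem_t must be wide enough to index every card of the largest possible region, i.e. 2^(bits per element) * card_size must reach the maximum region size. The same check as a compile-time sketch, with assumed values (512-byte cards, 32 MiB maximum region, 16-bit element):

#include <cstddef>
#include <cstdint>

typedef uint16_t card_elem_t;                     // assumed element type
const size_t kCardSize      = 512;                // assumed card size
const size_t kMaxRegionSize = 32 * 1024 * 1024;   // assumed 32 MiB maximum

// Same shape as the runtime assert above, checked at compile time here:
// 2^16 elements * 512 bytes per card = 32 MiB, exactly the assumed maximum.
static_assert(((size_t)1 << (sizeof(card_elem_t) * 8)) * kCardSize
                  >= kMaxRegionSize,
              "card_elem_t cannot index every card of the largest region");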
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -509,7 +509,7 @@
}
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
- ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr);
space_invariants();
}
--- a/src/hotspot/share/gc/parallel/cardTableExtension.cpp Fri Mar 09 00:28:50 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,683 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
-#include "gc/parallel/gcTaskManager.hpp"
-#include "gc/parallel/objectStartArray.inline.hpp"
-#include "gc/parallel/parallelScavengeHeap.inline.hpp"
-#include "gc/parallel/psPromotionManager.inline.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/psTasks.hpp"
-#include "gc/parallel/psYoungGen.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
-#include "utilities/align.hpp"
-
-// Checks an individual oop for missing precise marks. Mark
-// may be either dirty or newgen.
-class CheckForUnmarkedOops : public OopClosure {
- private:
- PSYoungGen* _young_gen;
- CardTableExtension* _card_table;
- HeapWord* _unmarked_addr;
-
- protected:
- template <class T> void do_oop_work(T* p) {
- oop obj = oopDesc::load_decode_heap_oop(p);
- if (_young_gen->is_in_reserved(obj) &&
- !_card_table->addr_is_marked_imprecise(p)) {
- // Don't overwrite the first missing card mark
- if (_unmarked_addr == NULL) {
- _unmarked_addr = (HeapWord*)p;
- }
- }
- }
-
- public:
- CheckForUnmarkedOops(PSYoungGen* young_gen, CardTableExtension* card_table) :
- _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
-
- virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
- virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
-
- bool has_unmarked_oop() {
- return _unmarked_addr != NULL;
- }
-};
-
-// Checks all objects for the existence of some type of mark,
-// precise or imprecise, dirty or newgen.
-class CheckForUnmarkedObjects : public ObjectClosure {
- private:
- PSYoungGen* _young_gen;
- CardTableExtension* _card_table;
-
- public:
- CheckForUnmarkedObjects() {
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- _young_gen = heap->young_gen();
- _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
- // No point in asserting barrier set type here. Need to make CardTableExtension
- // a unique barrier set type.
- }
-
- // Card marks are not precise. The current system can leave us with
- // a mismatch of precise marks and beginning of object marks. This means
- // we test for missing precise marks first. If any are found, we don't
- // fail unless the object head is also unmarked.
- virtual void do_object(oop obj) {
- CheckForUnmarkedOops object_check(_young_gen, _card_table);
- obj->oop_iterate_no_header(&object_check);
- if (object_check.has_unmarked_oop()) {
- guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
- }
- }
-};
-
-// Checks for precise marking of oops as newgen.
-class CheckForPreciseMarks : public OopClosure {
- private:
- PSYoungGen* _young_gen;
- CardTableExtension* _card_table;
-
- protected:
- template <class T> void do_oop_work(T* p) {
- oop obj = oopDesc::load_decode_heap_oop_not_null(p);
- if (_young_gen->is_in_reserved(obj)) {
- assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
- _card_table->set_card_newgen(p);
- }
- }
-
- public:
- CheckForPreciseMarks( PSYoungGen* young_gen, CardTableExtension* card_table ) :
- _young_gen(young_gen), _card_table(card_table) { }
-
- virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
- virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
-};
-
-// We get passed the space_top value to prevent us from traversing into
-// the old_gen promotion labs, which cannot be safely parsed.
-
-// Do not call this method if the space is empty.
-// It is a waste to start tasks and get here only to
-// do no work. If this method needs to be called
-// when the space is empty, fix the calculation of
-// end_card to allow sp_top == sp->bottom().
-
-void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
- MutableSpace* sp,
- HeapWord* space_top,
- PSPromotionManager* pm,
- uint stripe_number,
- uint stripe_total) {
- int ssize = 128; // Naked constant! Work unit = 64k.
- int dirty_card_count = 0;
-
- // It is a waste to get here if empty.
- assert(sp->bottom() < sp->top(), "Should not be called if empty");
- oop* sp_top = (oop*)space_top;
- jbyte* start_card = byte_for(sp->bottom());
- jbyte* end_card = byte_for(sp_top - 1) + 1;
- oop* last_scanned = NULL; // Prevent scanning objects more than once
- // The width of the stripe ssize*stripe_total must be
- // consistent with the number of stripes so that the complete slice
- // is covered.
- size_t slice_width = ssize * stripe_total;
- for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
- jbyte* worker_start_card = slice + stripe_number * ssize;
- if (worker_start_card >= end_card)
- return; // We're done.
-
- jbyte* worker_end_card = worker_start_card + ssize;
- if (worker_end_card > end_card)
- worker_end_card = end_card;
-
- // We do not want to scan objects more than once. In order to accomplish
- // this, we assert that any object with an object head inside our 'slice'
- // belongs to us. We may need to extend the range of scanned cards if the
- // last object continues into the next 'slice'.
- //
- // Note! ending cards are exclusive!
- HeapWord* slice_start = addr_for(worker_start_card);
- HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));
-
-#ifdef ASSERT
- if (GCWorkerDelayMillis > 0) {
- // Delay 1 worker so that it proceeds after all the work
- // has been completed.
- if (stripe_number < 2) {
- os::sleep(Thread::current(), GCWorkerDelayMillis, false);
- }
- }
-#endif
-
- // If there are not objects starting within the chunk, skip it.
- if (!start_array->object_starts_in_range(slice_start, slice_end)) {
- continue;
- }
- // Update our beginning addr
- HeapWord* first_object = start_array->object_start(slice_start);
- debug_only(oop* first_object_within_slice = (oop*) first_object;)
- if (first_object < slice_start) {
- last_scanned = (oop*)(first_object + oop(first_object)->size());
- debug_only(first_object_within_slice = last_scanned;)
- worker_start_card = byte_for(last_scanned);
- }
-
- // Update the ending addr
- if (slice_end < (HeapWord*)sp_top) {
- // The subtraction is important! An object may start precisely at slice_end.
- HeapWord* last_object = start_array->object_start(slice_end - 1);
- slice_end = last_object + oop(last_object)->size();
- // worker_end_card is exclusive, so bump it one past the end of last_object's
- // covered span.
- worker_end_card = byte_for(slice_end) + 1;
-
- if (worker_end_card > end_card)
- worker_end_card = end_card;
- }
-
- assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
- assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
- assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
- // Note that worker_start_card >= worker_end_card is legal, and happens when
- // an object spans an entire slice.
- assert(worker_start_card <= end_card, "worker start card beyond end card");
- assert(worker_end_card <= end_card, "worker end card beyond end card");
-
- jbyte* current_card = worker_start_card;
- while (current_card < worker_end_card) {
- // Find an unclean card.
- while (current_card < worker_end_card && card_is_clean(*current_card)) {
- current_card++;
- }
- jbyte* first_unclean_card = current_card;
-
- // Find the end of a run of contiguous unclean cards
- while (current_card < worker_end_card && !card_is_clean(*current_card)) {
- while (current_card < worker_end_card && !card_is_clean(*current_card)) {
- current_card++;
- }
-
- if (current_card < worker_end_card) {
- // Some objects may be large enough to span several cards. If such
- // an object has more than one dirty card, separated by a clean card,
- // we will attempt to scan it twice. The test against "last_scanned"
- // prevents the redundant object scan, but it does not prevent newly
- // marked cards from being cleaned.
- HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
- size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
- HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
- jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
- assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
- if (ending_card_of_last_object > current_card) {
- // This means the object spans the next complete card.
- // We need to bump the current_card to ending_card_of_last_object
- current_card = ending_card_of_last_object;
- }
- }
- }
- jbyte* following_clean_card = current_card;
-
- if (first_unclean_card < worker_end_card) {
- oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
- assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
- // "p" should always be >= "last_scanned" because newly GC dirtied
- // cards are no longer scanned again (see comment at end
- // of loop on the increment of "current_card"). Test that
- // hypothesis before removing this code.
- // If this code is removed, deal with the first time through
- // the loop when the last_scanned is the object starting in
- // the previous slice.
- assert((p >= last_scanned) ||
- (last_scanned == first_object_within_slice),
- "Should no longer be possible");
- if (p < last_scanned) {
- // Avoid scanning more than once; this can happen because
- // newgen cards set by GC may a different set than the
- // originally dirty set
- p = last_scanned;
- }
- oop* to = (oop*)addr_for(following_clean_card);
-
- // Test slice_end first!
- if ((HeapWord*)to > slice_end) {
- to = (oop*)slice_end;
- } else if (to > sp_top) {
- to = sp_top;
- }
-
- // we know which cards to scan, now clear them
- if (first_unclean_card <= worker_start_card+1)
- first_unclean_card = worker_start_card+1;
- if (following_clean_card >= worker_end_card-1)
- following_clean_card = worker_end_card-1;
-
- while (first_unclean_card < following_clean_card) {
- *first_unclean_card++ = clean_card;
- }
-
- const int interval = PrefetchScanIntervalInBytes;
- // scan all objects in the range
- if (interval != 0) {
- while (p < to) {
- Prefetch::write(p, interval);
- oop m = oop(p);
- assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
- pm->push_contents(m);
- p += m->size();
- }
- pm->drain_stacks_cond_depth();
- } else {
- while (p < to) {
- oop m = oop(p);
- assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
- pm->push_contents(m);
- p += m->size();
- }
- pm->drain_stacks_cond_depth();
- }
- last_scanned = p;
- }
- // "current_card" is still the "following_clean_card" or
- // the current_card is >= the worker_end_card so the
- // loop will not execute again.
- assert((current_card == following_clean_card) ||
- (current_card >= worker_end_card),
- "current_card should only be incremented if it still equals "
- "following_clean_card");
- // Increment current_card so that it is not processed again.
- // It may now be dirty because a old-to-young pointer was
- // found on it an updated. If it is now dirty, it cannot be
- // be safely cleaned in the next iteration.
- current_card++;
- }
- }
-}
-
-// This should be called before a scavenge.
-void CardTableExtension::verify_all_young_refs_imprecise() {
- CheckForUnmarkedObjects check;
-
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- PSOldGen* old_gen = heap->old_gen();
-
- old_gen->object_iterate(&check);
-}
-
-// This should be called immediately after a scavenge, before mutators resume.
-void CardTableExtension::verify_all_young_refs_precise() {
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- PSOldGen* old_gen = heap->old_gen();
-
- CheckForPreciseMarks check(
- heap->young_gen(),
- barrier_set_cast<CardTableExtension>(heap->barrier_set()));
-
- old_gen->oop_iterate_no_header(&check);
-
- verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
-}
-
-void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
- CardTableExtension* card_table =
- barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
-
- jbyte* bot = card_table->byte_for(mr.start());
- jbyte* top = card_table->byte_for(mr.end());
- while(bot <= top) {
- assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
- if (*bot == verify_card)
- *bot = youngergen_card;
- bot++;
- }
-}
-
-bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
- jbyte* p = byte_for(addr);
- jbyte val = *p;
-
- if (card_is_dirty(val))
- return true;
-
- if (card_is_newgen(val))
- return true;
-
- if (card_is_clean(val))
- return false;
-
- assert(false, "Found unhandled card mark type");
-
- return false;
-}
-
-// Also includes verify_card
-bool CardTableExtension::addr_is_marked_precise(void *addr) {
- jbyte* p = byte_for(addr);
- jbyte val = *p;
-
- if (card_is_newgen(val))
- return true;
-
- if (card_is_verify(val))
- return true;
-
- if (card_is_clean(val))
- return false;
-
- if (card_is_dirty(val))
- return false;
-
- assert(false, "Found unhandled card mark type");
-
- return false;
-}
-
-// Assumes that only the base or the end changes. This allows indentification
-// of the region that is being resized. The
-// CardTableModRefBS::resize_covered_region() is used for the normal case
-// where the covered regions are growing or shrinking at the high end.
-// The method resize_covered_region_by_end() is analogous to
-// CardTableModRefBS::resize_covered_region() but
-// for regions that grow or shrink at the low end.
-void CardTableExtension::resize_covered_region(MemRegion new_region) {
-
- for (int i = 0; i < _cur_covered_regions; i++) {
- if (_covered[i].start() == new_region.start()) {
- // Found a covered region with the same start as the
- // new region. The region is growing or shrinking
- // from the start of the region.
- resize_covered_region_by_start(new_region);
- return;
- }
- if (_covered[i].start() > new_region.start()) {
- break;
- }
- }
-
- int changed_region = -1;
- for (int j = 0; j < _cur_covered_regions; j++) {
- if (_covered[j].end() == new_region.end()) {
- changed_region = j;
- // This is a case where the covered region is growing or shrinking
- // at the start of the region.
- assert(changed_region != -1, "Don't expect to add a covered region");
- assert(_covered[changed_region].byte_size() != new_region.byte_size(),
- "The sizes should be different here");
- resize_covered_region_by_end(changed_region, new_region);
- return;
- }
- }
- // This should only be a new covered region (where no existing
- // covered region matches at the start or the end).
- assert(_cur_covered_regions < _max_covered_regions,
- "An existing region should have been found");
- resize_covered_region_by_start(new_region);
-}
-
-void CardTableExtension::resize_covered_region_by_start(MemRegion new_region) {
- CardTableModRefBS::resize_covered_region(new_region);
- debug_only(verify_guard();)
-}
-
-void CardTableExtension::resize_covered_region_by_end(int changed_region,
- MemRegion new_region) {
- assert(SafepointSynchronize::is_at_safepoint(),
- "Only expect an expansion at the low end at a GC");
- debug_only(verify_guard();)
-#ifdef ASSERT
- for (int k = 0; k < _cur_covered_regions; k++) {
- if (_covered[k].end() == new_region.end()) {
- assert(changed_region == k, "Changed region is incorrect");
- break;
- }
- }
-#endif
-
- // Commit new or uncommit old pages, if necessary.
- if (resize_commit_uncommit(changed_region, new_region)) {
- // Set the new start of the committed region
- resize_update_committed_table(changed_region, new_region);
- }
-
- // Update card table entries
- resize_update_card_table_entries(changed_region, new_region);
-
- // Update the covered region
- resize_update_covered_table(changed_region, new_region);
-
- int ind = changed_region;
- log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
- log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
- ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
- log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
- ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
- log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
- p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
- log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
- p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
-
- debug_only(verify_guard();)
-}
-
-bool CardTableExtension::resize_commit_uncommit(int changed_region,
- MemRegion new_region) {
- bool result = false;
- // Commit new or uncommit old pages, if necessary.
- MemRegion cur_committed = _committed[changed_region];
- assert(_covered[changed_region].end() == new_region.end(),
- "The ends of the regions are expected to match");
- // Extend the start of this _committed region to
- // to cover the start of any previous _committed region.
- // This forms overlapping regions, but never interior regions.
- HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
- if (min_prev_start < cur_committed.start()) {
- // Only really need to set start of "cur_committed" to
- // the new start (min_prev_start) but assertion checking code
- // below use cur_committed.end() so make it correct.
- MemRegion new_committed =
- MemRegion(min_prev_start, cur_committed.end());
- cur_committed = new_committed;
- }
-#ifdef ASSERT
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
- "Starts should have proper alignment");
-#endif
-
- jbyte* new_start = byte_for(new_region.start());
- // Round down because this is for the start address
- HeapWord* new_start_aligned =
- (HeapWord*)align_down((uintptr_t)new_start, os::vm_page_size());
- // The guard page is always committed and should not be committed over.
- // This method is used in cases where the generation is growing toward
- // lower addresses but the guard region is still at the end of the
- // card table. That still makes sense when looking for writes
- // off the end of the card table.
- if (new_start_aligned < cur_committed.start()) {
- // Expand the committed region
- //
- // Case A
- // |+ guard +|
- // |+ cur committed +++++++++|
- // |+ new committed +++++++++++++++++|
- //
- // Case B
- // |+ guard +|
- // |+ cur committed +|
- // |+ new committed +++++++|
- //
- // These are not expected because the calculation of the
- // cur committed region and the new committed region
- // share the same end for the covered region.
- // Case C
- // |+ guard +|
- // |+ cur committed +|
- // |+ new committed +++++++++++++++++|
- // Case D
- // |+ guard +|
- // |+ cur committed +++++++++++|
- // |+ new committed +++++++|
-
- HeapWord* new_end_for_commit =
- MIN2(cur_committed.end(), _guard_region.start());
- if(new_start_aligned < new_end_for_commit) {
- MemRegion new_committed =
- MemRegion(new_start_aligned, new_end_for_commit);
- os::commit_memory_or_exit((char*)new_committed.start(),
- new_committed.byte_size(), !ExecMem,
- "card table expansion");
- }
- result = true;
- } else if (new_start_aligned > cur_committed.start()) {
- // Shrink the committed region
-#if 0 // uncommitting space is currently unsafe because of the interactions
- // of growing and shrinking regions. One region A can uncommit space
- // that it owns but which is being used by another region B (maybe).
- // Region B has not committed the space because it was already
- // committed by region A.
- MemRegion uncommit_region = committed_unique_to_self(changed_region,
- MemRegion(cur_committed.start(), new_start_aligned));
- if (!uncommit_region.is_empty()) {
- if (!os::uncommit_memory((char*)uncommit_region.start(),
- uncommit_region.byte_size())) {
- // If the uncommit fails, ignore it. Let the
- // committed table resizing go even though the committed
- // table will over state the committed space.
- }
- }
-#else
- assert(!result, "Should be false with current workaround");
-#endif
- }
- assert(_committed[changed_region].end() == cur_committed.end(),
- "end should not change");
- return result;
-}
-
-void CardTableExtension::resize_update_committed_table(int changed_region,
- MemRegion new_region) {
-
- jbyte* new_start = byte_for(new_region.start());
- // Set the new start of the committed region
- HeapWord* new_start_aligned =
- (HeapWord*)align_down(new_start, os::vm_page_size());
- MemRegion new_committed = MemRegion(new_start_aligned,
- _committed[changed_region].end());
- _committed[changed_region] = new_committed;
- _committed[changed_region].set_start(new_start_aligned);
-}
-
-void CardTableExtension::resize_update_card_table_entries(int changed_region,
- MemRegion new_region) {
- debug_only(verify_guard();)
- MemRegion original_covered = _covered[changed_region];
- // Initialize the card entries. Only consider the
- // region covered by the card table (_whole_heap)
- jbyte* entry;
- if (new_region.start() < _whole_heap.start()) {
- entry = byte_for(_whole_heap.start());
- } else {
- entry = byte_for(new_region.start());
- }
- jbyte* end = byte_for(original_covered.start());
- // If _whole_heap starts at the original covered regions start,
- // this loop will not execute.
- while (entry < end) { *entry++ = clean_card; }
-}
-
-void CardTableExtension::resize_update_covered_table(int changed_region,
- MemRegion new_region) {
- // Update the covered region
- _covered[changed_region].set_start(new_region.start());
- _covered[changed_region].set_word_size(new_region.word_size());
-
- // reorder regions. There should only be at most 1 out
- // of order.
- for (int i = _cur_covered_regions-1 ; i > 0; i--) {
- if (_covered[i].start() < _covered[i-1].start()) {
- MemRegion covered_mr = _covered[i-1];
- _covered[i-1] = _covered[i];
- _covered[i] = covered_mr;
- MemRegion committed_mr = _committed[i-1];
- _committed[i-1] = _committed[i];
- _committed[i] = committed_mr;
- break;
- }
- }
-#ifdef ASSERT
- for (int m = 0; m < _cur_covered_regions-1; m++) {
- assert(_covered[m].start() <= _covered[m+1].start(),
- "Covered regions out of order");
- assert(_committed[m].start() <= _committed[m+1].start(),
- "Committed regions out of order");
- }
-#endif
-}
-
-// Returns the start of any committed region that is lower than
-// the target committed region (index ind) and that intersects the
-// target region. If none, return start of target region.
-//
-// -------------
-// | |
-// -------------
-// ------------
-// | target |
-// ------------
-// -------------
-// | |
-// -------------
-// ^ returns this
-//
-// -------------
-// | |
-// -------------
-// ------------
-// | target |
-// ------------
-// -------------
-// | |
-// -------------
-// ^ returns this
-
-HeapWord* CardTableExtension::lowest_prev_committed_start(int ind) const {
- assert(_cur_covered_regions >= 0, "Expecting at least on region");
- HeapWord* min_start = _committed[ind].start();
- for (int j = 0; j < ind; j++) {
- HeapWord* this_start = _committed[j].start();
- if ((this_start < min_start) &&
- !(_committed[j].intersection(_committed[ind])).is_empty()) {
- min_start = this_start;
- }
- }
- return min_start;
-}
-
-bool CardTableExtension::is_in_young(oop obj) const {
- return ParallelScavengeHeap::heap()->is_in_young(obj);
-}
--- a/src/hotspot/share/gc/parallel/cardTableExtension.hpp Fri Mar 09 00:28:50 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
-#define SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
-
-#include "gc/shared/cardTableModRefBS.hpp"
-
-class MutableSpace;
-class ObjectStartArray;
-class PSPromotionManager;
-class GCTaskQueue;
-
-class CardTableExtension : public CardTableModRefBS {
- private:
- // Support methods for resizing the card table.
- // resize_commit_uncommit() returns true if the pages were committed or
- // uncommitted
- bool resize_commit_uncommit(int changed_region, MemRegion new_region);
- void resize_update_card_table_entries(int changed_region,
- MemRegion new_region);
- void resize_update_committed_table(int changed_region, MemRegion new_region);
- void resize_update_covered_table(int changed_region, MemRegion new_region);
-
- protected:
-
- static void verify_all_young_refs_precise_helper(MemRegion mr);
-
- public:
- enum ExtendedCardValue {
- youngergen_card = CardTableModRefBS::CT_MR_BS_last_reserved + 1,
- verify_card = CardTableModRefBS::CT_MR_BS_last_reserved + 5
- };
-
- CardTableExtension(MemRegion whole_heap) :
- CardTableModRefBS(
- whole_heap,
- BarrierSet::FakeRtti(BarrierSet::CardTableExtension))
- { }
-
- // Scavenge support
- void scavenge_contents_parallel(ObjectStartArray* start_array,
- MutableSpace* sp,
- HeapWord* space_top,
- PSPromotionManager* pm,
- uint stripe_number,
- uint stripe_total);
-
- // Verification
- static void verify_all_young_refs_imprecise();
- static void verify_all_young_refs_precise();
-
- bool addr_is_marked_imprecise(void *addr);
- bool addr_is_marked_precise(void *addr);
-
- void set_card_newgen(void* addr) { jbyte* p = byte_for(addr); *p = verify_card; }
-
- // Testers for entries
- static bool card_is_dirty(int value) { return value == dirty_card; }
- static bool card_is_newgen(int value) { return value == youngergen_card; }
- static bool card_is_clean(int value) { return value == clean_card; }
- static bool card_is_verify(int value) { return value == verify_card; }
-
- // Card marking
- void inline_write_ref_field_gc(void* field, oop new_val) {
- jbyte* byte = byte_for(field);
- *byte = youngergen_card;
- }
-
- // Adaptive size policy support
- // Allows adjustment of the base and size of the covered regions
- void resize_covered_region(MemRegion new_region);
- // Finds the covered region to resize based on the start address
- // of the covered regions.
- void resize_covered_region_by_start(MemRegion new_region);
- // Finds the covered region to resize based on the end address
- // of the covered regions.
- void resize_covered_region_by_end(int changed_region, MemRegion new_region);
- // Finds the lowest start address of a covered region that is
- // previous (i.e., lower index) to the covered region with index "ind".
- HeapWord* lowest_prev_committed_start(int ind) const;
-
-#ifdef ASSERT
-
- bool is_valid_card_address(jbyte* addr) {
- return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
- }
-
-#endif // ASSERT
-
- // ReduceInitialCardMarks support
- virtual bool is_in_young(oop obj) const;
-
- virtual bool card_mark_must_follow_store() const {
- return false;
- }
-};
-
-template<>
-struct BarrierSet::GetName<CardTableExtension> {
- static const BarrierSet::Name value = BarrierSet::CardTableExtension;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::CardTableExtension> {
- typedef ::CardTableExtension type;
-};
-
-#endif // SHARE_VM_GC_PARALLEL_CARDTABLEEXTENSION_HPP
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
void ObjectStartArray::initialize(MemRegion reserved_region) {
// We're based on the assumption that we use the same
// size blocks as the card table.
- assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
+ assert((int)block_size == (int)CardTable::card_size, "Sanity");
assert((int)block_size <= 512, "block_size must be less than or equal to 512");
// Calculate how much space must be reserved
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -26,7 +26,6 @@
#include "code/codeCache.hpp"
#include "gc/parallel/adjoiningGenerations.hpp"
#include "gc/parallel/adjoiningVirtualSpaces.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/generationSizer.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
@@ -70,7 +69,9 @@
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
- CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
+ PSCardTable* card_table = new PSCardTable(reserved_region());
+ card_table->initialize();
+ CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
barrier_set->initialize();
set_barrier_set(barrier_set);
@@ -625,6 +626,14 @@
return (ParallelScavengeHeap*)heap;
}
+CardTableModRefBS* ParallelScavengeHeap::barrier_set() {
+ return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
+}
+
+PSCardTable* ParallelScavengeHeap::card_table() {
+ return static_cast<PSCardTable*>(barrier_set()->card_table());
+}
+
// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
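The initialization hunk above replaces the CardTableExtension barrier-set subclass with a plain CardTableModRefBS wrapping a PSCardTable, and the two new accessors recover the typed pieces on demand. A condensed model of that ownership shape (stand-in class names, not the real declarations):

// Stand-in classes; only the ownership shape matters here.
class ModelCardTable { public: virtual ~ModelCardTable() { } };
class ModelPSCardTable : public ModelCardTable { };

class ModelCardTableModRefBS {
  ModelCardTable* _card_table;
 public:
  explicit ModelCardTableModRefBS(ModelCardTable* ct) : _card_table(ct) { }
  ModelCardTable* card_table() const { return _card_table; }
};

// Mirrors ParallelScavengeHeap::card_table() above: the heap hands a
// PSCardTable to a generic barrier set and recovers the PS-specific type
// with a static_cast when it needs the extra scavenge operations.
ModelPSCardTable* model_ps_card_table(ModelCardTableModRefBS* bs) {
  return static_cast<ModelPSCardTable*>(bs->card_table());
}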
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -30,6 +30,7 @@
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
@@ -46,6 +47,7 @@
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
+class PSCardTable;
class PSHeapSummary;
class ParallelScavengeHeap : public CollectedHeap {
@@ -125,6 +127,9 @@
static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }
+ CardTableModRefBS* barrier_set();
+ PSCardTable* card_table();
+
AdjoiningGenerations* gens() { return _gens; }
// Returns JNI_OK on success
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/gcTaskManager.hpp"
+#include "gc/parallel/objectStartArray.inline.hpp"
+#include "gc/parallel/parallelScavengeHeap.inline.hpp"
+#include "gc/parallel/psCardTable.hpp"
+#include "gc/parallel/psPromotionManager.inline.hpp"
+#include "gc/parallel/psScavenge.hpp"
+#include "gc/parallel/psTasks.hpp"
+#include "gc/parallel/psYoungGen.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+#include "utilities/align.hpp"
+
+// Checks an individual oop for missing precise marks. Mark
+// may be either dirty or newgen.
+class CheckForUnmarkedOops : public OopClosure {
+ private:
+ PSYoungGen* _young_gen;
+ PSCardTable* _card_table;
+ HeapWord* _unmarked_addr;
+
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop(p);
+ if (_young_gen->is_in_reserved(obj) &&
+ !_card_table->addr_is_marked_imprecise(p)) {
+ // Don't overwrite the first missing card mark
+ if (_unmarked_addr == NULL) {
+ _unmarked_addr = (HeapWord*)p;
+ }
+ }
+ }
+
+ public:
+ CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) :
+ _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { }
+
+ virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); }
+
+ bool has_unmarked_oop() {
+ return _unmarked_addr != NULL;
+ }
+};
+
+// Checks all objects for the existence of some type of mark,
+// precise or imprecise, dirty or newgen.
+class CheckForUnmarkedObjects : public ObjectClosure {
+ private:
+ PSYoungGen* _young_gen;
+ PSCardTable* _card_table;
+
+ public:
+ CheckForUnmarkedObjects() {
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+ _young_gen = heap->young_gen();
+ _card_table = heap->card_table();
+ }
+
+ // Card marks are not precise. The current system can leave us with
+ // a mismatch of precise marks and beginning of object marks. This means
+ // we test for missing precise marks first. If any are found, we don't
+ // fail unless the object head is also unmarked.
+ virtual void do_object(oop obj) {
+ CheckForUnmarkedOops object_check(_young_gen, _card_table);
+ obj->oop_iterate_no_header(&object_check);
+ if (object_check.has_unmarked_oop()) {
+ guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
+ }
+ }
+};
+
+// Checks for precise marking of oops as newgen.
+class CheckForPreciseMarks : public OopClosure {
+ private:
+ PSYoungGen* _young_gen;
+ PSCardTable* _card_table;
+
+ protected:
+ template <class T> void do_oop_work(T* p) {
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ if (_young_gen->is_in_reserved(obj)) {
+ assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
+ _card_table->set_card_newgen(p);
+ }
+ }
+
+ public:
+ CheckForPreciseMarks(PSYoungGen* young_gen, PSCardTable* card_table) :
+ _young_gen(young_gen), _card_table(card_table) { }
+
+ virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); }
+ virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); }
+};
+
+// We get passed the space_top value to prevent us from traversing into
+// the old_gen promotion labs, which cannot be safely parsed.
+
+// Do not call this method if the space is empty.
+// It is a waste to start tasks and get here only to
+// do no work. If this method needs to be called
+// when the space is empty, fix the calculation of
+// end_card to allow sp_top == sp->bottom().
+
+void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array,
+ MutableSpace* sp,
+ HeapWord* space_top,
+ PSPromotionManager* pm,
+ uint stripe_number,
+ uint stripe_total) {
+ int ssize = 128; // Naked constant! Work unit = 64k.
+ int dirty_card_count = 0;
+
+ // It is a waste to get here if empty.
+ assert(sp->bottom() < sp->top(), "Should not be called if empty");
+ oop* sp_top = (oop*)space_top;
+ jbyte* start_card = byte_for(sp->bottom());
+ jbyte* end_card = byte_for(sp_top - 1) + 1;
+ oop* last_scanned = NULL; // Prevent scanning objects more than once
+ // The width of the stripe ssize*stripe_total must be
+ // consistent with the number of stripes so that the complete slice
+ // is covered.
+ size_t slice_width = ssize * stripe_total;
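+  // Illustrative geometry: with ssize = 128 cards (64 KiB of heap at
+  // 512-byte cards) and stripe_total = 8, slice_width is 1024 cards.
+  // Worker 3 then scans cards [384, 512) of the first slice, cards
+  // [1408, 1536) of the second, and so on up to end_card.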
+ for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
+ jbyte* worker_start_card = slice + stripe_number * ssize;
+ if (worker_start_card >= end_card)
+ return; // We're done.
+
+ jbyte* worker_end_card = worker_start_card + ssize;
+ if (worker_end_card > end_card)
+ worker_end_card = end_card;
+
+ // We do not want to scan objects more than once. In order to accomplish
+ // this, we assert that any object with an object head inside our 'slice'
+ // belongs to us. We may need to extend the range of scanned cards if the
+ // last object continues into the next 'slice'.
+ //
+ // Note! ending cards are exclusive!
+ HeapWord* slice_start = addr_for(worker_start_card);
+ HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));
+
+#ifdef ASSERT
+ if (GCWorkerDelayMillis > 0) {
+ // Delay 1 worker so that it proceeds after all the work
+ // has been completed.
+ if (stripe_number < 2) {
+ os::sleep(Thread::current(), GCWorkerDelayMillis, false);
+ }
+ }
+#endif
+
+  // If there are no objects starting within the chunk, skip it.
+ if (!start_array->object_starts_in_range(slice_start, slice_end)) {
+ continue;
+ }
+ // Update our beginning addr
+ HeapWord* first_object = start_array->object_start(slice_start);
+ debug_only(oop* first_object_within_slice = (oop*) first_object;)
+ if (first_object < slice_start) {
+ last_scanned = (oop*)(first_object + oop(first_object)->size());
+ debug_only(first_object_within_slice = last_scanned;)
+ worker_start_card = byte_for(last_scanned);
+ }
+
+ // Update the ending addr
+ if (slice_end < (HeapWord*)sp_top) {
+ // The subtraction is important! An object may start precisely at slice_end.
+ HeapWord* last_object = start_array->object_start(slice_end - 1);
+ slice_end = last_object + oop(last_object)->size();
+ // worker_end_card is exclusive, so bump it one past the end of last_object's
+ // covered span.
+ worker_end_card = byte_for(slice_end) + 1;
+
+ if (worker_end_card > end_card)
+ worker_end_card = end_card;
+ }
+
+ assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
+ assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
+ assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
+ // Note that worker_start_card >= worker_end_card is legal, and happens when
+ // an object spans an entire slice.
+ assert(worker_start_card <= end_card, "worker start card beyond end card");
+ assert(worker_end_card <= end_card, "worker end card beyond end card");
+
+ jbyte* current_card = worker_start_card;
+ while (current_card < worker_end_card) {
+ // Find an unclean card.
+ while (current_card < worker_end_card && card_is_clean(*current_card)) {
+ current_card++;
+ }
+ jbyte* first_unclean_card = current_card;
+
+ // Find the end of a run of contiguous unclean cards
+ while (current_card < worker_end_card && !card_is_clean(*current_card)) {
+ while (current_card < worker_end_card && !card_is_clean(*current_card)) {
+ current_card++;
+ }
+
+ if (current_card < worker_end_card) {
+ // Some objects may be large enough to span several cards. If such
+ // an object has more than one dirty card, separated by a clean card,
+ // we will attempt to scan it twice. The test against "last_scanned"
+ // prevents the redundant object scan, but it does not prevent newly
+ // marked cards from being cleaned.
+ HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
+ size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
+ HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
+ jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
+ assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
+ if (ending_card_of_last_object > current_card) {
+ // This means the object spans the next complete card.
+ // We need to bump the current_card to ending_card_of_last_object
+ current_card = ending_card_of_last_object;
+ }
+ }
+ }
+ jbyte* following_clean_card = current_card;
+
+ if (first_unclean_card < worker_end_card) {
+ oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
+ assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
+ // "p" should always be >= "last_scanned" because newly GC dirtied
+ // cards are no longer scanned again (see comment at end
+ // of loop on the increment of "current_card"). Test that
+ // hypothesis before removing this code.
+ // If this code is removed, deal with the first time through
+ // the loop when the last_scanned is the object starting in
+ // the previous slice.
+ assert((p >= last_scanned) ||
+ (last_scanned == first_object_within_slice),
+ "Should no longer be possible");
+ if (p < last_scanned) {
+ // Avoid scanning more than once; this can happen because
+        // newgen cards set by GC may be a different set than the
+ // originally dirty set
+ p = last_scanned;
+ }
+ oop* to = (oop*)addr_for(following_clean_card);
+
+ // Test slice_end first!
+ if ((HeapWord*)to > slice_end) {
+ to = (oop*)slice_end;
+ } else if (to > sp_top) {
+ to = sp_top;
+ }
+
+ // we know which cards to scan, now clear them
+ if (first_unclean_card <= worker_start_card+1)
+ first_unclean_card = worker_start_card+1;
+ if (following_clean_card >= worker_end_card-1)
+ following_clean_card = worker_end_card-1;
+
+ while (first_unclean_card < following_clean_card) {
+ *first_unclean_card++ = clean_card;
+ }
+
+ const int interval = PrefetchScanIntervalInBytes;
+ // scan all objects in the range
+ if (interval != 0) {
+ while (p < to) {
+ Prefetch::write(p, interval);
+ oop m = oop(p);
+ assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
+ pm->push_contents(m);
+ p += m->size();
+ }
+ pm->drain_stacks_cond_depth();
+ } else {
+ while (p < to) {
+ oop m = oop(p);
+ assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m));
+ pm->push_contents(m);
+ p += m->size();
+ }
+ pm->drain_stacks_cond_depth();
+ }
+ last_scanned = p;
+ }
+ // "current_card" is still the "following_clean_card" or
+ // the current_card is >= the worker_end_card so the
+ // loop will not execute again.
+ assert((current_card == following_clean_card) ||
+ (current_card >= worker_end_card),
+ "current_card should only be incremented if it still equals "
+ "following_clean_card");
+ // Increment current_card so that it is not processed again.
+      // It may now be dirty because an old-to-young pointer was
+      // found on it and updated. If it is now dirty, it cannot
+      // be safely cleaned in the next iteration.
+ current_card++;
+ }
+ }
+}
+
+// This should be called before a scavenge.
+void PSCardTable::verify_all_young_refs_imprecise() {
+ CheckForUnmarkedObjects check;
+
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+ PSOldGen* old_gen = heap->old_gen();
+
+ old_gen->object_iterate(&check);
+}
+
+// This should be called immediately after a scavenge, before mutators resume.
+void PSCardTable::verify_all_young_refs_precise() {
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+ PSOldGen* old_gen = heap->old_gen();
+
+ CheckForPreciseMarks check(heap->young_gen(), this);
+
+ old_gen->oop_iterate_no_header(&check);
+
+ verify_all_young_refs_precise_helper(old_gen->object_space()->used_region());
+}
+
+void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) {
+ jbyte* bot = byte_for(mr.start());
+ jbyte* top = byte_for(mr.end());
+ while (bot <= top) {
+ assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
+ if (*bot == verify_card)
+ *bot = youngergen_card;
+ bot++;
+ }
+}
+
+bool PSCardTable::addr_is_marked_imprecise(void *addr) {
+ jbyte* p = byte_for(addr);
+ jbyte val = *p;
+
+ if (card_is_dirty(val))
+ return true;
+
+ if (card_is_newgen(val))
+ return true;
+
+ if (card_is_clean(val))
+ return false;
+
+ assert(false, "Found unhandled card mark type");
+
+ return false;
+}
+
+// Also includes verify_card
+bool PSCardTable::addr_is_marked_precise(void *addr) {
+ jbyte* p = byte_for(addr);
+ jbyte val = *p;
+
+ if (card_is_newgen(val))
+ return true;
+
+ if (card_is_verify(val))
+ return true;
+
+ if (card_is_clean(val))
+ return false;
+
+ if (card_is_dirty(val))
+ return false;
+
+ assert(false, "Found unhandled card mark type");
+
+ return false;
+}
+
+// Assumes that only the base or the end changes. This allows identification
+// of the region that is being resized. The
+// CardTable::resize_covered_region() is used for the normal case
+// where the covered regions are growing or shrinking at the high end.
+// The method resize_covered_region_by_end() is analogous to
+// CardTable::resize_covered_region() but
+// for regions that grow or shrink at the low end.
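+// Dispatch: a covered region matching on start() grows or shrinks at
+// the high end (resize_covered_region_by_start); one matching on end()
+// grows or shrinks at the low end (resize_covered_region_by_end); if
+// neither matches, a new covered region is installed.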
+void PSCardTable::resize_covered_region(MemRegion new_region) {
+ for (int i = 0; i < _cur_covered_regions; i++) {
+ if (_covered[i].start() == new_region.start()) {
+ // Found a covered region with the same start as the
+ // new region. The region is growing or shrinking
+ // from the start of the region.
+ resize_covered_region_by_start(new_region);
+ return;
+ }
+ if (_covered[i].start() > new_region.start()) {
+ break;
+ }
+ }
+
+ int changed_region = -1;
+ for (int j = 0; j < _cur_covered_regions; j++) {
+ if (_covered[j].end() == new_region.end()) {
+ changed_region = j;
+ // This is a case where the covered region is growing or shrinking
+ // at the start of the region.
+ assert(changed_region != -1, "Don't expect to add a covered region");
+ assert(_covered[changed_region].byte_size() != new_region.byte_size(),
+ "The sizes should be different here");
+ resize_covered_region_by_end(changed_region, new_region);
+ return;
+ }
+ }
+ // This should only be a new covered region (where no existing
+ // covered region matches at the start or the end).
+ assert(_cur_covered_regions < _max_covered_regions,
+ "An existing region should have been found");
+ resize_covered_region_by_start(new_region);
+}
+
+void PSCardTable::resize_covered_region_by_start(MemRegion new_region) {
+ CardTable::resize_covered_region(new_region);
+ debug_only(verify_guard();)
+}
+
+void PSCardTable::resize_covered_region_by_end(int changed_region,
+ MemRegion new_region) {
+ assert(SafepointSynchronize::is_at_safepoint(),
+ "Only expect an expansion at the low end at a GC");
+ debug_only(verify_guard();)
+#ifdef ASSERT
+ for (int k = 0; k < _cur_covered_regions; k++) {
+ if (_covered[k].end() == new_region.end()) {
+ assert(changed_region == k, "Changed region is incorrect");
+ break;
+ }
+ }
+#endif
+
+ // Commit new or uncommit old pages, if necessary.
+ if (resize_commit_uncommit(changed_region, new_region)) {
+ // Set the new start of the committed region
+ resize_update_committed_table(changed_region, new_region);
+ }
+
+ // Update card table entries
+ resize_update_card_table_entries(changed_region, new_region);
+
+ // Update the covered region
+ resize_update_covered_table(changed_region, new_region);
+
+ int ind = changed_region;
+ log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
+ log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
+ ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+ log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
+ ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+ log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
+ p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
+ log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
+ p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
+
+ debug_only(verify_guard();)
+}
+
+bool PSCardTable::resize_commit_uncommit(int changed_region,
+ MemRegion new_region) {
+ bool result = false;
+ // Commit new or uncommit old pages, if necessary.
+ MemRegion cur_committed = _committed[changed_region];
+ assert(_covered[changed_region].end() == new_region.end(),
+ "The ends of the regions are expected to match");
+  // Extend the start of this _committed region
+  // to cover the start of any previous _committed region.
+ // This forms overlapping regions, but never interior regions.
+ HeapWord* min_prev_start = lowest_prev_committed_start(changed_region);
+ if (min_prev_start < cur_committed.start()) {
+ // Only really need to set start of "cur_committed" to
+ // the new start (min_prev_start) but assertion checking code
+    // below uses cur_committed.end(), so make it correct.
+ MemRegion new_committed =
+ MemRegion(min_prev_start, cur_committed.end());
+ cur_committed = new_committed;
+ }
+#ifdef ASSERT
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+ assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()),
+ "Starts should have proper alignment");
+#endif
+
+ jbyte* new_start = byte_for(new_region.start());
+ // Round down because this is for the start address
+ HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
+ // The guard page is always committed and should not be committed over.
+ // This method is used in cases where the generation is growing toward
+ // lower addresses but the guard region is still at the end of the
+ // card table. That still makes sense when looking for writes
+ // off the end of the card table.
+ if (new_start_aligned < cur_committed.start()) {
+ // Expand the committed region
+ //
+ // Case A
+ // |+ guard +|
+ // |+ cur committed +++++++++|
+ // |+ new committed +++++++++++++++++|
+ //
+ // Case B
+ // |+ guard +|
+ // |+ cur committed +|
+ // |+ new committed +++++++|
+ //
+ // These are not expected because the calculation of the
+ // cur committed region and the new committed region
+ // share the same end for the covered region.
+ // Case C
+ // |+ guard +|
+ // |+ cur committed +|
+ // |+ new committed +++++++++++++++++|
+ // Case D
+ // |+ guard +|
+ // |+ cur committed +++++++++++|
+ // |+ new committed +++++++|
+
+ HeapWord* new_end_for_commit =
+ MIN2(cur_committed.end(), _guard_region.start());
+    if (new_start_aligned < new_end_for_commit) {
+ MemRegion new_committed =
+ MemRegion(new_start_aligned, new_end_for_commit);
+ os::commit_memory_or_exit((char*)new_committed.start(),
+ new_committed.byte_size(), !ExecMem,
+ "card table expansion");
+ }
+ result = true;
+ } else if (new_start_aligned > cur_committed.start()) {
+ // Shrink the committed region
+#if 0 // uncommitting space is currently unsafe because of the interactions
+ // of growing and shrinking regions. One region A can uncommit space
+ // that it owns but which is being used by another region B (maybe).
+ // Region B has not committed the space because it was already
+ // committed by region A.
+ MemRegion uncommit_region = committed_unique_to_self(changed_region,
+ MemRegion(cur_committed.start(), new_start_aligned));
+ if (!uncommit_region.is_empty()) {
+ if (!os::uncommit_memory((char*)uncommit_region.start(),
+ uncommit_region.byte_size())) {
+ // If the uncommit fails, ignore it. Let the
+ // committed table resizing go even though the committed
+      // table will overstate the committed space.
+ }
+ }
+#else
+ assert(!result, "Should be false with current workaround");
+#endif
+ }
+ assert(_committed[changed_region].end() == cur_committed.end(),
+ "end should not change");
+ return result;
+}
+
+void PSCardTable::resize_update_committed_table(int changed_region,
+ MemRegion new_region) {
+
+ jbyte* new_start = byte_for(new_region.start());
+ // Set the new start of the committed region
+ HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size());
+ MemRegion new_committed = MemRegion(new_start_aligned,
+ _committed[changed_region].end());
+ _committed[changed_region] = new_committed;
+ _committed[changed_region].set_start(new_start_aligned);
+}
+
+void PSCardTable::resize_update_card_table_entries(int changed_region,
+ MemRegion new_region) {
+ debug_only(verify_guard();)
+ MemRegion original_covered = _covered[changed_region];
+ // Initialize the card entries. Only consider the
+ // region covered by the card table (_whole_heap)
+ jbyte* entry;
+ if (new_region.start() < _whole_heap.start()) {
+ entry = byte_for(_whole_heap.start());
+ } else {
+ entry = byte_for(new_region.start());
+ }
+ jbyte* end = byte_for(original_covered.start());
+  // If _whole_heap starts at the original covered region's start,
+ // this loop will not execute.
+ while (entry < end) { *entry++ = clean_card; }
+}
+
+void PSCardTable::resize_update_covered_table(int changed_region,
+ MemRegion new_region) {
+ // Update the covered region
+ _covered[changed_region].set_start(new_region.start());
+ _covered[changed_region].set_word_size(new_region.word_size());
+
+  // Reorder regions. At most one region should be out of order.
+ for (int i = _cur_covered_regions-1 ; i > 0; i--) {
+ if (_covered[i].start() < _covered[i-1].start()) {
+ MemRegion covered_mr = _covered[i-1];
+ _covered[i-1] = _covered[i];
+ _covered[i] = covered_mr;
+ MemRegion committed_mr = _committed[i-1];
+ _committed[i-1] = _committed[i];
+ _committed[i] = committed_mr;
+ break;
+ }
+ }
+#ifdef ASSERT
+ for (int m = 0; m < _cur_covered_regions-1; m++) {
+ assert(_covered[m].start() <= _covered[m+1].start(),
+ "Covered regions out of order");
+ assert(_committed[m].start() <= _committed[m+1].start(),
+ "Committed regions out of order");
+ }
+#endif
+}
+
+// Returns the start of any committed region that is lower than
+// the target committed region (index ind) and that intersects the
+// target region. If none, return start of target region.
+//
+// -------------
+// | |
+// -------------
+// ------------
+// | target |
+// ------------
+// -------------
+// | |
+// -------------
+// ^ returns this
+//
+// -------------
+// | |
+// -------------
+// ------------
+// | target |
+// ------------
+// -------------
+// | |
+// -------------
+// ^ returns this
+
+HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const {
+  assert(_cur_covered_regions >= 0, "Expecting at least one region");
+ HeapWord* min_start = _committed[ind].start();
+ for (int j = 0; j < ind; j++) {
+ HeapWord* this_start = _committed[j].start();
+ if ((this_start < min_start) &&
+ !(_committed[j].intersection(_committed[ind])).is_empty()) {
+ min_start = this_start;
+ }
+ }
+ return min_start;
+}
+
+bool PSCardTable::is_in_young(oop obj) const {
+ return ParallelScavengeHeap::heap()->is_in_young(obj);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psCardTable.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
+#define SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
+
+#include "gc/shared/cardTable.hpp"
+#include "oops/oop.hpp"
+
+class MutableSpace;
+class ObjectStartArray;
+class PSPromotionManager;
+class GCTaskQueue;
+
+class PSCardTable: public CardTable {
+ private:
+ // Support methods for resizing the card table.
+ // resize_commit_uncommit() returns true if the pages were committed or
+ // uncommitted
+ bool resize_commit_uncommit(int changed_region, MemRegion new_region);
+ void resize_update_card_table_entries(int changed_region,
+ MemRegion new_region);
+ void resize_update_committed_table(int changed_region, MemRegion new_region);
+ void resize_update_covered_table(int changed_region, MemRegion new_region);
+
+ void verify_all_young_refs_precise_helper(MemRegion mr);
+
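+  // PS-specific card values. They are allocated above
+  // CT_MR_BS_last_reserved (16, see CardTable::CardValues), so
+  // youngergen_card == 17 and verify_card == 21 cannot collide with
+  // the shared clean/dirty/precleaned/claimed/deferred values.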
+ enum ExtendedCardValue {
+ youngergen_card = CT_MR_BS_last_reserved + 1,
+ verify_card = CT_MR_BS_last_reserved + 5
+ };
+
+ public:
+ PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
+
+ static jbyte youngergen_card_val() { return youngergen_card; }
+ static jbyte verify_card_val() { return verify_card; }
+
+ // Scavenge support
+ void scavenge_contents_parallel(ObjectStartArray* start_array,
+ MutableSpace* sp,
+ HeapWord* space_top,
+ PSPromotionManager* pm,
+ uint stripe_number,
+ uint stripe_total);
+
+ bool addr_is_marked_imprecise(void *addr);
+ bool addr_is_marked_precise(void *addr);
+
+ void set_card_newgen(void* addr) { jbyte* p = byte_for(addr); *p = verify_card; }
+
+ // Testers for entries
+ static bool card_is_dirty(int value) { return value == dirty_card; }
+ static bool card_is_newgen(int value) { return value == youngergen_card; }
+ static bool card_is_clean(int value) { return value == clean_card; }
+ static bool card_is_verify(int value) { return value == verify_card; }
+
+ // Card marking
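+  // GC code marks with youngergen_card rather than dirty_card so that
+  // cards recording old-to-young pointers discovered during a scavenge
+  // can be told apart from cards dirtied by the mutator write barrier.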
+ void inline_write_ref_field_gc(void* field, oop new_val) {
+ jbyte* byte = byte_for(field);
+ *byte = youngergen_card;
+ }
+
+ // ReduceInitialCardMarks support
+ bool is_in_young(oop obj) const;
+
+ // Adaptive size policy support
+ // Allows adjustment of the base and size of the covered regions
+ void resize_covered_region(MemRegion new_region);
+ // Finds the covered region to resize based on the start address
+ // of the covered regions.
+ void resize_covered_region_by_start(MemRegion new_region);
+ // Finds the covered region to resize based on the end address
+ // of the covered regions.
+ void resize_covered_region_by_end(int changed_region, MemRegion new_region);
+ // Finds the lowest start address of a covered region that is
+ // previous (i.e., lower index) to the covered region with index "ind".
+ HeapWord* lowest_prev_committed_start(int ind) const;
+
+#ifdef ASSERT
+ bool is_valid_card_address(jbyte* addr) {
+ return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
+ }
+#endif // ASSERT
+
+ // Verification
+ void verify_all_young_refs_imprecise();
+ void verify_all_young_refs_precise();
+};
+
+#endif // SHARE_VM_GC_PARALLEL_PSCARDTABLE_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -236,12 +236,12 @@
young_gen->to_space()->is_empty();
young_gen_empty = eden_empty && survivors_empty;
- ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+ PSCardTable* card_table = heap->card_table();
MemRegion old_mr = heap->old_gen()->reserved();
if (young_gen_empty) {
- modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+ card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
} else {
- modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
+ card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
}
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
+#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
@@ -111,11 +112,8 @@
}
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- BarrierSet* bs = heap->barrier_set();
-
- bs->resize_covered_region(cmr);
-
- CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
+ PSCardTable* ct = heap->card_table();
+ ct->resize_covered_region(cmr);
// Verify that the start and end of this generation is the start of a card.
// If this wasn't true, a single card could span more than one generation,
@@ -386,7 +384,7 @@
size_t new_word_size = new_memregion.word_size();
start_array()->set_covered_region(new_memregion);
- ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(new_memregion);
+ ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);
// ALWAYS do this last!!
object_space()->initialize(new_memregion,
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1017,12 +1017,12 @@
bool young_gen_empty = eden_empty && from_space->is_empty() &&
to_space->is_empty();
- ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
+ PSCardTable* ct = heap->card_table();
MemRegion old_mr = heap->old_gen()->reserved();
if (young_gen_empty) {
- modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
+ ct->clear(MemRegion(old_mr.start(), old_mr.end()));
} else {
- modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
+ ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
}
// Delete metaspaces for unloaded class loaders and clean up loader_data graph
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
@@ -60,7 +59,7 @@
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
-CardTableExtension* PSScavenge::_card_table = NULL;
+PSCardTable* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
@@ -322,7 +321,7 @@
// Verify no unmarked old->young roots
if (VerifyRememberedSets) {
- CardTableExtension::verify_all_young_refs_imprecise();
+ heap->card_table()->verify_all_young_refs_imprecise();
}
assert(young_gen->to_space()->is_empty(),
@@ -617,8 +616,8 @@
if (VerifyRememberedSets) {
// Precise verification will give false positives. Until this is fixed,
// use imprecise verification.
- // CardTableExtension::verify_all_young_refs_precise();
- CardTableExtension::verify_all_young_refs_imprecise();
+ // heap->card_table()->verify_all_young_refs_precise();
+ heap->card_table()->verify_all_young_refs_imprecise();
}
if (log_is_enabled(Debug, gc, heap, exit)) {
@@ -778,7 +777,7 @@
NULL); // header provides liveness info
// Cache the cardtable
- _card_table = barrier_set_cast<CardTableExtension>(heap->barrier_set());
+ _card_table = heap->card_table();
_counters = new CollectorCounters("PSScavenge", 0);
}
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP
#define SHARE_VM_GC_PARALLEL_PSSCAVENGE_HPP
-#include "gc/parallel/cardTableExtension.hpp"
+#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psVirtualspace.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcTrace.hpp"
@@ -67,7 +67,7 @@
// Flags/counters
static ReferenceProcessor* _ref_processor; // Reference processor for scavenging.
static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
- static CardTableExtension* _card_table; // We cache the card table for fast access.
+ static PSCardTable* _card_table; // We cache the card table for fast access.
static bool _survivor_overflow; // Overflow this collection
static uint _tenuring_threshold; // tenuring threshold for next scavenge
static elapsedTimer _accumulated_time; // total time spent on scavenge
@@ -89,7 +89,7 @@
static inline void save_to_space_top_before_gc();
// Private accessors
- static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
+ static PSCardTable* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
public:
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
-#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.hpp"
--- a/src/hotspot/share/gc/parallel/psTasks.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,9 @@
#include "aot/aotLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
-#include "gc/parallel/cardTableExtension.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/psMarkSweep.hpp"
+#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
@@ -176,8 +176,7 @@
{
PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
- CardTableExtension* card_table =
- barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
+ PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
card_table->scavenge_contents_parallel(_old_gen->start_array(),
_old_gen->object_space(),
--- a/src/hotspot/share/gc/parallel/psTasks.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psTasks.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -148,7 +148,7 @@
// will be covered. In this example if 4 tasks have been created to cover
// all the stripes and there are only 3 threads, one of the threads will
// get the tasks with the 4th stripe. However, there is a dependence in
-// CardTableExtension::scavenge_contents_parallel() on the number
+// PSCardTable::scavenge_contents_parallel() on the number
// of tasks created. In scavenge_contents_parallel the distance
// to the next stripe is calculated based on the number of tasks.
// If the stripe width is ssize, a task's next stripe is at
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
- ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
if (ZapUnusedHeapArea) {
// Mangle newly committed space immediately because it
@@ -870,7 +870,7 @@
MemRegion cmr((HeapWord*)virtual_space()->low(),
(HeapWord*)virtual_space()->high());
- ParallelScavengeHeap::heap()->barrier_set()->resize_covered_region(cmr);
+ ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr);
space_invariants();
}
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -189,7 +189,7 @@
(HeapWord*)_virtual_space.high());
GenCollectedHeap* gch = GenCollectedHeap::heap();
- gch->barrier_set()->resize_covered_region(cmr);
+ gch->rem_set()->resize_covered_region(cmr);
_eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
@@ -454,7 +454,7 @@
SpaceDecorator::DontMangle);
MemRegion cmr((HeapWord*)_virtual_space.low(),
(HeapWord*)_virtual_space.high());
- gch->barrier_set()->resize_covered_region(cmr);
+ gch->rem_set()->resize_covered_region(cmr);
log_debug(gc, ergo, heap)(
"New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
@@ -634,7 +634,7 @@
{
// DefNew needs to run with n_threads == 0, to make sure the serial
// version of the card table scanning code is used.
- // See: CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel.
+ // See: CardTableRS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);
gch->young_process_roots(&srs,
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -123,15 +123,6 @@
virtual void write_ref_array_work(MemRegion mr) = 0;
public:
- // Inform the BarrierSet that the the covered heap region that starts
- // with "base" has been changed to have the given size (possibly from 0,
- // for initialization.)
- virtual void resize_covered_region(MemRegion new_region) = 0;
-
- // If the barrier set imposes any alignment restrictions on boundaries
- // within the heap, this function tells whether they are met.
- virtual bool is_aligned(HeapWord* addr) = 0;
-
// Print a description of the memory for the barrier set
virtual void print_on(outputStream* st) const = 0;
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -29,25 +29,31 @@
#if INCLUDE_ALL_GCS
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
- f(CardTableExtension) \
f(G1SATBCTLogging)
#else
#define FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#endif
+#if INCLUDE_ALL_GCS
+#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f) \
+ f(G1SATBCT)
+#else
+#define FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+#endif
+
// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
- f(CardTableForRS) \
+ f(CardTableModRef) \
FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
+ f(ModRef) \
+ FOR_EACH_ABSTRACT_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
+
// Do something for each known barrier set.
#define FOR_EACH_BARRIER_SET_DO(f) \
- f(ModRef) \
- f(CardTableModRef) \
- f(CardTableForRS) \
- f(CardTableExtension) \
- f(G1SATBCT) \
- f(G1SATBCTLogging)
+ FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
+ FOR_EACH_CONCRETE_BARRIER_SET_DO(f)
// To enable runtime-resolution of GC barriers on primitives, please
// define SUPPORT_BARRIER_ON_PRIMITIVES.
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,8 @@
#include "gc/shared/modRefBarrierSet.inline.hpp"
#include "gc/shared/cardTableModRefBS.inline.hpp"
-#include "gc/shared/cardTableModRefBSForCTRS.hpp"
#if INCLUDE_ALL_GCS
-#include "gc/parallel/cardTableExtension.hpp" // Parallel support
#include "gc/g1/g1SATBCardTableModRefBS.inline.hpp" // G1 support
#endif
--- a/src/hotspot/share/gc/shared/cardGeneration.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardGeneration.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
- GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
+ GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
@@ -166,7 +166,7 @@
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
- GenCollectedHeap::heap()->barrier_set()->resize_covered_region(mr);
+ GenCollectedHeap::heap()->rem_set()->resize_covered_region(mr);
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + size;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTable.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "logging/log.hpp"
+#include "memory/virtualspace.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/align.hpp"
+
+size_t CardTable::compute_byte_map_size() {
+ assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
+ "uninitialized, check declaration order");
+ assert(_page_size != 0, "uninitialized, check declaration order");
+ const size_t granularity = os::vm_allocation_granularity();
+ return align_up(_guard_index + 1, MAX2(_page_size, granularity));
+}
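+// The byte map spans _guard_index + 1 card bytes, rounded up to the
+// larger of the mapping page size and the allocation granularity so
+// that the reservation made in initialize() stays page-aligned.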
+
+CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
+ _scanned_concurrently(conc_scan),
+ _whole_heap(whole_heap),
+ _guard_index(0),
+ _guard_region(),
+ _last_valid_index(0),
+ _page_size(os::vm_page_size()),
+ _byte_map_size(0),
+ _covered(NULL),
+ _committed(NULL),
+ _cur_covered_regions(0),
+ _byte_map(NULL),
+ _byte_map_base(NULL)
+{
+ assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
+ assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
+
+  assert(card_size <= 512, "card_size must be at most 512"); // why?
+
+ _covered = new MemRegion[_max_covered_regions];
+ if (_covered == NULL) {
+ vm_exit_during_initialization("Could not allocate card table covered region set.");
+ }
+}
+
+CardTable::~CardTable() {
+ if (_covered) {
+ delete[] _covered;
+ _covered = NULL;
+ }
+ if (_committed) {
+ delete[] _committed;
+ _committed = NULL;
+ }
+}
+
+void CardTable::initialize() {
+ _guard_index = cards_required(_whole_heap.word_size()) - 1;
+ _last_valid_index = _guard_index - 1;
+
+ _byte_map_size = compute_byte_map_size();
+
+ HeapWord* low_bound = _whole_heap.start();
+ HeapWord* high_bound = _whole_heap.end();
+
+ _cur_covered_regions = 0;
+ _committed = new MemRegion[_max_covered_regions];
+ if (_committed == NULL) {
+ vm_exit_during_initialization("Could not allocate card table committed region set.");
+ }
+
+ const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
+ MAX2(_page_size, (size_t) os::vm_allocation_granularity());
+ ReservedSpace heap_rs(_byte_map_size, rs_align, false);
+
+ MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
+
+ os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
+ _page_size, heap_rs.base(), heap_rs.size());
+ if (!heap_rs.is_reserved()) {
+ vm_exit_during_initialization("Could not reserve enough space for the "
+ "card marking array");
+ }
+
+ // The assembler store_check code will do an unsigned shift of the oop,
+ // then add it to _byte_map_base, i.e.
+ //
+ // _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
+ _byte_map = (jbyte*) heap_rs.base();
+ _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+ assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
+ assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
+
+ jbyte* guard_card = &_byte_map[_guard_index];
+ HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
+ _guard_region = MemRegion(guard_page, _page_size);
+ os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
+ !ExecMem, "card table last card");
+ *guard_card = last_card;
+
+ log_trace(gc, barrier)("CardTable::CardTable: ");
+ log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
+ p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
+ log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
+}
+
+int CardTable::find_covering_region_by_base(HeapWord* base) {
+ int i;
+ for (i = 0; i < _cur_covered_regions; i++) {
+ if (_covered[i].start() == base) return i;
+ if (_covered[i].start() > base) break;
+ }
+ // If we didn't find it, create a new one.
+ assert(_cur_covered_regions < _max_covered_regions,
+ "too many covered regions");
+ // Move the ones above up, to maintain sorted order.
+ for (int j = _cur_covered_regions; j > i; j--) {
+ _covered[j] = _covered[j-1];
+ _committed[j] = _committed[j-1];
+ }
+ int res = i;
+ _cur_covered_regions++;
+ _covered[res].set_start(base);
+ _covered[res].set_word_size(0);
+ jbyte* ct_start = byte_for(base);
+ HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
+ _committed[res].set_start(ct_start_aligned);
+ _committed[res].set_word_size(0);
+ return res;
+}
+
+int CardTable::find_covering_region_containing(HeapWord* addr) {
+ for (int i = 0; i < _cur_covered_regions; i++) {
+ if (_covered[i].contains(addr)) {
+ return i;
+ }
+ }
+ assert(0, "address outside of heap?");
+ return -1;
+}
+
+HeapWord* CardTable::largest_prev_committed_end(int ind) const {
+ HeapWord* max_end = NULL;
+ for (int j = 0; j < ind; j++) {
+ HeapWord* this_end = _committed[j].end();
+ if (this_end > max_end) max_end = this_end;
+ }
+ return max_end;
+}
+
+MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
+ MemRegion result = mr;
+ for (int r = 0; r < _cur_covered_regions; r += 1) {
+ if (r != self) {
+ result = result.minus(_committed[r]);
+ }
+ }
+ // Never include the guard page.
+ result = result.minus(_guard_region);
+ return result;
+}
+
+void CardTable::resize_covered_region(MemRegion new_region) {
+ // We don't change the start of a region, only the end.
+ assert(_whole_heap.contains(new_region),
+ "attempt to cover area not in reserved area");
+ debug_only(verify_guard();)
+ // collided is true if the expansion would push into another committed region
+ debug_only(bool collided = false;)
+ int const ind = find_covering_region_by_base(new_region.start());
+ MemRegion const old_region = _covered[ind];
+ assert(old_region.start() == new_region.start(), "just checking");
+ if (new_region.word_size() != old_region.word_size()) {
+ // Commit new or uncommit old pages, if necessary.
+ MemRegion cur_committed = _committed[ind];
+ // Extend the end of this _committed region
+ // to cover the end of any lower _committed regions.
+ // This forms overlapping regions, but never interior regions.
+ HeapWord* const max_prev_end = largest_prev_committed_end(ind);
+ if (max_prev_end > cur_committed.end()) {
+ cur_committed.set_end(max_prev_end);
+ }
+ // Align the end up to a page size (starts are already aligned).
+ HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
+ HeapWord* new_end_aligned = align_up(new_end, _page_size);
+ assert(new_end_aligned >= new_end, "align up, but less");
+ // Check the other regions (excludes "ind") to ensure that
+ // the new_end_aligned does not intrude onto the committed
+ // space of another region.
+ int ri = 0;
+ for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
+ if (new_end_aligned > _committed[ri].start()) {
+ assert(new_end_aligned <= _committed[ri].end(),
+ "An earlier committed region can't cover a later committed region");
+ // Any region containing the new end
+ // should start at or beyond the region found (ind)
+ // for the new end (committed regions are not expected to
+ // be proper subsets of other committed regions).
+ assert(_committed[ri].start() >= _committed[ind].start(),
+ "New end of committed region is inconsistent");
+ new_end_aligned = _committed[ri].start();
+ // new_end_aligned can be equal to the start of its
+ // committed region (i.e., of "ind") if a second
+        // region following "ind" also starts at the same location
+ // as "ind".
+ assert(new_end_aligned >= _committed[ind].start(),
+ "New end of committed region is before start");
+ debug_only(collided = true;)
+ // Should only collide with 1 region
+ break;
+ }
+ }
+#ifdef ASSERT
+ for (++ri; ri < _cur_covered_regions; ri++) {
+ assert(!_committed[ri].contains(new_end_aligned),
+ "New end of committed region is in a second committed region");
+ }
+#endif
+ // The guard page is always committed and should not be committed over.
+ // "guarded" is used for assertion checking below and recalls the fact
+ // that the would-be end of the new committed region would have
+ // penetrated the guard page.
+ HeapWord* new_end_for_commit = new_end_aligned;
+
+ DEBUG_ONLY(bool guarded = false;)
+ if (new_end_for_commit > _guard_region.start()) {
+ new_end_for_commit = _guard_region.start();
+ DEBUG_ONLY(guarded = true;)
+ }
+
+ if (new_end_for_commit > cur_committed.end()) {
+ // Must commit new pages.
+ MemRegion const new_committed =
+ MemRegion(cur_committed.end(), new_end_for_commit);
+
+ assert(!new_committed.is_empty(), "Region should not be empty here");
+ os::commit_memory_or_exit((char*)new_committed.start(),
+ new_committed.byte_size(), _page_size,
+ !ExecMem, "card table expansion");
+ // Use new_end_aligned (as opposed to new_end_for_commit) because
+ // the cur_committed region may include the guard region.
+ } else if (new_end_aligned < cur_committed.end()) {
+ // Must uncommit pages.
+ MemRegion const uncommit_region =
+ committed_unique_to_self(ind, MemRegion(new_end_aligned,
+ cur_committed.end()));
+ if (!uncommit_region.is_empty()) {
+ // It is not safe to uncommit cards if the boundary between
+ // the generations is moving. A shrink can uncommit cards
+ // owned by generation A but being used by generation B.
+ if (!UseAdaptiveGCBoundary) {
+ if (!os::uncommit_memory((char*)uncommit_region.start(),
+ uncommit_region.byte_size())) {
+ assert(false, "Card table contraction failed");
+ // The call failed so don't change the end of the
+ // committed region. This is better than taking the
+ // VM down.
+ new_end_aligned = _committed[ind].end();
+ }
+ } else {
+ new_end_aligned = _committed[ind].end();
+ }
+ }
+ }
+ // In any case, we can reset the end of the current committed entry.
+ _committed[ind].set_end(new_end_aligned);
+
+#ifdef ASSERT
+ // Check that the last card in the new region is committed according
+ // to the tables.
+ bool covered = false;
+ for (int cr = 0; cr < _cur_covered_regions; cr++) {
+ if (_committed[cr].contains(new_end - 1)) {
+ covered = true;
+ break;
+ }
+ }
+ assert(covered, "Card for end of new region not committed");
+#endif
+
+ // The default of 0 is not necessarily clean cards.
+ jbyte* entry;
+ if (old_region.last() < _whole_heap.start()) {
+ entry = byte_for(_whole_heap.start());
+ } else {
+ entry = byte_after(old_region.last());
+ }
+ assert(index_for(new_region.last()) < _guard_index,
+ "The guard card will be overwritten");
+    // The commented-out line below would clean only the newly expanded
+    // region, not the aligned-up expanded region.
+ // jbyte* const end = byte_after(new_region.last());
+ jbyte* const end = (jbyte*) new_end_for_commit;
+ assert((end >= byte_after(new_region.last())) || collided || guarded,
+ "Expect to be beyond new region unless impacting another region");
+ // do nothing if we resized downward.
+#ifdef ASSERT
+ for (int ri = 0; ri < _cur_covered_regions; ri++) {
+ if (ri != ind) {
+ // The end of the new committed region should not
+ // be in any existing region unless it matches
+ // the start of the next region.
+ assert(!_committed[ri].contains(end) ||
+ (_committed[ri].start() == (HeapWord*) end),
+ "Overlapping committed regions");
+ }
+ }
+#endif
+ if (entry < end) {
+ memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
+ }
+ }
+ // In any case, the covered size changes.
+ _covered[ind].set_word_size(new_region.word_size());
+
+ log_trace(gc, barrier)("CardTable::resize_covered_region: ");
+ log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
+ ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
+ log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
+ ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
+ log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
+ p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
+ log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
+ p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
+
+ // Touch the last card of the covered region to show that it
+ // is committed (or SEGV).
+ debug_only((void) (*byte_for(_covered[ind].last()));)
+ debug_only(verify_guard();)
+}
+
+// Note that these versions are precise! The scanning code has to handle the
+// fact that the write barrier may be either precise or imprecise.
+void CardTable::dirty_MemRegion(MemRegion mr) {
+ assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+ assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
+ jbyte* cur = byte_for(mr.start());
+ jbyte* last = byte_after(mr.last());
+ while (cur < last) {
+ *cur = dirty_card;
+ cur++;
+ }
+}
+
+void CardTable::clear_MemRegion(MemRegion mr) {
+ // Be conservative: only clean cards entirely contained within the
+ // region.
+ jbyte* cur;
+ if (mr.start() == _whole_heap.start()) {
+ cur = byte_for(mr.start());
+ } else {
+ assert(mr.start() > _whole_heap.start(), "mr is not covered.");
+ cur = byte_after(mr.start() - 1);
+ }
+ jbyte* last = byte_after(mr.last());
+ memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
+}
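+// Note: for an interior mr.start(), byte_after(mr.start() - 1) rounds
+// up to the first card lying entirely within mr, so a card straddling
+// the region boundary keeps its mark.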
+
+void CardTable::clear(MemRegion mr) {
+ for (int i = 0; i < _cur_covered_regions; i++) {
+ MemRegion mri = mr.intersection(_covered[i]);
+ if (!mri.is_empty()) clear_MemRegion(mri);
+ }
+}
+
+void CardTable::dirty(MemRegion mr) {
+ jbyte* first = byte_for(mr.start());
+ jbyte* last = byte_after(mr.last());
+ memset(first, dirty_card, last-first);
+}
+
+// Unlike several other card table methods, dirty_card_iterate()
+// iterates over dirty cards ranges in increasing address order.
+void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
+ for (int i = 0; i < _cur_covered_regions; i++) {
+ MemRegion mri = mr.intersection(_covered[i]);
+ if (!mri.is_empty()) {
+ jbyte *cur_entry, *next_entry, *limit;
+ for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
+ cur_entry <= limit;
+ cur_entry = next_entry) {
+ next_entry = cur_entry + 1;
+ if (*cur_entry == dirty_card) {
+ size_t dirty_cards;
+ // Accumulate maximal dirty card range, starting at cur_entry
+ for (dirty_cards = 1;
+ next_entry <= limit && *next_entry == dirty_card;
+ dirty_cards++, next_entry++);
+ MemRegion cur_cards(addr_for(cur_entry),
+ dirty_cards*card_size_in_words);
+ cl->do_MemRegion(cur_cards);
+ }
+ }
+ }
+ }
+}
+
+MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
+ bool reset,
+ int reset_val) {
+ for (int i = 0; i < _cur_covered_regions; i++) {
+ MemRegion mri = mr.intersection(_covered[i]);
+ if (!mri.is_empty()) {
+ jbyte* cur_entry, *next_entry, *limit;
+ for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
+ cur_entry <= limit;
+ cur_entry = next_entry) {
+ next_entry = cur_entry + 1;
+ if (*cur_entry == dirty_card) {
+ size_t dirty_cards;
+ // Accumulate maximal dirty card range, starting at cur_entry
+ for (dirty_cards = 1;
+ next_entry <= limit && *next_entry == dirty_card;
+ dirty_cards++, next_entry++);
+ MemRegion cur_cards(addr_for(cur_entry),
+ dirty_cards*card_size_in_words);
+ if (reset) {
+ for (size_t i = 0; i < dirty_cards; i++) {
+ cur_entry[i] = reset_val;
+ }
+ }
+ return cur_cards;
+ }
+ }
+ }
+ }
+ return MemRegion(mr.end(), mr.end());
+}
+
+uintx CardTable::ct_max_alignment_constraint() {
+ return card_size * os::vm_page_size();
+}
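+// Example: with 4 KiB pages this is 512 * 4096 == 2 MiB; one committed
+// page of card bytes then corresponds to exactly 2 MiB of heap.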
+
+void CardTable::verify_guard() {
+ // For product build verification
+ guarantee(_byte_map[_guard_index] == last_card,
+ "card table guard has been modified");
+}
+
+void CardTable::invalidate(MemRegion mr) {
+ assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
+ assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
+ for (int i = 0; i < _cur_covered_regions; i++) {
+ MemRegion mri = mr.intersection(_covered[i]);
+ if (!mri.is_empty()) dirty_MemRegion(mri);
+ }
+}
+
+void CardTable::verify() {
+ verify_guard();
+}
+
+#ifndef PRODUCT
+void CardTable::verify_region(MemRegion mr,
+ jbyte val, bool val_equals) {
+ jbyte* start = byte_for(mr.start());
+ jbyte* end = byte_for(mr.last());
+ bool failures = false;
+ for (jbyte* curr = start; curr <= end; ++curr) {
+ jbyte curr_val = *curr;
+ bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
+ if (failed) {
+ if (!failures) {
+ log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
+ log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val);
+ failures = true;
+ }
+ log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
+ p2i(curr), p2i(addr_for(curr)),
+ p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
+ (int) curr_val);
+ }
+ }
+ guarantee(!failures, "there should not have been any failures");
+}
+
+void CardTable::verify_not_dirty_region(MemRegion mr) {
+ verify_region(mr, dirty_card, false /* val_equals */);
+}
+
+void CardTable::verify_dirty_region(MemRegion mr) {
+ verify_region(mr, dirty_card, true /* val_equals */);
+}
+#endif
+
+void CardTable::print_on(outputStream* st) const {
+ st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
+ p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTable.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_CARDTABLE_HPP
+#define SHARE_VM_GC_SHARED_CARDTABLE_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/memRegion.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/align.hpp"
+
+class CardTable: public CHeapObj<mtGC> {
+ friend class VMStructs;
+protected:
+ // The declaration order of these const fields is important; see the
+ // constructor before changing.
+ const bool _scanned_concurrently;
+ const MemRegion _whole_heap; // the region covered by the card table
+ size_t _guard_index; // index of very last element in the card
+ // table; it is set to a guard value
+ // (last_card) and should never be modified
+ size_t _last_valid_index; // index of the last valid element
+ const size_t _page_size; // page size used when mapping _byte_map
+ size_t _byte_map_size; // in bytes
+ jbyte* _byte_map; // the card marking array
+ jbyte* _byte_map_base;
+
+ int _cur_covered_regions;
+
+ // The covered regions should be in address order.
+ MemRegion* _covered;
+ // The committed regions correspond one-to-one to the covered regions.
+ // They represent the card-table memory that has been committed to service
+  // the corresponding covered region. It may be that the committed region for
+ // one covered region corresponds to a larger region because of page-size
+ // roundings. Thus, a committed region for one covered region may
+ // actually extend onto the card-table space for the next covered region.
+ MemRegion* _committed;
+
+ // The last card is a guard card, and we commit the page for it so
+ // we can use the card for verification purposes. We make sure we never
+ // uncommit the MemRegion for that page.
+ MemRegion _guard_region;
+
+ inline size_t compute_byte_map_size();
+
+  // Finds and returns the index of the region, if any, to which the given
+  // region would be contiguous. If none exists, assigns a new region and
+  // returns its index. Requires that no more than the maximum number of
+ // covered regions defined in the constructor are ever in use.
+ int find_covering_region_by_base(HeapWord* base);
+
+ // Same as above, but finds the region containing the given address
+ // instead of starting at a given base address.
+ int find_covering_region_containing(HeapWord* addr);
+
+ // Returns the leftmost end of a committed region corresponding to a
+ // covered region before covered region "ind", or else "NULL" if "ind" is
+ // the first covered region.
+ HeapWord* largest_prev_committed_end(int ind) const;
+
+ // Returns the part of the region mr that doesn't intersect with
+ // any committed region other than self. Used to prevent uncommitting
+ // regions that are also committed by other regions. Also protects
+ // against uncommitting the guard region.
+ MemRegion committed_unique_to_self(int self, MemRegion mr) const;
+
+ // Some barrier sets create tables whose elements correspond to parts of
+ // the heap; the CardTableModRefBS is an example. Such barrier sets will
+ // normally reserve space for such tables, and commit parts of the table
+ // "covering" parts of the heap that are committed. At most one covered
+ // region per generation is needed.
+ static const int _max_covered_regions = 2;
+
+ enum CardValues {
+ clean_card = -1,
+ // The mask contains zeros in places for all other values.
+ clean_card_mask = clean_card - 31,
+
+ dirty_card = 0,
+ precleaned_card = 1,
+ claimed_card = 2,
+ deferred_card = 4,
+ last_card = 8,
+ CT_MR_BS_last_reserved = 16
+ };
+
+ // a word's worth (row) of clean card values
+ static const intptr_t clean_card_row = (intptr_t)(-1);
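+  // Since clean_card is -1 (0xff in every byte), a word-sized load from
+  // the byte map equals clean_card_row exactly when every card it
+  // covers is clean, letting scanners skip clean runs a word at a time.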
+
+public:
+ CardTable(MemRegion whole_heap, bool conc_scan);
+ virtual ~CardTable();
+ virtual void initialize();
+
+  // The kinds of precision a CardTable may offer.
+ enum PrecisionStyle {
+ Precise,
+ ObjHeadPreciseArray
+ };
+
+ // Tells what style of precision this card table offers.
+ PrecisionStyle precision() {
+ return ObjHeadPreciseArray; // Only one supported for now.
+ }
+
+ // *** Barrier set functions.
+
+ // Initialization utilities; covered_words is the size of the covered region
+ // in, um, words.
+ inline size_t cards_required(size_t covered_words) {
+ // Add one for a guard card, used to detect errors.
+ const size_t words = align_up(covered_words, card_size_in_words);
+ return words / card_size_in_words + 1;
+ }
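+  // Example: on a 64-bit VM card_size_in_words is 64, so covering
+  // 64M words needs 64M / 64 + 1 == 1048577 card bytes; the extra
+  // byte is the guard card.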
+
+ // Dirty the bytes corresponding to "mr" (not all of which must be
+ // covered.)
+ void dirty_MemRegion(MemRegion mr);
+
+ // Clear (to clean_card) the bytes entirely contained within "mr" (not
+ // all of which must be covered.)
+ void clear_MemRegion(MemRegion mr);
+
+ // Return true if "p" is at the start of a card.
+ bool is_card_aligned(HeapWord* p) {
+ jbyte* pcard = byte_for(p);
+ return (addr_for(pcard) == p);
+ }
+
+ // Mapping from address to card marking array entry
+ jbyte* byte_for(const void* p) const {
+ assert(_whole_heap.contains(p),
+ "Attempt to access p = " PTR_FORMAT " out of bounds of "
+ " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
+ p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
+ jbyte* result = &_byte_map_base[uintptr_t(p) >> card_shift];
+ assert(result >= _byte_map && result < _byte_map + _byte_map_size,
+ "out of bounds accessor for card marking array");
+ return result;
+ }
+
+ // The card table byte one after the card marking array
+ // entry for argument address. Typically used for higher bounds
+ // for loops iterating through the card table.
+ jbyte* byte_after(const void* p) const {
+ return byte_for(p) + 1;
+ }
+
+ virtual void invalidate(MemRegion mr);
+ void clear(MemRegion mr);
+ void dirty(MemRegion mr);
+
+ // Provide read-only access to the card table array.
+ const jbyte* byte_for_const(const void* p) const {
+ return byte_for(p);
+ }
+ const jbyte* byte_after_const(const void* p) const {
+ return byte_after(p);
+ }
+
+ // Mapping from card marking array entry to address of first word
+ HeapWord* addr_for(const jbyte* p) const {
+ assert(p >= _byte_map && p < _byte_map + _byte_map_size,
+ "out of bounds access to card marking array. p: " PTR_FORMAT
+ " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
+ p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
+ size_t delta = pointer_delta(p, _byte_map_base, sizeof(jbyte));
+ HeapWord* result = (HeapWord*) (delta << card_shift);
+ assert(_whole_heap.contains(result),
+ "Returning result = " PTR_FORMAT " out of bounds of "
+ " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
+ p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
+ return result;
+ }
+
+ // Mapping from address to card marking array index.
+ size_t index_for(void* p) {
+ assert(_whole_heap.contains(p),
+ "Attempt to access p = " PTR_FORMAT " out of bounds of "
+ " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
+ p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
+ return byte_for(p) - _byte_map;
+ }
+
+ const jbyte* byte_for_index(const size_t card_index) const {
+ return _byte_map + card_index;
+ }
+
+ // Resize one of the regions covered by the remembered set.
+ virtual void resize_covered_region(MemRegion new_region);
+
+ // *** Card-table-RemSet-specific things.
+
+ static uintx ct_max_alignment_constraint();
+
+ // Apply closure "cl" to the dirty cards containing some part of
+ // MemRegion "mr".
+ void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
+
+ // Return the MemRegion corresponding to the first maximal run
+ // of dirty cards lying completely within MemRegion mr.
+ // If reset is "true", then sets those card table entries to the given
+ // value.
+ MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
+ int reset_val);
+
+ // Constants
+ enum SomePublicConstants {
+ card_shift = 9,
+ card_size = 1 << card_shift,
+ card_size_in_words = card_size / sizeof(HeapWord)
+ };
+
+ static jbyte clean_card_val() { return clean_card; }
+ static jbyte clean_card_mask_val() { return clean_card_mask; }
+ static jbyte dirty_card_val() { return dirty_card; }
+ static jbyte claimed_card_val() { return claimed_card; }
+ static jbyte precleaned_card_val() { return precleaned_card; }
+ static jbyte deferred_card_val() { return deferred_card; }
+ static intptr_t clean_card_row_val() { return clean_card_row; }
+
+ // Card marking array base (adjusted for heap low boundary)
+ // This would be the 0th element of _byte_map, if the heap started at 0x0.
+ // But since the heap starts at some higher address, this points to somewhere
+ // before the beginning of the actual _byte_map.
+ jbyte* byte_map_base() const { return _byte_map_base; }
+ bool scanned_concurrently() const { return _scanned_concurrently; }
+
+ virtual bool is_in_young(oop obj) const = 0;
+
+ // Print a description of the memory for the card table
+ virtual void print_on(outputStream* st) const;
+
+ void verify();
+ void verify_guard();
+
+ // val_equals -> it will check that all cards covered by mr equal val
+ // !val_equals -> it will check that all cards covered by mr do not equal val
+ void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
+ void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
+ void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
+};
+
+#endif // SHARE_VM_GC_SHARED_CARDTABLE_HPP
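For orientation, the byte_for()/addr_for() pair declared above is just shift arithmetic against a biased base pointer. The following standalone sketch (plain C++; the fake heap layout and all names are local to the example, not part of the patch) shows why _byte_map_base can be indexed directly with `p >> card_shift`:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Constants mirroring the header above: one card byte covers 512 heap bytes.
    static const int card_shift = 9;
    static const size_t card_size = (size_t)1 << card_shift;

    int main() {
      // Assume a fake heap spanning [0x100000, 0x200000) => 2048 cards.
      const uintptr_t heap_start = 0x100000;
      const uintptr_t heap_end   = 0x200000;
      const size_t ncards = (heap_end - heap_start) / card_size;

      static signed char byte_map[2048];
      // Bias the base so that (p >> card_shift) indexes it directly; the
      // barrier then needs no subtraction of heap_start on every store.
      signed char* byte_map_base = byte_map - (heap_start >> card_shift);

      uintptr_t p = heap_start + 12345;
      signed char* card = &byte_map_base[p >> card_shift];      // byte_for(p)
      assert(card >= byte_map && card < byte_map + ncards);

      // addr_for() inverts the mapping: first heap address covered by the card.
      uintptr_t first = (uintptr_t)(card - byte_map_base) << card_shift;
      assert(first <= p && p < first + card_size);
      printf("card %zu covers [%#lx, %#lx)\n", (size_t)(card - byte_map),
             (unsigned long)first, (unsigned long)(first + card_size));
      return 0;
    }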
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -39,490 +39,38 @@
// enumerate ref fields that have been modified (since the last
// enumeration.)
-size_t CardTableModRefBS::compute_byte_map_size()
-{
- assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
- "uninitialized, check declaration order");
- assert(_page_size != 0, "uninitialized, check declaration order");
- const size_t granularity = os::vm_allocation_granularity();
- return align_up(_guard_index + 1, MAX2(_page_size, granularity));
-}
-
CardTableModRefBS::CardTableModRefBS(
- MemRegion whole_heap,
+ CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
- _whole_heap(whole_heap),
- _guard_index(0),
- _guard_region(),
- _last_valid_index(0),
- _page_size(os::vm_page_size()),
- _byte_map_size(0),
- _covered(NULL),
- _committed(NULL),
- _cur_covered_regions(0),
- _byte_map(NULL),
- byte_map_base(NULL),
- _defer_initial_card_mark(false)
-{
- assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
- assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");
+ _defer_initial_card_mark(false),
+ _card_table(card_table)
+{}
- assert(card_size <= 512, "card_size must be less than 512"); // why?
-
- _covered = new MemRegion[_max_covered_regions];
- if (_covered == NULL) {
- vm_exit_during_initialization("Could not allocate card table covered region set.");
- }
-}
+CardTableModRefBS::CardTableModRefBS(CardTable* card_table) :
+ ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)),
+ _defer_initial_card_mark(false),
+ _card_table(card_table)
+{}
void CardTableModRefBS::initialize() {
initialize_deferred_card_mark_barriers();
- _guard_index = cards_required(_whole_heap.word_size()) - 1;
- _last_valid_index = _guard_index - 1;
-
- _byte_map_size = compute_byte_map_size();
-
- HeapWord* low_bound = _whole_heap.start();
- HeapWord* high_bound = _whole_heap.end();
-
- _cur_covered_regions = 0;
- _committed = new MemRegion[_max_covered_regions];
- if (_committed == NULL) {
- vm_exit_during_initialization("Could not allocate card table committed region set.");
- }
-
- const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
- MAX2(_page_size, (size_t) os::vm_allocation_granularity());
- ReservedSpace heap_rs(_byte_map_size, rs_align, false);
-
- MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);
-
- os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
- _page_size, heap_rs.base(), heap_rs.size());
- if (!heap_rs.is_reserved()) {
- vm_exit_during_initialization("Could not reserve enough space for the "
- "card marking array");
- }
-
- // The assembler store_check code will do an unsigned shift of the oop,
- // then add it to byte_map_base, i.e.
- //
- // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
- _byte_map = (jbyte*) heap_rs.base();
- byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
- assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
- assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
-
- jbyte* guard_card = &_byte_map[_guard_index];
- uintptr_t guard_page = align_down((uintptr_t)guard_card, _page_size);
- _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
- os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
- !ExecMem, "card table last card");
- *guard_card = last_card;
-
- log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
- log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
- p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
- log_trace(gc, barrier)(" byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
}
CardTableModRefBS::~CardTableModRefBS() {
- if (_covered) {
- delete[] _covered;
- _covered = NULL;
- }
- if (_committed) {
- delete[] _committed;
- _committed = NULL;
- }
-}
-
-int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
- int i;
- for (i = 0; i < _cur_covered_regions; i++) {
- if (_covered[i].start() == base) return i;
- if (_covered[i].start() > base) break;
- }
- // If we didn't find it, create a new one.
- assert(_cur_covered_regions < _max_covered_regions,
- "too many covered regions");
- // Move the ones above up, to maintain sorted order.
- for (int j = _cur_covered_regions; j > i; j--) {
- _covered[j] = _covered[j-1];
- _committed[j] = _committed[j-1];
- }
- int res = i;
- _cur_covered_regions++;
- _covered[res].set_start(base);
- _covered[res].set_word_size(0);
- jbyte* ct_start = byte_for(base);
- uintptr_t ct_start_aligned = align_down((uintptr_t)ct_start, _page_size);
- _committed[res].set_start((HeapWord*)ct_start_aligned);
- _committed[res].set_word_size(0);
- return res;
-}
-
-int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
- for (int i = 0; i < _cur_covered_regions; i++) {
- if (_covered[i].contains(addr)) {
- return i;
- }
- }
- assert(0, "address outside of heap?");
- return -1;
-}
-
-HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
- HeapWord* max_end = NULL;
- for (int j = 0; j < ind; j++) {
- HeapWord* this_end = _committed[j].end();
- if (this_end > max_end) max_end = this_end;
- }
- return max_end;
-}
-
-MemRegion CardTableModRefBS::committed_unique_to_self(int self,
- MemRegion mr) const {
- MemRegion result = mr;
- for (int r = 0; r < _cur_covered_regions; r += 1) {
- if (r != self) {
- result = result.minus(_committed[r]);
- }
- }
- // Never include the guard page.
- result = result.minus(_guard_region);
- return result;
+ delete _card_table;
}
-void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
- // We don't change the start of a region, only the end.
- assert(_whole_heap.contains(new_region),
- "attempt to cover area not in reserved area");
- debug_only(verify_guard();)
- // collided is true if the expansion would push into another committed region
- debug_only(bool collided = false;)
- int const ind = find_covering_region_by_base(new_region.start());
- MemRegion const old_region = _covered[ind];
- assert(old_region.start() == new_region.start(), "just checking");
- if (new_region.word_size() != old_region.word_size()) {
- // Commit new or uncommit old pages, if necessary.
- MemRegion cur_committed = _committed[ind];
- // Extend the end of this _committed region
- // to cover the end of any lower _committed regions.
- // This forms overlapping regions, but never interior regions.
- HeapWord* const max_prev_end = largest_prev_committed_end(ind);
- if (max_prev_end > cur_committed.end()) {
- cur_committed.set_end(max_prev_end);
- }
- // Align the end up to a page size (starts are already aligned).
- jbyte* const new_end = byte_after(new_region.last());
- HeapWord* new_end_aligned = (HeapWord*) align_up(new_end, _page_size);
- assert((void*)new_end_aligned >= (void*) new_end, "align up, but less");
- // Check the other regions (excludes "ind") to ensure that
- // the new_end_aligned does not intrude onto the committed
- // space of another region.
- int ri = 0;
- for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
- if (new_end_aligned > _committed[ri].start()) {
- assert(new_end_aligned <= _committed[ri].end(),
- "An earlier committed region can't cover a later committed region");
- // Any region containing the new end
- // should start at or beyond the region found (ind)
- // for the new end (committed regions are not expected to
- // be proper subsets of other committed regions).
- assert(_committed[ri].start() >= _committed[ind].start(),
- "New end of committed region is inconsistent");
- new_end_aligned = _committed[ri].start();
- // new_end_aligned can be equal to the start of its
- // committed region (i.e., of "ind") if a second
- // region following "ind" also start at the same location
- // as "ind".
- assert(new_end_aligned >= _committed[ind].start(),
- "New end of committed region is before start");
- debug_only(collided = true;)
- // Should only collide with 1 region
- break;
- }
- }
-#ifdef ASSERT
- for (++ri; ri < _cur_covered_regions; ri++) {
- assert(!_committed[ri].contains(new_end_aligned),
- "New end of committed region is in a second committed region");
- }
-#endif
- // The guard page is always committed and should not be committed over.
- // "guarded" is used for assertion checking below and recalls the fact
- // that the would-be end of the new committed region would have
- // penetrated the guard page.
- HeapWord* new_end_for_commit = new_end_aligned;
-
- DEBUG_ONLY(bool guarded = false;)
- if (new_end_for_commit > _guard_region.start()) {
- new_end_for_commit = _guard_region.start();
- DEBUG_ONLY(guarded = true;)
- }
-
- if (new_end_for_commit > cur_committed.end()) {
- // Must commit new pages.
- MemRegion const new_committed =
- MemRegion(cur_committed.end(), new_end_for_commit);
-
- assert(!new_committed.is_empty(), "Region should not be empty here");
- os::commit_memory_or_exit((char*)new_committed.start(),
- new_committed.byte_size(), _page_size,
- !ExecMem, "card table expansion");
- // Use new_end_aligned (as opposed to new_end_for_commit) because
- // the cur_committed region may include the guard region.
- } else if (new_end_aligned < cur_committed.end()) {
- // Must uncommit pages.
- MemRegion const uncommit_region =
- committed_unique_to_self(ind, MemRegion(new_end_aligned,
- cur_committed.end()));
- if (!uncommit_region.is_empty()) {
- // It is not safe to uncommit cards if the boundary between
- // the generations is moving. A shrink can uncommit cards
- // owned by generation A but being used by generation B.
- if (!UseAdaptiveGCBoundary) {
- if (!os::uncommit_memory((char*)uncommit_region.start(),
- uncommit_region.byte_size())) {
- assert(false, "Card table contraction failed");
- // The call failed so don't change the end of the
- // committed region. This is better than taking the
- // VM down.
- new_end_aligned = _committed[ind].end();
- }
- } else {
- new_end_aligned = _committed[ind].end();
- }
- }
- }
- // In any case, we can reset the end of the current committed entry.
- _committed[ind].set_end(new_end_aligned);
-
-#ifdef ASSERT
- // Check that the last card in the new region is committed according
- // to the tables.
- bool covered = false;
- for (int cr = 0; cr < _cur_covered_regions; cr++) {
- if (_committed[cr].contains(new_end - 1)) {
- covered = true;
- break;
- }
- }
- assert(covered, "Card for end of new region not committed");
-#endif
-
- // The default of 0 is not necessarily clean cards.
- jbyte* entry;
- if (old_region.last() < _whole_heap.start()) {
- entry = byte_for(_whole_heap.start());
- } else {
- entry = byte_after(old_region.last());
- }
- assert(index_for(new_region.last()) < _guard_index,
- "The guard card will be overwritten");
- // This line commented out cleans the newly expanded region and
- // not the aligned up expanded region.
- // jbyte* const end = byte_after(new_region.last());
- jbyte* const end = (jbyte*) new_end_for_commit;
- assert((end >= byte_after(new_region.last())) || collided || guarded,
- "Expect to be beyond new region unless impacting another region");
- // do nothing if we resized downward.
-#ifdef ASSERT
- for (int ri = 0; ri < _cur_covered_regions; ri++) {
- if (ri != ind) {
- // The end of the new committed region should not
- // be in any existing region unless it matches
- // the start of the next region.
- assert(!_committed[ri].contains(end) ||
- (_committed[ri].start() == (HeapWord*) end),
- "Overlapping committed regions");
- }
- }
-#endif
- if (entry < end) {
- memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
- }
- }
- // In any case, the covered size changes.
- _covered[ind].set_word_size(new_region.word_size());
-
- log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
- log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
- ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
- log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
- ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
- log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
- p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
- log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
- p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
-
- // Touch the last card of the covered region to show that it
- // is committed (or SEGV).
- debug_only((void) (*byte_for(_covered[ind].last()));)
- debug_only(verify_guard();)
-}
-
-// Note that these versions are precise! The scanning code has to handle the
-// fact that the write barrier may be either precise or imprecise.
-
-void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
- assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
- assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
- jbyte* cur = byte_for(mr.start());
- jbyte* last = byte_after(mr.last());
- while (cur < last) {
- *cur = dirty_card;
- cur++;
- }
+void CardTableModRefBS::write_ref_array_work(MemRegion mr) {
+ _card_table->dirty_MemRegion(mr);
}
void CardTableModRefBS::invalidate(MemRegion mr) {
- assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
- assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
- for (int i = 0; i < _cur_covered_regions; i++) {
- MemRegion mri = mr.intersection(_covered[i]);
- if (!mri.is_empty()) dirty_MemRegion(mri);
- }
-}
-
-void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
- // Be conservative: only clean cards entirely contained within the
- // region.
- jbyte* cur;
- if (mr.start() == _whole_heap.start()) {
- cur = byte_for(mr.start());
- } else {
- assert(mr.start() > _whole_heap.start(), "mr is not covered.");
- cur = byte_after(mr.start() - 1);
- }
- jbyte* last = byte_after(mr.last());
- memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
-}
-
-void CardTableModRefBS::clear(MemRegion mr) {
- for (int i = 0; i < _cur_covered_regions; i++) {
- MemRegion mri = mr.intersection(_covered[i]);
- if (!mri.is_empty()) clear_MemRegion(mri);
- }
-}
-
-void CardTableModRefBS::dirty(MemRegion mr) {
- jbyte* first = byte_for(mr.start());
- jbyte* last = byte_after(mr.last());
- memset(first, dirty_card, last-first);
-}
-
-// Unlike several other card table methods, dirty_card_iterate()
-// iterates over dirty cards ranges in increasing address order.
-void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
- MemRegionClosure* cl) {
- for (int i = 0; i < _cur_covered_regions; i++) {
- MemRegion mri = mr.intersection(_covered[i]);
- if (!mri.is_empty()) {
- jbyte *cur_entry, *next_entry, *limit;
- for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
- cur_entry <= limit;
- cur_entry = next_entry) {
- next_entry = cur_entry + 1;
- if (*cur_entry == dirty_card) {
- size_t dirty_cards;
- // Accumulate maximal dirty card range, starting at cur_entry
- for (dirty_cards = 1;
- next_entry <= limit && *next_entry == dirty_card;
- dirty_cards++, next_entry++);
- MemRegion cur_cards(addr_for(cur_entry),
- dirty_cards*card_size_in_words);
- cl->do_MemRegion(cur_cards);
- }
- }
- }
- }
+ _card_table->invalidate(mr);
}
-MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
- bool reset,
- int reset_val) {
- for (int i = 0; i < _cur_covered_regions; i++) {
- MemRegion mri = mr.intersection(_covered[i]);
- if (!mri.is_empty()) {
- jbyte* cur_entry, *next_entry, *limit;
- for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
- cur_entry <= limit;
- cur_entry = next_entry) {
- next_entry = cur_entry + 1;
- if (*cur_entry == dirty_card) {
- size_t dirty_cards;
- // Accumulate maximal dirty card range, starting at cur_entry
- for (dirty_cards = 1;
- next_entry <= limit && *next_entry == dirty_card;
- dirty_cards++, next_entry++);
- MemRegion cur_cards(addr_for(cur_entry),
- dirty_cards*card_size_in_words);
- if (reset) {
- for (size_t i = 0; i < dirty_cards; i++) {
- cur_entry[i] = reset_val;
- }
- }
- return cur_cards;
- }
- }
- }
- }
- return MemRegion(mr.end(), mr.end());
-}
-
-uintx CardTableModRefBS::ct_max_alignment_constraint() {
- return card_size * os::vm_page_size();
-}
-
-void CardTableModRefBS::verify_guard() {
- // For product build verification
- guarantee(_byte_map[_guard_index] == last_card,
- "card table guard has been modified");
-}
-
-void CardTableModRefBS::verify() {
- verify_guard();
-}
-
-#ifndef PRODUCT
-void CardTableModRefBS::verify_region(MemRegion mr,
- jbyte val, bool val_equals) {
- jbyte* start = byte_for(mr.start());
- jbyte* end = byte_for(mr.last());
- bool failures = false;
- for (jbyte* curr = start; curr <= end; ++curr) {
- jbyte curr_val = *curr;
- bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
- if (failed) {
- if (!failures) {
- log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
- log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val);
- failures = true;
- }
- log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
- p2i(curr), p2i(addr_for(curr)),
- p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
- (int) curr_val);
- }
- }
- guarantee(!failures, "there should not have been any failures");
-}
-
-void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
- verify_region(mr, dirty_card, false /* val_equals */);
-}
-
-void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
- verify_region(mr, dirty_card, true /* val_equals */);
-}
-#endif
-
void CardTableModRefBS::print_on(outputStream* st) const {
- st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
- p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
+ _card_table->print_on(st);
}
// Helper for ReduceInitialCardMarks. For performance,
@@ -573,7 +121,7 @@
}
// If a previous card-mark was deferred, flush it now.
flush_deferred_card_mark_barrier(thread);
- if (new_obj->is_typeArray() || is_in_young(new_obj)) {
+ if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
// Arrays of non-references don't need a post-barrier.
// The deferred_card_mark region should be empty
// following the flush above.
@@ -586,7 +134,7 @@
thread->set_deferred_card_mark(mr);
} else {
// Do the card mark
- write_region(mr);
+ invalidate(mr);
}
}
}
@@ -610,7 +158,7 @@
{
// Verify that the storage points to a parsable object in heap
DEBUG_ONLY(oop old_obj = oop(deferred.start());)
- assert(!is_in_young(old_obj),
+ assert(!_card_table->is_in_young(old_obj),
"Else should have been filtered in on_slowpath_allocation_exit()");
assert(oopDesc::is_oop(old_obj, true), "Not an oop");
assert(deferred.word_size() == (size_t)(old_obj->size()),
@@ -633,3 +181,7 @@
// processing the card-table (or other remembered set).
flush_deferred_card_mark_barrier(thread);
}
+
+bool CardTableModRefBS::card_mark_must_follow_store() const {
+ return _card_table->scanned_concurrently();
+}
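The shape of the refactoring in this file is plain composition: the barrier set keeps only barrier policy and forwards all card-memory work to the CardTable it now owns. A minimal sketch of that ownership and forwarding (stub classes and illustrative names only, not the real declarations):

    #include <cstdio>

    struct MemRegion {};

    // Stand-in for CardTable: owns the card-marking state.
    struct CardTable {
      virtual ~CardTable() {}
      void dirty_MemRegion(MemRegion) { printf("dirtying cards\n"); }
      bool scanned_concurrently() const { return false; }
    };

    // Stand-in for CardTableModRefBS: holds a CardTable* and forwards.
    struct CardTableModRefBS {
      CardTable* _card_table;
      explicit CardTableModRefBS(CardTable* ct) : _card_table(ct) {}
      ~CardTableModRefBS() { delete _card_table; }  // takes ownership, as in the patch
      void write_ref_array_work(MemRegion mr) { _card_table->dirty_MemRegion(mr); }
      bool card_mark_must_follow_store() const {
        return _card_table->scanned_concurrently();  // was pure virtual before the patch
      }
    };

    int main() {
      CardTableModRefBS bs(new CardTable());
      bs.write_ref_array_work(MemRegion());
      printf("card mark must follow store: %d\n", bs.card_mark_must_follow_store());
      return 0;
    }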
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -28,6 +28,8 @@
#include "gc/shared/modRefBarrierSet.hpp"
#include "utilities/align.hpp"
+class CardTable;
+
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)
@@ -45,162 +47,29 @@
friend class VMStructs;
protected:
- enum CardValues {
- clean_card = -1,
- // The mask contains zeros in places for all other values.
- clean_card_mask = clean_card - 31,
-
- dirty_card = 0,
- precleaned_card = 1,
- claimed_card = 2,
- deferred_card = 4,
- last_card = 8,
- CT_MR_BS_last_reserved = 16
- };
-
// Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
// or INCLUDE_JVMCI is being used
- bool _defer_initial_card_mark;
-
- // a word's worth (row) of clean card values
- static const intptr_t clean_card_row = (intptr_t)(-1);
-
- // The declaration order of these const fields is important; see the
- // constructor before changing.
- const MemRegion _whole_heap; // the region covered by the card table
- size_t _guard_index; // index of very last element in the card
- // table; it is set to a guard value
- // (last_card) and should never be modified
- size_t _last_valid_index; // index of the last valid element
- const size_t _page_size; // page size used when mapping _byte_map
- size_t _byte_map_size; // in bytes
- jbyte* _byte_map; // the card marking array
-
- // Some barrier sets create tables whose elements correspond to parts of
- // the heap; the CardTableModRefBS is an example. Such barrier sets will
- // normally reserve space for such tables, and commit parts of the table
- // "covering" parts of the heap that are committed. At most one covered
- // region per generation is needed.
- static const int _max_covered_regions = 2;
-
- int _cur_covered_regions;
-
- // The covered regions should be in address order.
- MemRegion* _covered;
- // The committed regions correspond one-to-one to the covered regions.
- // They represent the card-table memory that has been committed to service
- // the corresponding covered region. It may be that committed region for
- // one covered region corresponds to a larger region because of page-size
- // roundings. Thus, a committed region for one covered region may
- // actually extend onto the card-table space for the next covered region.
- MemRegion* _committed;
-
- // The last card is a guard card, and we commit the page for it so
- // we can use the card for verification purposes. We make sure we never
- // uncommit the MemRegion for that page.
- MemRegion _guard_region;
-
- inline size_t compute_byte_map_size();
+ bool _defer_initial_card_mark;
+ CardTable* _card_table;
- // Finds and return the index of the region, if any, to which the given
- // region would be contiguous. If none exists, assign a new region and
- // returns its index. Requires that no more than the maximum number of
- // covered regions defined in the constructor are ever in use.
- int find_covering_region_by_base(HeapWord* base);
-
- // Same as above, but finds the region containing the given address
- // instead of starting at a given base address.
- int find_covering_region_containing(HeapWord* addr);
-
- // Resize one of the regions covered by the remembered set.
- virtual void resize_covered_region(MemRegion new_region);
-
- // Returns the leftmost end of a committed region corresponding to a
- // covered region before covered region "ind", or else "NULL" if "ind" is
- // the first covered region.
- HeapWord* largest_prev_committed_end(int ind) const;
-
- // Returns the part of the region mr that doesn't intersect with
- // any committed region other than self. Used to prevent uncommitting
- // regions that are also committed by other regions. Also protects
- // against uncommitting the guard region.
- MemRegion committed_unique_to_self(int self, MemRegion mr) const;
-
- // Mapping from address to card marking array entry
- jbyte* byte_for(const void* p) const {
- assert(_whole_heap.contains(p),
- "Attempt to access p = " PTR_FORMAT " out of bounds of "
- " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
- jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
- assert(result >= _byte_map && result < _byte_map + _byte_map_size,
- "out of bounds accessor for card marking array");
- return result;
- }
-
- // The card table byte one after the card marking array
- // entry for argument address. Typically used for higher bounds
- // for loops iterating through the card table.
- jbyte* byte_after(const void* p) const {
- return byte_for(p) + 1;
- }
-
- // Dirty the bytes corresponding to "mr" (not all of which must be
- // covered.)
- void dirty_MemRegion(MemRegion mr);
-
- // Clear (to clean_card) the bytes entirely contained within "mr" (not
- // all of which must be covered.)
- void clear_MemRegion(MemRegion mr);
+ CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
public:
- // Constants
- enum SomePublicConstants {
- card_shift = 9,
- card_size = 1 << card_shift,
- card_size_in_words = card_size / sizeof(HeapWord)
- };
+ CardTableModRefBS(CardTable* card_table);
+ ~CardTableModRefBS();
- static int clean_card_val() { return clean_card; }
- static int clean_card_mask_val() { return clean_card_mask; }
- static int dirty_card_val() { return dirty_card; }
- static int claimed_card_val() { return claimed_card; }
- static int precleaned_card_val() { return precleaned_card; }
- static int deferred_card_val() { return deferred_card; }
+ CardTable* card_table() const { return _card_table; }
virtual void initialize();
- // *** Barrier set functions.
-
- // Initialization utilities; covered_words is the size of the covered region
- // in, um, words.
- inline size_t cards_required(size_t covered_words) {
- // Add one for a guard card, used to detect errors.
- const size_t words = align_up(covered_words, card_size_in_words);
- return words / card_size_in_words + 1;
+ void write_region(MemRegion mr) {
+ invalidate(mr);
}
protected:
- CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
- ~CardTableModRefBS();
+ void write_ref_array_work(MemRegion mr);
public:
- void write_region(MemRegion mr) {
- dirty_MemRegion(mr);
- }
-
- protected:
- void write_ref_array_work(MemRegion mr) {
- dirty_MemRegion(mr);
- }
-
- public:
- bool is_aligned(HeapWord* addr) {
- return is_card_aligned(addr);
- }
-
- // *** Card-table-barrier-specific things.
-
// Record a reference update. Note that these versions are precise!
// The scanning code has to handle the fact that the write barrier may be
// either precise or imprecise. We make non-virtual inline variants of
@@ -208,115 +77,7 @@
template <DecoratorSet decorators, typename T>
void write_ref_field_post(T* field, oop newVal);
- // These are used by G1, when it uses the card table as a temporary data
- // structure for card claiming.
- bool is_card_dirty(size_t card_index) {
- return _byte_map[card_index] == dirty_card_val();
- }
-
- void mark_card_dirty(size_t card_index) {
- _byte_map[card_index] = dirty_card_val();
- }
-
- bool is_card_clean(size_t card_index) {
- return _byte_map[card_index] == clean_card_val();
- }
-
- // Card marking array base (adjusted for heap low boundary)
- // This would be the 0th element of _byte_map, if the heap started at 0x0.
- // But since the heap starts at some higher address, this points to somewhere
- // before the beginning of the actual _byte_map.
- jbyte* byte_map_base;
-
- // Return true if "p" is at the start of a card.
- bool is_card_aligned(HeapWord* p) {
- jbyte* pcard = byte_for(p);
- return (addr_for(pcard) == p);
- }
-
- HeapWord* align_to_card_boundary(HeapWord* p) {
- jbyte* pcard = byte_for(p + card_size_in_words - 1);
- return addr_for(pcard);
- }
-
- // The kinds of precision a CardTableModRefBS may offer.
- enum PrecisionStyle {
- Precise,
- ObjHeadPreciseArray
- };
-
- // Tells what style of precision this card table offers.
- PrecisionStyle precision() {
- return ObjHeadPreciseArray; // Only one supported for now.
- }
-
- // ModRefBS functions.
virtual void invalidate(MemRegion mr);
- void clear(MemRegion mr);
- void dirty(MemRegion mr);
-
- // *** Card-table-RemSet-specific things.
-
- static uintx ct_max_alignment_constraint();
-
- // Apply closure "cl" to the dirty cards containing some part of
- // MemRegion "mr".
- void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);
-
- // Return the MemRegion corresponding to the first maximal run
- // of dirty cards lying completely within MemRegion mr.
- // If reset is "true", then sets those card table entries to the given
- // value.
- MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
- int reset_val);
-
- // Provide read-only access to the card table array.
- const jbyte* byte_for_const(const void* p) const {
- return byte_for(p);
- }
- const jbyte* byte_after_const(const void* p) const {
- return byte_after(p);
- }
-
- // Mapping from card marking array entry to address of first word
- HeapWord* addr_for(const jbyte* p) const {
- assert(p >= _byte_map && p < _byte_map + _byte_map_size,
- "out of bounds access to card marking array. p: " PTR_FORMAT
- " _byte_map: " PTR_FORMAT " _byte_map + _byte_map_size: " PTR_FORMAT,
- p2i(p), p2i(_byte_map), p2i(_byte_map + _byte_map_size));
- size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
- HeapWord* result = (HeapWord*) (delta << card_shift);
- assert(_whole_heap.contains(result),
- "Returning result = " PTR_FORMAT " out of bounds of "
- " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(result), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
- return result;
- }
-
- // Mapping from address to card marking array index.
- size_t index_for(void* p) {
- assert(_whole_heap.contains(p),
- "Attempt to access p = " PTR_FORMAT " out of bounds of "
- " card marking array's _whole_heap = [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(p), p2i(_whole_heap.start()), p2i(_whole_heap.end()));
- return byte_for(p) - _byte_map;
- }
-
- const jbyte* byte_for_index(const size_t card_index) const {
- return _byte_map + card_index;
- }
-
- // Print a description of the memory for the barrier set
- virtual void print_on(outputStream* st) const;
-
- void verify();
- void verify_guard();
-
- // val_equals -> it will check that all cards covered by mr equal val
- // !val_equals -> it will check that all cards covered by mr do not equal val
- void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
- void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
- void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
// ReduceInitialCardMarks
void initialize_deferred_card_mark_barriers();
@@ -352,15 +113,15 @@
// barrier until the next slow-path allocation or gc-related safepoint.)
// This interface answers whether a particular barrier type needs the card
// mark to be thus strictly sequenced after the stores.
- virtual bool card_mark_must_follow_store() const = 0;
-
- virtual bool is_in_young(oop obj) const = 0;
+ virtual bool card_mark_must_follow_store() const;
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
virtual void on_thread_detach(JavaThread* thread);
virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
+ virtual void print_on(outputStream* st) const;
+
template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
};
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,17 +26,18 @@
#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
#include "runtime/orderAccess.inline.hpp"
template <DecoratorSet decorators, typename T>
inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) {
- volatile jbyte* byte = byte_for(field);
+ volatile jbyte* byte = _card_table->byte_for(field);
if (UseConcMarkSweepGC) {
// Perform a releasing store if using CMS so that it may
// scan and clear the cards concurrently during pre-cleaning.
- OrderAccess::release_store(byte, jbyte(dirty_card));
+ OrderAccess::release_store(byte, CardTable::dirty_card_val());
} else {
- *byte = dirty_card;
+ *byte = CardTable::dirty_card_val();
}
}
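The inline barrier above picks a releasing store only when CMS may preclean cards concurrently. A hedged sketch of the same ordering idea in standard C++ atomics (fake heap and flag; this is not the VM's actual code path):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static const int card_shift = 9;
    static signed char heap[64 * 512];             // fake heap: 64 cards of 512 bytes
    static std::atomic<signed char> byte_map[64];  // one card byte per card
    static const signed char dirty_card = 0;
    static const signed char clean_card = -1;
    static bool concurrent_precleaning = true;     // stand-in for UseConcMarkSweepGC

    void write_ref_field_post(void* field) {
      uintptr_t offset = (uintptr_t)field - (uintptr_t)heap;
      std::atomic<signed char>& byte = byte_map[offset >> card_shift];
      if (concurrent_precleaning) {
        // Release store: a precleaning thread that sees the dirty mark is
        // guaranteed to also see the reference store that preceded it.
        byte.store(dirty_card, std::memory_order_release);
      } else {
        byte.store(dirty_card, std::memory_order_relaxed);
      }
    }

    int main() {
      for (auto& b : byte_map) b.store(clean_card, std::memory_order_relaxed);
      write_ref_field_post(&heap[3 * 512 + 40]);   // store to a field in card 3
      printf("card 3: %s\n", byte_map[3].load() == dirty_card ? "dirty" : "clean");
      return 0;
    }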
--- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp Fri Mar 09 00:28:50 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "memory/allocation.inline.hpp"
-#include "gc/shared/space.inline.hpp"
-
-CardTableModRefBSForCTRS::CardTableModRefBSForCTRS(MemRegion whole_heap) :
- CardTableModRefBS(
- whole_heap,
- BarrierSet::FakeRtti(BarrierSet::CardTableForRS)),
- // LNC functionality
- _lowest_non_clean(NULL),
- _lowest_non_clean_chunk_size(NULL),
- _lowest_non_clean_base_chunk_index(NULL),
- _last_LNC_resizing_collection(NULL)
-{ }
-
-void CardTableModRefBSForCTRS::initialize() {
- CardTableModRefBS::initialize();
- _lowest_non_clean =
- NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
- _lowest_non_clean_chunk_size =
- NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
- _lowest_non_clean_base_chunk_index =
- NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
- _last_LNC_resizing_collection =
- NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
- if (_lowest_non_clean == NULL
- || _lowest_non_clean_chunk_size == NULL
- || _lowest_non_clean_base_chunk_index == NULL
- || _last_LNC_resizing_collection == NULL)
- vm_exit_during_initialization("couldn't allocate an LNC array.");
- for (int i = 0; i < _max_covered_regions; i++) {
- _lowest_non_clean[i] = NULL;
- _lowest_non_clean_chunk_size[i] = 0;
- _last_LNC_resizing_collection[i] = -1;
- }
-}
-
-CardTableModRefBSForCTRS::~CardTableModRefBSForCTRS() {
- if (_lowest_non_clean) {
- FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
- _lowest_non_clean = NULL;
- }
- if (_lowest_non_clean_chunk_size) {
- FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
- _lowest_non_clean_chunk_size = NULL;
- }
- if (_lowest_non_clean_base_chunk_index) {
- FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
- _lowest_non_clean_base_chunk_index = NULL;
- }
- if (_last_LNC_resizing_collection) {
- FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
- _last_LNC_resizing_collection = NULL;
- }
-}
-
-bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
- return
- card_is_dirty_wrt_gen_iter(cv) ||
- _rs->is_prev_nonclean_card_val(cv);
-}
-
-bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
- return
- cv != clean_card &&
- (card_is_dirty_wrt_gen_iter(cv) ||
- CardTableRS::youngergen_may_have_been_dirty(cv));
-}
-
-void CardTableModRefBSForCTRS::non_clean_card_iterate_possibly_parallel(
- Space* sp,
- MemRegion mr,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- uint n_threads)
-{
- if (!mr.is_empty()) {
- if (n_threads > 0) {
-#if INCLUDE_ALL_GCS
- non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
-#else // INCLUDE_ALL_GCS
- fatal("Parallel gc not supported here.");
-#endif // INCLUDE_ALL_GCS
- } else {
- // clear_cl finds contiguous dirty ranges of cards to process and clear.
-
- // This is the single-threaded version used by DefNew.
- const bool parallel = false;
-
- DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
- ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
- clear_cl.do_MemRegion(mr);
- }
- }
-}
-
-bool CardTableModRefBSForCTRS::is_in_young(oop obj) const {
- return GenCollectedHeap::heap()->is_in_young(obj);
-}
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,41 +75,6 @@
}
-CardTableRS::CardTableRS(MemRegion whole_heap) :
- _bs(NULL),
- _cur_youngergen_card_val(youngergenP1_card)
-{
- _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
- _ct_bs->initialize();
- set_bs(_ct_bs);
- // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
- // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
- uint max_gens = 2;
- _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
- mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
- if (_last_cur_val_in_gen == NULL) {
- vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
- }
- for (uint i = 0; i < max_gens + 1; i++) {
- _last_cur_val_in_gen[i] = clean_card_val();
- }
- _ct_bs->set_CTRS(this);
-}
-
-CardTableRS::~CardTableRS() {
- if (_ct_bs) {
- delete _ct_bs;
- _ct_bs = NULL;
- }
- if (_last_cur_val_in_gen) {
- FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
- }
-}
-
-void CardTableRS::resize_covered_region(MemRegion new_region) {
- _ct_bs->resize_covered_region(new_region);
-}
-
jbyte CardTableRS::find_unused_youngergenP_card_value() {
for (jbyte v = youngergenP1_card;
v < cur_youngergen_and_prev_nonclean_card;
@@ -247,7 +212,7 @@
// fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
if (is_word_aligned(cur_entry)) {
jbyte* cur_row = cur_entry - BytesPerWord;
- while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
+ while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row_val()) {
cur_row -= BytesPerWord;
}
cur_entry = cur_row + BytesPerWord;
@@ -283,7 +248,7 @@
// cur-younger-gen ==> cur_younger_gen
// cur_youngergen_and_prev_nonclean_card ==> no change.
void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
- volatile jbyte* entry = _ct_bs->byte_for(field);
+ volatile jbyte* entry = byte_for(field);
do {
jbyte entry_val = *entry;
// We put this first because it's probably the most common case.
@@ -341,7 +306,7 @@
ShouldNotReachHere();
}
#endif
- _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
+ non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
}
void CardTableRS::clear_into_younger(Generation* old_gen) {
@@ -642,5 +607,115 @@
// generational heaps.
VerifyCTGenClosure blk(this);
GenCollectedHeap::heap()->generation_iterate(&blk, false);
- _ct_bs->verify();
+ CardTable::verify();
+}
+
+CardTableRS::CardTableRS(MemRegion whole_heap) :
+ CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC && CMSPrecleaningEnabled),
+ _cur_youngergen_card_val(youngergenP1_card),
+ // LNC functionality
+ _lowest_non_clean(NULL),
+ _lowest_non_clean_chunk_size(NULL),
+ _lowest_non_clean_base_chunk_index(NULL),
+ _last_LNC_resizing_collection(NULL)
+{
+ // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
+ // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
+ uint max_gens = 2;
+ _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
+ mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
+ if (_last_cur_val_in_gen == NULL) {
+ vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
+ }
+ for (uint i = 0; i < max_gens + 1; i++) {
+ _last_cur_val_in_gen[i] = clean_card_val();
+ }
+}
+
+CardTableRS::~CardTableRS() {
+ if (_last_cur_val_in_gen) {
+ FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
+ _last_cur_val_in_gen = NULL;
+ }
+ if (_lowest_non_clean) {
+ FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
+ _lowest_non_clean = NULL;
+ }
+ if (_lowest_non_clean_chunk_size) {
+ FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
+ _lowest_non_clean_chunk_size = NULL;
+ }
+ if (_lowest_non_clean_base_chunk_index) {
+ FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
+ _lowest_non_clean_base_chunk_index = NULL;
+ }
+ if (_last_LNC_resizing_collection) {
+ FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
+ _last_LNC_resizing_collection = NULL;
+ }
}
+
+void CardTableRS::initialize() {
+ CardTable::initialize();
+ _lowest_non_clean =
+ NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
+ _lowest_non_clean_chunk_size =
+ NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
+ _lowest_non_clean_base_chunk_index =
+ NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
+ _last_LNC_resizing_collection =
+ NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
+ if (_lowest_non_clean == NULL
+ || _lowest_non_clean_chunk_size == NULL
+ || _lowest_non_clean_base_chunk_index == NULL
+ || _last_LNC_resizing_collection == NULL)
+ vm_exit_during_initialization("couldn't allocate an LNC array.");
+ for (int i = 0; i < _max_covered_regions; i++) {
+ _lowest_non_clean[i] = NULL;
+ _lowest_non_clean_chunk_size[i] = 0;
+ _last_LNC_resizing_collection[i] = -1;
+ }
+}
+
+bool CardTableRS::card_will_be_scanned(jbyte cv) {
+ return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
+}
+
+bool CardTableRS::card_may_have_been_dirty(jbyte cv) {
+ return
+ cv != clean_card &&
+ (card_is_dirty_wrt_gen_iter(cv) ||
+ CardTableRS::youngergen_may_have_been_dirty(cv));
+}
+
+void CardTableRS::non_clean_card_iterate_possibly_parallel(
+ Space* sp,
+ MemRegion mr,
+ OopsInGenClosure* cl,
+ CardTableRS* ct,
+ uint n_threads)
+{
+ if (!mr.is_empty()) {
+ if (n_threads > 0) {
+#if INCLUDE_ALL_GCS
+ non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
+#else // INCLUDE_ALL_GCS
+ fatal("Parallel gc not supported here.");
+#endif // INCLUDE_ALL_GCS
+ } else {
+ // clear_cl finds contiguous dirty ranges of cards to process and clear.
+
+ // This is the single-threaded version used by DefNew.
+ const bool parallel = false;
+
+ DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
+ ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
+
+ clear_cl.do_MemRegion(mr);
+ }
+ }
+}
+
+bool CardTableRS::is_in_young(oop obj) const {
+ return GenCollectedHeap::heap()->is_in_young(obj);
+}
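One detail in the hunks above worth unpacking: since clean_card is -1, a word full of clean card bytes is all-ones (clean_card_row), so the scan can fast-forward over clean runs a word at a time. A small standalone sketch of that trick (local names; memcpy is used here to sidestep aliasing concerns that the VM handles differently):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const signed char clean_card = -1;
    static const intptr_t clean_card_row = (intptr_t)(-1);  // a whole word of clean cards

    int main() {
      signed char cards[64];
      memset(cards, clean_card, sizeof(cards));
      cards[5] = 0;  // one dirty card low in the range

      signed char* limit = cards;
      signed char* cur = cards + 56;  // word-aligned scan start (assumed)
      // Walk backwards a word at a time while whole words compare clean.
      while (cur - (long)sizeof(intptr_t) >= limit) {
        intptr_t word;
        memcpy(&word, cur - sizeof(intptr_t), sizeof(word));
        if (word != clean_card_row) break;  // a non-clean card is in this word
        cur -= sizeof(intptr_t);
      }
      printf("byte-wise scan resumes %td bytes from the base\n", cur - cards);
      return 0;
    }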
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,11 @@
#ifndef SHARE_VM_GC_SHARED_CARDTABLERS_HPP
#define SHARE_VM_GC_SHARED_CARDTABLERS_HPP
-#include "gc/shared/cardTableModRefBSForCTRS.hpp"
+#include "gc/shared/cardTable.hpp"
#include "memory/memRegion.hpp"
+#include "oops/oop.hpp"
+class DirtyCardToOopClosure;
class Generation;
class Space;
class OopsInGenClosure;
@@ -46,44 +48,28 @@
// This RemSet uses a card table both as shared data structure
// for a mod ref barrier set and for the rem set information.
-class CardTableRS: public CHeapObj<mtGC> {
+class CardTableRS: public CardTable {
friend class VMStructs;
// Below are private classes used in impl.
friend class VerifyCTSpaceClosure;
friend class ClearNoncleanCardWrapper;
- static jbyte clean_card_val() {
- return CardTableModRefBSForCTRS::clean_card;
- }
-
- static intptr_t clean_card_row() {
- return CardTableModRefBSForCTRS::clean_card_row;
- }
-
- static bool
- card_is_dirty_wrt_gen_iter(jbyte cv) {
- return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
- }
-
CLDRemSet _cld_rem_set;
- BarrierSet* _bs;
-
- CardTableModRefBSForCTRS* _ct_bs;
void verify_space(Space* s, HeapWord* gen_start);
enum ExtendedCardValue {
- youngergen_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 1,
+ youngergen_card = CT_MR_BS_last_reserved + 1,
// These are for parallel collection.
// There are three P (parallel) youngergen card values. In general, this
// needs to be more than the number of generations (including the perm
// gen) that might have younger_refs_do invoked on them separately. So
// if we add more gens, we have to add more values.
- youngergenP1_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 2,
- youngergenP2_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 3,
- youngergenP3_card = CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 4,
+ youngergenP1_card = CT_MR_BS_last_reserved + 2,
+ youngergenP2_card = CT_MR_BS_last_reserved + 3,
+ youngergenP3_card = CT_MR_BS_last_reserved + 4,
cur_youngergen_and_prev_nonclean_card =
- CardTableModRefBSForCTRS::CT_MR_BS_last_reserved + 5
+ CT_MR_BS_last_reserved + 5
};
// An array that contains, for each generation, the card table value last
@@ -116,16 +102,8 @@
CardTableRS(MemRegion whole_heap);
~CardTableRS();
- // Return the barrier set associated with "this."
- BarrierSet* bs() { return _bs; }
-
- // Set the barrier set.
- void set_bs(BarrierSet* bs) { _bs = bs; }
-
CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
- CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }
-
void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
// Override.
@@ -137,7 +115,7 @@
void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads);
void inline_write_ref_field_gc(void* field, oop new_val) {
- jbyte* byte = _ct_bs->byte_for(field);
+ jbyte* byte = byte_for(field);
*byte = youngergen_card;
}
void write_ref_field_gc_work(void* field, oop new_val) {
@@ -149,30 +127,17 @@
// a younger card in the current collection.
virtual void write_ref_field_gc_par(void* field, oop new_val);
- void resize_covered_region(MemRegion new_region);
-
bool is_aligned(HeapWord* addr) {
- return _ct_bs->is_card_aligned(addr);
+ return is_card_aligned(addr);
}
void verify();
+ void initialize();
- void clear(MemRegion mr) { _ct_bs->clear(mr); }
void clear_into_younger(Generation* old_gen);
- void invalidate(MemRegion mr) {
- _ct_bs->invalidate(mr);
- }
void invalidate_or_clear(Generation* old_gen);
- static uintx ct_max_alignment_constraint() {
- return CardTableModRefBSForCTRS::ct_max_alignment_constraint();
- }
-
- jbyte* byte_for(void* p) { return _ct_bs->byte_for(p); }
- jbyte* byte_after(void* p) { return _ct_bs->byte_after(p); }
- HeapWord* addr_for(jbyte* p) { return _ct_bs->addr_for(p); }
-
bool is_prev_nonclean_card_val(jbyte v) {
return
youngergen_card <= v &&
@@ -184,6 +149,94 @@
return cv == CardTableRS::cur_youngergen_and_prev_nonclean_card;
}
+ // *** Support for parallel card scanning.
+
+ // dirty and precleaned are equivalent wrt younger_refs_iter.
+ static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
+ return cv == dirty_card || cv == precleaned_card;
+ }
+
+ // Returns "true" iff the value "cv" will cause the card containing it
+ // to be scanned in the current traversal. May be overridden by
+ // subtypes.
+ bool card_will_be_scanned(jbyte cv);
+
+ // Returns "true" iff the value "cv" may have represented a dirty card at
+ // some point.
+ bool card_may_have_been_dirty(jbyte cv);
+
+ // Iterate over the portion of the card-table which covers the given
+ // region mr in the given space and apply cl to any dirty sub-regions
+ // of mr. Clears the dirty cards as they are processed.
+ void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
+ OopsInGenClosure* cl, CardTableRS* ct,
+ uint n_threads);
+
+ // Work method used to implement non_clean_card_iterate_possibly_parallel()
+ // above in the parallel case.
+ void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+ OopsInGenClosure* cl, CardTableRS* ct,
+ uint n_threads);
+
+ // This is an array, one element per covered region of the card table.
+ // Each entry is itself an array, with one element per chunk in the
+ // covered region. Each entry of these arrays is the lowest non-clean
+ // card of the corresponding chunk containing part of an object from the
+ // previous chunk, or else NULL.
+ typedef jbyte* CardPtr;
+ typedef CardPtr* CardArr;
+ CardArr* _lowest_non_clean;
+ size_t* _lowest_non_clean_chunk_size;
+ uintptr_t* _lowest_non_clean_base_chunk_index;
+ volatile int* _last_LNC_resizing_collection;
+
+ // Initializes "lowest_non_clean" to point to the array for the region
+ // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
+ // index corresponding to the first element of that array.
+ // Ensures that these arrays are of sufficient size, allocating if necessary.
+ // May be called by several threads concurrently.
+ void get_LNC_array_for_space(Space* sp,
+ jbyte**& lowest_non_clean,
+ uintptr_t& lowest_non_clean_base_chunk_index,
+ size_t& lowest_non_clean_chunk_size);
+
+ // Returns the number of chunks necessary to cover "mr".
+ size_t chunks_to_cover(MemRegion mr) {
+ return (size_t)(addr_to_chunk_index(mr.last()) -
+ addr_to_chunk_index(mr.start()) + 1);
+ }
+
+ // Returns the index of the chunk in a stride which
+ // covers the given address.
+ uintptr_t addr_to_chunk_index(const void* addr) {
+ uintptr_t card = (uintptr_t) byte_for(addr);
+ return card / ParGCCardsPerStrideChunk;
+ }
+
+ // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
+ // to the cards in the stride (of n_strides) within the given space.
+ void process_stride(Space* sp,
+ MemRegion used,
+ jint stride, int n_strides,
+ OopsInGenClosure* cl,
+ CardTableRS* ct,
+ jbyte** lowest_non_clean,
+ uintptr_t lowest_non_clean_base_chunk_index,
+ size_t lowest_non_clean_chunk_size);
+
+ // Makes sure that chunk boundaries are handled appropriately, by
+ // adjusting the min_done of dcto_cl, and by using a special card-table
+ // value to indicate how min_done should be set.
+ void process_chunk_boundaries(Space* sp,
+ DirtyCardToOopClosure* dcto_cl,
+ MemRegion chunk_mr,
+ MemRegion used,
+ jbyte** lowest_non_clean,
+ uintptr_t lowest_non_clean_base_chunk_index,
+ size_t lowest_non_clean_chunk_size);
+
+ virtual bool is_in_young(oop obj) const;
+
};
class ClearNoncleanCardWrapper: public MemRegionClosure {
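The chunk bookkeeping declared above reduces to integer division over card-byte addresses. A tiny sketch of chunks_to_cover()/addr_to_chunk_index() (ParGCCardsPerStrideChunk is a VM flag; a fixed stand-in value is assumed here):

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t cards_per_stride_chunk = 256;  // stand-in for ParGCCardsPerStrideChunk

    // Mirrors addr_to_chunk_index(): which stride chunk covers this card byte.
    static uintptr_t addr_to_chunk_index(uintptr_t card_byte_addr) {
      return card_byte_addr / cards_per_stride_chunk;
    }

    int main() {
      // Assume the card bytes for a covered region occupy these addresses.
      uintptr_t first_card = 0x10000;
      uintptr_t last_card  = first_card + 1000;  // region spans 1001 card bytes
      size_t chunks = (size_t)(addr_to_chunk_index(last_card) -
                               addr_to_chunk_index(first_card) + 1);  // chunks_to_cover()
      printf("%zu chunks of %lu cards cover the region\n",
             chunks, (unsigned long)cards_per_stride_chunk);
      return 0;
    }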
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -31,6 +31,7 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
@@ -110,7 +111,10 @@
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
_rem_set = new CardTableRS(reserved_region());
- set_barrier_set(rem_set()->bs());
+ _rem_set->initialize();
+ CardTableModRefBS *bs = new CardTableModRefBS(_rem_set);
+ bs->initialize();
+ set_barrier_set(bs);
ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size(), false, false);
_young_gen = _young_gen_spec->init(young_rs, rem_set());
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -47,10 +47,6 @@
virtual void invalidate(MemRegion mr) = 0;
virtual void write_region(MemRegion mr) = 0;
- // The caller guarantees that "mr" contains no references. (Perhaps it's
- // objects have been moved elsewhere.)
- virtual void clear(MemRegion mr) = 0;
-
// The ModRef abstraction introduces pre and post barriers
template <DecoratorSet decorators, typename BarrierSetT>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
--- a/src/hotspot/share/gc/shared/space.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/space.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
HeapWord* top_obj) {
if (top_obj != NULL) {
if (_sp->block_is_obj(top_obj)) {
- if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
+ if (_precision == CardTable::ObjHeadPreciseArray) {
if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
// An arrayOop is starting on the dirty card - since we do exact
// store checks for objArrays we are done.
@@ -125,11 +125,11 @@
HeapWord* bottom_obj;
HeapWord* top_obj;
- assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
- _precision == CardTableModRefBS::Precise,
+ assert(_precision == CardTable::ObjHeadPreciseArray ||
+ _precision == CardTable::Precise,
"Only ones we deal with for now.");
- assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
+ assert(_precision != CardTable::ObjHeadPreciseArray ||
_cl->idempotent() || _last_bottom == NULL ||
top <= _last_bottom,
"Not decreasing");
@@ -147,7 +147,7 @@
top = get_actual_top(top, top_obj);
// If the previous call did some part of this region, don't redo.
- if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
+ if (_precision == CardTable::ObjHeadPreciseArray &&
_min_done != NULL &&
_min_done < top) {
top = _min_done;
@@ -159,7 +159,7 @@
bottom = MIN2(bottom, top);
MemRegion extended_mr = MemRegion(bottom, top);
assert(bottom <= top &&
- (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
+ (_precision != CardTable::ObjHeadPreciseArray ||
_min_done == NULL ||
top <= _min_done),
"overlap!");
@@ -180,7 +180,7 @@
}
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new DirtyCardToOopClosure(this, cl, precision, boundary);
@@ -189,7 +189,7 @@
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
HeapWord* top_obj) {
if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
- if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
+ if (_precision == CardTable::ObjHeadPreciseArray) {
if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
// An arrayOop is starting on the dirty card - since we do exact
// store checks for objArrays we are done.
@@ -260,7 +260,7 @@
DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel) {
return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
--- a/src/hotspot/share/gc/shared/space.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/gc/shared/space.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_GC_SHARED_SPACE_HPP
#include "gc/shared/blockOffsetTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
@@ -181,7 +181,7 @@
// depending on the type of space in which the closure will
// operate. ResourceArea allocated.
virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);
@@ -253,7 +253,7 @@
protected:
ExtendedOopClosure* _cl;
Space* _sp;
- CardTableModRefBS::PrecisionStyle _precision;
+ CardTable::PrecisionStyle _precision;
HeapWord* _boundary; // If non-NULL, process only non-NULL oops
// pointing below boundary.
HeapWord* _min_done; // ObjHeadPreciseArray precision requires
@@ -282,7 +282,7 @@
public:
DirtyCardToOopClosure(Space* sp, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary) :
_sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
_min_done(NULL) {
@@ -619,7 +619,7 @@
// Override.
DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary,
bool parallel);
@@ -694,7 +694,7 @@
public:
FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary) :
DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};
@@ -723,7 +723,7 @@
public:
ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
- CardTableModRefBS::PrecisionStyle precision,
+ CardTable::PrecisionStyle precision,
HeapWord* boundary) :
FilteringDCTOC(sp, cl, precision, boundary)
{}
--- a/src/hotspot/share/include/jvm.h Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/include/jvm.h Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -119,6 +119,9 @@
* java.lang.Runtime
*/
JNIEXPORT void JNICALL
+JVM_BeforeHalt();
+
+JNIEXPORT void JNICALL
JVM_Halt(jint code);
JNIEXPORT void JNICALL
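JVM_BeforeHalt gives the VM a hook to run pre-termination work before JVM_Halt tears the process down. A hedged sketch of the libjava glue that would bridge java.lang.Shutdown's native method to this entry point, following the usual Java_<class>_<method> pattern (the actual Shutdown.c code is not part of this hunk and may differ):

    #include "jni.h"
    #include "jvm.h"

    // Illustrative bridge only; assumes the conventional one-line JNI stub.
    JNIEXPORT void JNICALL
    Java_java_lang_Shutdown_beforeHalt(JNIEnv *env, jclass ignored)
    {
        JVM_BeforeHalt();
    }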
--- a/src/hotspot/share/interpreter/bytecode.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/interpreter/bytecode.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -123,6 +123,11 @@
assert(cpcache() != NULL, "do not call this from verifier or rewriter");
}
+int Bytecode_invoke::size_of_parameters() const {
+ ArgumentSizeComputer asc(signature());
+ return asc.size() + (has_receiver() ? 1 : 0);
+}
+
Symbol* Bytecode_member_ref::klass() const {
return constants()->klass_ref_at_noresolve(index());
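size_of_parameters() above is the receiver-plus-descriptor slot count: ArgumentSizeComputer walks the method signature, where long (J) and double (D) occupy two slots each and everything else one, and has_receiver() adds a slot for 'this'. A self-contained sketch of that accounting, assuming standard JVM descriptor syntax (ArgumentSizeComputer itself is HotSpot-internal and not reproduced here):

    #include <cstdio>

    // Counts argument slots in a JVM method descriptor: J/D take two
    // slots, everything else (including array and object references) one.
    static int arg_slots(const char* sig) {
      int slots = 0;
      const char* p = sig + 1;                       // skip '('
      while (*p != ')') {
        bool is_array = false;
        while (*p == '[') { is_array = true; p++; }  // array dims add nothing
        if (*p == 'L') {                             // object type: skip to ';'
          while (*p != ';') p++;
        }
        slots += (!is_array && (*p == 'J' || *p == 'D')) ? 2 : 1;
        p++;
      }
      return slots;
    }

    int main() {
      // An invokevirtual with signature (JI)V: 2 + 1 argument slots,
      // plus 1 for the receiver, i.e. what size_of_parameters() would return.
      printf("%d\n", arg_slots("(JI)V") + 1);        // prints 4
      return 0;
    }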
--- a/src/hotspot/share/interpreter/bytecode.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/interpreter/bytecode.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -197,7 +197,7 @@
BasicType result_type() const; // returns the result type of the getfield or invoke
};
-// Abstraction for invoke_{virtual, static, interface, special}
+// Abstraction for invoke_{virtual, static, interface, special, dynamic, handle}
class Bytecode_invoke: public Bytecode_member_ref {
protected:
@@ -231,6 +231,8 @@
bool has_appendix() { return cpcache_entry()->has_appendix(); }
+ int size_of_parameters() const;
+
private:
// Helper to skip verification. Used is_valid() to check if the result is really an invoke
inline friend Bytecode_invoke Bytecode_invoke_check(const methodHandle& method, int bci);
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -39,6 +39,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "utilities/align.hpp"
@@ -97,6 +98,32 @@
}
}
+objArrayOop CodeInstaller::sites() {
+ return (objArrayOop) JNIHandles::resolve(_sites_handle);
+}
+
+arrayOop CodeInstaller::code() {
+ return (arrayOop) JNIHandles::resolve(_code_handle);
+}
+
+arrayOop CodeInstaller::data_section() {
+ return (arrayOop) JNIHandles::resolve(_data_section_handle);
+}
+
+objArrayOop CodeInstaller::data_section_patches() {
+ return (objArrayOop) JNIHandles::resolve(_data_section_patches_handle);
+}
+
+#ifndef PRODUCT
+objArrayOop CodeInstaller::comments() {
+ return (objArrayOop) JNIHandles::resolve(_comments_handle);
+}
+#endif
+
+oop CodeInstaller::word_kind() {
+ return JNIHandles::resolve(_word_kind_handle);
+}
+
// creates a HotSpot oop map out of the byte arrays provided by DebugInfo
OopMap* CodeInstaller::create_oop_map(Handle debug_info, TRAPS) {
Handle reference_map(THREAD, DebugInfo::referenceMap(debug_info));
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -188,15 +188,15 @@
void pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle method, jint pc_offset, TRAPS);
void pd_relocate_poll(address pc, jint mark, TRAPS);
- objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }
- arrayOop code() { return (arrayOop) JNIHandles::resolve(_code_handle); }
- arrayOop data_section() { return (arrayOop) JNIHandles::resolve(_data_section_handle); }
- objArrayOop data_section_patches() { return (objArrayOop) JNIHandles::resolve(_data_section_patches_handle); }
+ objArrayOop sites();
+ arrayOop code();
+ arrayOop data_section();
+ objArrayOop data_section_patches();
#ifndef PRODUCT
- objArrayOop comments() { return (objArrayOop) JNIHandles::resolve(_comments_handle); }
+ objArrayOop comments();
#endif
- oop word_kind() { return (oop) JNIHandles::resolve(_word_kind_handle); }
+ oop word_kind();
public:
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
@@ -35,6 +36,7 @@
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
@@ -48,6 +50,7 @@
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/cardTable.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/timerTrace.hpp"
@@ -205,10 +208,10 @@
BarrierSet* bs = Universe::heap()->barrier_set();
if (bs->is_a(BarrierSet::CardTableModRef)) {
- jbyte* base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
- assert(base != 0, "unexpected byte_map_base");
+ jbyte* base = ci_card_table_address();
+ assert(base != NULL, "unexpected byte_map_base");
cardtable_start_address = base;
- cardtable_shift = CardTableModRefBS::card_shift;
+ cardtable_shift = CardTable::card_shift;
} else {
// No card mark barriers
cardtable_start_address = 0;
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -28,6 +28,7 @@
#include "oops/access.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"
class JVMCIJavaClasses : AllStatic {
public:
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadSMR.hpp"
@@ -630,6 +631,11 @@
return Handle(THREAD, (oop)result.get_jobject());
}
+Handle JVMCIRuntime::get_HotSpotJVMCIRuntime(TRAPS) {
+ initialize_JVMCI(CHECK_(Handle()));
+ return Handle(THREAD, JNIHandles::resolve_non_null(_HotSpotJVMCIRuntime_instance));
+}
+
void JVMCIRuntime::initialize_HotSpotJVMCIRuntime(TRAPS) {
guarantee(!_HotSpotJVMCIRuntime_initialized, "cannot reinitialize HotSpotJVMCIRuntime");
JVMCIRuntime::initialize_well_known_classes(CHECK);
--- a/src/hotspot/share/jvmci/jvmciRuntime.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -73,10 +73,7 @@
/**
* Gets the singleton HotSpotJVMCIRuntime instance, initializing it if necessary
*/
- static Handle get_HotSpotJVMCIRuntime(TRAPS) {
- initialize_JVMCI(CHECK_(Handle()));
- return Handle(THREAD, JNIHandles::resolve_non_null(_HotSpotJVMCIRuntime_instance));
- }
+ static Handle get_HotSpotJVMCIRuntime(TRAPS);
static jobject get_HotSpotJVMCIRuntime_jobject(TRAPS) {
initialize_JVMCI(CHECK_NULL);
--- a/src/hotspot/share/jvmci/jvmci_globals.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmci_globals.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,6 +80,15 @@
FLAG_SET_DEFAULT(EnableJVMCI, true);
}
+ if (!EnableJVMCI) {
+ // Switch off eager JVMCI initialization if JVMCI is disabled.
+ // Don't throw an error if EagerJVMCI is set, to allow testing.
+ if (EagerJVMCI) {
+ FLAG_SET_DEFAULT(EagerJVMCI, false);
+ }
+ }
+ JVMCI_FLAG_CHECKED(EagerJVMCI)
+
CHECK_NOT_SET(JVMCITraceLevel, EnableJVMCI)
CHECK_NOT_SET(JVMCICounterSize, EnableJVMCI)
CHECK_NOT_SET(JVMCICountersExcludeCompiler, EnableJVMCI)
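Because EagerJVMCI is declared experimental (see the jvmci_globals.hpp hunk below), it sits behind the experimental-options gate; an illustrative invocation, assuming a JVMCI-enabled build:

    java -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI -XX:+EagerJVMCI ...

With EnableJVMCI off, the code above silently resets EagerJVMCI to its default rather than rejecting the combination, which is what keeps shared test configurations usable.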
--- a/src/hotspot/share/jvmci/jvmci_globals.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmci_globals.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,9 @@
experimental(bool, BootstrapJVMCI, false, \
"Bootstrap JVMCI before running Java main method") \
\
+ experimental(bool, EagerJVMCI, false, \
+ "Force eager JVMCI initialization") \
+ \
experimental(bool, PrintBootstrap, true, \
"Print JVMCI bootstrap progress and summary") \
\
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -426,7 +426,7 @@
declare_constant(BitData::null_seen_flag) \
declare_constant(BranchData::not_taken_off_set) \
\
- declare_constant_with_value("CardTableModRefBS::dirty_card", CardTableModRefBS::dirty_card_val()) \
+ declare_constant_with_value("CardTable::dirty_card", CardTable::dirty_card_val()) \
\
declare_constant(CodeInstaller::VERIFIED_ENTRY) \
declare_constant(CodeInstaller::UNVERIFIED_ENTRY) \
@@ -653,7 +653,7 @@
static_field(HeapRegion, LogOfHRGrainBytes, int)
#define VM_INT_CONSTANTS_G1(declare_constant, declare_constant_with_value, declare_preprocessor_constant) \
- declare_constant_with_value("G1SATBCardTableModRefBS::g1_young_gen", G1SATBCardTableModRefBS::g1_young_card_val())
+ declare_constant_with_value("G1CardTable::g1_young_gen", G1CardTable::g1_young_card_val())
#endif // INCLUDE_ALL_GCS
--- a/src/hotspot/share/logging/logConfiguration.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logConfiguration.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -78,19 +78,25 @@
#endif
void LogConfiguration::post_initialize() {
+ // Reset the reconfigured status of all outputs
+ for (size_t i = 0; i < _n_outputs; i++) {
+ _outputs[i]->_reconfigured = false;
+ }
+
LogDiagnosticCommand::registerCommand();
Log(logging) log;
if (log.is_info()) {
log.info("Log configuration fully initialized.");
log_develop_info(logging)("Develop logging is available.");
- if (log.is_debug()) {
- LogStream debug_stream(log.debug());
- describe(&debug_stream);
- if (log.is_trace()) {
- LogStream trace_stream(log.trace());
- LogTagSet::list_all_tagsets(&trace_stream);
- }
- }
+
+ LogStream info_stream(log.info());
+ describe_available(&info_stream);
+
+ LogStream debug_stream(log.debug());
+ LogTagSet::list_all_tagsets(&debug_stream);
+
+ ConfigurationLock cl;
+ describe_current_configuration(&info_stream);
}
}
@@ -212,8 +218,9 @@
assert(idx < _n_outputs, "Invalid index, idx = " SIZE_FORMAT " and _n_outputs = " SIZE_FORMAT, idx, _n_outputs);
LogOutput* output = _outputs[idx];
- // Clear the previous config description
- output->clear_config_string();
+ output->_reconfigured = true;
+
+ size_t on_level[LogLevel::Count] = {0};
bool enabled = false;
for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
@@ -221,6 +228,7 @@
// Ignore tagsets that do not, and will not log on the output
if (!ts->has_output(output) && (level == LogLevel::NotMentioned || level == LogLevel::Off)) {
+ on_level[LogLevel::Off]++;
continue;
}
@@ -233,20 +241,18 @@
// Set the new level, if it changed
if (level != LogLevel::NotMentioned) {
ts->set_output_level(output, level);
+ } else {
+ // Look up the previously set level for this output on this tagset
+ level = ts->level_for(output);
}
if (level != LogLevel::Off) {
// Keep track of whether or not the output is ever used by some tagset
enabled = true;
+ }
- if (level == LogLevel::NotMentioned) {
- // Look up the previously set level for this output on this tagset
- level = ts->level_for(output);
- }
-
- // Update the config description with this tagset and level
- output->add_to_config_string(ts, level);
- }
+ // Keep track of the number of tag sets on each level
+ on_level[level]++;
}
// It is now safe to set the new decorators for the actual output
@@ -257,17 +263,14 @@
ts->update_decorators();
}
- if (enabled) {
- assert(strlen(output->config_string()) > 0,
- "Should always have a config description if the output is enabled.");
- } else if (idx > 1) {
- // Output is unused and should be removed.
+ if (!enabled && idx > 1) {
+ // Output is unused and should be removed, unless it is stdout/stderr (idx < 2)
delete_output(idx);
- } else {
- // Output is either stdout or stderr, which means we can't remove it.
- // Update the config description to reflect that the output is disabled.
- output->set_config_string("all=off");
+ return;
}
+
+ output->update_config_string(on_level);
+ assert(strlen(output->config_string()) > 0, "should always have a config description");
}
void LogConfiguration::disable_output(size_t idx) {
@@ -364,14 +367,24 @@
bool success = parse_log_arguments(output, what, decorators, output_options, &ss);
if (ss.size() > 0) {
- errbuf[strlen(errbuf) - 1] = '\0'; // Strip trailing newline
// If it failed, log the error. If it didn't fail, but something was written
// to the stream, log it as a warning.
- if (!success) {
- log_error(logging)("%s", ss.base());
- } else {
- log_warning(logging)("%s", ss.base());
- }
+ LogLevelType level = success ? LogLevel::Warning : LogLevel::Error;
+
+ Log(logging) log;
+ char* start = errbuf;
+ char* end = strchr(start, '\n');
+ assert(end != NULL, "line must end with newline '%s'", start);
+ do {
+ assert(start < errbuf + sizeof(errbuf) &&
+ end < errbuf + sizeof(errbuf),
+ "buffer overflow");
+ *end = '\0';
+ log.write(level, "%s", start);
+ start = end + 1;
+ end = strchr(start, '\n');
+ assert(end != NULL || *start == '\0', "line must end with newline '%s'", start);
+ } while (end != NULL);
}
os::free(copy);
@@ -436,7 +449,7 @@
return true;
}
-void LogConfiguration::describe_available(outputStream* out){
+void LogConfiguration::describe_available(outputStream* out) {
out->print("Available log levels:");
for (size_t i = 0; i < LogLevel::Count; i++) {
out->print("%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
@@ -456,11 +469,14 @@
LogTagSet::describe_tagsets(out);
}
-void LogConfiguration::describe_current_configuration(outputStream* out){
+void LogConfiguration::describe_current_configuration(outputStream* out) {
out->print_cr("Log output configuration:");
for (size_t i = 0; i < _n_outputs; i++) {
- out->print("#" SIZE_FORMAT ": ", i);
+ out->print(" #" SIZE_FORMAT ": ", i);
_outputs[i]->describe(out);
+ if (_outputs[i]->is_reconfigured()) {
+ out->print(" (reconfigured)");
+ }
out->cr();
}
}
@@ -471,68 +487,89 @@
describe_current_configuration(out);
}
-void LogConfiguration::print_command_line_help(FILE* out) {
- jio_fprintf(out, "-Xlog Usage: -Xlog[:[what][:[output][:[decorators][:output-options]]]]\n"
- "\t where 'what' is a combination of tags and levels of the form tag1[+tag2...][*][=level][,...]\n"
- "\t Unless wildcard (*) is specified, only log messages tagged with exactly the tags specified will be matched.\n\n");
+void LogConfiguration::print_command_line_help(outputStream* out) {
+ out->print_cr("-Xlog Usage: -Xlog[:[selections][:[output][:[decorators][:output-options]]]]");
+ out->print_cr("\t where 'selections' are combinations of tags and levels of the form tag1[+tag2...][*][=level][,...]");
+ out->print_cr("\t NOTE: Unless wildcard (*) is specified, only log messages tagged with exactly the tags specified will be matched.");
+ out->cr();
- jio_fprintf(out, "Available log levels:\n");
+ out->print_cr("Available log levels:");
for (size_t i = 0; i < LogLevel::Count; i++) {
- jio_fprintf(out, "%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
+ out->print("%s %s", (i == 0 ? "" : ","), LogLevel::name(static_cast<LogLevelType>(i)));
}
+ out->cr();
+ out->cr();
- jio_fprintf(out, "\n\nAvailable log decorators: \n");
+ out->print_cr("Available log decorators: ");
for (size_t i = 0; i < LogDecorators::Count; i++) {
LogDecorators::Decorator d = static_cast<LogDecorators::Decorator>(i);
- jio_fprintf(out, "%s %s (%s)", (i == 0 ? "" : ","), LogDecorators::name(d), LogDecorators::abbreviation(d));
+ out->print("%s %s (%s)", (i == 0 ? "" : ","), LogDecorators::name(d), LogDecorators::abbreviation(d));
}
- jio_fprintf(out, "\n Decorators can also be specified as 'none' for no decoration.\n\n");
+ out->cr();
+ out->print_cr(" Decorators can also be specified as 'none' for no decoration.");
+ out->cr();
- fileStream stream(out, false);
- stream.print_cr("Available log tags:");
- LogTag::list_tags(&stream);
- stream.print_cr(" Specifying 'all' instead of a tag combination matches all tag combinations.");
- stream.cr();
+ out->print_cr("Available log tags:");
+ LogTag::list_tags(out);
+ out->print_cr(" Specifying 'all' instead of a tag combination matches all tag combinations.");
+ out->cr();
- LogTagSet::describe_tagsets(&stream);
+ LogTagSet::describe_tagsets(out);
- jio_fprintf(out, "\nAvailable log outputs:\n"
- " stdout, stderr, file=<filename>\n"
- " Specifying %%p and/or %%t in the filename will expand to the JVM's PID and startup timestamp, respectively.\n\n"
+ out->print_cr("\nAvailable log outputs:");
+ out->print_cr(" stdout/stderr");
+ out->print_cr(" file=<filename>");
+ out->print_cr(" If the filename contains %%p and/or %%t, they will expand to the JVM's PID and startup timestamp, respectively.");
+ out->print_cr(" Additional output-options for file outputs:");
+ out->print_cr(" filesize=.. - Target byte size for log rotation (supports K/M/G suffix)."
+ " If set to 0, log rotation will not trigger automatically,"
+ " but can be performed manually (see the VM.log DCMD).");
+ out->print_cr(" filecount=.. - Number of files to keep in rotation (not counting the active file)."
+ " If set to 0, log rotation is disabled."
+ " This will cause existing log files to be overwritten.");
+ out->cr();
- "Some examples:\n"
- " -Xlog\n"
- "\t Log all messages using 'info' level to stdout with 'uptime', 'levels' and 'tags' decorations.\n"
- "\t (Equivalent to -Xlog:all=info:stdout:uptime,levels,tags).\n\n"
-
- " -Xlog:gc\n"
- "\t Log messages tagged with 'gc' tag using 'info' level to stdout, with default decorations.\n\n"
+ out->print_cr("Some examples:");
+ out->print_cr(" -Xlog");
+ out->print_cr("\t Log all messages up to 'info' level to stdout with 'uptime', 'levels' and 'tags' decorations.");
+ out->print_cr("\t (Equivalent to -Xlog:all=info:stdout:uptime,levels,tags).");
+ out->cr();
- " -Xlog:gc,safepoint\n"
- "\t Log messages tagged either with 'gc' or 'safepoint' tags, both using 'info' level, to stdout, with default decorations.\n"
- "\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)\n\n"
+ out->print_cr(" -Xlog:gc");
+ out->print_cr("\t Log messages tagged with 'gc' tag up to 'info' level to stdout, with default decorations.");
+ out->cr();
+
+ out->print_cr(" -Xlog:gc,safepoint");
+ out->print_cr("\t Log messages tagged either with 'gc' or 'safepoint' tags, both up to 'info' level, to stdout, with default decorations.");
+ out->print_cr("\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)");
+ out->cr();
- " -Xlog:gc+ref=debug\n"
- "\t Log messages tagged with both 'gc' and 'ref' tags, using 'debug' level, to stdout, with default decorations.\n"
- "\t (Messages tagged only with one of the two tags will not be logged.)\n\n"
+ out->print_cr(" -Xlog:gc+ref=debug");
+ out->print_cr("\t Log messages tagged with both 'gc' and 'ref' tags, up to 'debug' level, to stdout, with default decorations.");
+ out->print_cr("\t (Messages tagged only with one of the two tags will not be logged.)");
+ out->cr();
- " -Xlog:gc=debug:file=gc.txt:none\n"
- "\t Log messages tagged with 'gc' tag using 'debug' level to file 'gc.txt' with no decorations.\n\n"
+ out->print_cr(" -Xlog:gc=debug:file=gc.txt:none");
+ out->print_cr("\t Log messages tagged with 'gc' tag up to 'debug' level to file 'gc.txt' with no decorations.");
+ out->cr();
- " -Xlog:gc=trace:file=gctrace.txt:uptimemillis,pids:filecount=5,filesize=1m\n"
- "\t Log messages tagged with 'gc' tag using 'trace' level to a rotating fileset of 5 files of size 1MB,\n"
- "\t using the base name 'gctrace.txt', with 'uptimemillis' and 'pid' decorations.\n\n"
+ out->print_cr(" -Xlog:gc=trace:file=gctrace.txt:uptimemillis,pids:filecount=5,filesize=1m");
+ out->print_cr("\t Log messages tagged with 'gc' tag up to 'trace' level to a rotating fileset of 5 files of size 1MB,");
+ out->print_cr("\t using the base name 'gctrace.txt', with 'uptimemillis' and 'pid' decorations.");
+ out->cr();
- " -Xlog:gc::uptime,tid\n"
- "\t Log messages tagged with 'gc' tag using 'info' level to output 'stdout', using 'uptime' and 'tid' decorations.\n\n"
+ out->print_cr(" -Xlog:gc::uptime,tid");
+ out->print_cr("\t Log messages tagged with 'gc' tag up to 'info' level to output 'stdout', using 'uptime' and 'tid' decorations.");
+ out->cr();
- " -Xlog:gc*=info,safepoint*=off\n"
- "\t Log messages tagged with at least 'gc' using 'info' level, but turn off logging of messages tagged with 'safepoint'.\n"
- "\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)\n\n"
+ out->print_cr(" -Xlog:gc*=info,safepoint*=off");
+ out->print_cr("\t Log messages tagged with at least 'gc' up to 'info' level, but turn off logging of messages tagged with 'safepoint'.");
+ out->print_cr("\t (Messages tagged with both 'gc' and 'safepoint' will not be logged.)");
+ out->cr();
- " -Xlog:disable -Xlog:safepoint=trace:safepointtrace.txt\n"
- "\t Turn off all logging, including warnings and errors,\n"
- "\t and then enable messages tagged with 'safepoint' using 'trace' level to file 'safepointtrace.txt'.\n");
+ out->print_cr(" -Xlog:disable -Xlog:safepoint=trace:safepointtrace.txt");
+ out->print_cr("\t Turn off all logging, including warnings and errors,");
+ out->print_cr("\t and then enable messages tagged with 'safepoint' up to 'trace' level to file 'safepointtrace.txt'.");
}
void LogConfiguration::rotate_all_outputs() {
--- a/src/hotspot/share/logging/logConfiguration.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logConfiguration.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -38,6 +38,7 @@
// are iterated over and updated accordingly.
class LogConfiguration : public AllStatic {
friend class VMError;
+ friend class LogTestFixture;
public:
// Function for listeners
typedef void (*UpdateListenerFunction)(void);
@@ -118,7 +119,7 @@
static void describe(outputStream* out);
// Prints usage help for command line log configuration.
- static void print_command_line_help(FILE* out);
+ static void print_command_line_help(outputStream* out);
// Rotates all LogOutput
static void rotate_all_outputs();
--- a/src/hotspot/share/logging/logFileOutput.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logFileOutput.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -169,6 +169,7 @@
char* equals_pos = strchr(pos, '=');
if (equals_pos == NULL) {
+ errstream->print_cr("Invalid option '%s' for log file output.", pos);
success = false;
break;
}
--- a/src/hotspot/share/logging/logFileOutput.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logFileOutput.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -85,7 +85,7 @@
virtual int write(const LogDecorations& decorations, const char* msg);
virtual int write(LogMessageBuffer::Iterator msg_iterator);
virtual void force_rotate();
- virtual void describe(outputStream *out);
+ virtual void describe(outputStream* out);
virtual const char* name() const {
return _name;
--- a/src/hotspot/share/logging/logLevel.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logLevel.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "logging/logLevel.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/stringUtils.hpp"
const char* LogLevel::_name[] = {
"off",
@@ -40,3 +41,19 @@
}
return Invalid;
}
+
+LogLevelType LogLevel::fuzzy_match(const char *level) {
+ size_t len = strlen(level);
+ LogLevelType match = LogLevel::Invalid;
+ double best = 0.4; // required similarity to be considered a match
+ for (uint i = 1; i < Count; i++) {
+ LogLevelType cur = static_cast<LogLevelType>(i);
+ const char* levelname = LogLevel::name(cur);
+ double score = StringUtils::similarity(level, len, levelname, strlen(levelname));
+ if (score >= best) {
+ match = cur;
+ best = score;
+ }
+ }
+ return match;
+}
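With the 0.4 similarity floor above, a near-miss level name should be caught while unrelated input stays Invalid. A hedged usage sketch (tty and the HotSpot-internal headers are assumed; the exact score depends on StringUtils::similarity):

    // Illustrative only: "debg" shares leading bigrams with "debug",
    // which should clear the 0.4 threshold.
    LogLevelType match = LogLevel::fuzzy_match("debg");
    if (match != LogLevel::Invalid) {
      tty->print_cr("Did you mean '%s'?", LogLevel::name(match)); // expects "debug"
    }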
--- a/src/hotspot/share/logging/logLevel.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logLevel.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -71,6 +71,7 @@
}
static LogLevel::type from_string(const char* str);
+ static LogLevel::type fuzzy_match(const char *level);
private:
static const char* _name[];
--- a/src/hotspot/share/logging/logMessageBuffer.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logMessageBuffer.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@
va_list copy;
va_copy(copy, args);
- written += (size_t)os::log_vsnprintf(current_buffer_position, remaining_buffer_length, fmt, copy) + 1;
+ written += (size_t)os::vsnprintf(current_buffer_position, remaining_buffer_length, fmt, copy) + 1;
va_end(copy);
if (written > _message_buffer_capacity - _message_buffer_size) {
assert(attempts == 0, "Second attempt should always have a sufficiently large buffer (resized to fit).");
--- a/src/hotspot/share/logging/logOutput.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logOutput.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,10 @@
*/
#include "precompiled.hpp"
#include "jvm.h"
+#include "logging/log.hpp"
#include "logging/logFileStreamOutput.hpp"
#include "logging/logOutput.hpp"
+#include "logging/logSelection.hpp"
#include "logging/logTagSet.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutexLocker.hpp"
@@ -34,11 +36,23 @@
os::free(_config_string);
}
-void LogOutput::clear_config_string() {
- os::free(_config_string);
- _config_string_buffer_size = InitialConfigBufferSize;
- _config_string = NEW_C_HEAP_ARRAY(char, _config_string_buffer_size, mtLogging);
- _config_string[0] = '\0';
+void LogOutput::describe(outputStream *out) {
+ out->print("%s ", name());
+ out->print_raw(config_string()); // raw printed because length might exceed O_BUFLEN
+
+ bool has_decorator = false;
+ char delimiter = ' ';
+ for (size_t d = 0; d < LogDecorators::Count; d++) {
+ LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
+ if (decorators().is_decorator(decorator)) {
+ has_decorator = true;
+ out->print("%c%s", delimiter, LogDecorators::name(decorator));
+ delimiter = ',';
+ }
+ }
+ if (!has_decorator) {
+ out->print(" none");
+ }
}
void LogOutput::set_config_string(const char* string) {
@@ -47,7 +61,7 @@
_config_string_buffer_size = strlen(_config_string) + 1;
}
-void LogOutput::add_to_config_string(const LogTagSet* ts, LogLevelType level) {
+void LogOutput::add_to_config_string(const LogSelection& selection) {
if (_config_string_buffer_size < InitialConfigBufferSize) {
_config_string_buffer_size = InitialConfigBufferSize;
_config_string = REALLOC_C_HEAP_ARRAY(char, _config_string, _config_string_buffer_size, mtLogging);
@@ -60,7 +74,8 @@
}
for (;;) {
- int ret = ts->label(_config_string + offset, _config_string_buffer_size - offset, "+");
+ int ret = selection.describe(_config_string + offset,
+ _config_string_buffer_size - offset);
if (ret == -1) {
// Double the buffer size and retry
_config_string_buffer_size *= 2;
@@ -69,30 +84,257 @@
}
break;
};
+}
- offset = strlen(_config_string);
- for (;;) {
- int ret = jio_snprintf(_config_string + offset, _config_string_buffer_size - offset, "=%s", LogLevel::name(level));
- if (ret == -1) {
- _config_string_buffer_size *= 2;
- _config_string = REALLOC_C_HEAP_ARRAY(char, _config_string, _config_string_buffer_size, mtLogging);
+
+static int tag_cmp(const void *a, const void *b) {
+ return *static_cast<const LogTagType*>(a) - *static_cast<const LogTagType*>(b);
+}
+
+static void sort_tags(LogTagType tags[LogTag::MaxTags]) {
+ size_t ntags = 0;
+ while (tags[ntags] != LogTag::__NO_TAG) {
+ ntags++;
+ }
+ qsort(tags, ntags, sizeof(*tags), tag_cmp);
+}
+
+static const size_t MaxSubsets = 1 << LogTag::MaxTags;
+
+// Fill result with all possible subsets of the given tag set. Empty set not included.
+// For example, if tags is {gc, heap} then the result is {{gc}, {heap}, {gc, heap}}.
+// (Arguments with default values are intended exclusively for recursive calls.)
+static void generate_all_subsets_of(LogTagType result[MaxSubsets][LogTag::MaxTags],
+ size_t* result_size,
+ const LogTagType tags[LogTag::MaxTags],
+ LogTagType subset[LogTag::MaxTags] = NULL,
+ const size_t subset_size = 0,
+ const size_t depth = 0) {
+ assert(subset_size <= LogTag::MaxTags, "subset must never have more than MaxTags tags");
+ assert(depth <= LogTag::MaxTags, "recursion depth overflow");
+
+ if (subset == NULL) {
+ assert(*result_size == 0, "outer (non-recursive) call expects result_size to be 0");
+ // Make subset the first element in the result array initially
+ subset = result[0];
+ }
+ assert((void*) subset >= &result[0] && (void*) subset <= &result[MaxSubsets - 1],
+ "subset should always point to element in result");
+
+ if (depth == LogTag::MaxTags || tags[depth] == LogTag::__NO_TAG) {
+ if (subset_size == 0) {
+ // Ignore empty subset
+ return;
+ }
+ if (subset_size != LogTag::MaxTags) {
+ subset[subset_size] = LogTag::__NO_TAG;
+ }
+ assert(*result_size < MaxSubsets, "subsets overflow");
+ *result_size += 1;
+
+ // Bump subset and copy over current state
+ memcpy(result[*result_size], subset, sizeof(*subset) * LogTag::MaxTags);
+ subset = result[*result_size];
+ return;
+ }
+
+ // Recurse, excluding the tag of the current depth
+ generate_all_subsets_of(result, result_size, tags, subset, subset_size, depth + 1);
+ // ... and with it included
+ subset[subset_size] = tags[depth];
+ generate_all_subsets_of(result, result_size, tags, subset, subset_size + 1, depth + 1);
+}
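A set of n tags yields 2^n - 1 non-empty subsets, so with LogTag::MaxTags at 5 the recursion produces at most 31 entries; MaxSubsets == 32 leaves exactly one spare row for the scratch subset that the memcpy above bumps forward after each completed entry.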
+
+// Generate all possible selections (for the given level) based on the given tag set,
+// and add them to the selections array (growing it as necessary).
+static void add_selections(LogSelection** selections,
+ size_t* n_selections,
+ size_t* selections_cap,
+ const LogTagSet& tagset,
+ LogLevelType level) {
+ LogTagType tags[LogTag::MaxTags] = { LogTag::__NO_TAG };
+ for (size_t i = 0; i < tagset.ntags(); i++) {
+ tags[i] = tagset.tag(i);
+ }
+
+ size_t n_subsets = 0;
+ LogTagType subsets[MaxSubsets][LogTag::MaxTags];
+ generate_all_subsets_of(subsets, &n_subsets, tags);
+
+ for (size_t i = 0; i < n_subsets; i++) {
+ // Always keep tags sorted
+ sort_tags(subsets[i]);
+
+ // Ignore subsets already represented in selections
+ bool unique = true;
+ for (size_t sel = 0; sel < *n_selections; sel++) {
+ if (level == (*selections)[sel].level() && (*selections)[sel].consists_of(subsets[i])) {
+ unique = false;
+ break;
+ }
+ }
+ if (!unique) {
continue;
}
- break;
- }
-}
+
+ LogSelection exact_selection(subsets[i], false, level);
+ LogSelection wildcard_selection(subsets[i], true, level);
+
+ // Check if the two selections match any tag sets
+ bool wildcard_match = false;
+ bool exact_match = false;
+ for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
+ if (!wildcard_selection.selects(*ts)) {
+ continue;
+ }
-void LogOutput::describe(outputStream *out) {
- out->print("%s ", name());
- out->print_raw(config_string());
- out->print(" ");
- char delimiter[2] = {0};
- for (size_t d = 0; d < LogDecorators::Count; d++) {
- LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
- if (decorators().is_decorator(decorator)) {
- out->print("%s%s", delimiter, LogDecorators::name(decorator));
- *delimiter = ',';
+ wildcard_match = true;
+ if (exact_selection.selects(*ts)) {
+ exact_match = true;
+ }
+ if (exact_match) {
+ break;
+ }
+ }
+
+ if (!wildcard_match && !exact_match) {
+ continue;
+ }
+
+ // Ensure there's enough room for both wildcard_match and exact_match
+ if (*n_selections + 2 > *selections_cap) {
+ *selections_cap *= 2;
+ *selections = REALLOC_C_HEAP_ARRAY(LogSelection, *selections, *selections_cap, mtLogging);
+ }
+
+ // Add found matching selections to the result array
+ if (exact_match) {
+ (*selections)[(*n_selections)++] = exact_selection;
+ }
+ if (wildcard_match) {
+ (*selections)[(*n_selections)++] = wildcard_selection;
}
}
}
+void LogOutput::update_config_string(const size_t on_level[LogLevel::Count]) {
+ // Find the most common level (MCL)
+ LogLevelType mcl = LogLevel::Off;
+ size_t max = on_level[LogLevel::Off];
+ for (LogLevelType l = LogLevel::First; l <= LogLevel::Last; l = static_cast<LogLevelType>(l + 1)) {
+ if (on_level[l] > max) {
+ mcl = l;
+ max = on_level[l];
+ }
+ }
+
+ // Always let the first part of each output's config string be "all=<MCL>"
+ {
+ char buf[64];
+ jio_snprintf(buf, sizeof(buf), "all=%s", LogLevel::name(mcl));
+ set_config_string(buf);
+ }
+
+ // If there are no deviating tag sets, we're done
+ size_t deviating_tagsets = LogTagSet::ntagsets() - max;
+ if (deviating_tagsets == 0) {
+ return;
+ }
+
+ size_t n_selections = 0;
+ size_t selections_cap = 4 * MaxSubsets; // Start with some reasonably large initial capacity
+ LogSelection* selections = NEW_C_HEAP_ARRAY(LogSelection, selections_cap, mtLogging);
+
+ size_t n_deviates = 0;
+ const LogTagSet** deviates = NEW_C_HEAP_ARRAY(const LogTagSet*, deviating_tagsets, mtLogging);
+
+ // Generate all possible selections involving the deviating tag sets
+ for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
+ LogLevelType level = ts->level_for(this);
+ if (level == mcl) {
+ continue;
+ }
+ deviates[n_deviates++] = ts;
+ add_selections(&selections, &n_selections, &selections_cap, *ts, level);
+ }
+
+ // Reduce deviates greedily, using the "best" selection at each step to reduce the number of deviating tag sets
+ while (n_deviates > 0) {
+ size_t prev_deviates = n_deviates;
+ int max_score = 0;
+ const LogSelection* best_selection = NULL;
+ for (size_t i = 0; i < n_selections; i++) {
+
+ // Give the selection a score based on how many deviating tag sets it selects (with correct level)
+ int score = 0;
+ for (size_t d = 0; d < n_deviates; d++) {
+ if (selections[i].selects(*deviates[d]) && deviates[d]->level_for(this) == selections[i].level()) {
+ score++;
+ }
+ }
+
+ // Ignore selections with lower score than the current best even before subtracting mismatched selections
+ if (score < max_score) {
+ continue;
+ }
+
+ // Subtract from the score the number of tag sets it selects with an incorrect level
+ for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
+ if (selections[i].selects(*ts) && ts->level_for(this) != selections[i].level()) {
+ score--;
+ }
+ }
+
+ // Pick the selection with the best score, or in the case of a tie, the one with fewest tags
+ if (score > max_score ||
+ (score == max_score && best_selection != NULL && selections[i].ntags() < best_selection->ntags())) {
+ max_score = score;
+ best_selection = &selections[i];
+ }
+ }
+
+ assert(best_selection != NULL, "must always find a maximal selection");
+ add_to_config_string(*best_selection);
+
+ // Remove all deviates that this selection covered
+ for (size_t d = 0; d < n_deviates;) {
+ if (deviates[d]->level_for(this) == best_selection->level() && best_selection->selects(*deviates[d])) {
+ deviates[d] = deviates[--n_deviates];
+ continue;
+ }
+ d++;
+ }
+
+ // Add back any new deviates that this selection added (no array growth since removed > added)
+ for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
+ if (ts->level_for(this) == best_selection->level() || !best_selection->selects(*ts)) {
+ continue;
+ }
+
+ bool already_added = false;
+ for (size_t dev = 0; dev < n_deviates; dev++) {
+ if (deviates[dev] == ts) {
+ already_added = true;
+ break;
+ }
+ }
+ if (already_added) {
+ continue;
+ }
+
+ deviates[n_deviates++] = ts;
+ }
+
+ // Reset the selections and generate new ones based on the updated deviating tag sets
+ n_selections = 0;
+ for (size_t d = 0; d < n_deviates; d++) {
+ add_selections(&selections, &n_selections, &selections_cap, *deviates[d], deviates[d]->level_for(this));
+ }
+
+ assert(n_deviates < deviating_tagsets, "deviating tag set array overflow");
+ assert(prev_deviates > n_deviates, "number of deviating tag sets must never grow");
+ }
+ FREE_C_HEAP_ARRAY(LogTagSet*, deviates);
+ FREE_C_HEAP_ARRAY(LogSelection, selections);
+}
+
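As a concrete, hypothetical illustration of the greedy pass: suppose every tag set on an output logs at info except gc and gc+heap, both at debug, and no other tag set contains the gc tag. The most common level yields "all=info"; the wildcard candidate gc*=debug then scores 2 with no mismatch penalty, covers both deviates in a single step, and the final config string reads "all=info,gc*=debug".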
--- a/src/hotspot/share/logging/logOutput.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logOutput.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
class LogDecorations;
class LogMessageBuffer;
+class LogSelection;
class LogTagSet;
// The base class/interface for log outputs.
@@ -43,19 +44,27 @@
private:
static const size_t InitialConfigBufferSize = 256;
+
+ // Track if the output has been reconfigured dynamically during runtime.
+ // The status is set each time the configuration of the output is modified,
+ // and is reset once after logging initialization is complete.
+ bool _reconfigured;
+
char* _config_string;
size_t _config_string_buffer_size;
+ // Adds the log selection to the config description (e.g. "tag1+tag2*=level").
+ void add_to_config_string(const LogSelection& selection);
+
protected:
LogDecorators _decorators;
- // Clears any previous config description in preparation of reconfiguration.
- void clear_config_string();
- // Adds the tagset on the given level to the config description (e.g. "tag1+tag2=level").
- void add_to_config_string(const LogTagSet* ts, LogLevelType level);
// Replaces the current config description with the given string.
void set_config_string(const char* string);
+ // Update the config string for this output to reflect its current configuration
+ void update_config_string(const size_t on_level[LogLevel::Count]);
+
public:
void set_decorators(const LogDecorators &decorators) {
_decorators = decorators;
@@ -65,11 +74,15 @@
return _decorators;
}
+ bool is_reconfigured() const {
+ return _reconfigured;
+ }
+
const char* config_string() const {
return _config_string;
}
- LogOutput() : _config_string(NULL), _config_string_buffer_size(0) {
+ LogOutput() : _reconfigured(false), _config_string(NULL), _config_string_buffer_size(0) {
}
virtual ~LogOutput();
--- a/src/hotspot/share/logging/logSelection.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logSelection.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -23,11 +23,13 @@
*/
#include "precompiled.hpp"
#include "utilities/ostream.hpp"
+#include "logging/log.hpp"
#include "logging/logSelection.hpp"
#include "logging/logTagSet.hpp"
#include "runtime/os.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
+#include "utilities/quickSort.hpp"
const LogSelection LogSelection::Invalid;
@@ -72,10 +74,16 @@
LogLevelType level = LogLevel::Unspecified;
char* equals = strchr(str, '=');
if (equals != NULL) {
- level = LogLevel::from_string(equals + 1);
+ const char* levelstr = equals + 1;
+ level = LogLevel::from_string(levelstr);
if (level == LogLevel::Invalid) {
if (errstream != NULL) {
- errstream->print_cr("Invalid level '%s' in log selection.", equals + 1);
+ errstream->print("Invalid level '%s' in log selection.", levelstr);
+ LogLevelType match = LogLevel::fuzzy_match(levelstr);
+ if (match != LogLevel::Invalid) {
+ errstream->print(" Did you mean '%s'?", LogLevel::name(match));
+ }
+ errstream->cr();
}
return LogSelection::Invalid;
}
@@ -109,7 +117,12 @@
LogTagType tag = LogTag::from_string(cur_tag);
if (tag == LogTag::__NO_TAG) {
if (errstream != NULL) {
- errstream->print_cr("Invalid tag '%s' in log selection.", cur_tag);
+ errstream->print("Invalid tag '%s' in log selection.", cur_tag);
+ LogTagType match = LogTag::fuzzy_match(cur_tag);
+ if (match != LogTag::__NO_TAG) {
+ errstream->print(" Did you mean '%s'?", LogTag::name(match));
+ }
+ errstream->cr();
}
return LogSelection::Invalid;
}
@@ -157,6 +170,25 @@
return true;
}
+static bool contains(LogTagType tag, const LogTagType tags[LogTag::MaxTags], size_t ntags) {
+ for (size_t i = 0; i < ntags; i++) {
+ if (tags[i] == tag) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool LogSelection::consists_of(const LogTagType tags[LogTag::MaxTags]) const {
+ size_t i;
+ for (i = 0; i < LogTag::MaxTags && tags[i] != LogTag::__NO_TAG; i++) {
+ if (!contains(tags[i], _tags, _ntags)) {
+ return false;
+ }
+ }
+ return i == _ntags;
+}
+
size_t LogSelection::ntags() const {
return _ntags;
}
@@ -200,3 +232,120 @@
tot_written += written;
return tot_written;
}
+
+double LogSelection::similarity(const LogSelection& other) const {
+ // Compute Soerensen-Dice coefficient as the similarity measure
+ size_t intersecting = 0;
+ for (size_t i = 0; i < _ntags; i++) {
+ for (size_t j = 0; j < other._ntags; j++) {
+ if (_tags[i] == other._tags[j]) {
+ intersecting++;
+ break;
+ }
+ }
+ }
+ return 2.0 * intersecting / (_ntags + other._ntags);
+}
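In set terms this is DSC(A, B) = 2|A ∩ B| / (|A| + |B|). For example, for the tag selections {gc, ref} and {gc, heap} the intersection is {gc}, giving 2 * 1 / (2 + 2) = 0.5.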
+
+// Comparator used for sorting LogSelections based on their similarity to a specific LogSelection.
+// A negative return value means that 'a' is more similar to 'ref' than 'b' is, while a positive
+// return value means that 'b' is more similar.
+// For the sake of giving short and effective suggestions, when two selections have an equal
+// similarity score, the selection with the fewer tags (selecting the most tag sets) is considered
+// more similar.
+class SimilarityComparator {
+ const LogSelection& _ref;
+ public:
+ SimilarityComparator(const LogSelection& ref) : _ref(ref) {
+ }
+ int operator()(const LogSelection& a, const LogSelection& b) const {
+ const double epsilon = 1.0e-6;
+
+ // Sort by similarity (descending)
+ double s = _ref.similarity(b) - _ref.similarity(a);
+ if (fabs(s) > epsilon) {
+ return s < 0 ? -1 : 1;
+ }
+
+ // Then by number of tags (ascending)
+ int t = static_cast<int>(a.ntags()) - static_cast<int>(b.ntags());
+ if (t != 0) {
+ return t;
+ }
+
+ // Lastly by tag sets selected (descending)
+ return static_cast<int>(b.tag_sets_selected() - a.tag_sets_selected());
+ }
+};
+
+static const size_t suggestion_cap = 5;
+static const double similarity_requirement = 0.3;
+void LogSelection::suggest_similar_matching(outputStream* out) const {
+ LogSelection suggestions[suggestion_cap];
+ uint nsuggestions = 0;
+
+ // See if simply adding a wildcard would make the selection match
+ if (!_wildcard) {
+ LogSelection sel(_tags, true, _level);
+ if (sel.tag_sets_selected() > 0) {
+ suggestions[nsuggestions++] = sel;
+ }
+ }
+
+ // Check for matching tag sets that differ by a single tag (one tag too many or one too few)
+ for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
+ LogTagType tags[LogTag::MaxTags] = { LogTag::__NO_TAG };
+ for (size_t i = 0; i < ts->ntags(); i++) {
+ tags[i] = ts->tag(i);
+ }
+
+ // Suggest wildcard selection unless the wildcard doesn't match anything extra
+ LogSelection sel(tags, true, _level);
+ if (sel.tag_sets_selected() == 1) {
+ sel = LogSelection(tags, false, _level);
+ }
+
+ double score = similarity(sel);
+
+ // Ignore suggestions with too low similarity
+ if (score < similarity_requirement) {
+ continue;
+ }
+
+ // Cap not reached, simply add the new suggestion and continue searching
+ if (nsuggestions < suggestion_cap) {
+ suggestions[nsuggestions++] = sel;
+ continue;
+ }
+
+ // Find the least matching suggestion already found, and if the new suggestion is a better match, replace it
+ double min = 1.0;
+ size_t pos = -1;
+ for (size_t i = 0; i < nsuggestions; i++) {
+ double score = similarity(suggestions[i]);
+ if (score < min) {
+ min = score;
+ pos = i;
+ }
+ }
+ if (score > min) {
+ suggestions[pos] = sel;
+ }
+ }
+
+ if (nsuggestions == 0) {
+ // Found no similar enough selections to suggest.
+ return;
+ }
+
+ // Sort found suggestions to suggest the best one first
+ SimilarityComparator sc(*this);
+ QuickSort::sort(suggestions, nsuggestions, sc, false);
+
+ out->print("Did you mean any of the following?");
+ for (size_t i = 0; i < nsuggestions; i++) {
+ char buf[128];
+ suggestions[i].describe_tags(buf, sizeof(buf));
+ out->print(" %s", buf);
+ }
+}
--- a/src/hotspot/share/logging/logSelection.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logSelection.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -59,9 +59,16 @@
size_t tag_sets_selected() const;
bool selects(const LogTagSet& ts) const;
+ bool consists_of(const LogTagType tags[LogTag::MaxTags]) const;
int describe_tags(char* buf, size_t bufsize) const;
int describe(char* buf, size_t bufsize) const;
+
+ // List similar selections that match existing tag sets on the given outputStream
+ void suggest_similar_matching(outputStream* out) const;
+
+ // Compute a similarity measure in the range [0, 1], where higher means more similar
+ double similarity(const LogSelection& other) const;
};
#endif // SHARE_VM_LOGGING_LOGSELECTION_HPP
--- a/src/hotspot/share/logging/logSelectionList.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logSelectionList.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -40,19 +40,17 @@
return false;
}
- if (valid) {
- out->print("No tag set matches selection(s):");
- }
+ out->print("No tag set matches selection:");
valid = false;
char buf[256];
_selections[i].describe_tags(buf, sizeof(buf));
- out->print(" %s", buf);
+ out->print(" %s. ", buf);
+
+ _selections[i].suggest_similar_matching(out);
+ out->cr();
}
}
- if (!valid && out != NULL) {
- out->cr();
- }
return valid;
}
--- a/src/hotspot/share/logging/logTag.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logTag.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "logging/logTag.hpp"
+#include "utilities/stringUtils.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
@@ -43,6 +44,22 @@
return __NO_TAG;
}
+LogTagType LogTag::fuzzy_match(const char *str) {
+ size_t len = strlen(str);
+ LogTagType match = LogTag::__NO_TAG;
+ double best = 0.5; // required similarity to be considered a match
+ for (size_t i = 1; i < LogTag::Count; i++) {
+ LogTagType tag = static_cast<LogTagType>(i);
+ const char* tagname = LogTag::name(tag);
+ double score = StringUtils::similarity(tagname, strlen(tagname), str, len);
+ if (score >= best) {
+ match = tag;
+ best = score;
+ }
+ }
+ return match;
+}
+
static int cmp_logtag(LogTagType a, LogTagType b) {
return strcmp(LogTag::name(a), LogTag::name(b));
}
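
LogTag::fuzzy_match above delegates the scoring to StringUtils::similarity; judging from the str_similar implementation removed from globals.cpp later in this change, that measure is Dice's coefficient over character bigrams. A standalone sketch of it, under that assumption:

    #include <cstddef>

    // Dice's coefficient over character bigrams: 2 * hits / (len1 + len2).
    // Returns a value in [0, 1]; higher means more similar.
    static double dice_similarity(const char* s1, size_t len1,
                                  const char* s2, size_t len2) {
      int hits = 0;
      for (size_t i = 0; i + 1 < len1; i++) {
        for (size_t j = 0; j + 1 < len2; j++) {
          if (s1[i] == s2[j] && s1[i + 1] == s2[j + 1]) { hits++; break; }
        }
      }
      return 2.0 * hits / (double)(len1 + len2);
    }

With the 0.5 threshold in fuzzy_match, a misspelling like "compilaton" still finds "compilation" (score about 0.76 under this sketch), while a one-character typo in a two-letter tag such as "gc" shares no bigram and scores 0.
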
--- a/src/hotspot/share/logging/logTag.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logTag.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -192,6 +192,7 @@
}
static LogTag::type from_string(const char *str);
+ static LogTag::type fuzzy_match(const char *tag);
static void list_tags(outputStream* out);
private:
--- a/src/hotspot/share/logging/logTagSet.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/logging/logTagSet.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -118,17 +118,17 @@
// Check that string fits in buffer; resize buffer if necessary
int ret;
if (prefix_len < vwrite_buffer_size) {
- ret = os::log_vsnprintf(buf + prefix_len, sizeof(buf) - prefix_len, fmt, args);
+ ret = os::vsnprintf(buf + prefix_len, sizeof(buf) - prefix_len, fmt, args);
} else {
// Buffer too small. Just call printf to find out the length for realloc below.
- ret = os::log_vsnprintf(buf, sizeof(buf), fmt, args);
+ ret = os::vsnprintf(buf, sizeof(buf), fmt, args);
}
assert(ret >= 0, "Log message buffer issue");
if ((size_t)ret >= sizeof(buf)) {
size_t newbuf_len = prefix_len + ret + 1;
char* newbuf = NEW_C_HEAP_ARRAY(char, newbuf_len, mtLogging);
prefix_len = _write_prefix(newbuf, newbuf_len);
- ret = os::log_vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args);
+ ret = os::vsnprintf(newbuf + prefix_len, newbuf_len - prefix_len, fmt, saved_args);
assert(ret >= 0, "Log message buffer issue");
log(level, newbuf);
FREE_C_HEAP_ARRAY(char, newbuf);
@@ -141,7 +141,7 @@
static const size_t TagSetBufferSize = 128;
void LogTagSet::describe_tagsets(outputStream* out) {
- out->print_cr("Described tag combinations:");
+ out->print_cr("Described tag sets:");
for (const LogTagSetDescription* d = tagset_descriptions; d->tagset != NULL; d++) {
char buf[TagSetBufferSize];
d->tagset->label(buf, sizeof(buf), "+");
@@ -169,7 +169,7 @@
qsort(tagset_labels, _ntagsets, sizeof(*tagset_labels), qsort_strcmp);
// Print and then free the labels
- out->print("All available tag sets: ");
+ out->print("Available tag sets: ");
for (idx = 0; idx < _ntagsets; idx++) {
out->print("%s%s", (idx == 0 ? "" : ", "), tagset_labels[idx]);
os::free(tagset_labels[idx]);
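
The replacement of os::log_vsnprintf with os::vsnprintf relies on the C99 contract (spelled out in the os.hpp hunk further down): on truncation the return value is the length that would have been written, which is what drives the realloc path above. A minimal sketch of the same grow-and-retry pattern using plain vsnprintf:

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Format into a stack buffer first; on truncation (C99: the return
    // value is the length that *would* have been written), retry into an
    // exact-size heap buffer using a saved va_list. Error handling elided.
    static char* format_message(const char* fmt, ...) {
      va_list args, saved;
      va_start(args, fmt);
      va_copy(saved, args);
      char buf[128];
      int ret = vsnprintf(buf, sizeof(buf), fmt, args);
      va_end(args);
      char* result = NULL;
      if (ret >= 0) {
        result = (char*)malloc((size_t)ret + 1);
        if ((size_t)ret >= sizeof(buf)) {
          vsnprintf(result, (size_t)ret + 1, fmt, saved);  // retry, exact size
        } else {
          memcpy(result, buf, (size_t)ret + 1);
        }
      }
      va_end(saved);
      return result;  // caller frees
    }
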
--- a/src/hotspot/share/memory/universe.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/memory/universe.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -734,14 +734,8 @@
// HeapBased - Use compressed oops with heap base + encoding.
jint Universe::initialize_heap() {
- jint status = JNI_ERR;
-
- _collectedHeap = create_heap_ext();
- if (_collectedHeap == NULL) {
- _collectedHeap = create_heap();
- }
-
- status = _collectedHeap->initialize();
+ _collectedHeap = create_heap();
+ jint status = _collectedHeap->initialize();
if (status != JNI_OK) {
return status;
}
--- a/src/hotspot/share/memory/universe.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/memory/universe.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -220,7 +220,6 @@
static size_t _heap_used_at_last_gc;
static CollectedHeap* create_heap();
- static CollectedHeap* create_heap_ext();
static jint initialize_heap();
static void initialize_basic_type_mirrors(TRAPS);
static void fixup_mirrors(TRAPS);
--- a/src/hotspot/share/memory/universe_ext.cpp Fri Mar 09 00:28:50 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "memory/universe.hpp"
-
-CollectedHeap* Universe::create_heap_ext() {
- return NULL;
-}
--- a/src/hotspot/share/oops/generateOopMap.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/oops/generateOopMap.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
-#include "jvm.h"
#include "interpreter/bytecodeStream.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
@@ -33,6 +32,7 @@
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
+#include "runtime/os.hpp"
#include "runtime/relocator.hpp"
#include "runtime/timerTrace.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -2151,10 +2151,10 @@
void GenerateOopMap::error_work(const char *format, va_list ap) {
_got_error = true;
char msg_buffer[512];
- vsnprintf(msg_buffer, sizeof(msg_buffer), format, ap);
+ os::vsnprintf(msg_buffer, sizeof(msg_buffer), format, ap);
// Append method name
char msg_buffer2[512];
- jio_snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
+ os::snprintf(msg_buffer2, sizeof(msg_buffer2), "%s in method %s", msg_buffer, method()->name()->as_C_string());
if (Thread::current()->can_call_java()) {
_exception = Exceptions::new_exception(Thread::current(),
vmSymbols::java_lang_LinkageError(), msg_buffer2);
--- a/src/hotspot/share/opto/graphKit.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/opto/graphKit.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -23,10 +23,13 @@
*/
#include "precompiled.hpp"
+#include "ci/ciUtilities.hpp"
#include "compiler/compileLog.hpp"
+#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
@@ -1562,9 +1565,7 @@
g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
break;
default :
@@ -1579,9 +1580,7 @@
case BarrierSet::G1SATBCTLogging:
return true; // Can move it if no safepoint
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
- case BarrierSet::ModRef:
+ case BarrierSet::CardTableModRef:
return true; // There is no pre-barrier
default :
@@ -1605,14 +1604,10 @@
g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
break;
- case BarrierSet::CardTableForRS:
- case BarrierSet::CardTableExtension:
+ case BarrierSet::CardTableModRef:
write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
break;
- case BarrierSet::ModRef:
- break;
-
default :
ShouldNotReachHere();
@@ -3814,6 +3809,13 @@
//----------------------------- store barriers ----------------------------
#define __ ideal.
+bool GraphKit::use_ReduceInitialCardMarks() {
+ BarrierSet *bs = Universe::heap()->barrier_set();
+ return bs->is_a(BarrierSet::CardTableModRef)
+ && barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
+ && ReduceInitialCardMarks;
+}
+
void GraphKit::sync_kit(IdealKit& ideal) {
set_all_memory(__ merged_memory());
set_i_o(__ i_o());
@@ -3827,11 +3829,9 @@
Node* GraphKit::byte_map_base_node() {
// Get base of card map
- CardTableModRefBS* ct =
- barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
- if (ct->byte_map_base != NULL) {
- return makecon(TypeRawPtr::make((address)ct->byte_map_base));
+ jbyte* card_table_base = ci_card_table_address();
+ if (card_table_base != NULL) {
+ return makecon(TypeRawPtr::make((address)card_table_base));
} else {
return null();
}
@@ -3883,7 +3883,7 @@
// Divide by card size
assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
"Only one we handle so far.");
- Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+ Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
// Combine card table base and card offset
Node* card_adr = __ AddP(__ top(), byte_map_base_node(), card_offset );
@@ -4275,8 +4275,8 @@
Node* no_base = __ top();
float likely = PROB_LIKELY(0.999);
float unlikely = PROB_UNLIKELY(0.999);
- Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
- Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
+ Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
+ Node* dirty_card = __ ConI((jint)CardTable::dirty_card_val());
Node* zeroX = __ ConX(0);
// Get the alias_index for raw card-mark memory
@@ -4306,7 +4306,7 @@
Node* cast = __ CastPX(__ ctrl(), adr);
// Divide pointer by card size
- Node* card_offset = __ URShiftX( cast, __ ConI(CardTableModRefBS::card_shift) );
+ Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
// Combine card table base and card offset
Node* card_adr = __ AddP(no_base, byte_map_base_node(), card_offset );
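
Both the G1 and plain card-table barriers compute the card address the same way: shift the field address right by CardTable::card_shift and add the byte map base. A worked sketch of that arithmetic, assuming the usual 512-byte cards (card_shift == 9):

    #include <cstdint>

    const int card_shift = 9;  // assumed: 512-byte cards

    // byte_map_base is pre-biased so that (addr >> card_shift) indexes it
    // directly, exactly as byte_map_base_node() supplies it above.
    inline uint8_t* card_for(uint8_t* byte_map_base, uintptr_t addr) {
      return byte_map_base + (addr >> card_shift);
    }

    // Dirtying a card is then a single byte store:
    //   *card_for(base, (uintptr_t)field_addr) = dirty_card_val;
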
--- a/src/hotspot/share/opto/graphKit.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/opto/graphKit.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -754,12 +754,7 @@
// Returns the object (if any) which was created the moment before.
Node* just_allocated_object(Node* current_control);
- static bool use_ReduceInitialCardMarks() {
- BarrierSet *bs = Universe::heap()->barrier_set();
- return bs->is_a(BarrierSet::CardTableModRef)
- && barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
- && ReduceInitialCardMarks;
- }
+ static bool use_ReduceInitialCardMarks();
// Sync Ideal and Graph kits.
void sync_kit(IdealKit& ideal);
--- a/src/hotspot/share/opto/ifnode.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/opto/ifnode.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -897,7 +897,8 @@
// Figure out which of the two tests sets the upper bound and which
// sets the lower bound if any.
Node* adjusted_lim = NULL;
- if (hi_type->_lo > lo_type->_hi && hi_type->_hi == max_jint && lo_type->_lo == min_jint) {
+ if (lo_type != NULL && hi_type != NULL && hi_type->_lo > lo_type->_hi &&
+ hi_type->_hi == max_jint && lo_type->_lo == min_jint) {
assert((dom_bool->_test.is_less() && !proj->_con) ||
(dom_bool->_test.is_greater() && proj->_con), "incorrect test");
// this test was canonicalized
@@ -937,7 +938,8 @@
cond = BoolTest::lt;
}
}
- } else if (lo_type->_lo > hi_type->_hi && lo_type->_hi == max_jint && hi_type->_lo == min_jint) {
+ } else if (lo_type != NULL && hi_type != NULL && lo_type->_lo > hi_type->_hi &&
+ lo_type->_hi == max_jint && hi_type->_lo == min_jint) {
// this_bool = <
// dom_bool = < (proj = True) or dom_bool = >= (proj = False)
--- a/src/hotspot/share/prims/jni.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jni.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -69,6 +69,7 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/share/prims/jniCheck.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jniCheck.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -38,6 +38,7 @@
#include "runtime/handles.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
// Complain every extra number of unplanned local refs
--- a/src/hotspot/share/prims/jvm.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvm.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -61,6 +61,7 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/perfData.hpp"
@@ -434,6 +435,16 @@
extern volatile jint vm_created;
+JVM_ENTRY_NO_ENV(void, JVM_BeforeHalt())
+ JVMWrapper("JVM_BeforeHalt");
+ EventShutdown event;
+ if (event.should_commit()) {
+ event.set_reason("Shutdown requested from Java");
+ event.commit();
+ }
+JVM_END
+
+
JVM_ENTRY_NO_ENV(void, JVM_Halt(jint code))
before_exit(thread);
vm_exit(code);
@@ -2660,23 +2671,19 @@
ATTRIBUTE_PRINTF(3, 0)
int jio_vsnprintf(char *str, size_t count, const char *fmt, va_list args) {
- // see bug 4399518, 4417214
+ // Reject count values that are negative signed values converted to
+ // unsigned; see bug 4399518, 4417214
if ((intptr_t)count <= 0) return -1;
- int result = vsnprintf(str, count, fmt, args);
- // Note: on truncation vsnprintf(3) on Unix returns numbers of
- // characters which would have been written had the buffer been large
- // enough; on Windows, it returns -1. We handle both cases here and
- // always return -1, and perform null termination.
- if ((result > 0 && (size_t)result >= count) || result == -1) {
- str[count - 1] = '\0';
+ int result = os::vsnprintf(str, count, fmt, args);
+ if (result > 0 && (size_t)result >= count) {
result = -1;
}
return result;
}
-ATTRIBUTE_PRINTF(3, 0)
+ATTRIBUTE_PRINTF(3, 4)
int jio_snprintf(char *str, size_t count, const char *fmt, ...) {
va_list args;
int len;
@@ -2686,7 +2693,7 @@
return len;
}
-ATTRIBUTE_PRINTF(2,3)
+ATTRIBUTE_PRINTF(2, 3)
int jio_fprintf(FILE* f, const char *fmt, ...) {
int len;
va_list args;
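
With os::vsnprintf guaranteeing null termination and returning the would-be length on truncation, the rewritten jio_vsnprintf above only has to map truncation to -1. A short usage sketch of the resulting contract:

    char buf[16];
    int n = jio_snprintf(buf, sizeof(buf), "%s", "a rather long message");
    if (n == -1) {
      // truncated: buf still holds a null-terminated prefix
    }
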
--- a/src/hotspot/share/prims/jvmtiEnv.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/reflectionUtils.hpp"
#include "runtime/signature.hpp"
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -40,6 +40,7 @@
#include "runtime/deoptimization.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/signature.hpp"
@@ -501,6 +502,24 @@
}
+// Handle management
+
+jobject JvmtiEnvBase::jni_reference(Handle hndl) {
+ return JNIHandles::make_local(hndl());
+}
+
+jobject JvmtiEnvBase::jni_reference(JavaThread *thread, Handle hndl) {
+ return JNIHandles::make_local(thread, hndl());
+}
+
+void JvmtiEnvBase::destroy_jni_reference(jobject jobj) {
+ JNIHandles::destroy_local(jobj);
+}
+
+void JvmtiEnvBase::destroy_jni_reference(JavaThread *thread, jobject jobj) {
+ JNIHandles::destroy_local(jobj); // thread is unused.
+}
+
//
// Threads
//
--- a/src/hotspot/share/prims/jvmtiEnvBase.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -214,29 +214,20 @@
unsigned char* jvmtiMalloc(jlong size); // don't use this - call allocate
// method to create a local handle
- jobject jni_reference(Handle hndl) {
- return JNIHandles::make_local(hndl());
- }
+ jobject jni_reference(Handle hndl);
// method to create a local handle.
// This function allows caller to specify which
// threads local handle table to use.
- jobject jni_reference(JavaThread *thread, Handle hndl) {
- return JNIHandles::make_local(thread, hndl());
- }
+ jobject jni_reference(JavaThread *thread, Handle hndl);
// method to destroy a local handle
- void destroy_jni_reference(jobject jobj) {
- JNIHandles::destroy_local(jobj);
- }
+ void destroy_jni_reference(jobject jobj);
// method to destroy a local handle.
// This function allows caller to specify which
- // threads local handle table to use although currently it is
- // not used.
- void destroy_jni_reference(JavaThread *thread, jobject jobj) {
- destroy_jni_reference(jobj);
- }
+ // threads local handle table to use.
+ void destroy_jni_reference(JavaThread *thread, jobject jobj);
jvmtiEnv* jvmti_external() { return &_jvmti_external; };
--- a/src/hotspot/share/prims/jvmtiExport.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiExport.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
#include "runtime/handles.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -27,6 +27,7 @@
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.inline.hpp"
#include "prims/jvmtiGetLoadedClasses.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@
#include "prims/resolvedMethodTable.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/relocator.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/events.hpp"
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -44,7 +44,7 @@
#include "prims/jvmtiTagMap.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/javaCalls.hpp"
-#include "runtime/jniHandles.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/reflectionUtils.hpp"
--- a/src/hotspot/share/prims/methodHandles.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/methodHandles.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@
#include "prims/methodHandles.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/reflection.hpp"
#include "runtime/signature.hpp"
--- a/src/hotspot/share/prims/unsafe.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/unsafe.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -38,6 +38,7 @@
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/thread.hpp"
--- a/src/hotspot/share/prims/wbtestmethods/parserTests.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/wbtestmethods/parserTests.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "prims/whitebox.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
#include "runtime/interfaceSupport.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "services/diagnosticArgument.hpp"
#include "services/diagnosticFramework.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/prims/whitebox.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -53,6 +53,7 @@
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/arguments.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -50,6 +50,7 @@
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vm_version.hpp"
#include "services/management.hpp"
@@ -509,13 +510,13 @@
{ "MinRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "InitialRAMFraction", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "UseMembar", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
- { "SafepointSpinBeforeYield", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
- { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
- { "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "CheckEndorsedAndExtDirs", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
{ "CompilerThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "VMThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+ { "PrintSafepointStatistics", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+ { "PrintSafepointStatisticsTimeout", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+ { "PrintSafepointStatisticsCount",JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -532,6 +533,9 @@
{ "PrintMalloc", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "ShowSafepointMsgs", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "FastTLABRefill", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+ { "SafepointSpinBeforeYield", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+ { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+ { "DeferPollingPageLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
{ "PermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
{ "MaxPermSize", JDK_Version::undefined(), JDK_Version::jdk(8), JDK_Version::undefined() },
{ "SharedReadWriteSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() },
@@ -2951,9 +2955,7 @@
if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != Flag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(intx, DeferThrSuspendLoopCount, 1) != Flag::SUCCESS) {
- return JNI_EINVAL;
- }
+ SafepointSynchronize::set_defer_thr_suspend_loop_count();
if (FLAG_SET_CMDLINE(bool, UseTLAB, false) != Flag::SUCCESS) {
return JNI_EINVAL;
}
@@ -3097,7 +3099,8 @@
} else if (match_option(option, "-Xlog", &tail)) {
bool ret = false;
if (strcmp(tail, ":help") == 0) {
- LogConfiguration::print_command_line_help(defaultStream::output_stream());
+ fileStream stream(defaultStream::output_stream());
+ LogConfiguration::print_command_line_help(&stream);
vm_exit(0);
} else if (strcmp(tail, ":disable") == 0) {
LogConfiguration::disable_logging();
@@ -3110,7 +3113,7 @@
}
if (ret == false) {
jio_fprintf(defaultStream::error_stream(),
- "Invalid -Xlog option '-Xlog%s'\n",
+ "Invalid -Xlog option '-Xlog%s', see error log for details.\n",
tail);
return JNI_EINVAL;
}
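
For reference, the tails handled above correspond to invocations such as the following (the last selection is illustrative):

    java -Xlog:help           # print the -Xlog command-line help and exit
    java -Xlog:disable        # turn off all logging
    java -Xlog:gc+heap=debug  # a selection; invalid ones now point at the error log
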
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/commandLineFlagConstraintsGC.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -375,8 +375,8 @@
if (UseConcMarkSweepGC) {
// ParGCCardsPerStrideChunk should be compared with card table size.
size_t heap_size = Universe::heap()->reserved_region().word_size();
- CardTableModRefBS* bs = (CardTableModRefBS*)GenCollectedHeap::heap()->rem_set()->bs();
- size_t card_table_size = bs->cards_required(heap_size) - 1; // Valid card table size
+ CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
+ size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
if ((size_t)value > card_table_size) {
CommandLineError::print(verbose,
@@ -387,7 +387,7 @@
}
// ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread)
- // from CardTableModRefBSForCTRS::process_stride(). Note that ParGCStridesPerThread is already checked
+ // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
// not to make an overflow with ParallelGCThreads from its constraint function.
uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
uintx ergo_max = max_uintx / n_strides;
@@ -469,9 +469,9 @@
#if INCLUDE_ALL_GCS
if (status == Flag::SUCCESS && UseConcMarkSweepGC) {
// CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
- // to be aligned to CardTableModRefBS::card_size * BitsPerWord.
+ // to be aligned to CardTable::card_size * BitsPerWord.
// Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
- // because rescan_task_size() is CardTableModRefBS::card_size / HeapWordSize * BitsPerWord.
+ // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
if (value % HeapWordSize != 0) {
CommandLineError::print(verbose,
"CMSRescanMultiple (" SIZE_FORMAT ") must be "
--- a/src/hotspot/share/runtime/deoptimization.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/deoptimization.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -604,7 +604,7 @@
// Return BasicType of value being returned
JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
- // We are already active int he special DeoptResourceMark any ResourceObj's we
+ // We are already active in the special DeoptResourceMark any ResourceObj's we
// allocate will be freed at the end of the routine.
// It is actually ok to allocate handles in a leaf method. It causes no safepoints,
@@ -681,55 +681,41 @@
// at an uncommon trap for an invoke (where the compiler
// generates debug info before the invoke has executed)
Bytecodes::Code cur_code = str.next();
- if (cur_code == Bytecodes::_invokevirtual ||
- cur_code == Bytecodes::_invokespecial ||
- cur_code == Bytecodes::_invokestatic ||
- cur_code == Bytecodes::_invokeinterface ||
- cur_code == Bytecodes::_invokedynamic) {
+ if (Bytecodes::is_invoke(cur_code)) {
Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
- Symbol* signature = invoke.signature();
- ArgumentSizeComputer asc(signature);
- cur_invoke_parameter_size = asc.size();
- if (invoke.has_receiver()) {
- // Add in receiver
- ++cur_invoke_parameter_size;
- }
+ cur_invoke_parameter_size = invoke.size_of_parameters();
if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
callee_size_of_parameters++;
}
}
if (str.bci() < max_bci) {
- Bytecodes::Code bc = str.next();
- if (bc >= 0) {
+ Bytecodes::Code next_code = str.next();
+ if (next_code >= 0) {
// The interpreter oop map generator reports results before
// the current bytecode has executed except in the case of
// calls. It seems to be hard to tell whether the compiler
// has emitted debug information matching the "state before"
// a given bytecode or the state after, so we try both
- switch (cur_code) {
- case Bytecodes::_invokevirtual:
- case Bytecodes::_invokespecial:
- case Bytecodes::_invokestatic:
- case Bytecodes::_invokeinterface:
- case Bytecodes::_invokedynamic:
- case Bytecodes::_athrow:
- break;
- default: {
+ if (!Bytecodes::is_invoke(cur_code) && cur_code != Bytecodes::_athrow) {
+ // Get expression stack size for the next bytecode
+ if (Bytecodes::is_invoke(next_code)) {
+ Bytecode_invoke invoke(mh, str.bci());
+ next_mask_expression_stack_size = invoke.size_of_parameters();
+ } else {
InterpreterOopMap next_mask;
OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
next_mask_expression_stack_size = next_mask.expression_stack_size();
- // Need to subtract off the size of the result type of
- // the bytecode because this is not described in the
- // debug info but returned to the interpreter in the TOS
- // caching register
- BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
- if (bytecode_result_type != T_ILLEGAL) {
- top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
- }
- assert(top_frame_expression_stack_adjustment >= 0, "");
- try_next_mask = true;
- break;
}
+ // Need to subtract off the size of the result type of
+ // the bytecode because this is not described in the
+ // debug info but returned to the interpreter in the TOS
+ // caching register
+ BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
+ if (bytecode_result_type != T_ILLEGAL) {
+ top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
+ }
+          assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be non-negative");
+ try_next_mask = true;
}
}
}
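
The rewrite replaces the open-coded ArgumentSizeComputer walk with invoke.size_of_parameters(), which already accounts for the receiver slot. For illustration only, a hypothetical helper showing what counting argument slots in a JVM method descriptor involves (longs and doubles occupy two slots, arrays are references):

    #include <cassert>

    // Count argument slots in a method descriptor, e.g. "(IJLjava/lang/String;)V".
    // Not the HotSpot implementation; a sketch of the computation it replaces.
    static int descriptor_arg_slots(const char* desc) {
      assert(desc[0] == '(' && "must start at the parameter list");
      int slots = 0;
      for (const char* p = desc + 1; *p != ')'; p++) {
        bool is_array = false;
        while (*p == '[') { is_array = true; p++; }             // dims add nothing
        if (*p == 'L') { while (*p != ';') p++; slots += 1; }   // reference
        else if (!is_array && (*p == 'J' || *p == 'D')) slots += 2;  // long/double
        else slots += 1;
      }
      return slots;  // add one more for a receiver, if the invoke has one
    }

    // descriptor_arg_slots("(IJLjava/lang/String;)V") == 4
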
--- a/src/hotspot/share/runtime/globals.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/globals.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,7 @@
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
+#include "utilities/stringUtils.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1_globals.hpp"
#endif // INCLUDE_ALL_GCS
@@ -880,25 +881,6 @@
return _name_len;
}
-// Compute string similarity based on Dice's coefficient
-static float str_similar(const char* str1, const char* str2, size_t len2) {
- int len1 = (int) strlen(str1);
- int total = len1 + (int) len2;
-
- int hit = 0;
-
- for (int i = 0; i < len1 -1; ++i) {
- for (int j = 0; j < (int) len2 -1; ++j) {
- if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) {
- ++hit;
- break;
- }
- }
- }
-
- return 2.0f * (float) hit / (float) total;
-}
-
Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
float VMOptionsFuzzyMatchSimilarity = 0.7f;
Flag* match = NULL;
@@ -906,7 +888,7 @@
float max_score = -1;
for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
- score = str_similar(current->_name, name, length);
+ score = StringUtils::similarity(current->_name, strlen(current->_name), name, length);
if (score > max_score) {
max_score = score;
match = current;
--- a/src/hotspot/share/runtime/globals.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1723,13 +1723,13 @@
"enough work per iteration") \
range(0, max_intx) \
\
- /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \
+ /* 4096 = CardTable::card_size_in_words * BitsPerWord */ \
product(size_t, CMSRescanMultiple, 32, \
"Size (in cards) of CMS parallel rescan task") \
range(1, SIZE_MAX / 4096) \
constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit) \
\
- /* 4096 = CardTableModRefBS::card_size_in_words * BitsPerWord */ \
+ /* 4096 = CardTable::card_size_in_words * BitsPerWord */ \
product(size_t, CMSConcMarkMultiple, 32, \
"Size (in cards) of CMS concurrent MT marking task") \
range(1, SIZE_MAX / 4096) \
@@ -2444,15 +2444,15 @@
"ImplicitNullChecks don't work (PPC64).") \
\
product(bool, PrintSafepointStatistics, false, \
- "Print statistics about safepoint synchronization") \
+ "(Deprecated) Print statistics about safepoint synchronization") \
\
product(intx, PrintSafepointStatisticsCount, 300, \
- "Total number of safepoint statistics collected " \
+ "(Deprecated) Total number of safepoint statistics collected " \
"before printing them out") \
range(1, max_intx) \
\
product(intx, PrintSafepointStatisticsTimeout, -1, \
- "Print safepoint statistics only when safepoint takes " \
+ "(Deprecated) Print safepoint statistics only when safepoint takes " \
"more than PrintSafepointSatisticsTimeout in millis") \
LP64_ONLY(range(-1, max_intx/MICROUNITS)) \
NOT_LP64(range(-1, max_intx)) \
@@ -3256,21 +3256,6 @@
develop(uintx, GCWorkerDelayMillis, 0, \
"Delay in scheduling GC workers (in milliseconds)") \
\
- product(intx, DeferThrSuspendLoopCount, 4000, \
- "(Unstable, Deprecated) " \
- "Number of times to iterate in safepoint loop " \
- "before blocking VM threads ") \
- range(-1, max_jint-1) \
- \
- product(intx, DeferPollingPageLoopCount, -1, \
- "(Unsafe,Unstable,Deprecated) " \
- "Number of iterations in safepoint loop " \
- "before changing safepoint polling page to RO ") \
- range(-1, max_jint-1) \
- \
- product(intx, SafepointSpinBeforeYield, 2000, "(Unstable, Deprecated)") \
- range(0, max_intx) \
- \
product(bool, PSChunkLargeArrays, true, \
"Process large arrays in chunks") \
\
--- a/src/hotspot/share/runtime/javaCalls.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/javaCalls.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/signature.hpp"
--- a/src/hotspot/share/runtime/jniHandles.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/jniHandles.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -27,7 +27,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/jniHandles.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "trace/traceMacros.hpp"
@@ -135,6 +135,18 @@
return res;
}
+// Resolve some erroneous cases to NULL, rather than treating them as
+// possibly unchecked errors. In particular, deleted handles are
+// treated as NULL (though a deleted and later reallocated handle
+// isn't detected).
+oop JNIHandles::resolve_external_guard(jobject handle) {
+ oop result = NULL;
+ if (handle != NULL) {
+ result = resolve_impl<true /* external_guard */ >(handle);
+ }
+ return result;
+}
+
oop JNIHandles::resolve_jweak(jweak handle) {
assert(handle != NULL, "precondition");
assert(is_jweak(handle), "precondition");
--- a/src/hotspot/share/runtime/jniHandles.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/jniHandles.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -67,10 +67,10 @@
// Resolve handle into oop
inline static oop resolve(jobject handle);
- // Resolve externally provided handle into oop with some guards
- inline static oop resolve_external_guard(jobject handle);
// Resolve handle into oop, result guaranteed not to be null
inline static oop resolve_non_null(jobject handle);
+ // Resolve externally provided handle into oop with some guards
+ static oop resolve_external_guard(jobject handle);
// Local handles
static jobject make_local(oop obj);
@@ -198,72 +198,4 @@
#endif
};
-inline bool JNIHandles::is_jweak(jobject handle) {
- STATIC_ASSERT(weak_tag_size == 1);
- STATIC_ASSERT(weak_tag_value == 1);
- return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0;
-}
-
-inline oop& JNIHandles::jobject_ref(jobject handle) {
- assert(!is_jweak(handle), "precondition");
- return *reinterpret_cast<oop*>(handle);
-}
-
-inline oop& JNIHandles::jweak_ref(jobject handle) {
- assert(is_jweak(handle), "precondition");
- char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value;
- return *reinterpret_cast<oop*>(ptr);
-}
-
-// external_guard is true if called from resolve_external_guard.
-template<bool external_guard>
-inline oop JNIHandles::resolve_impl(jobject handle) {
- assert(handle != NULL, "precondition");
- assert(!current_thread_in_native(), "must not be in native");
- oop result;
- if (is_jweak(handle)) { // Unlikely
- result = resolve_jweak(handle);
- } else {
- result = jobject_ref(handle);
- // Construction of jobjects canonicalize a null value into a null
- // jobject, so for non-jweak the pointee should never be null.
- assert(external_guard || result != NULL, "Invalid JNI handle");
- }
- return result;
-}
-
-inline oop JNIHandles::resolve(jobject handle) {
- oop result = NULL;
- if (handle != NULL) {
- result = resolve_impl<false /* external_guard */ >(handle);
- }
- return result;
-}
-
-// Resolve some erroneous cases to NULL, rather than treating them as
-// possibly unchecked errors. In particular, deleted handles are
-// treated as NULL (though a deleted and later reallocated handle
-// isn't detected).
-inline oop JNIHandles::resolve_external_guard(jobject handle) {
- oop result = NULL;
- if (handle != NULL) {
- result = resolve_impl<true /* external_guard */ >(handle);
- }
- return result;
-}
-
-inline oop JNIHandles::resolve_non_null(jobject handle) {
- assert(handle != NULL, "JNI handle should not be null");
- oop result = resolve_impl<false /* external_guard */ >(handle);
- assert(result != NULL, "NULL read from jni handle");
- return result;
-}
-
-inline void JNIHandles::destroy_local(jobject handle) {
- if (handle != NULL) {
- assert(!is_jweak(handle), "Invalid JNI local handle");
- jobject_ref(handle) = NULL;
- }
-}
-
#endif // SHARE_VM_RUNTIME_JNIHANDLES_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/jniHandles.inline.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
+#define SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
+
+#include "oops/oop.hpp"
+#include "runtime/jniHandles.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+inline bool JNIHandles::is_jweak(jobject handle) {
+ STATIC_ASSERT(weak_tag_size == 1);
+ STATIC_ASSERT(weak_tag_value == 1);
+ return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0;
+}
+
+inline oop& JNIHandles::jobject_ref(jobject handle) {
+ assert(!is_jweak(handle), "precondition");
+ return *reinterpret_cast<oop*>(handle);
+}
+
+inline oop& JNIHandles::jweak_ref(jobject handle) {
+ assert(is_jweak(handle), "precondition");
+ char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value;
+ return *reinterpret_cast<oop*>(ptr);
+}
+
+// external_guard is true if called from resolve_external_guard.
+template<bool external_guard>
+inline oop JNIHandles::resolve_impl(jobject handle) {
+ assert(handle != NULL, "precondition");
+ assert(!current_thread_in_native(), "must not be in native");
+ oop result;
+ if (is_jweak(handle)) { // Unlikely
+ result = resolve_jweak(handle);
+ } else {
+ result = jobject_ref(handle);
+    // Construction of jobjects canonicalizes a null value into a null
+ // jobject, so for non-jweak the pointee should never be null.
+ assert(external_guard || result != NULL, "Invalid JNI handle");
+ }
+ return result;
+}
+
+inline oop JNIHandles::resolve(jobject handle) {
+ oop result = NULL;
+ if (handle != NULL) {
+ result = resolve_impl<false /* external_guard */ >(handle);
+ }
+ return result;
+}
+
+inline oop JNIHandles::resolve_non_null(jobject handle) {
+ assert(handle != NULL, "JNI handle should not be null");
+ oop result = resolve_impl<false /* external_guard */ >(handle);
+ assert(result != NULL, "NULL read from jni handle");
+ return result;
+}
+
+inline void JNIHandles::destroy_local(jobject handle) {
+ if (handle != NULL) {
+ assert(!is_jweak(handle), "Invalid JNI local handle");
+ jobject_ref(handle) = NULL;
+ }
+}
+
+#endif // SHARE_RUNTIME_JNIHANDLES_INLINE_HPP
+
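
The is_jweak test relies on tagging the low bit of the handle pointer (weak_tag_size == 1 and weak_tag_value == 1, as the STATIC_ASSERTs above check). A minimal sketch of that low-bit tagging scheme under those assumptions:

    #include <cstdint>

    const uintptr_t weak_tag_mask = 1;  // assumed: bit 0 marks weak handles

    inline void* make_weak(void* p)  { return (void*)((uintptr_t)p | weak_tag_mask); }
    inline bool  is_weak(void* h)    { return ((uintptr_t)h & weak_tag_mask) != 0; }
    inline void* strip_tag(void* h)  { return (void*)((uintptr_t)h & ~weak_tag_mask); }

This only works because handles point at aligned slots, so bit 0 of an ordinary handle is always clear; jweak_ref above undoes the tag by subtracting weak_tag_value before dereferencing.
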
--- a/src/hotspot/share/runtime/os.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/os.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -105,6 +105,14 @@
#endif
}
+int os::snprintf(char* buf, size_t len, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ int result = os::vsnprintf(buf, len, fmt, args);
+ va_end(args);
+ return result;
+}
+
// Fill in buffer with current local time as an ISO-8601 string.
// E.g., yyyy-mm-ddThh:mm:ss-zzzz.
// Returns buffer, or NULL if it failed.
@@ -237,6 +245,13 @@
return OS_OK;
}
+
+#if !defined(LINUX) && !defined(_WINDOWS)
+size_t os::committed_stack_size(address bottom, size_t size) {
+ return size;
+}
+#endif
+
bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX);
return (n != -1);
--- a/src/hotspot/share/runtime/os.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/os.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -271,6 +271,10 @@
static void map_stack_shadow_pages(address sp);
static bool stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp);
+  // Return the size of stack that is actually committed. For a Java thread,
+  // the bottom should be above the guard pages (the stack grows downward).
+ static size_t committed_stack_size(address bottom, size_t size);
+
// OS interface to Virtual Memory
// Return the default page size.
@@ -639,8 +643,10 @@
static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
const char *syms[], size_t syms_len);
- // Write to stream
- static int log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0);
+ // Provide C99 compliant versions of these functions, since some versions
+ // of some platforms don't.
+ static int vsnprintf(char* buf, size_t len, const char* fmt, va_list args) ATTRIBUTE_PRINTF(3, 0);
+ static int snprintf(char* buf, size_t len, const char* fmt, ...) ATTRIBUTE_PRINTF(3, 4);
// Get host name in buffer provided
static bool get_host_name(char* buf, size_t buflen);
--- a/src/hotspot/share/runtime/safepoint.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/safepoint.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -78,6 +78,8 @@
volatile int SafepointSynchronize::_safepoint_counter = 0;
int SafepointSynchronize::_current_jni_active_count = 0;
long SafepointSynchronize::_end_of_last_safepoint = 0;
+int SafepointSynchronize::_defer_thr_suspend_loop_count = 4000;
+static const int safepoint_spin_before_yield = 2000;
static volatile int PageArmed = 0 ; // safepoint polling page is RO|RW vs PROT_NONE
static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only
static bool timeout_error_printed = false;
@@ -191,12 +193,10 @@
// Make interpreter safepoint aware
Interpreter::notice_safepoints();
- if (DeferPollingPageLoopCount < 0) {
- // Make polling safepoint aware
- guarantee (PageArmed == 0, "invariant") ;
- PageArmed = 1 ;
- os::make_polling_page_unreadable();
- }
+ // Make polling safepoint aware
+ guarantee (PageArmed == 0, "invariant") ;
+ PageArmed = 1 ;
+ os::make_polling_page_unreadable();
}
// Consider using active_processor_count() ... but that call is expensive.
@@ -309,19 +309,21 @@
// 9. On windows consider using the return value from SwitchThreadTo()
// to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.
- if (SafepointMechanism::uses_global_page_poll() && int(iterations) == DeferPollingPageLoopCount) {
- guarantee (PageArmed == 0, "invariant") ;
- PageArmed = 1 ;
- os::make_polling_page_unreadable();
+ if (int(iterations) == -1) { // overflow - something is wrong.
+ // We can only overflow here when we are using global
+ // polling pages. We keep this guarantee in its original
+ // form so that searches of the bug database for this
+ // failure mode find the right bugs.
+ guarantee (PageArmed == 0, "invariant");
}
// Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
// ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
++steps ;
- if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
+ if (ncpus > 1 && steps < safepoint_spin_before_yield) {
SpinPause() ; // MP-Polite spin
} else
- if (steps < DeferThrSuspendLoopCount) {
+ if (steps < _defer_thr_suspend_loop_count) {
os::naked_yield() ;
} else {
os::naked_short_sleep(1);
@@ -1190,7 +1192,6 @@
float SafepointSynchronize::_ts_of_current_safepoint = 0.0f;
static jlong cleanup_end_time = 0;
-static bool need_to_track_page_armed_status = false;
static bool init_done = false;
// Helper method to print the header.
@@ -1202,11 +1203,6 @@
"[ threads: total initially_running wait_to_block ]"
"[ time: spin block sync cleanup vmop ] ");
- // no page armed status printed out if it is always armed.
- if (need_to_track_page_armed_status) {
- tty->print("page_armed ");
- }
-
tty->print_cr("page_trap_count");
}
@@ -1229,9 +1225,6 @@
guarantee(_safepoint_stats != NULL,
"not enough memory for safepoint instrumentation data");
- if (DeferPollingPageLoopCount >= 0) {
- need_to_track_page_armed_status = true;
- }
init_done = true;
}
@@ -1271,10 +1264,6 @@
spstat->_time_to_spin = cur_time - spstat->_time_to_spin;
}
- if (need_to_track_page_armed_status) {
- spstat->_page_armed = (PageArmed == 1);
- }
-
// Records the start time of waiting for to block. Updated when block is done.
if (_waiting_to_block != 0) {
spstat->_time_to_wait_to_block = cur_time;
@@ -1363,9 +1352,6 @@
(int64_t)(sstats->_time_to_do_cleanups / MICROUNITS),
(int64_t)(sstats->_time_to_exec_vmop / MICROUNITS));
- if (need_to_track_page_armed_status) {
- tty->print(INT32_FORMAT_W(10) " ", sstats->_page_armed);
- }
tty->print_cr(INT32_FORMAT_W(15) " ", sstats->_nof_threads_hit_page_trap);
}
}
@@ -1392,12 +1378,7 @@
tty->cr();
// Print out polling page sampling status.
- if (!need_to_track_page_armed_status) {
- tty->print_cr("Polling page always armed");
- } else {
- tty->print_cr("Defer polling page loop count = " INTX_FORMAT "\n",
- DeferPollingPageLoopCount);
- }
+ tty->print_cr("Polling page always armed");
for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) {
if (_safepoint_reasons[index] != 0) {
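
The wait loop above steps through a three-stage back-off: spin (on MP systems only) for safepoint_spin_before_yield iterations, then yield until _defer_thr_suspend_loop_count, then sleep in 1 ms slices. A hedged standalone sketch of that ladder, using the standard library in place of the os:: primitives:

    #include <chrono>
    #include <thread>

    // spin_limit and yield_limit play the roles of safepoint_spin_before_yield
    // and _defer_thr_suspend_loop_count in the loop above.
    void backoff_step(int step, int spin_limit, int yield_limit, int ncpus) {
      if (ncpus > 1 && step < spin_limit) {
        // SpinPause() in HotSpot: a CPU pause hint, the cheapest MP-polite wait
      } else if (step < yield_limit) {
        std::this_thread::yield();                                  // os::naked_yield()
      } else {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));  // naked_short_sleep(1)
      }
    }
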
--- a/src/hotspot/share/runtime/safepoint.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/safepoint.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -94,7 +94,6 @@
int _nof_total_threads; // total number of Java threads
int _nof_initial_running_threads; // total number of initially seen running threads
int _nof_threads_wait_to_block; // total number of threads waiting for to block
- bool _page_armed; // true if polling page is armed, false otherwise
int _nof_threads_hit_page_trap; // total number of threads hitting the page trap
jlong _time_to_spin; // total time in millis spent in spinning
jlong _time_to_wait_to_block; // total time in millis spent in waiting for to block
@@ -107,6 +106,7 @@
static volatile SynchronizeState _state; // Threads might read this flag directly, without acquiring the Threads_lock
static volatile int _waiting_to_block; // number of threads we are waiting for to block
static int _current_jni_active_count; // Counts the number of active critical natives during the safepoint
+ static int _defer_thr_suspend_loop_count; // Iterations before blocking VM threads
// This counter is used for fast versions of jni_Get<Primitive>Field.
// An even value means there is no ongoing safepoint operations.
@@ -202,6 +202,11 @@
static address address_of_state() { return (address)&_state; }
static address safepoint_counter_addr() { return (address)&_safepoint_counter; }
+
+ // This method is only used for -Xconcurrentio support.
+ static void set_defer_thr_suspend_loop_count() {
+ _defer_thr_suspend_loop_count = 1;
+ }
};
// State class for a thread suspended at a safepoint
--- a/src/hotspot/share/runtime/thread.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/thread.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -71,6 +71,7 @@
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/jniPeriodicChecker.hpp"
#include "runtime/memprofiler.hpp"
#include "runtime/mutexLocker.hpp"
@@ -3863,10 +3864,11 @@
#if INCLUDE_JVMCI
if (EnableJVMCI) {
- // Initialize JVMCI eagerly if JVMCIPrintProperties is enabled.
+    // Initialize JVMCI eagerly when it is explicitly requested,
+    // or when JVMCIPrintProperties is enabled.
// The JVMCI Java initialization code will read this flag and
// do the printing if it's set.
- bool init = JVMCIPrintProperties;
+ bool init = EagerJVMCI || JVMCIPrintProperties;
if (!init) {
// 8145270: Force initialization of JVMCI runtime otherwise requests for blocking
@@ -4218,6 +4220,12 @@
Mutex::_as_suspend_equivalent_flag);
}
+ EventShutdown e;
+ if (e.should_commit()) {
+ e.set_reason("No remaining non-daemon Java threads");
+ e.commit();
+ }
+
// Hang forever on exit if we are reporting an error.
if (ShowMessageBoxOnError && VMError::is_error_reported()) {
os::infinite_sleep();
--- a/src/hotspot/share/runtime/threadSMR.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/threadSMR.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "services/threadService.hpp"
--- a/src/hotspot/share/runtime/vmStructs.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -466,20 +466,19 @@
nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \
nonstatic_field(CardGeneration, _used_at_prologue, size_t) \
\
+ nonstatic_field(CardTable, _whole_heap, const MemRegion) \
+ nonstatic_field(CardTable, _guard_index, const size_t) \
+ nonstatic_field(CardTable, _last_valid_index, const size_t) \
+ nonstatic_field(CardTable, _page_size, const size_t) \
+ nonstatic_field(CardTable, _byte_map_size, const size_t) \
+ nonstatic_field(CardTable, _byte_map, jbyte*) \
+ nonstatic_field(CardTable, _cur_covered_regions, int) \
+ nonstatic_field(CardTable, _covered, MemRegion*) \
+ nonstatic_field(CardTable, _committed, MemRegion*) \
+ nonstatic_field(CardTable, _guard_region, MemRegion) \
+ nonstatic_field(CardTable, _byte_map_base, jbyte*) \
nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \
- nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \
- nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \
- nonstatic_field(CardTableModRefBS, _last_valid_index, const size_t) \
- nonstatic_field(CardTableModRefBS, _page_size, const size_t) \
- nonstatic_field(CardTableModRefBS, _byte_map_size, const size_t) \
- nonstatic_field(CardTableModRefBS, _byte_map, jbyte*) \
- nonstatic_field(CardTableModRefBS, _cur_covered_regions, int) \
- nonstatic_field(CardTableModRefBS, _covered, MemRegion*) \
- nonstatic_field(CardTableModRefBS, _committed, MemRegion*) \
- nonstatic_field(CardTableModRefBS, _guard_region, MemRegion) \
- nonstatic_field(CardTableModRefBS, byte_map_base, jbyte*) \
- \
- nonstatic_field(CardTableRS, _ct_bs, CardTableModRefBSForCTRS*) \
+ nonstatic_field(CardTableModRefBS, _card_table, CardTable*) \
\
nonstatic_field(CollectedHeap, _reserved, MemRegion) \
nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
@@ -1477,9 +1476,9 @@
declare_toplevel_type(BarrierSet) \
declare_type(ModRefBarrierSet, BarrierSet) \
declare_type(CardTableModRefBS, ModRefBarrierSet) \
- declare_type(CardTableModRefBSForCTRS, CardTableModRefBS) \
+ declare_toplevel_type(CardTable) \
+ declare_type(CardTableRS, CardTable) \
declare_toplevel_type(BarrierSet::Name) \
- declare_toplevel_type(CardTableRS) \
declare_toplevel_type(BlockOffsetSharedArray) \
declare_toplevel_type(BlockOffsetTable) \
declare_type(BlockOffsetArray, BlockOffsetTable) \
@@ -1502,11 +1501,11 @@
\
declare_toplevel_type(BarrierSet*) \
declare_toplevel_type(BlockOffsetSharedArray*) \
+ declare_toplevel_type(CardTable*) \
+ declare_toplevel_type(CardTable*const) \
declare_toplevel_type(CardTableRS*) \
declare_toplevel_type(CardTableModRefBS*) \
declare_toplevel_type(CardTableModRefBS**) \
- declare_toplevel_type(CardTableModRefBSForCTRS*) \
- declare_toplevel_type(CardTableModRefBSForCTRS**) \
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(DefNewGeneration*) \
@@ -2240,8 +2239,6 @@
\
declare_constant(BarrierSet::ModRef) \
declare_constant(BarrierSet::CardTableModRef) \
- declare_constant(BarrierSet::CardTableForRS) \
- declare_constant(BarrierSet::CardTableExtension) \
declare_constant(BarrierSet::G1SATBCT) \
declare_constant(BarrierSet::G1SATBCTLogging) \
\
@@ -2253,18 +2250,18 @@
declare_constant(BOTConstants::Base) \
declare_constant(BOTConstants::N_powers) \
\
- declare_constant(CardTableModRefBS::clean_card) \
- declare_constant(CardTableModRefBS::last_card) \
- declare_constant(CardTableModRefBS::dirty_card) \
- declare_constant(CardTableModRefBS::Precise) \
- declare_constant(CardTableModRefBS::ObjHeadPreciseArray) \
- declare_constant(CardTableModRefBS::card_shift) \
- declare_constant(CardTableModRefBS::card_size) \
- declare_constant(CardTableModRefBS::card_size_in_words) \
+ declare_constant(CardTable::clean_card) \
+ declare_constant(CardTable::last_card) \
+ declare_constant(CardTable::dirty_card) \
+ declare_constant(CardTable::Precise) \
+ declare_constant(CardTable::ObjHeadPreciseArray) \
+ declare_constant(CardTable::card_shift) \
+ declare_constant(CardTable::card_size) \
+ declare_constant(CardTable::card_size_in_words) \
\
declare_constant(CardTableRS::youngergen_card) \
\
- declare_constant(G1SATBCardTableModRefBS::g1_young_gen) \
+ declare_constant(G1CardTable::g1_young_gen) \
\
declare_constant(CollectedHeap::SerialHeap) \
declare_constant(CollectedHeap::CMSHeap) \
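
The vmStructs.cpp hunk mirrors the card-table split visible throughout this patch: the byte-map fields move from CardTableModRefBS to the new CardTable class, the barrier set keeps only a _card_table pointer, and the CardTableModRefBSForCTRS types and constants disappear. A minimal sketch (not part of the patch) of how VM code reaches the byte map base after the split:

    // Hedged sketch: the barrier set now delegates card-table state to a
    // separate CardTable object.
    BarrierSet* bs = Universe::heap()->barrier_set();
    assert(bs->is_a(BarrierSet::CardTableModRef), "wrong barrier set kind");
    CardTableModRefBS* ctbs = (CardTableModRefBS*) bs;
    CardTable* ct = ctbs->card_table();   // the new _card_table indirection
    jbyte* base = ct->byte_map_base();    // formerly a field on the barrier set
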
--- a/src/hotspot/share/runtime/vm_version.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/runtime/vm_version.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -216,6 +216,10 @@
#define HOTSPOT_BUILD_COMPILER "MS VC++ 11.0 (VS2012)"
#elif _MSC_VER == 1800
#define HOTSPOT_BUILD_COMPILER "MS VC++ 12.0 (VS2013)"
+ #elif _MSC_VER == 1900
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 14.0 (VS2015)"
+ #elif _MSC_VER == 1912
+ #define HOTSPOT_BUILD_COMPILER "MS VC++ 15.5 (VS2017)"
#else
#define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER)
#endif
--- a/src/hotspot/share/services/management.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/services/management.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
-#include "runtime/jniHandles.hpp"
+#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/services/memTracker.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/services/memTracker.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -246,7 +246,7 @@
if (addr != NULL) {
// uses thread stack malloc slot for book keeping number of threads
MallocMemorySummary::record_malloc(0, mtThreadStack);
- record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
+ record_virtual_memory_reserve(addr, size, CALLER_PC, mtThreadStack);
}
}
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -38,6 +38,12 @@
::new ((void*)_snapshot) VirtualMemorySnapshot();
}
+void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
+ // Snapshot current thread stacks
+ VirtualMemoryTracker::snapshot_thread_stacks();
+ as_snapshot()->copy_to(s);
+}
+
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
@@ -286,6 +292,26 @@
}
}
+address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
+ assert(flag() == mtThreadStack, "Only for thread stack");
+ LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+ address bottom = base();
+ address top = base() + size();
+ while (head != NULL) {
+ address committed_top = head->data()->base() + head->data()->size();
+ if (committed_top < top) {
+ // committed stack guard pages, skip them
+ bottom = head->data()->base() + head->data()->size();
+ head = head->next();
+ } else {
+ assert(top == committed_top, "Sanity");
+ break;
+ }
+ }
+
+ return bottom;
+}
+
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
if (level >= NMT_summary) {
VirtualMemorySummary::initialize();
@@ -460,6 +486,32 @@
}
}
+// Walk all known thread stacks, snapshot their committed ranges.
+class SnapshotThreadStackWalker : public VirtualMemoryWalker {
+public:
+ SnapshotThreadStackWalker() {}
+
+ bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+ if (rgn->flag() == mtThreadStack) {
+ address stack_bottom = rgn->thread_stack_uncommitted_bottom();
+ size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
+ size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
+ if (committed_size > 0) {
+ ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
+ NativeCallStack ncs; // empty stack
+
+ // Stack grows downward
+ region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
+ }
+ }
+ return true;
+ }
+};
+
+void VirtualMemoryTracker::snapshot_thread_stacks() {
+ SnapshotThreadStackWalker walker;
+ walk_virtual_memory(&walker);
+}
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
assert(_reserved_regions != NULL, "Sanity check");
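
Together with the VirtualMemorySummary::snapshot() definition above, this makes thread-stack committed sizes lazy: they are sampled by SnapshotThreadStackWalker each time a summary snapshot is taken, rather than recorded as fully committed at thread creation (see the memTracker.hpp hunk). A minimal caller-side sketch using the names from this patch:

    // Hedged usage sketch: taking a summary snapshot now refreshes
    // thread-stack committed sizes first, then copies the totals out.
    VirtualMemorySnapshot snap;
    VirtualMemorySummary::snapshot(&snap);   // walks mtThreadStack regions
    size_t stack_committed = snap.by_type(mtThreadStack)->committed();
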
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -160,9 +160,7 @@
as_snapshot()->by_type(to)->commit_memory(size);
}
- static inline void snapshot(VirtualMemorySnapshot* s) {
- as_snapshot()->copy_to(s);
- }
+ static void snapshot(VirtualMemorySnapshot* s);
static VirtualMemorySnapshot* as_snapshot() {
return (VirtualMemorySnapshot*)_snapshot;
@@ -336,6 +334,9 @@
return compare(rgn) == 0;
}
+ // Uncommitted thread stack bottom, above the guard pages if there are any.
+ address thread_stack_uncommitted_bottom() const;
+
bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
bool remove_uncommitted_region(address addr, size_t size);
@@ -389,6 +390,7 @@
// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
friend class VirtualMemoryTrackerTest;
+ friend class ThreadStackTrackingTest;
public:
static bool initialize(NMT_TrackingLevel level);
@@ -408,6 +410,9 @@
static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+ // Snapshot current thread stacks
+ static void snapshot_thread_stacks();
+
private:
static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};
--- a/src/hotspot/share/trace/traceevents.xml Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/trace/traceevents.xml Fri Mar 02 21:00:12 2018 +0100
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -628,6 +628,11 @@
<value type="INTEGER" field="safepointId" label="Safepoint Identifier" description="The safepoint (if any) under which this operation was completed" relation="SafepointId"/>
</event>
+ <event id="Shutdown" path="vm/runtime/shutdown" label="VM Shutdown"
+ description="VM shutting down" has_thread="true" has_stacktrace="true" is_instant="true">
+ <value type="STRING" field="reason" label="Reason" description="Reason for VM shutdown"/>
+ </event>
+
<!-- Allocation events -->
<event id="ObjectAllocationInNewTLAB" path="java/object_alloc_in_new_TLAB" label="Allocation in new TLAB"
description="Allocation in new Thread Local Allocation Buffer" has_thread="true" has_stacktrace="true" is_instant="true">
--- a/src/hotspot/share/utilities/exceptions.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/utilities/exceptions.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/events.hpp"
@@ -239,8 +240,7 @@
va_list ap;
va_start(ap, format);
char msg[max_msg_size];
- vsnprintf(msg, max_msg_size, format, ap);
- msg[max_msg_size-1] = '\0';
+ os::vsnprintf(msg, max_msg_size, format, ap);
va_end(ap);
_throw_msg(thread, file, line, h_name, msg);
}
--- a/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/utilities/globalDefinitions_visCPP.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -147,14 +147,6 @@
#pragma warning( disable : 4996 ) // unsafe string functions. Same as define _CRT_SECURE_NO_WARNINGS/_CRT_SECURE_NO_DEPRICATE
#endif
-inline int vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
- // If number of characters written == count, Windows doesn't write a
- // terminating NULL, so we do it ourselves.
- int ret = _vsnprintf(buf, count, fmt, argptr);
- if (count > 0) buf[count-1] = '\0';
- return ret;
-}
-
// Portability macros
#define PRAGMA_INTERFACE
#define PRAGMA_IMPLEMENTATION
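
The hand-rolled wrapper is deleted because callers now go through os::vsnprintf (see the exceptions.cpp hunk above and the ostream.cpp hunk below), which centralizes the "always NUL-terminate" guarantee in the platform layer. A hedged sketch of the obligation the Windows implementation inherits, mirroring the removed inline rather than quoting the actual os_windows change (which is not in this section):

    // Assumed shape of the platform-side replacement (illustrative only):
    int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
      int result = ::vsnprintf(buf, len, fmt, args);
      // On truncation (result >= len) or encoding error (result < 0) the
      // buffer must still end up NUL-terminated.
      if (len > 0 && (result < 0 || (size_t)result >= len)) {
        buf[len - 1] = '\0';
      }
      return result;
    }
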
--- a/src/hotspot/share/utilities/ostream.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/utilities/ostream.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -96,19 +96,14 @@
result_len = strlen(result);
if (add_cr && result_len >= buflen) result_len = buflen-1; // truncate
} else {
- // Handle truncation:
- // posix: upon truncation, vsnprintf returns number of bytes which
- // would have been written (excluding terminating zero) had the buffer
- // been large enough
- // windows: upon truncation, vsnprintf returns -1
- const int written = vsnprintf(buffer, buflen, format, ap);
+ int written = os::vsnprintf(buffer, buflen, format, ap);
+ assert(written >= 0, "vsnprintf encoding error");
result = buffer;
- if (written < (int) buflen && written >= 0) {
+ if ((size_t)written < buflen) {
result_len = written;
} else {
DEBUG_ONLY(warning("increase O_BUFLEN in ostream.hpp -- output truncated");)
result_len = buflen - 1;
- buffer[result_len] = 0;
}
}
if (add_cr) {
--- a/src/hotspot/share/utilities/stringUtils.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/utilities/stringUtils.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,3 +41,19 @@
return replace_count;
}
+
+double StringUtils::similarity(const char* str1, size_t len1, const char* str2, size_t len2) {
+ size_t total = len1 + len2;
+
+ size_t hit = 0;
+ for (size_t i = 0; i < len1 - 1; i++) {
+ for (size_t j = 0; j < len2 - 1; j++) {
+ if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) {
+ ++hit;
+ break;
+ }
+ }
+ }
+
+ return 2.0 * (double) hit / (double) total;
+}
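
similarity() counts the bigrams of str1 that occur anywhere in str2 and normalizes by the combined length, a simplification of the classic Dice denominator (which uses the bigram counts (len1 - 1) + (len2 - 1)). Note the size_t loop bounds assume len1 and len2 are at least 1. A worked example under the implemented definition:

    // "night" vs "nacht": str1 bigrams are ni, ig, gh, ht; only "ht"
    // also occurs in str2, so hit == 1 and total == 5 + 5.
    double d = StringUtils::similarity("night", 5, "nacht", 5);
    // d == 2.0 * 1 / 10 == 0.2
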
--- a/src/hotspot/share/utilities/stringUtils.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/utilities/stringUtils.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,9 @@
//
// Returns the count of substrings that have been replaced.
static int replace_no_expand(char* string, const char* from, const char* to);
+
+ // Compute string similarity based on Dice's coefficient
+ static double similarity(const char* str1, size_t len1, const char* str2, size_t len2);
};
#endif // SHARE_VM_UTILITIES_STRINGUTILS_HPP
--- a/src/hotspot/share/utilities/vmError.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/src/hotspot/share/utilities/vmError.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1305,6 +1305,12 @@
// are handled properly.
reset_signal_handlers();
+ EventShutdown e;
+ if (e.should_commit()) {
+ e.set_reason("VM Error");
+ e.commit();
+ }
+
TRACE_VM_ERROR();
} else {
--- a/src/java.base/share/classes/java/lang/Runtime.java Fri Mar 09 00:28:50 2018 +0100
+++ b/src/java.base/share/classes/java/lang/Runtime.java Fri Mar 02 21:00:12 2018 +0100
@@ -275,6 +275,7 @@
if (sm != null) {
sm.checkExit(status);
}
+ Shutdown.beforeHalt();
Shutdown.halt(status);
}
--- a/src/java.base/share/classes/java/lang/Shutdown.java Fri Mar 09 00:28:50 2018 +0100
+++ b/src/java.base/share/classes/java/lang/Shutdown.java Fri Mar 02 21:00:12 2018 +0100
@@ -140,6 +140,9 @@
VM.shutdown();
}
+ /* Notify the VM that it's time to halt. */
+ static native void beforeHalt();
+
/* The halt method is synchronized on the halt lock
* to avoid corruption of the delete-on-shutdown file list.
* It invokes the true native halt method.
@@ -167,6 +170,7 @@
/* Synchronize on the class object, causing any other thread
* that attempts to initiate shutdown to stall indefinitely
*/
+ beforeHalt();
runHooks();
halt(status);
}
--- a/src/java.base/share/native/libjava/Shutdown.c Fri Mar 09 00:28:50 2018 +0100
+++ b/src/java.base/share/native/libjava/Shutdown.c Fri Mar 02 21:00:12 2018 +0100
@@ -29,6 +29,11 @@
#include "java_lang_Shutdown.h"
+JNIEXPORT void JNICALL
+Java_java_lang_Shutdown_beforeHalt(JNIEnv *env, jclass ignored)
+{
+ JVM_BeforeHalt();
+}
JNIEXPORT void JNICALL
Java_java_lang_Shutdown_halt0(JNIEnv *env, jclass ignored, jint code)
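
The new native method simply forwards to JVM_BeforeHalt, the export added to symbols-unix and the libjava mapfile at the top of this patch. A plausible sketch of the HotSpot side, mirroring the EventShutdown pattern in the vmError.cpp hunk above; the real definition lives in jvm.cpp outside this section, and the exact reason string is an assumption:

    JVM_ENTRY_NO_ENV(void, JVM_BeforeHalt())
      JVMWrapper("JVM_BeforeHalt");
      EventShutdown event;
      if (event.should_commit()) {
        event.set_reason("Shutdown requested from Java");  // assumed wording
        event.commit();
      }
    JVM_END
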
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Fri Mar 09 00:28:50 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,7 @@
public static final GraalHotSpotVMConfig INJECTED_VMCONFIG = null;
private final boolean isJDK8 = System.getProperty("java.specification.version").compareTo("1.9") < 0;
+ private final int JDKVersion = isJDK8 ? 8 : Integer.parseInt(System.getProperty("java.specification.version"));
public final String osName = getHostOSName();
public final String osArch = getHostArchitectureName();
public final boolean windowsOs = System.getProperty("os.name", "").startsWith("Windows");
@@ -554,8 +555,12 @@
public final int logOfHRGrainBytes = getFieldValue("HeapRegion::LogOfHRGrainBytes", Integer.class, "int");
- public final byte dirtyCardValue = isJDK8 ? getFieldValue("CompilerToVM::Data::dirty_card", Byte.class, "int") : getConstant("CardTableModRefBS::dirty_card", Byte.class);
- public final byte g1YoungCardValue = isJDK8 ? getFieldValue("CompilerToVM::Data::g1_young_card", Byte.class, "int") : getConstant("G1SATBCardTableModRefBS::g1_young_gen", Byte.class);
+ public final byte dirtyCardValue = JDKVersion >= 11 ? getConstant("CardTable::dirty_card", Byte.class) :
+ (JDKVersion > 8 ? getConstant("CardTableModRefBS::dirty_card", Byte.class) :
+ getFieldValue("CompilerToVM::Data::dirty_card", Byte.class, "int"));
+ public final byte g1YoungCardValue = JDKVersion >= 11 ? getConstant("G1CardTable::g1_young_gen", Byte.class) :
+ (JDKVersion > 8 ? getConstant("G1SATBCardTableModRefBS::g1_young_gen", Byte.class) :
+ getFieldValue("CompilerToVM::Data::g1_young_card", Byte.class, "int"));
public final long cardtableStartAddress = getFieldValue("CompilerToVM::Data::cardtable_start_address", Long.class, "jbyte*");
public final int cardtableShift = getFieldValue("CompilerToVM::Data::cardtable_shift", Integer.class, "int");
--- a/test/TestCommon.gmk Fri Mar 09 00:28:50 2018 +0100
+++ b/test/TestCommon.gmk Fri Mar 02 21:00:12 2018 +0100
@@ -346,13 +346,14 @@
endif
# Problematic tests to be excluded
-PROBLEM_LISTS=$(call MixedDirs,$(wildcard ProblemList.txt))
+EXTRA_PROBLEM_LISTS :=
+PROBLEM_LISTS := ProblemList.txt $(EXTRA_PROBLEM_LISTS)
# Create exclude list for this platform and arch
ifdef NO_EXCLUDES
JTREG_EXCLUSIONS =
else
- JTREG_EXCLUSIONS = $(PROBLEM_LISTS:%=-exclude:%)
+ JTREG_EXCLUSIONS = $(addprefix -exclude:, $(wildcard $(PROBLEM_LISTS)))
endif
# ------------------------------------------------------------------
--- a/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/gc/g1/test_g1HeapVerifier.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,13 @@
#include "precompiled.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "logging/logConfiguration.hpp"
+#include "logging/logTestFixture.hpp"
#include "unittest.hpp"
-TEST(G1HeapVerifier, parse) {
+class G1HeapVerifierTest : public LogTestFixture {
+};
+
+TEST_F(G1HeapVerifierTest, parse) {
G1HeapVerifier verifier(NULL);
LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(gc, verify));
--- a/test/hotspot/gtest/logging/logTestFixture.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/logTestFixture.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,12 @@
#include "logTestFixture.hpp"
#include "logTestUtils.inline.hpp"
#include "logging/logConfiguration.hpp"
+#include "logging/logOutput.hpp"
#include "memory/resourceArea.hpp"
#include "unittest.hpp"
#include "utilities/ostream.hpp"
-LogTestFixture::LogTestFixture() {
+LogTestFixture::LogTestFixture() : _configuration_snapshot(NULL), _n_snapshots(0) {
// Set up TestLogFileName to include PID, testcase name and test name
int ret = jio_snprintf(_filename, sizeof(_filename), "testlog.pid%d.%s.%s.log",
os::current_process_id(),
@@ -38,10 +39,13 @@
::testing::UnitTest::GetInstance()->current_test_info()->name());
EXPECT_GT(ret, 0) << "_filename buffer issue";
TestLogFileName = _filename;
+
+ snapshot_config();
}
LogTestFixture::~LogTestFixture() {
- restore_default_log_config();
+ restore_config();
+ clear_snapshot();
delete_file(TestLogFileName);
}
@@ -61,7 +65,56 @@
return success;
}
-void LogTestFixture::restore_default_log_config() {
+void LogTestFixture::snapshot_config() {
+ clear_snapshot();
+ _n_snapshots = LogConfiguration::_n_outputs;
+ _configuration_snapshot = NEW_C_HEAP_ARRAY(char*, _n_snapshots, mtLogging);
+ for (size_t i = 0; i < _n_snapshots; i++) {
+ ResourceMark rm;
+ stringStream ss;
+ LogConfiguration::_outputs[i]->describe(&ss);
+ _configuration_snapshot[i] = os::strdup_check_oom(ss.as_string(), mtLogging);
+ }
+}
+
+void LogTestFixture::restore_config() {
LogConfiguration::disable_logging();
- set_log_config("stdout", "all=warning");
+ for (size_t i = 0; i < _n_snapshots; i++) {
+ // Restore the config based on the saved output description string.
+ // The string has the following format: '<name> <selection> <decorators>[ <options>]'
+ // Extract the different parameters by replacing the spaces with NULLs.
+ char* str = _configuration_snapshot[i];
+
+ char* name = str;
+ str = strchr(str, ' ');
+ *str++ = '\0';
+
+ char* selection = str;
+ str = strchr(str, ' ');
+ *str++ = '\0';
+
+ char* decorators = str;
+
+ char* options = NULL;
+ str = strchr(str, ' ');
+ if (str != NULL) {
+ *str++ = '\0';
+ options = str;
+ }
+
+ set_log_config(name, selection, decorators, options != NULL ? options : "");
+ }
}
+
+void LogTestFixture::clear_snapshot() {
+ if (_configuration_snapshot == NULL) {
+ return;
+ }
+ assert(_n_snapshots > 0, "non-null array should have at least 1 element");
+ for (size_t i = 0; i < _n_snapshots; i++) {
+ os::free(_configuration_snapshot[i]);
+ }
+ FREE_C_HEAP_ARRAY(char*, _configuration_snapshot);
+ _configuration_snapshot = NULL;
+ _n_snapshots = 0;
+}
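
restore_config() re-parses the description strings captured by snapshot_config(), slicing each one in place by overwriting separators with NULs. A standalone illustration of that scheme (plain C++, not HotSpot code), using the format documented in the comment above:

    #include <cstdio>
    #include <cstring>

    int main() {
      char desc[] = "stdout all=warning uptime,level,tags";
      char* name = desc;                         // "<name> ..."
      char* selection = strchr(desc, ' ');
      *selection++ = '\0';                       // terminate name
      char* decorators = strchr(selection, ' ');
      *decorators++ = '\0';                      // terminate selection
      // Prints: name=stdout selection=all=warning decorators=uptime,level,tags
      printf("name=%s selection=%s decorators=%s\n", name, selection, decorators);
      return 0;
    }
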
--- a/test/hotspot/gtest/logging/logTestFixture.hpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/logTestFixture.hpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,8 @@
class LogTestFixture : public testing::Test {
private:
char _filename[2 * K];
+ size_t _n_snapshots;
+ char** _configuration_snapshot;
protected:
const char* TestLogFileName;
@@ -45,6 +47,8 @@
const char* options = "",
bool allow_failure = false);
- static void restore_default_log_config();
+ void snapshot_config();
+ void restore_config();
+ void clear_snapshot();
};
--- a/test/hotspot/gtest/logging/test_logConfiguration.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/test_logConfiguration.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -113,7 +113,7 @@
EXPECT_FALSE(is_described(TestLogFileName)) << "Test output already exists!";
set_log_config(TestLogFileName, what);
EXPECT_TRUE(is_described(TestLogFileName));
- EXPECT_TRUE(is_described("logging=trace"));
+ EXPECT_TRUE(is_described("all=trace"));
}
// Test updating an existing log output
@@ -125,7 +125,7 @@
// Verify configuration using LogConfiguration::describe
EXPECT_TRUE(is_described("#0: stdout"));
- EXPECT_TRUE(is_described("logging=info"));
+ EXPECT_TRUE(is_described("all=info"));
// Verify by iterating over tagsets
LogOutput* o = &StdoutLog;
@@ -154,7 +154,7 @@
// Verify new output using LogConfiguration::describe
EXPECT_TRUE(is_described(TestLogFileName));
- EXPECT_TRUE(is_described("logging=trace"));
+ EXPECT_TRUE(is_described("all=trace"));
// Also verify by iterating over tagsets, checking levels on tagsets
for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
@@ -225,7 +225,7 @@
// Now reconfigure logging on stderr with no decorators
set_log_config("stderr", "all=off", "none");
- EXPECT_TRUE(is_described("#1: stderr all=off \n")) << "Expecting no decorators";
+ EXPECT_TRUE(is_described("#1: stderr all=off none (reconfigured)\n")) << "Expecting no decorators";
}
// Test that invalid options cause configuration errors
@@ -265,7 +265,7 @@
bool success = LogConfiguration::parse_command_line_arguments(buf);
EXPECT_TRUE(success) << "Error parsing valid command line arguments '" << buf << "'";
// Ensure the new configuration applied
- EXPECT_TRUE(is_described("logging=debug"));
+ EXPECT_TRUE(is_described("logging*=debug"));
EXPECT_TRUE(is_described(_all_decorators));
// Test the configuration of file outputs as well
@@ -383,7 +383,7 @@
bool success = LogConfiguration::parse_log_arguments("stdout", invalid_tagset, NULL, NULL, &ss);
const char* msg = ss.as_string();
EXPECT_TRUE(success) << "Should only cause a warning, not an error";
- EXPECT_TRUE(string_contains_substring(msg, "No tag set matches selection(s):"));
+ EXPECT_TRUE(string_contains_substring(msg, "No tag set matches selection:"));
EXPECT_TRUE(string_contains_substring(msg, invalid_tagset));
}
@@ -413,3 +413,48 @@
ASSERT_NE(-1, ret);
delete_file(buf);
}
+
+static size_t count_occurrences(const char* haystack, const char* needle) {
+ size_t count = 0;
+ for (const char* p = strstr(haystack, needle); p != NULL; p = strstr(p + 1, needle)) {
+ count++;
+ }
+ return count;
+}
+
+TEST_OTHER_VM(LogConfiguration, output_reconfigured) {
+ ResourceMark rm;
+ stringStream ss;
+
+ EXPECT_FALSE(is_described("(reconfigured)"));
+
+ bool success = LogConfiguration::parse_log_arguments("#1", "all=warning", NULL, NULL, &ss);
+ ASSERT_TRUE(success);
+ EXPECT_EQ(0u, ss.size());
+
+ LogConfiguration::describe(&ss);
+ EXPECT_EQ(1u, count_occurrences(ss.as_string(), "(reconfigured)"));
+
+ ss.reset();
+ LogConfiguration::configure_stdout(LogLevel::Info, false, LOG_TAGS(logging));
+ LogConfiguration::describe(&ss);
+ EXPECT_EQ(2u, count_occurrences(ss.as_string(), "(reconfigured)"));
+}
+
+TEST_VM_F(LogConfigurationTest, suggest_similar_selection) {
+ static const char* nonexisting_tagset = "logging+start+exit+safepoint+gc";
+
+ ResourceMark rm;
+ stringStream ss;
+ LogConfiguration::parse_log_arguments("stdout", nonexisting_tagset, NULL, NULL, &ss);
+
+ const char* suggestion = ss.as_string();
+ SCOPED_TRACE(suggestion);
+ EXPECT_TRUE(string_contains_substring(ss.as_string(), "Did you mean any of the following?"));
+ EXPECT_TRUE(string_contains_substring(suggestion, "logging") ||
+ string_contains_substring(suggestion, "start") ||
+ string_contains_substring(suggestion, "exit") ||
+ string_contains_substring(suggestion, "safepoint") ||
+ string_contains_substring(suggestion, "gc")) <<
+ "suggestion must contain AT LEAST one of the tags in user supplied selection";
+}
--- a/test/hotspot/gtest/logging/test_logLevel.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/test_logLevel.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,18 @@
EXPECT_EQ(LogLevel::Invalid, LogLevel::from_string("infodebugwarning"));
}
+TEST(LogLevel, fuzzy_match) {
+ for (size_t i = 1; i < LogLevel::Count; i++) {
+ LogLevelType level = static_cast<LogLevelType>(i);
+ ASSERT_EQ(level, LogLevel::fuzzy_match(LogLevel::name(level)));
+ }
+
+ ASSERT_EQ(LogLevel::Warning, LogLevel::fuzzy_match("warn"));
+ ASSERT_EQ(LogLevel::Error, LogLevel::fuzzy_match("err"));
+
+ ASSERT_EQ(LogLevel::Invalid, LogLevel::fuzzy_match("unknown"));
+}
+
TEST(LogLevel, name) {
// Use names from macro as reference
#define LOG_LEVEL(lname, lstring) \
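
These tests exercise the suggestion machinery built on the new StringUtils::similarity. A plausible sketch of the matcher under test; the real definition lives in logLevel.cpp outside this patch, and the minimum-score threshold here is an assumption:

    // Hedged sketch: return the level whose name scores highest against
    // the input, if any clears a required minimum similarity.
    LogLevelType LogLevel::fuzzy_match(const char* level) {
      size_t len = strlen(level);
      LogLevelType match = LogLevel::Invalid;
      double best = 0.4;  // assumed minimum required score
      for (uint i = 1; i < Count; i++) {
        LogLevelType cur = static_cast<LogLevelType>(i);
        const char* name = LogLevel::name(cur);
        double score = StringUtils::similarity(level, len, name, strlen(name));
        if (score >= best) {
          match = cur;
          best = score;
        }
      }
      return match;
    }
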
--- a/test/hotspot/gtest/logging/test_logSelection.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/test_logSelection.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -184,6 +184,20 @@
EXPECT_NE(selection, fewer_tags);
}
+TEST(LogSelection, consists_of) {
+ LogTagType tags[LogTag::MaxTags] = {
+ PREFIX_LOG_TAG(logging), PREFIX_LOG_TAG(test), PREFIX_LOG_TAG(_NO_TAG)
+ };
+ LogSelection s(tags, false, LogLevel::Off);
+ EXPECT_TRUE(s.consists_of(tags));
+
+ tags[2] = PREFIX_LOG_TAG(safepoint);
+ EXPECT_FALSE(s.consists_of(tags));
+
+ s = LogSelection(tags, true, LogLevel::Info);
+ EXPECT_TRUE(s.consists_of(tags));
+}
+
TEST(LogSelection, describe_tags) {
char buf[256];
LogTagType tags[LogTag::MaxTags] = { PREFIX_LOG_TAG(logging), PREFIX_LOG_TAG(test), PREFIX_LOG_TAG(_NO_TAG) };
--- a/test/hotspot/gtest/logging/test_logTag.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/test_logTag.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -45,6 +45,18 @@
}
}
+TEST(LogTag, fuzzy_match) {
+ for (size_t i = 1; i < LogTag::Count; i++) {
+ LogTagType tag = static_cast<LogTagType>(i);
+ EXPECT_EQ(tag, LogTag::fuzzy_match(LogTag::name(tag)));
+ }
+
+ EXPECT_EQ(LogTag::_logging, LogTag::fuzzy_match("loggin"));
+ EXPECT_EQ(LogTag::_logging, LogTag::fuzzy_match("loging"));
+
+ EXPECT_EQ(LogTag::__NO_TAG, LogTag::fuzzy_match("unrecognizabletag"));
+}
+
TEST(LogTag, name) {
// Verify for each tag from the macro
#define LOG_TAG(tag) \
--- a/test/hotspot/gtest/logging/test_logTagSetDescriptions.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/logging/test_logTagSetDescriptions.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,8 @@
const char* filename = "logtagset_descriptions";
FILE* fp = fopen(filename, "w+");
ASSERT_NE((void*)NULL, fp);
- LogConfiguration::print_command_line_help(fp);
+ fileStream stream(fp);
+ LogConfiguration::print_command_line_help(&stream);
fclose(fp);
for (LogTagSetDescription* d = tagset_descriptions; d->tagset != NULL; d++) {
--- a/test/hotspot/gtest/memory/test_guardedMemory.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/memory/test_guardedMemory.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
#include "runtime/os.hpp"
#include "unittest.hpp"
+#define GEN_PURPOSE_TAG ((void *) ((uintptr_t)0xf000f000))
+
static void guarded_memory_test_check(void* p, size_t sz, void* tag) {
ASSERT_TRUE(p != NULL) << "NULL pointer given to check";
u_char* c = (u_char*) p;
@@ -60,7 +62,7 @@
TEST(GuardedMemory, basic) {
u_char* basep =
(u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
- GuardedMemory guarded(basep, 1, (void*) 0xf000f000);
+ GuardedMemory guarded(basep, 1, GEN_PURPOSE_TAG);
EXPECT_EQ(badResourceValue, *basep)
<< "Expected guard in the form of badResourceValue";
@@ -68,7 +70,7 @@
u_char* userp = guarded.get_user_ptr();
EXPECT_EQ(uninitBlockPad, *userp)
<< "Expected uninitialized data in the form of uninitBlockPad";
- guarded_memory_test_check(userp, 1, (void*) 0xf000f000);
+ guarded_memory_test_check(userp, 1, GEN_PURPOSE_TAG);
void* freep = guarded.release_for_freeing();
EXPECT_EQ((u_char*) freep, basep) << "Expected the same pointer guard was ";
@@ -81,7 +83,7 @@
TEST(GuardedMemory, odd_sizes) {
u_char* basep =
(u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
- GuardedMemory guarded(basep, 1, (void*) 0xf000f000);
+ GuardedMemory guarded(basep, 1, GEN_PURPOSE_TAG);
size_t sz = 0;
do {
@@ -102,7 +104,7 @@
TEST(GuardedMemory, buffer_overrun_head) {
u_char* basep =
(u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
- GuardedMemory guarded(basep, 1, (void*) 0xf000f000);
+ GuardedMemory guarded(basep, 1, GEN_PURPOSE_TAG);
guarded.wrap_with_guards(basep, 1);
*basep = 0;
@@ -114,7 +116,7 @@
TEST(GuardedMemory, buffer_overrun_tail) {
u_char* basep =
(u_char*) os::malloc(GuardedMemory::get_total_size(1), mtInternal);
- GuardedMemory guarded(basep, 1, (void*) 0xf000f000);
+ GuardedMemory guarded(basep, 1, GEN_PURPOSE_TAG);
size_t sz = 1;
do {
--- a/test/hotspot/gtest/runtime/test_os.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/runtime/test_os.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,9 @@
*/
#include "precompiled.hpp"
+#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
+#include "utilities/ostream.hpp"
#include "unittest.hpp"
static size_t small_page_size() {
@@ -150,3 +152,118 @@
os::page_size_for_region_aligned(region_size, 0); // should assert
}
#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// Test os::vsnprintf and friends.
+
+static void check_snprintf_result(int expected, size_t limit, int actual, bool expect_count) {
+ if (expect_count || ((size_t)expected < limit)) {
+ ASSERT_EQ(expected, actual);
+ } else {
+ ASSERT_GT(0, actual);
+ }
+}
+
+// PrintFn is expected to be int (*)(char*, size_t, const char*, ...).
+// But jio_snprintf is a C-linkage function with that signature, which
+// has a different type on some platforms (like Solaris).
+template<typename PrintFn>
+static void test_snprintf(PrintFn pf, bool expect_count) {
+ const char expected[] = "abcdefghijklmnopqrstuvwxyz";
+ const int expected_len = sizeof(expected) - 1;
+ const size_t padding_size = 10;
+ char buffer[2 * (sizeof(expected) + padding_size)];
+ char check_buffer[sizeof(buffer)];
+ const char check_char = '1'; // Something not in expected.
+ memset(check_buffer, check_char, sizeof(check_buffer));
+ const size_t sizes_to_test[] = {
+ sizeof(buffer) - padding_size, // Fits, with plenty of space to spare.
+ sizeof(buffer)/2, // Fits, with space to spare.
+ sizeof(buffer)/4, // Doesn't fit.
+ sizeof(expected) + padding_size + 1, // Fits, with a little room to spare
+ sizeof(expected) + padding_size, // Fits exactly.
+ sizeof(expected) + padding_size - 1, // Doesn't quite fit.
+ 2, // One char + terminating NUL.
+ 1, // Only space for terminating NUL.
+ 0 }; // No space at all.
+ for (unsigned i = 0; i < ARRAY_SIZE(sizes_to_test); ++i) {
+ memset(buffer, check_char, sizeof(buffer)); // To catch stray writes.
+ size_t test_size = sizes_to_test[i];
+ ResourceMark rm;
+ stringStream s;
+ s.print("test_size: " SIZE_FORMAT, test_size);
+ SCOPED_TRACE(s.as_string());
+ size_t prefix_size = padding_size;
+ guarantee(test_size <= (sizeof(buffer) - prefix_size), "invariant");
+ size_t write_size = MIN2(sizeof(expected), test_size);
+ size_t suffix_size = sizeof(buffer) - prefix_size - write_size;
+ char* write_start = buffer + prefix_size;
+ char* write_end = write_start + write_size;
+
+ int result = pf(write_start, test_size, "%s", expected);
+
+ check_snprintf_result(expected_len, test_size, result, expect_count);
+
+ // Verify expected output.
+ if (test_size > 0) {
+ ASSERT_EQ(0, strncmp(write_start, expected, write_size - 1));
+ // Verify terminating NUL of output.
+ ASSERT_EQ('\0', write_start[write_size - 1]);
+ } else {
+ guarantee(test_size == 0, "invariant");
+ guarantee(write_size == 0, "invariant");
+ guarantee(prefix_size + suffix_size == sizeof(buffer), "invariant");
+ guarantee(write_start == write_end, "invariant");
+ }
+
+ // Verify no scribbling on prefix or suffix.
+ ASSERT_EQ(0, strncmp(buffer, check_buffer, prefix_size));
+ ASSERT_EQ(0, strncmp(write_end, check_buffer, suffix_size));
+ }
+
+ // Special case of 0-length buffer with empty (except for terminator) output.
+ check_snprintf_result(0, 0, pf(NULL, 0, "%s", ""), expect_count);
+ check_snprintf_result(0, 0, pf(NULL, 0, ""), expect_count);
+}
+
+// This is probably equivalent to os::snprintf, but we're being
+// explicit about what we're testing here.
+static int vsnprintf_wrapper(char* buf, size_t len, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ int result = os::vsnprintf(buf, len, fmt, args);
+ va_end(args);
+ return result;
+}
+
+TEST(os, vsnprintf) {
+ test_snprintf(vsnprintf_wrapper, true);
+}
+
+TEST(os, snprintf) {
+ test_snprintf(os::snprintf, true);
+}
+
+// These are declared in jvm.h; test here, with related functions.
+extern "C" {
+int jio_vsnprintf(char*, size_t, const char*, va_list);
+int jio_snprintf(char*, size_t, const char*, ...);
+}
+
+// This is probably equivalent to jio_snprintf, but we're being
+// explicit about what we're testing here.
+static int jio_vsnprintf_wrapper(char* buf, size_t len, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ int result = jio_vsnprintf(buf, len, fmt, args);
+ va_end(args);
+ return result;
+}
+
+TEST(os, jio_vsnprintf) {
+ test_snprintf(jio_vsnprintf_wrapper, false);
+}
+
+TEST(os, jio_snprintf) {
+ test_snprintf(jio_snprintf, false);
+}
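
The expect_count flag captures the behavioral split these tests pin down: os::snprintf and os::vsnprintf return the full would-be length even when output is truncated, while the jio_* variants return a negative value on truncation; all of them NUL-terminate whenever len > 0. In short:

    // Hedged summary of the contracts asserted above:
    char buf[4];
    int a = os::snprintf(buf, sizeof(buf), "%s", "abcdef");
    // a == 6 (untruncated length), buf == "abc", always terminated
    int b = jio_snprintf(buf, sizeof(buf), "%s", "abcdef");
    // b < 0 signals truncation, buf == "abc", still terminated
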
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/runtime/test_threadstack_tracking.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+// Included early because the NMT flags don't include it.
+#include "utilities/macros.hpp"
+
+#include "runtime/thread.hpp"
+#include "services/memTracker.hpp"
+#include "services/virtualMemoryTracker.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "unittest.hpp"
+
+
+class ThreadStackTrackingTest {
+public:
+ static void test() {
+ VirtualMemoryTracker::initialize(NMT_detail);
+ VirtualMemoryTracker::late_initialize(NMT_detail);
+
+ Thread* thr = Thread::current();
+ address stack_end = thr->stack_end();
+ size_t stack_size = thr->stack_size();
+
+ MemTracker::record_thread_stack(stack_end, stack_size);
+
+ VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
+
+ // snapshot current stack usage
+ VirtualMemoryTracker::snapshot_thread_stacks();
+
+ ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
+ ASSERT_TRUE(rmr != NULL);
+
+ ASSERT_EQ(rmr->base(), stack_end);
+ ASSERT_EQ(rmr->size(), stack_size);
+
+ CommittedRegionIterator iter = rmr->iterate_committed_regions();
+ int i = 0;
+ address i_addr = (address)&i;
+
+ // stack grows downward
+ address stack_top = stack_end + stack_size;
+ bool found_stack_top = false;
+
+ for (const CommittedMemoryRegion* region = iter.next(); region != NULL; region = iter.next()) {
+ if (region->base() + region->size() == stack_top) {
+ // This should be the active part; "i" should be in this region
+ ASSERT_TRUE(i_addr < stack_top && i_addr >= region->base());
+ ASSERT_TRUE(region->size() <= stack_size);
+ found_stack_top = true;
+ }
+
+ i++;
+ }
+
+ // NMT was not turned on when the thread was created, so we don't have guard pages
+ ASSERT_TRUE(i == 1);
+ ASSERT_TRUE(found_stack_top);
+ }
+};
+
+TEST_VM(VirtualMemoryTracker, thread_stack_tracking) {
+ ThreadStackTrackingTest::test();
+}
--- a/test/hotspot/gtest/utilities/test_align.cpp Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/gtest/utilities/test_align.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
#include <limits>
// A few arbitrarily chosen values to test the align functions on.
-static uint64_t values[] = {1, 3, 10, 345, 1023, 1024, 1025, 23909034, INT_MAX, uint64_t(-1) / 2, uint64_t(-1) / 2 + 100, -1 };
+static uint64_t values[] = {1, 3, 10, 345, 1023, 1024, 1025, 23909034, INT_MAX, uint64_t(-1) / 2, uint64_t(-1) / 2 + 100, uint64_t(-1)};
template <typename T>
static T max_alignment() {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/utilities/test_stringUtils.cpp Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/stringUtils.hpp"
+#include "unittest.hpp"
+
+TEST(StringUtils, similarity) {
+ const char* str1 = "the quick brown fox jumps over the lazy dog";
+ const char* str2 = "the quick brown fox jumps over the lazy doh";
+ EXPECT_NEAR(0.95349, StringUtils::similarity(str1, strlen(str1), str2, strlen(str2)), 1e-5);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/ProblemList-graal.txt Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,81 @@
+#
+# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+#############################################################################
+#
+# List of quarantined tests for testing in Graal JIT mode.
+#
+#############################################################################
+
+compiler/c2/cr7200264/TestSSE2IntVect.java 8194958 generic-all
+compiler/c2/cr7200264/TestSSE4IntVect.java 8194958 generic-all
+
+compiler/ciReplay/TestServerVM.java 8181747 generic-all
+compiler/ciReplay/TestVMNoCompLevel.java 8181747 generic-all
+
+compiler/compilercontrol/commandfile/LogTest.java 8181753 generic-all
+compiler/compilercontrol/commands/LogTest.java 8181753 generic-all
+compiler/compilercontrol/directives/ExcludeTest.java 8181753 generic-all
+compiler/compilercontrol/jcmd/AddExcludeTest.java 8181753 generic-all
+compiler/compilercontrol/jcmd/AddLogTest.java 8181753 generic-all
+compiler/compilercontrol/mixed/RandomValidCommandsTest.java 8181753 generic-all
+
+compiler/intrinsics/mathexact/LongMulOverflowTest.java 8196568 generic-all
+
+compiler/jvmci/SecurityRestrictionsTest.java 8181837 generic-all
+
+compiler/jvmci/TestValidateModules.java 8194942 generic-all
+gc/arguments/TestVerifyBeforeAndAfterGCFlags.java 8194942 generic-all
+
+compiler/rangechecks/TestRangeCheckSmearing.java 8195632 generic-all
+compiler/uncommontrap/Test8009761.java 8195632 generic-all
+compiler/whitebox/ForceNMethodSweepTest.java 8195632 generic-all
+
+compiler/unsafe/UnsafeGetConstantField.java 8181833 generic-all
+compiler/unsafe/UnsafeGetStableArrayElement.java 8181833 generic-all
+compiler/unsafe/UnsafeOffHeapBooleanTest.java 8181833 generic-all
+compiler/unsafe/UnsafeOnHeapBooleanTest.java 8181833 generic-all
+
+compiler/whitebox/ClearMethodStateTest.java 8181831 generic-all
+compiler/whitebox/EnqueueMethodForCompilationTest.java 8181831 generic-all
+compiler/whitebox/MakeMethodNotCompilableTest.java 8181831 generic-all
+
+gc/arguments/TestNewSizeFlags.java 8196611 generic-all
+gc/g1/TestConcurrentSystemGC.java 8196611 generic-all
+
+gc/g1/ihop/TestIHOPErgo.java 8191048 generic-all
+gc/g1/plab/TestPLABEvacuationFailure.java 8191048 generic-all
+gc/g1/plab/TestPLABPromotion.java 8191048 generic-all
+gc/g1/plab/TestPLABResize.java 8191048 generic-all
+
+gc/TestNUMAPageSize.java 8194949 generic-all
+
+runtime/appcds/UseAppCDS.java 8196626 generic-all
+
+runtime/ReservedStack/ReservedStackTestCompiler.java 8181855 generic-all
+
+serviceability/jvmti/GetModulesInfo/JvmtiGetAllModulesTest.java 8195156 generic-all
+
+runtime/Metaspace/DefineClass.java 8197442 generic-all
+
+compiler/compilercontrol/directives/LogTest.java 8197446 generic-all
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/interpreter/TestVerifyStackAfterDeopt.java Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+
+/*
+ * @test TestVerifyStackAfterDeopt
+ * @bug 8148871
+ * @summary Checks VerifyStack after deoptimization of array allocation slow call
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TieredStopAtLevel=1
+ * -XX:+DeoptimizeALot -XX:+VerifyStack
+ * compiler.interpreter.TestVerifyStackAfterDeopt
+ */
+
+package compiler.interpreter;
+
+public class TestVerifyStackAfterDeopt {
+
+ private void method(Object[] a) {
+
+ }
+
+ private void test() {
+ // For the array allocation, C1 emits a slow call into the runtime
+ // that deoptimizes the caller frame due to -XX:+DeoptimizeALot.
+ // The VerifyStack code then gets confused because the following
+ // bytecode instruction is an invoke and the interpreter oop map
+ // generator reports the oop map after execution of that invoke.
+ method(new Object[0]);
+ }
+
+ public static void main(String[] args) {
+ TestVerifyStackAfterDeopt t = new TestVerifyStackAfterDeopt();
+ // Run long enough for C1 compilation to trigger and TLAB to fill up
+ for (int i = 0; i < 100_000; ++i) {
+ t.test();
+ }
+ }
+}
--- a/test/hotspot/jtreg/compiler/rangechecks/TestRangeCheckSmearing.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/jtreg/compiler/rangechecks/TestRangeCheckSmearing.java Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -ea -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+EagerJVMCI
* compiler.rangechecks.TestRangeCheckSmearing
*
*/
--- a/test/hotspot/jtreg/compiler/uncommontrap/Test8009761.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/jtreg/compiler/uncommontrap/Test8009761.java Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -Xss512K
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+EagerJVMCI
* -XX:CompileCommand=exclude,compiler.uncommontrap.Test8009761::m2
* compiler.uncommontrap.Test8009761
*/
--- a/test/hotspot/jtreg/compiler/whitebox/ForceNMethodSweepTest.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/jtreg/compiler/whitebox/ForceNMethodSweepTest.java Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
* -XX:-TieredCompilation -XX:+WhiteBoxAPI
* -XX:CompileCommand=compileonly,compiler.whitebox.SimpleTestCaseHelper::*
* -XX:-BackgroundCompilation -XX:-UseCounterDecay
+ * -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+EagerJVMCI
* compiler.whitebox.ForceNMethodSweepTest
*/
--- a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java Fri Mar 02 21:00:12 2018 +0100
@@ -48,6 +48,9 @@
{"UseMembar", "true"},
{"CompilerThreadHintNoPreempt", "true"},
{"VMThreadHintNoPreempt", "false"},
+ {"PrintSafepointStatistics", "false"},
+ {"PrintSafepointStatisticsCount", "3"},
+ {"PrintSafepointStatisticsTimeout", "3"},
// deprecated alias flags (see also aliased_jvm_flags):
{"DefaultMaxRAMFraction", "4"},
--- a/test/hotspot/jtreg/runtime/appcds/ProhibitedPackage.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/jtreg/runtime/appcds/ProhibitedPackage.java Fri Mar 02 21:00:12 2018 +0100
@@ -34,6 +34,7 @@
* @run main ProhibitedPackage
*/
+import jdk.test.lib.cds.CDSOptions;
import jdk.test.lib.Platform;
import jdk.test.lib.process.OutputAnalyzer;
@@ -85,7 +86,8 @@
output = TestCommon.execAuto(
"-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
"-cp", appJar, "-Xlog:class+load=info", "ProhibitedHelper");
- TestCommon.checkExec(output, "Prohibited package name: java.lang");
+ CDSOptions opts = (new CDSOptions()).setXShareMode("auto");
+ TestCommon.checkExec(output, opts, "Prohibited package name: java.lang");
// -Xshare:off
output = TestCommon.execOff(
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/containers/cgroup/PlainRead.java Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test PlainRead
+ * @requires os.family == "linux"
+ * @library /testlibrary /test/lib
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI PlainRead
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.Platform;
+import sun.hotspot.WhiteBox;
+
+public class PlainRead {
+
+ static public void match(OutputAnalyzer oa, String what, String value) {
+ oa.shouldMatch("^.*" + what + " *" + value + ".*$");
+ }
+
+ static public void noMatch(OutputAnalyzer oa, String what, String value) {
+ oa.shouldNotMatch("^.*" + what + " *" + value + ".*$");
+ }
+
+ static final String good_value = "(\\d+|-1|Unlimited)";
+ static final String bad_value = "(failed)";
+
+ static final String[] variables = {"Memory Limit is:", "CPU Shares is:", "CPU Quota is:", "CPU Period is:", "active_processor_count:"};
+
+ static public void isContainer(OutputAnalyzer oa) {
+ for (String v: variables) {
+ match(oa, v, good_value);
+ }
+ for (String v: variables) {
+ noMatch(oa, v, bad_value);
+ }
+ }
+
+ static public void isNotContainer(OutputAnalyzer oa) {
+ oa.shouldMatch("^.*Can't open /proc/self/mountinfo.*$");
+ }
+
+ public static void main(String[] args) throws Exception {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:os+container=trace", "-version");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+ if (wb.isContainerized()) {
+ System.out.println("Inside a cgroup, testing...");
+ isContainer(output);
+ } else {
+ System.out.println("Not in a cgroup, testing...");
+ isNotContainer(output);
+ }
+ }
+}
--- a/test/hotspot/jtreg/serviceability/logging/TestMultipleXlogArgs.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/hotspot/jtreg/serviceability/logging/TestMultipleXlogArgs.java Fri Mar 02 21:00:12 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,18 +37,18 @@
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:logging=debug",
"-Xlog:logging=trace",
"-Xlog:defaultmethods=trace",
- "-Xlog:defaultmethods=off",
+ "-Xlog:defaultmethods=warning",
"-Xlog:safepoint=info",
"-Xlog:safepoint=info",
"-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
// -Xlog:logging=trace means that the log configuration will be printed.
- String stdoutConfigLine = "\\[logging *\\] #0: stdout .*";
+ String stdoutConfigLine = "\\[logging *\\] #0: stdout .*";
// Ensure logging=trace has overwritten logging=debug
output.shouldMatch(stdoutConfigLine + "logging=trace").shouldNotMatch(stdoutConfigLine + "logging=debug");
// Make sure safepoint=info is printed exactly once even though we're setting it twice
output.shouldMatch(stdoutConfigLine + "safepoint=info").shouldNotMatch(stdoutConfigLine + "safepoint=info.*safepoint=info");
- // Shouldn't see defaultmethods at all, because disabled tags are not listed
+ // Shouldn't see defaultmethods at all, because it should be covered by the initial 'all=warning' config
output.shouldNotMatch(stdoutConfigLine + "defaultmethods");
output.shouldHaveExitValue(0);
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/ProblemList-graal.txt Fri Mar 02 21:00:12 2018 +0100
@@ -0,0 +1,67 @@
+#
+# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+#############################################################################
+#
+# List of quarantined tests for testing in Graal JIT mode.
+#
+#############################################################################
+
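+# Each line names a test, the id of the bug that tracks the failure, and the
+# platforms on which the test is excluded.
+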
+java/lang/Class/getDeclaredField/ClassDeclaredFieldsTest.java 8185139 generic-all
+java/lang/Class/getDeclaredField/FieldSetAccessibleTest.java 8185139 generic-all
+java/lang/ProcessBuilder/SecurityManagerClinit.java 8185139 generic-all
+java/lang/reflect/Proxy/nonPublicProxy/NonPublicProxyClass.java 8185139 generic-all
+java/lang/StackWalker/CallerSensitiveMethod/Main.java 8185139 generic-all
+java/lang/StackWalker/GetCallerClassTest.java 8185139 generic-all
+java/lang/String/concat/WithSecurityManager.java 8185139 generic-all
+java/lang/System/Logger/custom/CustomLoggerTest.java 8185139 generic-all
+java/lang/System/Logger/default/DefaultLoggerTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/BaseLoggerFinderTest/BaseLoggerFinderTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/DefaultLoggerFinderTest/DefaultLoggerFinderTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/BaseDefaultLoggerFinderTest/BaseDefaultLoggerFinderTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/BaseLoggerBridgeTest/BaseLoggerBridgeTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/BasePlatformLoggerTest/BasePlatformLoggerTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/BootstrapLogger/BootstrapLoggerTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/LoggerBridgeTest/LoggerBridgeTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/LoggerFinderLoaderTest/LoggerFinderLoaderTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/internal/PlatformLoggerBridgeTest/PlatformLoggerBridgeTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/jdk/DefaultLoggerBridgeTest/DefaultLoggerBridgeTest.java 8185139 generic-all
+java/lang/System/LoggerFinder/LoggerFinderAPI/LoggerFinderAPI.java 8185139 generic-all
+java/util/concurrent/atomic/AtomicUpdaters.java 8185139 generic-all
+java/util/concurrent/Executors/PrivilegedCallables.java 8185139 generic-all
+java/util/logging/FileHandlerPath.java 8185139 generic-all
+java/util/logging/FileHandlerPatternExceptions.java 8185139 generic-all
+java/util/logging/Logger/setResourceBundle/TestSetResourceBundle.java 8185139 generic-all
+java/util/logging/LogManager/Configuration/updateConfiguration/HandlersOnComplexResetUpdate.java 8185139 generic-all
+java/util/logging/LogManager/Configuration/updateConfiguration/HandlersOnComplexUpdate.java 8185139 generic-all
+java/util/logging/LogManager/Configuration/updateConfiguration/SimpleUpdateConfigurationTest.java 8185139 generic-all
+java/util/logging/LogManager/Configuration/updateConfiguration/SimpleUpdateConfigWithInputStreamTest.java 8185139 generic-all
+java/util/logging/LogManager/RootLogger/setLevel/TestRootLoggerLevel.java 8185139 generic-all
+java/util/logging/RootLogger/RootLevelInConfigFile.java 8185139 generic-all
+java/util/logging/TestAppletLoggerContext.java 8185139 generic-all
+java/util/logging/TestConfigurationListeners.java 8185139 generic-all
+
+java/util/concurrent/tck/JSR166TestCase.java 8187486 generic-all
+
+java/lang/ref/OOMEInReferenceHandler.java 8196611 generic-all
+java/lang/Runtime/exec/LotsOfOutput.java 8196611 generic-all
--- a/test/jdk/java/util/Arrays/TimSortStackSize2.java Fri Mar 09 00:28:50 2018 +0100
+++ b/test/jdk/java/util/Arrays/TimSortStackSize2.java Fri Mar 02 21:00:12 2018 +0100
@@ -29,7 +29,8 @@
* java.base/jdk.internal
* @build jdk.testlibrary.*
* @build TimSortStackSize2
- * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI TimSortStackSize2
* @summary Test TimSort stack size on big arrays
@@ -63,13 +64,14 @@
try {
Boolean compressedOops = WhiteBox.getWhiteBox()
.getBooleanVMFlag("UseCompressedOops");
- final String xmsValue = "-Xms" +
- ((compressedOops == null || compressedOops) ? "385" : "770")
- + "m";
- System.out.println( "compressedOops: " + compressedOops
- + "; Test will be started with \"" + xmsValue + "\"");
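+            // Pin the maximum heap to the same value as the initial heap so
+            // the child JVM runs with a fixed heap size.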
+ long memory = (compressedOops == null || compressedOops) ? 385 : 770;
+ final String xmsValue = "-Xms" + memory + "m";
+ final String xmxValue = "-Xmx" + memory + "m";
+
+ System.out.printf("compressedOops: %s; Test will be started with \"%s %s\"%n",
+ compressedOops, xmsValue, xmxValue);
ProcessBuilder processBuilder = ProcessTools
- .createJavaProcessBuilder(Utils.addTestJavaOpts(xmsValue,
+ .createJavaProcessBuilder(Utils.addTestJavaOpts(xmsValue, xmxValue,
"TimSortStackSize2", "67108864"
)
);